repo stringlengths 6 47 | file_url stringlengths 77 269 | file_path stringlengths 5 186 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-07 08:35:43 2026-01-07 08:55:24 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/helpers/serializer.go | internal/helpers/serializer.go | package helpers
import "sync"
// Serializer imposes a sequential ordering on otherwise-parallel work.
// Each call to "Enter(i)" blocks until "Leave(i-1)" has been called, so
// stage i never begins before stage i-1 has finished.
type Serializer struct {
	flags []sync.WaitGroup
}

// MakeSerializer creates a Serializer for "count" sequential stages.
func MakeSerializer(count int) Serializer {
	gates := make([]sync.WaitGroup, count)
	for i := range gates {
		gates[i].Add(1)
	}
	return Serializer{flags: gates}
}

// Enter blocks until stage "i-1" has called Leave. Stage 0 never blocks.
func (s *Serializer) Enter(i int) {
	if i == 0 {
		return
	}
	s.flags[i-1].Wait()
}

// Leave marks stage "i" as finished, unblocking a pending Enter(i+1).
func (s *Serializer) Leave(i int) {
	s.flags[i].Done()
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/helpers/dataurl.go | internal/helpers/dataurl.go | package helpers
import (
"encoding/base64"
"fmt"
"strings"
"unicode/utf8"
)
// Returns the shorter of either a base64-encoded or percent-escaped data URL
// for the given MIME type and text. If the text can't be percent-escaped
// (e.g. it contains invalid UTF-8), the base64 form is returned.
func EncodeStringAsShortestDataURL(mimeType string, text string) string {
	base64URL := fmt.Sprintf("data:%s;base64,%s", mimeType, base64.StdEncoding.EncodeToString([]byte(text)))
	percentURL, ok := EncodeStringAsPercentEscapedDataURL(mimeType, text)
	if ok && len(percentURL) < len(base64URL) {
		return percentURL
	}
	return base64URL
}
// See "scripts/dataurl-escapes.html" for how this was derived
//
// Encodes "text" as a percent-escaped data URL with the given MIME type.
// Returns false when "text" contains invalid UTF-8, since that cannot be
// represented here (the caller then falls back to base64 encoding).
func EncodeStringAsPercentEscapedDataURL(mimeType string, text string) (string, bool) {
	hex := "0123456789ABCDEF"
	sb := strings.Builder{}
	n := len(text)
	i := 0
	runStart := 0 // Start of the current run of bytes that don't need escaping
	sb.WriteString("data:")
	sb.WriteString(mimeType)
	sb.WriteByte(',')

	// Scan for trailing characters that need to be escaped. Trailing bytes
	// <= 0x20 (spaces and control characters) are percent-escaped below.
	// Tab, newline, and carriage return stop the scan only because they are
	// always escaped anyway, regardless of position.
	trailingStart := n
	for trailingStart > 0 {
		if c := text[trailingStart-1]; c > 0x20 || c == '\t' || c == '\n' || c == '\r' {
			break
		}
		trailingStart--
	}

	for i < n {
		c, width := utf8.DecodeRuneInString(text[i:])

		// We can't encode invalid UTF-8 data
		if c == utf8.RuneError && width == 1 {
			return "", false
		}

		// Escape this character if needed. A "%" only needs escaping when the
		// next two bytes are hex digits, since only then would it otherwise be
		// misread as a percent escape sequence.
		if c == '\t' || c == '\n' || c == '\r' || c == '#' || i >= trailingStart ||
			(c == '%' && i+2 < n && isHex(text[i+1]) && isHex(text[i+2])) {
			// Flush the pending run of unescaped bytes first
			if runStart < i {
				sb.WriteString(text[runStart:i])
			}
			sb.WriteByte('%')
			sb.WriteByte(hex[c>>4])
			sb.WriteByte(hex[c&15])
			runStart = i + width
		}
		i += width
	}

	// Flush any remaining unescaped bytes
	if runStart < n {
		sb.WriteString(text[runStart:])
	}
	return sb.String(), true
}
// isHex returns true if "c" is an ASCII hexadecimal digit (0-9, a-f, A-F).
func isHex(c byte) bool {
	switch {
	case c >= '0' && c <= '9':
		return true
	case c >= 'a' && c <= 'f':
		return true
	case c >= 'A' && c <= 'F':
		return true
	default:
		return false
	}
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/helpers/float.go | internal/helpers/float.go | package helpers
import "math"
// This wraps float64 math operations. Why does this exist? The Go compiler
// contains some optimizations to take advantage of "fused multiply and add"
// (FMA) instructions on certain processors. These instructions lead to
// different output on those processors, which means esbuild's output is no
// longer deterministic across all platforms. From the Go specification itself
// (https://go.dev/ref/spec#Floating_point_operators):
//
// An implementation may combine multiple floating-point operations into a
// single fused operation, possibly across statements, and produce a result
// that differs from the value obtained by executing and rounding the
// instructions individually. An explicit floating-point type conversion
// rounds to the precision of the target type, preventing fusion that would
// discard that rounding.
//
// For instance, some architectures provide a "fused multiply and add" (FMA)
// instruction that computes x*y + z without rounding the intermediate result
// x*y.
//
// Therefore we need to add explicit type conversions such as "float64(x)" to
// prevent optimizations that break correctness. Rather than adding them on a
// case-by-case basis as real correctness issues are discovered, we instead
// preemptively force them to be added everywhere by using this wrapper type
// for all floating-point math.
// F64 wraps a float64. All arithmetic goes through the methods below, whose
// explicit "float64(...)" conversions insert rounding points that prevent
// FMA fusion (see the explanation above).
type F64 struct {
	value float64
}

// NewF64 wraps a raw float64 value. The seemingly redundant "float64(a)"
// conversion is deliberate: per the Go spec, an explicit conversion rounds
// to the target precision and blocks operation fusion.
func NewF64(a float64) F64 {
	return F64{value: float64(a)}
}

// Value returns the wrapped float64.
func (a F64) Value() float64 {
	return a.value
}
// IsNaN returns true if the value is an IEEE 754 NaN.
func (a F64) IsNaN() bool {
	return math.IsNaN(a.value)
}

// Neg returns -a.
func (a F64) Neg() F64 {
	return NewF64(-a.value)
}

// Abs returns the absolute value of a.
func (a F64) Abs() F64 {
	return NewF64(math.Abs(a.value))
}

// Sin returns the sine of a (radians).
func (a F64) Sin() F64 {
	return NewF64(math.Sin(a.value))
}

// Cos returns the cosine of a (radians).
func (a F64) Cos() F64 {
	return NewF64(math.Cos(a.value))
}

// Log2 returns the base-2 logarithm of a.
func (a F64) Log2() F64 {
	return NewF64(math.Log2(a.value))
}

// Round returns a rounded to the nearest integer, halves away from zero.
func (a F64) Round() F64 {
	return NewF64(math.Round(a.value))
}

// Floor returns the greatest integer value <= a.
func (a F64) Floor() F64 {
	return NewF64(math.Floor(a.value))
}

// Ceil returns the least integer value >= a.
func (a F64) Ceil() F64 {
	return NewF64(math.Ceil(a.value))
}

// Squared returns a*a.
func (a F64) Squared() F64 {
	return a.Mul(a)
}

// Cubed returns a*a*a.
func (a F64) Cubed() F64 {
	return a.Mul(a).Mul(a)
}

// Sqrt returns the square root of a.
func (a F64) Sqrt() F64 {
	return NewF64(math.Sqrt(a.value))
}

// Cbrt returns the cube root of a.
func (a F64) Cbrt() F64 {
	return NewF64(math.Cbrt(a.value))
}
// Add returns a + b.
func (a F64) Add(b F64) F64 {
	return NewF64(a.value + b.value)
}

// AddConst returns a + b where b is an unwrapped constant.
func (a F64) AddConst(b float64) F64 {
	return NewF64(a.value + b)
}

// Sub returns a - b.
func (a F64) Sub(b F64) F64 {
	return NewF64(a.value - b.value)
}

// SubConst returns a - b where b is an unwrapped constant.
func (a F64) SubConst(b float64) F64 {
	return NewF64(a.value - b)
}

// Mul returns a * b.
func (a F64) Mul(b F64) F64 {
	return NewF64(a.value * b.value)
}

// MulConst returns a * b where b is an unwrapped constant.
func (a F64) MulConst(b float64) F64 {
	return NewF64(a.value * b)
}

// Div returns a / b.
func (a F64) Div(b F64) F64 {
	return NewF64(a.value / b.value)
}

// DivConst returns a / b where b is an unwrapped constant.
func (a F64) DivConst(b float64) F64 {
	return NewF64(a.value / b)
}

// Pow returns a raised to the power b.
func (a F64) Pow(b F64) F64 {
	return NewF64(math.Pow(a.value, b.value))
}

// PowConst returns a raised to the power b, where b is an unwrapped constant.
func (a F64) PowConst(b float64) F64 {
	return NewF64(math.Pow(a.value, b))
}

// Atan2 returns the arc tangent of a/b, using their signs to pick the quadrant.
func (a F64) Atan2(b F64) F64 {
	return NewF64(math.Atan2(a.value, b.value))
}

// WithSignFrom returns a value with the magnitude of a and the sign of b.
func (a F64) WithSignFrom(b F64) F64 {
	return NewF64(math.Copysign(a.value, b.value))
}
// Min2 returns the smaller of a and b (with math.Min's NaN/±0 semantics).
func Min2(a F64, b F64) F64 {
	return NewF64(math.Min(a.value, b.value))
}

// Max2 returns the larger of a and b (with math.Max's NaN/±0 semantics).
func Max2(a F64, b F64) F64 {
	return NewF64(math.Max(a.value, b.value))
}

// Min3 returns the smallest of a, b, and c.
func Min3(a F64, b F64, c F64) F64 {
	return NewF64(math.Min(math.Min(a.value, b.value), c.value))
}

// Max3 returns the largest of a, b, and c.
func Max3(a F64, b F64, c F64) F64 {
	return NewF64(math.Max(math.Max(a.value, b.value), c.value))
}

// Lerp linearly interpolates from a to b by t: a + t*(b - a).
func Lerp(a F64, b F64, t F64) F64 {
	return b.Sub(a).Mul(t).Add(a)
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/helpers/dataurl_test.go | internal/helpers/dataurl_test.go | package helpers_test
import (
"fmt"
"testing"
"github.com/evanw/esbuild/internal/helpers"
)
// TestEncodeDataURL exercises percent-escaping of data URLs across all
// single-byte code points plus leading/trailing whitespace and "%" edge cases.
func TestEncodeDataURL(t *testing.T) {
	check := func(raw string, expected string) {
		url, ok := helpers.EncodeStringAsPercentEscapedDataURL("text/plain", raw)
		if !ok {
			t.Fatalf("Failed to encode %q", raw)
		} else if url != expected {
			t.Fatalf("Got %q but expected %q", url, expected)
		}
	}

	for i := 0; i <= 0xFF; i++ {
		// Tab, newline, carriage return, and "#" must be escaped everywhere;
		// other code points <= 0x20 only need escaping in trailing position
		alwaysEscape := i == '\t' || i == '\r' || i == '\n' || i == '#'
		trailingEscape := i <= 0x20 || i == '#'

		if trailingEscape {
			check(string(rune(i)), fmt.Sprintf("data:text/plain,%%%02X", i))
			check("foo"+string(rune(i)), fmt.Sprintf("data:text/plain,foo%%%02X", i))
		} else {
			check(string(rune(i)), fmt.Sprintf("data:text/plain,%c", i))
			check("foo"+string(rune(i)), fmt.Sprintf("data:text/plain,foo%c", i))
		}

		if alwaysEscape {
			check(string(rune(i))+"foo", fmt.Sprintf("data:text/plain,%%%02Xfoo", i))
		} else {
			check(string(rune(i))+"foo", fmt.Sprintf("data:text/plain,%cfoo", i))
		}
	}

	// Test leading vs. trailing
	check(" \t ", "data:text/plain, %09%20")
	check(" \n ", "data:text/plain, %0A%20")
	check(" \r ", "data:text/plain, %0D%20")
	check(" # ", "data:text/plain, %23%20")
	check("\x08#\x08", "data:text/plain,\x08%23%08")

	// Only "%" symbols that could form an escape need to be escaped
	check("%, %3, %33, %333", "data:text/plain,%, %3, %2533, %25333")
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/helpers/utf.go | internal/helpers/utf.go | package helpers
import (
"strings"
"unicode/utf8"
)
// ContainsNonBMPCodePoint returns true if "text" contains at least one code
// point outside the Basic Multilingual Plane (i.e. above U+FFFF).
func ContainsNonBMPCodePoint(text string) bool {
	return strings.IndexFunc(text, func(r rune) bool { return r > 0xFFFF }) != -1
}
// This does "ContainsNonBMPCodePoint(UTF16ToString(text))" without any allocations
//
// It scans for a surrogate pair: a high surrogate (0xD800-0xDBFF) immediately
// followed by a low surrogate (0xDC00-0xDFFF), which is how UTF-16 encodes
// code points above U+FFFF. Unpaired surrogates don't count.
func ContainsNonBMPCodePointUTF16(text []uint16) bool {
	for i := 0; i+1 < len(text); i++ {
		if hi := text[i]; hi >= 0xD800 && hi <= 0xDBFF {
			if lo := text[i+1]; lo >= 0xDC00 && lo <= 0xDFFF {
				return true
			}
		}
	}
	return false
}
// StringToUTF16 converts a UTF-8 string into its UTF-16 representation.
// Code points above U+FFFF are split into a high/low surrogate pair.
func StringToUTF16(text string) []uint16 {
	out := make([]uint16, 0, len(text))
	for _, r := range text {
		if r > 0xFFFF {
			// Encode as a surrogate pair
			r -= 0x10000
			hi := uint16(0xD800 + ((r >> 10) & 0x3FF))
			lo := uint16(0xDC00 + (r & 0x3FF))
			out = append(out, hi, lo)
		} else {
			out = append(out, uint16(r))
		}
	}
	return out
}
// UTF16ToString converts UTF-16 data into a (possibly WTF-8) string.
// Surrogate pairs are combined; unpaired surrogates are passed through
// and encoded individually via WTF-8.
func UTF16ToString(text []uint16) string {
	var scratch [utf8.UTFMax]byte
	sb := strings.Builder{}
	for i := 0; i < len(text); i++ {
		r := rune(text[i])

		// Combine a high surrogate with a following low surrogate, if any
		if r >= 0xD800 && r <= 0xDBFF && i+1 < len(text) {
			if lo := rune(text[i+1]); lo >= 0xDC00 && lo <= 0xDFFF {
				r = 0x10000 + (r-0xD800)<<10 + (lo - 0xDC00)
				i++
			}
		}

		sb.Write(scratch[:encodeWTF8Rune(scratch[:], r)])
	}
	return sb.String()
}
// UTF16ToStringWithValidation converts UTF-16 data to a string like
// UTF16ToString, but fails on unpaired surrogates instead of passing them
// through. On failure it returns the offending surrogate and false.
func UTF16ToStringWithValidation(text []uint16) (string, uint16, bool) {
	var temp [utf8.UTFMax]byte
	b := strings.Builder{}
	n := len(text)
	for i := 0; i < n; i++ {
		r1 := rune(text[i])
		if r1 >= 0xD800 && r1 <= 0xDBFF {
			// High surrogate: must be followed by a low surrogate
			if i+1 < n {
				if r2 := rune(text[i+1]); r2 >= 0xDC00 && r2 <= 0xDFFF {
					r1 = (r1-0xD800)<<10 | (r2 - 0xDC00) + 0x10000
					i++
				} else {
					// High surrogate not followed by a low surrogate
					return "", uint16(r1), false
				}
			} else {
				// High surrogate at the end of the input
				return "", uint16(r1), false
			}
		} else if r1 >= 0xDC00 && r1 <= 0xDFFF {
			// Unpaired low surrogate
			return "", uint16(r1), false
		}
		width := encodeWTF8Rune(temp[:], r1)
		b.Write(temp[:width])
	}
	return b.String(), 0, true
}
// Does "UTF16ToString(text) == str" without a temporary allocation
func UTF16EqualsString(text []uint16, str string) bool {
	if len(text) > len(str) {
		// Strings can't be equal if UTF-16 encoding is longer than UTF-8 encoding
		return false
	}
	var temp [utf8.UTFMax]byte
	n := len(text)
	j := 0 // Current byte offset into "str"
	for i := 0; i < n; i++ {
		r1 := rune(text[i])

		// Combine a surrogate pair into a single code point
		if r1 >= 0xD800 && r1 <= 0xDBFF && i+1 < n {
			if r2 := rune(text[i+1]); r2 >= 0xDC00 && r2 <= 0xDFFF {
				r1 = (r1-0xD800)<<10 | (r2 - 0xDC00) + 0x10000
				i++
			}
		}

		// Encode this code point as WTF-8 and compare it byte-by-byte with "str"
		width := encodeWTF8Rune(temp[:], r1)
		if j+width > len(str) {
			return false
		}
		for k := 0; k < width; k++ {
			if temp[k] != str[j] {
				return false
			}
			j++
		}
	}

	// Both encodings must have been fully consumed
	return j == len(str)
}
// UTF16EqualsUTF16 reports whether two UTF-16 sequences contain exactly the
// same code units.
func UTF16EqualsUTF16(a []uint16, b []uint16) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if a[i] != b[i] {
			return false
		}
	}
	return true
}
// This is a clone of "utf8.EncodeRune" that has been modified to encode using
// WTF-8 instead. See https://simonsapin.github.io/wtf-8/ for more info.
//
// Unlike strict UTF-8, WTF-8 permits encoding unpaired surrogates
// (U+D800-U+DFFF), which take the 3-byte path below. Writes into "p" and
// returns the number of bytes written (1-4); "p" must be at least
// utf8.UTFMax bytes long.
func encodeWTF8Rune(p []byte, r rune) int {
	// Negative values are erroneous. Making it unsigned addresses the problem.
	switch i := uint32(r); {
	case i <= 0x7F:
		// 1-byte ASCII
		p[0] = byte(r)
		return 1
	case i <= 0x7FF:
		// 2-byte sequence
		_ = p[1] // eliminate bounds checks
		p[0] = 0xC0 | byte(r>>6)
		p[1] = 0x80 | byte(r)&0x3F
		return 2
	case i > utf8.MaxRune:
		// Out-of-range runes become the replacement character, which is then
		// encoded by the 3-byte case below
		r = utf8.RuneError
		fallthrough
	case i <= 0xFFFF:
		// 3-byte sequence (this includes unpaired surrogates, unlike UTF-8)
		_ = p[2] // eliminate bounds checks
		p[0] = 0xE0 | byte(r>>12)
		p[1] = 0x80 | byte(r>>6)&0x3F
		p[2] = 0x80 | byte(r)&0x3F
		return 3
	default:
		// 4-byte sequence for code points above U+FFFF
		_ = p[3] // eliminate bounds checks
		p[0] = 0xF0 | byte(r>>18)
		p[1] = 0x80 | byte(r>>12)&0x3F
		p[2] = 0x80 | byte(r>>6)&0x3F
		p[3] = 0x80 | byte(r)&0x3F
		return 4
	}
}
// This is a clone of "utf8.DecodeRuneInString" that has been modified to
// decode using WTF-8 instead. See https://simonsapin.github.io/wtf-8/ for
// more info.
//
// Returns the decoded rune and its width in bytes. Malformed input yields
// (utf8.RuneError, 1), and an empty or truncated string yields
// (utf8.RuneError, 0). Unlike strict UTF-8 decoding, unpaired surrogates
// are accepted by the 3-byte case.
func DecodeWTF8Rune(s string) (rune, int) {
	n := len(s)
	if n < 1 {
		return utf8.RuneError, 0
	}
	s0 := s[0]
	if s0 < 0x80 {
		// 1-byte ASCII fast path
		return rune(s0), 1
	}

	// Determine the sequence length from the leading byte
	var sz int
	if (s0 & 0xE0) == 0xC0 {
		sz = 2
	} else if (s0 & 0xF0) == 0xE0 {
		sz = 3
	} else if (s0 & 0xF8) == 0xF0 {
		sz = 4
	} else {
		// Invalid leading byte
		return utf8.RuneError, 1
	}

	// The sequence is truncated
	if n < sz {
		return utf8.RuneError, 0
	}

	s1 := s[1]
	if (s1 & 0xC0) != 0x80 {
		// Invalid continuation byte
		return utf8.RuneError, 1
	}

	if sz == 2 {
		cp := rune(s0&0x1F)<<6 | rune(s1&0x3F)
		if cp < 0x80 {
			// Reject overlong encodings
			return utf8.RuneError, 1
		}
		return cp, 2
	}
	s2 := s[2]
	if (s2 & 0xC0) != 0x80 {
		// Invalid continuation byte
		return utf8.RuneError, 1
	}

	if sz == 3 {
		cp := rune(s0&0x0F)<<12 | rune(s1&0x3F)<<6 | rune(s2&0x3F)
		if cp < 0x0800 {
			// Reject overlong encodings
			return utf8.RuneError, 1
		}
		return cp, 3
	}
	s3 := s[3]
	if (s3 & 0xC0) != 0x80 {
		// Invalid continuation byte
		return utf8.RuneError, 1
	}

	cp := rune(s0&0x07)<<18 | rune(s1&0x3F)<<12 | rune(s2&0x3F)<<6 | rune(s3&0x3F)
	if cp < 0x010000 || cp > 0x10FFFF {
		// Reject overlong encodings and out-of-range code points
		return utf8.RuneError, 1
	}
	return cp, 4
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/helpers/typos.go | internal/helpers/typos.go | package helpers
import "unicode/utf8"
// TypoDetector suggests corrections for strings that are one edit away from
// a known-valid string. Only valid strings longer than 3 bytes are indexed,
// which avoids false positives on very short names.
type TypoDetector struct {
	oneCharTypos map[string]string
}

// MakeTypoDetector builds a detector from the given list of valid strings.
func MakeTypoDetector(valid []string) TypoDetector {
	detector := TypoDetector{oneCharTypos: make(map[string]string)}

	// Add all combinations of each valid word with one character missing
	for _, word := range valid {
		if len(word) <= 3 {
			continue
		}
		for i, r := range word {
			withoutRune := word[:i] + word[i+utf8.RuneLen(r):]
			detector.oneCharTypos[withoutRune] = word
		}
	}
	return detector
}

// MaybeCorrectTypo returns the valid string that "typo" is likely a typo of,
// or false if there is no plausible match.
func (detector TypoDetector) MaybeCorrectTypo(typo string) (string, bool) {
	// Check for a single deleted character
	if match, ok := detector.oneCharTypos[typo]; ok {
		return match, true
	}

	// Check for a single misplaced character: deleting each character of the
	// typo in turn may produce a known one-character-deleted form
	for i, r := range typo {
		if match, ok := detector.oneCharTypos[typo[:i]+typo[i+utf8.RuneLen(r):]]; ok {
			return match, true
		}
	}
	return "", false
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/graph/meta.go | internal/graph/meta.go | package graph
// The code in this file represents data that is required by the compile phase
// of the bundler but that is not required by the scan phase.
import (
"github.com/evanw/esbuild/internal/ast"
"github.com/evanw/esbuild/internal/helpers"
"github.com/evanw/esbuild/internal/js_ast"
"github.com/evanw/esbuild/internal/logger"
)
// WrapKind describes which wrapper, if any, a module must be placed inside
// when it's bundled.
type WrapKind uint8

const (
	// The module is included directly without a wrapper
	WrapNone WrapKind = iota

	// The module will be bundled CommonJS-style like this:
	//
	//   // foo.ts
	//   let require_foo = __commonJS((exports, module) => {
	//     exports.foo = 123;
	//   });
	//
	//   // bar.ts
	//   let foo = flag ? require_foo() : null;
	//
	WrapCJS

	// The module will be bundled ESM-style like this:
	//
	//   // foo.ts
	//   var foo, foo_exports = {};
	//   __export(foo_exports, {
	//     foo: () => foo
	//   });
	//   let init_foo = __esm(() => {
	//     foo = 123;
	//   });
	//
	//   // bar.ts
	//   let foo = flag ? (init_foo(), __toCommonJS(foo_exports)) : null;
	//
	WrapESM
)
// This contains linker-specific metadata corresponding to a "file" struct
// from the initial scan phase of the bundler. It's separated out because it's
// conceptually only used for a single linking operation and because multiple
// linking operations may be happening in parallel with different metadata for
// the same file.
type JSReprMeta struct {
// This is only for TypeScript files. If an import symbol is in this map, it
// means the import couldn't be found and doesn't actually exist. This is not
// an error in TypeScript because the import is probably just a type.
//
// Normally we remove all unused imports for TypeScript files during parsing,
// which automatically removes type-only imports. But there are certain re-
// export situations where it's impossible to tell if an import is a type or
// not:
//
// import {typeOrNotTypeWhoKnows} from 'path';
// export {typeOrNotTypeWhoKnows};
//
// Really people should be using the TypeScript "isolatedModules" flag with
// bundlers like this one that compile TypeScript files independently without
// type checking. That causes the TypeScript type checker to emit the error
// "Re-exporting a type when the '--isolatedModules' flag is provided requires
// using 'export type'." But we try to be robust to such code anyway.
IsProbablyTypeScriptType map[ast.Ref]bool
// Imports are matched with exports in a separate pass from when the matched
// exports are actually bound to the imports. Here "binding" means adding non-
// local dependencies on the parts in the exporting file that declare the
// exported symbol to all parts in the importing file that use the imported
// symbol.
//
// This must be a separate pass because of the "probably TypeScript type"
// check above. We can't generate the part for the export namespace until
// we've matched imports with exports because the generated code must omit
// type-only imports in the export namespace code. And we can't bind exports
// to imports until the part for the export namespace is generated since that
// part needs to participate in the binding.
//
// This array holds the deferred imports to bind so the pass can be split
// into two separate passes.
ImportsToBind map[ast.Ref]ImportData
// This includes both named exports and re-exports.
//
// Named exports come from explicit export statements in the original file,
// and are copied from the "NamedExports" field in the AST.
//
// Re-exports come from other files and are the result of resolving export
// star statements (i.e. "export * from 'foo'").
ResolvedExports map[string]ExportData
ResolvedExportStar *ExportData
ResolvedExportTypos *helpers.TypoDetector
// Never iterate over "resolvedExports" directly. Instead, iterate over this
// array. Some exports in that map aren't meant to end up in generated code.
// This array excludes these exports and is also sorted, which avoids non-
// determinism due to random map iteration order.
SortedAndFilteredExportAliases []string
// This is merged on top of the corresponding map from the parser in the AST.
// You should call "TopLevelSymbolToParts" to access this instead of accessing
// it directly.
TopLevelSymbolToPartsOverlay map[ast.Ref][]uint32
// If this is an entry point, this array holds a reference to one free
// temporary symbol for each entry in "sortedAndFilteredExportAliases".
// These may be needed to store copies of CommonJS re-exports in ESM.
CJSExportCopies []ast.Ref
// The index of the automatically-generated part used to represent the
// CommonJS or ESM wrapper. This part is empty and is only useful for tree
// shaking and code splitting. The wrapper can't be inserted into the part
// because the wrapper contains other parts, which can't be represented by
// the current part system. Only wrapped files have one of these.
WrapperPartIndex ast.Index32
// The index of the automatically-generated part used to handle entry point
// specific stuff. If a certain part is needed by the entry point, it's added
// as a dependency of this part. This is important for parts that are marked
// as removable when unused and that are not used by anything else. Only
// entry point files have one of these.
EntryPointPartIndex ast.Index32
// This is true if this file is affected by top-level await, either by having
// a top-level await inside this file or by having an import/export statement
// that transitively imports such a file. It is forbidden to call "require()"
// on these files since they are evaluated asynchronously.
IsAsyncOrHasAsyncDependency bool
Wrap WrapKind
// If true, we need to insert "var exports = {};". This is the case for ESM
// files when the import namespace is captured via "import * as" and also
// when they are the target of a "require()" call.
NeedsExportsVariable bool
// If true, the "__export(exports, { ... })" call will be force-included even
// if there are no parts that reference "exports". Otherwise this call will
// be removed due to the tree shaking pass. This is used when for entry point
// files when code related to the current output format needs to reference
// the "exports" variable.
ForceIncludeExportsForEntryPoint bool
// This is set when we need to pull in the "__export" symbol in to the part
// at "nsExportPartIndex". This can't be done in "createExportsForFile"
// because of concurrent map hazards. Instead, it must be done later.
NeedsExportSymbolFromRuntime bool
// Wrapped files must also ensure that their dependencies are wrapped. This
// flag is used during the traversal that enforces this invariant, and is used
// to detect when the fixed point has been reached.
DidWrapDependencies bool
}
// ImportData records the final resolution of an import after following any
// chain of intermediate re-exports.
type ImportData struct {
	// This is an array of intermediate statements that re-exported this symbol
	// in a chain before getting to the final symbol. This can be done either with
	// "export * from" or "export {} from". If this is done with "export * from"
	// then this may not be the result of a single chain but may instead form
	// a diamond shape if this same symbol was re-exported multiple times from
	// different files.
	ReExports []js_ast.Dependency

	NameLoc     logger.Loc // Optional, goes with sourceIndex, ignore if zero
	Ref         ast.Ref    // The symbol that the import resolved to
	SourceIndex uint32     // The source file containing "Ref"
}
// ExportData describes a single resolved named export (or re-export) of a
// file, used to populate "ResolvedExports" in JSReprMeta.
type ExportData struct {
	// Export star resolution happens first before import resolution. That means
	// it cannot yet determine if duplicate names from export star resolution are
	// ambiguous (point to different symbols) or not (point to the same symbol).
	// This issue can happen in the following scenario:
	//
	//   // entry.js
	//   export * from './a'
	//   export * from './b'
	//
	//   // a.js
	//   export * from './c'
	//
	//   // b.js
	//   export {x} from './c'
	//
	//   // c.js
	//   export let x = 1, y = 2
	//
	// In this case "entry.js" should have two exports "x" and "y", neither of
	// which are ambiguous. To handle this case, ambiguity resolution must be
	// deferred until import resolution time. That is done using this array.
	PotentiallyAmbiguousExportStarRefs []ImportData

	Ref ast.Ref

	// This is the file that the named export above came from. This will be
	// different from the file that contains this object if this is a re-export.
	NameLoc     logger.Loc // Optional, goes with sourceIndex, ignore if zero
	SourceIndex uint32
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/graph/input.go | internal/graph/input.go | package graph
// The code in this file mainly represents data that passes from the scan phase
// to the compile phase of the bundler. There is currently one exception: the
// "meta" member of the JavaScript file representation. That could have been
// stored separately but is stored together for convenience and to avoid an
// extra level of indirection. Instead it's kept in a separate type to keep
// things organized.
import (
"github.com/evanw/esbuild/internal/ast"
"github.com/evanw/esbuild/internal/config"
"github.com/evanw/esbuild/internal/css_ast"
"github.com/evanw/esbuild/internal/js_ast"
"github.com/evanw/esbuild/internal/logger"
"github.com/evanw/esbuild/internal/resolver"
"github.com/evanw/esbuild/internal/sourcemap"
)
type InputFile struct {
Repr InputFileRepr
InputSourceMap *sourcemap.SourceMap
// If this file ends up being used in the bundle, these are additional files
// that must be written to the output directory. It's used by the "file" and
// "copy" loaders.
AdditionalFiles []OutputFile
UniqueKeyForAdditionalFile string
SideEffects SideEffects
Source logger.Source
Loader config.Loader
OmitFromSourceMapsAndMetafile bool
}
type OutputFile struct {
// If "AbsMetadataFile" is present, this will be filled out with information
// about this file in JSON format. This is a partial JSON file that will be
// fully assembled later.
JSONMetadataChunk string
AbsPath string
Contents []byte
IsExecutable bool
}
type SideEffects struct {
// This is optional additional information for use in error messages
Data *resolver.SideEffectsData
Kind SideEffectsKind
}
type SideEffectsKind uint8
const (
// The default value conservatively considers all files to have side effects.
HasSideEffects SideEffectsKind = iota
// This file was listed as not having side effects by a "package.json"
// file in one of our containing directories with a "sideEffects" field.
NoSideEffects_PackageJSON
// This file is considered to have no side effects because the AST was empty
// after parsing finished. This should be the case for ".d.ts" files.
NoSideEffects_EmptyAST
// This file was loaded using a data-oriented loader (e.g. "text") that is
// known to not have side effects.
NoSideEffects_PureData
// Same as above but it came from a plugin. We don't want to warn about
// unused imports to these files since running the plugin is a side effect.
// Removing the import would not call the plugin which is observable.
NoSideEffects_PureData_FromPlugin
)
type InputFileRepr interface {
ImportRecords() *[]ast.ImportRecord
}
type JSRepr struct {
Meta JSReprMeta
AST js_ast.AST
// If present, this is the CSS file that this JavaScript stub corresponds to.
// A JavaScript stub is automatically generated for a CSS file when it's
// imported from a JavaScript file.
CSSSourceIndex ast.Index32
}
// ImportRecords returns a pointer to this file's import records so callers
// can read or mutate them in place.
func (repr *JSRepr) ImportRecords() *[]ast.ImportRecord {
	return &repr.AST.ImportRecords
}
// TopLevelSymbolToParts returns the parts associated with the given
// top-level symbol, preferring the linker's mutable overlay over the
// parser's immutable map.
func (repr *JSRepr) TopLevelSymbolToParts(ref ast.Ref) []uint32 {
	// Overlay the mutable map from the linker
	if parts, ok := repr.Meta.TopLevelSymbolToPartsOverlay[ref]; ok {
		return parts
	}

	// Fall back to the immutable map from the parser
	return repr.AST.TopLevelSymbolToPartsFromParser[ref]
}
type CSSRepr struct {
AST css_ast.AST
// If present, this is the JavaScript stub corresponding to this CSS file.
// A JavaScript stub is automatically generated for a CSS file when it's
// imported from a JavaScript file.
JSSourceIndex ast.Index32
}
// ImportRecords returns a pointer to this CSS file's import records so
// callers can read or mutate them in place.
func (repr *CSSRepr) ImportRecords() *[]ast.ImportRecord {
	return &repr.AST.ImportRecords
}
type CopyRepr struct {
// The URL that replaces the contents of any import record paths for this file
URLForCode string
}
// ImportRecords returns nil: files handled by the "copy" loader are passed
// through as-is and have no import records of their own.
func (repr *CopyRepr) ImportRecords() *[]ast.ImportRecord {
	return nil
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/graph/graph.go | internal/graph/graph.go | package graph
// This graph represents the set of files that the linker operates on. Each
// linker has a separate one of these graphs (there is one linker when code
// splitting is on, but one linker per entry point when code splitting is off).
//
// The input data to the linker constructor must be considered immutable because
// it's shared between linker invocations and is also stored in the cache for
// incremental builds.
//
// The linker constructor makes a shallow clone of the input data and is careful
// to pre-clone ahead of time the AST fields that it may modify. The Go language
// doesn't have any type system features for immutability so this has to be
// manually enforced. Please be careful.
import (
"sort"
"sync"
"github.com/evanw/esbuild/internal/ast"
"github.com/evanw/esbuild/internal/helpers"
"github.com/evanw/esbuild/internal/js_ast"
"github.com/evanw/esbuild/internal/logger"
"github.com/evanw/esbuild/internal/runtime"
)
// entryPointKind records whether a file is an entry point, and if so, why.
type entryPointKind uint8

const (
	entryPointNone          entryPointKind = iota // not an entry point
	entryPointUserSpecified                       // explicitly listed by the user
	entryPointDynamicImport                       // added for a dynamic "import()" (code splitting)
)
type LinkerFile struct {
// This holds all entry points that can reach this file. It will be used to
// assign the parts in this file to a chunk.
EntryBits helpers.BitSet
// This is lazily-allocated because it's only needed if there are warnings
// logged, which should be relatively rare.
lazyLineColumnTracker *logger.LineColumnTracker
InputFile InputFile
// The minimum number of links in the module graph to get from an entry point
// to this file
DistanceFromEntryPoint uint32
// If "entryPointKind" is not "entryPointNone", this is the index of the
// corresponding entry point chunk.
EntryPointChunkIndex uint32
// This file is an entry point if and only if this is not "entryPointNone".
// Note that dynamically-imported files are allowed to also be specified by
// the user as top-level entry points, so some dynamically-imported files
// may be "entryPointUserSpecified" instead of "entryPointDynamicImport".
entryPointKind entryPointKind
// This is true if this file has been marked as live by the tree shaking
// algorithm.
IsLive bool
}
// IsEntryPoint returns true if this file is any kind of entry point, whether
// specified by the user or generated for a dynamic "import()" expression.
func (f *LinkerFile) IsEntryPoint() bool {
	return f.entryPointKind != entryPointNone
}
// IsUserSpecifiedEntryPoint returns true only if this file was explicitly
// listed as an entry point by the user (not merely dynamically imported).
func (f *LinkerFile) IsUserSpecifiedEntryPoint() bool {
	return f.entryPointKind == entryPointUserSpecified
}
// Note: This is not guarded by a mutex. Make sure this isn't called from a
// parallel part of the code.
//
// LineColumnTracker lazily creates and caches a line/column tracker for this
// file's source. It's lazy because it's only needed when a message is logged
// about this file, which should be relatively rare.
func (f *LinkerFile) LineColumnTracker() *logger.LineColumnTracker {
	if f.lazyLineColumnTracker == nil {
		tracker := logger.MakeLineColumnTracker(&f.InputFile.Source)
		f.lazyLineColumnTracker = &tracker
	}
	return f.lazyLineColumnTracker
}
type EntryPoint struct {
// This may be an absolute path or a relative path. If absolute, it will
// eventually be turned into a relative path by computing the path relative
// to the "outbase" directory. Then this relative path will be joined onto
// the "outdir" directory to form the final output path for this entry point.
OutputPath string
// This is the source index of the entry point. This file must have a valid
// entry point kind (i.e. not "none").
SourceIndex uint32
// Manually specified output paths are ignored when computing the default
// "outbase" directory, which is computed as the lowest common ancestor of
// all automatically generated output paths.
OutputPathWasAutoGenerated bool
}
type LinkerGraph struct {
Files []LinkerFile
entryPoints []EntryPoint
Symbols ast.SymbolMap
// This is for cross-module inlining of TypeScript enum constants
TSEnums map[ast.Ref]map[string]js_ast.TSEnumValue
// This is for cross-module inlining of detected inlinable constants
ConstValues map[ast.Ref]js_ast.ConstValue
// We should avoid traversing all files in the bundle, because the linker
// should be able to run a linking operation on a large bundle where only
// a few files are needed (e.g. an incremental compilation scenario). This
// holds all files that could possibly be reached through the entry points.
// If you need to iterate over all files in the linking operation, iterate
// over this array. This array is also sorted in a deterministic ordering
// to help ensure deterministic builds (source indices are random).
ReachableFiles []uint32
// This maps from unstable source index to stable reachable file index. This
// is useful as a deterministic key for sorting if you need to sort something
// containing a source index (such as "ast.Ref" symbol references).
StableSourceIndices []uint32
}
// CloneLinkerGraph deep-copies the pieces of parser output that the linker
// mutates (symbols, parts, import records, import/export maps, and the
// module scope) so linking never modifies the parsed files themselves. This
// lets the same parsed files be linked repeatedly (e.g. incremental builds).
// When code splitting is enabled, dynamic "import()" targets are promoted to
// additional entry points in a deterministic order.
func CloneLinkerGraph(
	inputFiles []InputFile,
	reachableFiles []uint32,
	originalEntryPoints []EntryPoint,
	codeSplitting bool,
) LinkerGraph {
	entryPoints := append([]EntryPoint{}, originalEntryPoints...)
	symbols := ast.NewSymbolMap(len(inputFiles))
	files := make([]LinkerFile, len(inputFiles))
	// Mark all entry points so we don't add them again for import() expressions
	for _, entryPoint := range entryPoints {
		files[entryPoint.SourceIndex].entryPointKind = entryPointUserSpecified
	}
	// Clone various things since we may mutate them later. Do this in parallel
	// for a speedup (around ~2x faster for this function in the three.js
	// benchmark on a 6-core laptop).
	var dynamicImportEntryPoints []uint32
	var dynamicImportEntryPointsMutex sync.Mutex
	waitGroup := sync.WaitGroup{}
	waitGroup.Add(len(reachableFiles))
	stableSourceIndices := make([]uint32, len(inputFiles))
	for stableIndex, sourceIndex := range reachableFiles {
		// Create a way to convert source indices to a stable ordering
		stableSourceIndices[sourceIndex] = uint32(stableIndex)
		// "sourceIndex" is passed as an argument so each goroutine captures
		// its own copy instead of the shared loop variable
		go func(sourceIndex uint32) {
			file := &files[sourceIndex]
			file.InputFile = inputFiles[sourceIndex]
			switch repr := file.InputFile.Repr.(type) {
			case *JSRepr:
				// Clone the representation
				{
					clone := *repr
					repr = &clone
					file.InputFile.Repr = repr
				}
				// Clone the symbol map
				fileSymbols := append([]ast.Symbol{}, repr.AST.Symbols...)
				symbols.SymbolsForSource[sourceIndex] = fileSymbols
				repr.AST.Symbols = nil
				// Clone the parts
				repr.AST.Parts = append([]js_ast.Part{}, repr.AST.Parts...)
				for i := range repr.AST.Parts {
					part := &repr.AST.Parts[i]
					clone := make(map[ast.Ref]js_ast.SymbolUse, len(part.SymbolUses))
					for ref, uses := range part.SymbolUses {
						clone[ref] = uses
					}
					part.SymbolUses = clone
				}
				// Clone the import records
				repr.AST.ImportRecords = append([]ast.ImportRecord{}, repr.AST.ImportRecords...)
				// Add dynamic imports as additional entry points if code splitting is active
				if codeSplitting {
					for importRecordIndex := range repr.AST.ImportRecords {
						if record := &repr.AST.ImportRecords[importRecordIndex]; record.SourceIndex.IsValid() && record.Kind == ast.ImportDynamic {
							dynamicImportEntryPointsMutex.Lock()
							dynamicImportEntryPoints = append(dynamicImportEntryPoints, record.SourceIndex.GetIndex())
							dynamicImportEntryPointsMutex.Unlock()
							// Remove import assertions for dynamic imports of additional
							// entry points so that they don't mess with the run-time behavior.
							// For example, "import('./foo.json', { assert: { type: 'json' } })"
							// will likely be converted into an import of a JavaScript file and
							// leaving the import assertion there will prevent it from working.
							record.AssertOrWith = nil
						}
					}
				}
				// Clone the import map
				namedImports := make(map[ast.Ref]js_ast.NamedImport, len(repr.AST.NamedImports))
				for k, v := range repr.AST.NamedImports {
					namedImports[k] = v
				}
				repr.AST.NamedImports = namedImports
				// Clone the export map
				resolvedExports := make(map[string]ExportData)
				for alias, name := range repr.AST.NamedExports {
					resolvedExports[alias] = ExportData{
						Ref:         name.Ref,
						SourceIndex: sourceIndex,
						NameLoc:     name.AliasLoc,
					}
				}
				// Clone the top-level scope so we can generate more variables
				{
					// Note: "new" deliberately shadows the builtin here
					new := &js_ast.Scope{}
					*new = *repr.AST.ModuleScope
					new.Generated = append([]ast.Ref{}, new.Generated...)
					repr.AST.ModuleScope = new
				}
				// Also associate some default metadata with the file
				repr.Meta.ResolvedExports = resolvedExports
				repr.Meta.IsProbablyTypeScriptType = make(map[ast.Ref]bool)
				repr.Meta.ImportsToBind = make(map[ast.Ref]ImportData)
			case *CSSRepr:
				// Clone the representation
				{
					clone := *repr
					repr = &clone
					file.InputFile.Repr = repr
				}
				// Clone the symbol map
				fileSymbols := append([]ast.Symbol{}, repr.AST.Symbols...)
				symbols.SymbolsForSource[sourceIndex] = fileSymbols
				repr.AST.Symbols = nil
				// Clone the import records
				repr.AST.ImportRecords = append([]ast.ImportRecord{}, repr.AST.ImportRecords...)
			}
			// All files start off as far as possible from an entry point
			file.DistanceFromEntryPoint = ^uint32(0)
			waitGroup.Done()
		}(sourceIndex)
	}
	waitGroup.Wait()
	// Process dynamic entry points after merging control flow again
	stableEntryPoints := make([]int, 0, len(dynamicImportEntryPoints))
	for _, sourceIndex := range dynamicImportEntryPoints {
		if otherFile := &files[sourceIndex]; otherFile.entryPointKind == entryPointNone {
			stableEntryPoints = append(stableEntryPoints, int(stableSourceIndices[sourceIndex]))
			otherFile.entryPointKind = entryPointDynamicImport
		}
	}
	// Make sure to add dynamic entry points in a deterministic order
	sort.Ints(stableEntryPoints)
	for _, stableIndex := range stableEntryPoints {
		entryPoints = append(entryPoints, EntryPoint{SourceIndex: reachableFiles[stableIndex]})
	}
	// Do a final quick pass over all files
	var tsEnums map[ast.Ref]map[string]js_ast.TSEnumValue
	var constValues map[ast.Ref]js_ast.ConstValue
	bitCount := uint(len(entryPoints))
	for _, sourceIndex := range reachableFiles {
		file := &files[sourceIndex]
		// Allocate the entry bit set now that the number of entry points is known
		file.EntryBits = helpers.NewBitSet(bitCount)
		// Merge TypeScript enums together into one big map. There likely aren't
		// too many enum definitions relative to the overall size of the code so
		// it should be fine to just merge them together in serial.
		if repr, ok := file.InputFile.Repr.(*JSRepr); ok && repr.AST.TSEnums != nil {
			if tsEnums == nil {
				tsEnums = make(map[ast.Ref]map[string]js_ast.TSEnumValue)
			}
			for ref, enum := range repr.AST.TSEnums {
				tsEnums[ref] = enum
			}
		}
		// Also merge const values into one big map as well
		if repr, ok := file.InputFile.Repr.(*JSRepr); ok && repr.AST.ConstValues != nil {
			if constValues == nil {
				constValues = make(map[ast.Ref]js_ast.ConstValue)
			}
			for ref, value := range repr.AST.ConstValues {
				constValues[ref] = value
			}
		}
	}
	return LinkerGraph{
		Symbols:             symbols,
		TSEnums:             tsEnums,
		ConstValues:         constValues,
		entryPoints:         entryPoints,
		Files:               files,
		ReachableFiles:      reachableFiles,
		StableSourceIndices: stableSourceIndices,
	}
}
// EntryPoints returns the graph's entry points. The slice is kept in an
// unexported field to prevent packages that depend on us from adding or
// removing entry points.
func (g *LinkerGraph) EntryPoints() []EntryPoint {
	return g.entryPoints
}
// AddPartToFile appends a new part to the given JS file and returns the index
// of that part. It maintains two invariants: every part has a non-nil
// "SymbolUses" map, and the parts declaring each top-level symbol can always
// be found through the file-level overlay map.
func (g *LinkerGraph) AddPartToFile(sourceIndex uint32, part js_ast.Part) uint32 {
	// Invariant: this map is never null
	if part.SymbolUses == nil {
		part.SymbolUses = make(map[ast.Ref]js_ast.SymbolUse)
	}

	repr := g.Files[sourceIndex].InputFile.Repr.(*JSRepr)
	newPartIndex := uint32(len(repr.AST.Parts))
	repr.AST.Parts = append(repr.AST.Parts, part)

	// Invariant: the parts for all top-level symbols can be found in the
	// file-level map
	for _, declared := range part.DeclaredSymbols {
		if !declared.IsTopLevel {
			continue
		}

		// Lazily allocate the overlay map
		overlay := repr.Meta.TopLevelSymbolToPartsOverlay
		if overlay == nil {
			overlay = make(map[ast.Ref][]uint32)
			repr.Meta.TopLevelSymbolToPartsOverlay = overlay
		}

		// If there's no overlay entry yet, seed it with the original part
		// indices recorded by the parser
		indices, ok := overlay[declared.Ref]
		if !ok {
			indices = append(indices, repr.AST.TopLevelSymbolToPartsFromParser[declared.Ref]...)
		}

		// Record that this new part also declares the symbol
		overlay[declared.Ref] = append(indices, newPartIndex)
	}

	return newPartIndex
}
// GenerateNewSymbol mints a fresh symbol in the given source file's symbol
// table and registers it in the module scope's "Generated" list so that
// later renaming passes can see it. The returned ref identifies the symbol.
func (g *LinkerGraph) GenerateNewSymbol(sourceIndex uint32, kind ast.SymbolKind, originalName string) ast.Ref {
	symbols := g.Symbols.SymbolsForSource[sourceIndex]

	// The new symbol's inner index is its position in the per-file table
	ref := ast.Ref{
		SourceIndex: sourceIndex,
		InnerIndex:  uint32(len(symbols)),
	}

	g.Symbols.SymbolsForSource[sourceIndex] = append(symbols, ast.Symbol{
		Kind:         kind,
		OriginalName: originalName,
		Link:         ast.InvalidRef,
	})

	// Make sure the renamer knows about this generated symbol
	moduleScope := g.Files[sourceIndex].InputFile.Repr.(*JSRepr).AST.ModuleScope
	moduleScope.Generated = append(moduleScope.Generated, ref)
	return ref
}
// GenerateSymbolImportAndUse records that the given part uses "ref" an
// estimated "useCount" times, registers a cross-file import binding when the
// symbol lives in another file, and adds dependencies on every part that
// declares the symbol. Calling it with a zero use count is a no-op.
func (g *LinkerGraph) GenerateSymbolImportAndUse(
	sourceIndex uint32,
	partIndex uint32,
	ref ast.Ref,
	useCount uint32,
	sourceIndexToImportFrom uint32,
) {
	if useCount == 0 {
		return
	}

	repr := g.Files[sourceIndex].InputFile.Repr.(*JSRepr)
	part := &repr.AST.Parts[partIndex]

	// Mark this symbol as used by this part
	symbolUse := part.SymbolUses[ref]
	symbolUse.CountEstimate += useCount
	part.SymbolUses[ref] = symbolUse

	// Uphold invariants about the CommonJS "exports" and "module" symbols
	if ref == repr.AST.ExportsRef {
		repr.AST.UsesExportsRef = true
	}
	if ref == repr.AST.ModuleRef {
		repr.AST.UsesModuleRef = true
	}

	// Track that this specific symbol was imported from another file
	if sourceIndexToImportFrom != sourceIndex {
		repr.Meta.ImportsToBind[ref] = ImportData{
			Ref:         ref,
			SourceIndex: sourceIndexToImportFrom,
		}
	}

	// Pull in all parts that declare this symbol
	fromRepr := g.Files[sourceIndexToImportFrom].InputFile.Repr.(*JSRepr)
	for _, declaringPartIndex := range fromRepr.TopLevelSymbolToParts(ref) {
		part.Dependencies = append(part.Dependencies, js_ast.Dependency{
			SourceIndex: sourceIndexToImportFrom,
			PartIndex:   declaringPartIndex,
		})
	}
}
// GenerateRuntimeSymbolImportAndUse is a convenience wrapper around
// GenerateSymbolImportAndUse for symbols exported by the injected runtime
// file, looked up by their exported name. A zero use count is a no-op.
func (g *LinkerGraph) GenerateRuntimeSymbolImportAndUse(
	sourceIndex uint32,
	partIndex uint32,
	name string,
	useCount uint32,
) {
	if useCount > 0 {
		runtimeRepr := g.Files[runtime.SourceIndex].InputFile.Repr.(*JSRepr)
		g.GenerateSymbolImportAndUse(sourceIndex, partIndex,
			runtimeRepr.AST.NamedExports[name].Ref, useCount, runtime.SourceIndex)
	}
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/js_lexer/tables.go | internal/js_lexer/tables.go | package js_lexer
// tokenToString maps each token kind T to a human-readable name. The names
// are used when constructing diagnostics (see "Expected" and
// "ExpectedString"), so punctuation and keywords are pre-quoted while
// token classes such as identifiers and literals are described in prose.
var tokenToString = map[T]string{
	TEndOfFile:   "end of file",
	TSyntaxError: "syntax error",
	THashbang:    "hashbang comment",
	// Literals
	TNoSubstitutionTemplateLiteral: "template literal",
	TNumericLiteral:                "number",
	TStringLiteral:                 "string",
	TBigIntegerLiteral:             "bigint",
	// Pseudo-literals
	TTemplateHead:   "template literal",
	TTemplateMiddle: "template literal",
	TTemplateTail:   "template literal",
	// Punctuation
	TAmpersand:                         "\"&\"",
	TAmpersandAmpersand:                "\"&&\"",
	TAsterisk:                          "\"*\"",
	TAsteriskAsterisk:                  "\"**\"",
	TAt:                                "\"@\"",
	TBar:                               "\"|\"",
	TBarBar:                            "\"||\"",
	TCaret:                             "\"^\"",
	TCloseBrace:                        "\"}\"",
	TCloseBracket:                      "\"]\"",
	TCloseParen:                        "\")\"",
	TColon:                             "\":\"",
	TComma:                             "\",\"",
	TDot:                               "\".\"",
	TDotDotDot:                         "\"...\"",
	TEqualsEquals:                      "\"==\"",
	TEqualsEqualsEquals:                "\"===\"",
	TEqualsGreaterThan:                 "\"=>\"",
	TExclamation:                       "\"!\"",
	TExclamationEquals:                 "\"!=\"",
	TExclamationEqualsEquals:           "\"!==\"",
	TGreaterThan:                       "\">\"",
	TGreaterThanEquals:                 "\">=\"",
	TGreaterThanGreaterThan:            "\">>\"",
	TGreaterThanGreaterThanGreaterThan: "\">>>\"",
	TLessThan:                          "\"<\"",
	TLessThanEquals:                    "\"<=\"",
	TLessThanLessThan:                  "\"<<\"",
	TMinus:                             "\"-\"",
	TMinusMinus:                        "\"--\"",
	TOpenBrace:                         "\"{\"",
	TOpenBracket:                       "\"[\"",
	TOpenParen:                         "\"(\"",
	TPercent:                           "\"%\"",
	TPlus:                              "\"+\"",
	TPlusPlus:                          "\"++\"",
	TQuestion:                          "\"?\"",
	TQuestionDot:                       "\"?.\"",
	TQuestionQuestion:                  "\"??\"",
	TSemicolon:                         "\";\"",
	TSlash:                             "\"/\"",
	TTilde:                             "\"~\"",
	// Assignments
	TAmpersandAmpersandEquals:                "\"&&=\"",
	TAmpersandEquals:                         "\"&=\"",
	TAsteriskAsteriskEquals:                  "\"**=\"",
	TAsteriskEquals:                          "\"*=\"",
	TBarBarEquals:                            "\"||=\"",
	TBarEquals:                               "\"|=\"",
	TCaretEquals:                             "\"^=\"",
	TEquals:                                  "\"=\"",
	TGreaterThanGreaterThanEquals:            "\">>=\"",
	TGreaterThanGreaterThanGreaterThanEquals: "\">>>=\"",
	TLessThanLessThanEquals:                  "\"<<=\"",
	TMinusEquals:                             "\"-=\"",
	TPercentEquals:                           "\"%=\"",
	TPlusEquals:                              "\"+=\"",
	TQuestionQuestionEquals:                  "\"??=\"",
	TSlashEquals:                             "\"/=\"",
	// Class-private fields and methods
	TPrivateIdentifier: "private identifier",
	// Identifiers
	TIdentifier:     "identifier",
	TEscapedKeyword: "escaped keyword",
	// Reserved words
	TBreak:      "\"break\"",
	TCase:       "\"case\"",
	TCatch:      "\"catch\"",
	TClass:      "\"class\"",
	TConst:      "\"const\"",
	TContinue:   "\"continue\"",
	TDebugger:   "\"debugger\"",
	TDefault:    "\"default\"",
	TDelete:     "\"delete\"",
	TDo:         "\"do\"",
	TElse:       "\"else\"",
	TEnum:       "\"enum\"",
	TExport:     "\"export\"",
	TExtends:    "\"extends\"",
	TFalse:      "\"false\"",
	TFinally:    "\"finally\"",
	TFor:        "\"for\"",
	TFunction:   "\"function\"",
	TIf:         "\"if\"",
	TImport:     "\"import\"",
	TIn:         "\"in\"",
	TInstanceof: "\"instanceof\"",
	TNew:        "\"new\"",
	TNull:       "\"null\"",
	TReturn:     "\"return\"",
	TSuper:      "\"super\"",
	TSwitch:     "\"switch\"",
	TThis:       "\"this\"",
	TThrow:      "\"throw\"",
	TTrue:       "\"true\"",
	TTry:        "\"try\"",
	TTypeof:     "\"typeof\"",
	TVar:        "\"var\"",
	TVoid:       "\"void\"",
	TWhile:      "\"while\"",
	TWith:       "\"with\"",
}
// This is from https://github.com/microsoft/TypeScript/blob/master/src/compiler/transformers/jsx.ts
//
// jsxEntity maps HTML named character references (the text between "&" and
// ";") to their Unicode code points, for decoding entities that appear in
// JSX text.
var jsxEntity = map[string]rune{
	"quot":     0x0022,
	"amp":      0x0026,
	"apos":     0x0027,
	"lt":       0x003C,
	"gt":       0x003E,
	"nbsp":     0x00A0,
	"iexcl":    0x00A1,
	"cent":     0x00A2,
	"pound":    0x00A3,
	"curren":   0x00A4,
	"yen":      0x00A5,
	"brvbar":   0x00A6,
	"sect":     0x00A7,
	"uml":      0x00A8,
	"copy":     0x00A9,
	"ordf":     0x00AA,
	"laquo":    0x00AB,
	"not":      0x00AC,
	"shy":      0x00AD,
	"reg":      0x00AE,
	"macr":     0x00AF,
	"deg":      0x00B0,
	"plusmn":   0x00B1,
	"sup2":     0x00B2,
	"sup3":     0x00B3,
	"acute":    0x00B4,
	"micro":    0x00B5,
	"para":     0x00B6,
	"middot":   0x00B7,
	"cedil":    0x00B8,
	"sup1":     0x00B9,
	"ordm":     0x00BA,
	"raquo":    0x00BB,
	"frac14":   0x00BC,
	"frac12":   0x00BD,
	"frac34":   0x00BE,
	"iquest":   0x00BF,
	"Agrave":   0x00C0,
	"Aacute":   0x00C1,
	"Acirc":    0x00C2,
	"Atilde":   0x00C3,
	"Auml":     0x00C4,
	"Aring":    0x00C5,
	"AElig":    0x00C6,
	"Ccedil":   0x00C7,
	"Egrave":   0x00C8,
	"Eacute":   0x00C9,
	"Ecirc":    0x00CA,
	"Euml":     0x00CB,
	"Igrave":   0x00CC,
	"Iacute":   0x00CD,
	"Icirc":    0x00CE,
	"Iuml":     0x00CF,
	"ETH":      0x00D0,
	"Ntilde":   0x00D1,
	"Ograve":   0x00D2,
	"Oacute":   0x00D3,
	"Ocirc":    0x00D4,
	"Otilde":   0x00D5,
	"Ouml":     0x00D6,
	"times":    0x00D7,
	"Oslash":   0x00D8,
	"Ugrave":   0x00D9,
	"Uacute":   0x00DA,
	"Ucirc":    0x00DB,
	"Uuml":     0x00DC,
	"Yacute":   0x00DD,
	"THORN":    0x00DE,
	"szlig":    0x00DF,
	"agrave":   0x00E0,
	"aacute":   0x00E1,
	"acirc":    0x00E2,
	"atilde":   0x00E3,
	"auml":     0x00E4,
	"aring":    0x00E5,
	"aelig":    0x00E6,
	"ccedil":   0x00E7,
	"egrave":   0x00E8,
	"eacute":   0x00E9,
	"ecirc":    0x00EA,
	"euml":     0x00EB,
	"igrave":   0x00EC,
	"iacute":   0x00ED,
	"icirc":    0x00EE,
	"iuml":     0x00EF,
	"eth":      0x00F0,
	"ntilde":   0x00F1,
	"ograve":   0x00F2,
	"oacute":   0x00F3,
	"ocirc":    0x00F4,
	"otilde":   0x00F5,
	"ouml":     0x00F6,
	"divide":   0x00F7,
	"oslash":   0x00F8,
	"ugrave":   0x00F9,
	"uacute":   0x00FA,
	"ucirc":    0x00FB,
	"uuml":     0x00FC,
	"yacute":   0x00FD,
	"thorn":    0x00FE,
	"yuml":     0x00FF,
	"OElig":    0x0152,
	"oelig":    0x0153,
	"Scaron":   0x0160,
	"scaron":   0x0161,
	"Yuml":     0x0178,
	"fnof":     0x0192,
	"circ":     0x02C6,
	"tilde":    0x02DC,
	"Alpha":    0x0391,
	"Beta":     0x0392,
	"Gamma":    0x0393,
	"Delta":    0x0394,
	"Epsilon":  0x0395,
	"Zeta":     0x0396,
	"Eta":      0x0397,
	"Theta":    0x0398,
	"Iota":     0x0399,
	"Kappa":    0x039A,
	"Lambda":   0x039B,
	"Mu":       0x039C,
	"Nu":       0x039D,
	"Xi":       0x039E,
	"Omicron":  0x039F,
	"Pi":       0x03A0,
	"Rho":      0x03A1,
	"Sigma":    0x03A3,
	"Tau":      0x03A4,
	"Upsilon":  0x03A5,
	"Phi":      0x03A6,
	"Chi":      0x03A7,
	"Psi":      0x03A8,
	"Omega":    0x03A9,
	"alpha":    0x03B1,
	"beta":     0x03B2,
	"gamma":    0x03B3,
	"delta":    0x03B4,
	"epsilon":  0x03B5,
	"zeta":     0x03B6,
	"eta":      0x03B7,
	"theta":    0x03B8,
	"iota":     0x03B9,
	"kappa":    0x03BA,
	"lambda":   0x03BB,
	"mu":       0x03BC,
	"nu":       0x03BD,
	"xi":       0x03BE,
	"omicron":  0x03BF,
	"pi":       0x03C0,
	"rho":      0x03C1,
	"sigmaf":   0x03C2,
	"sigma":    0x03C3,
	"tau":      0x03C4,
	"upsilon":  0x03C5,
	"phi":      0x03C6,
	"chi":      0x03C7,
	"psi":      0x03C8,
	"omega":    0x03C9,
	"thetasym": 0x03D1,
	"upsih":    0x03D2,
	"piv":      0x03D6,
	"ensp":     0x2002,
	"emsp":     0x2003,
	"thinsp":   0x2009,
	"zwnj":     0x200C,
	"zwj":      0x200D,
	"lrm":      0x200E,
	"rlm":      0x200F,
	"ndash":    0x2013,
	"mdash":    0x2014,
	"lsquo":    0x2018,
	"rsquo":    0x2019,
	"sbquo":    0x201A,
	"ldquo":    0x201C,
	"rdquo":    0x201D,
	"bdquo":    0x201E,
	"dagger":   0x2020,
	"Dagger":   0x2021,
	"bull":     0x2022,
	"hellip":   0x2026,
	"permil":   0x2030,
	"prime":    0x2032,
	"Prime":    0x2033,
	"lsaquo":   0x2039,
	"rsaquo":   0x203A,
	"oline":    0x203E,
	"frasl":    0x2044,
	"euro":     0x20AC,
	"image":    0x2111,
	"weierp":   0x2118,
	"real":     0x211C,
	"trade":    0x2122,
	"alefsym":  0x2135,
	"larr":     0x2190,
	"uarr":     0x2191,
	"rarr":     0x2192,
	"darr":     0x2193,
	"harr":     0x2194,
	"crarr":    0x21B5,
	"lArr":     0x21D0,
	"uArr":     0x21D1,
	"rArr":     0x21D2,
	"dArr":     0x21D3,
	"hArr":     0x21D4,
	"forall":   0x2200,
	"part":     0x2202,
	"exist":    0x2203,
	"empty":    0x2205,
	"nabla":    0x2207,
	"isin":     0x2208,
	"notin":    0x2209,
	"ni":       0x220B,
	"prod":     0x220F,
	"sum":      0x2211,
	"minus":    0x2212,
	"lowast":   0x2217,
	"radic":    0x221A,
	"prop":     0x221D,
	"infin":    0x221E,
	"ang":      0x2220,
	"and":      0x2227,
	"or":       0x2228,
	"cap":      0x2229,
	"cup":      0x222A,
	"int":      0x222B,
	"there4":   0x2234,
	"sim":      0x223C,
	"cong":     0x2245,
	"asymp":    0x2248,
	"ne":       0x2260,
	"equiv":    0x2261,
	"le":       0x2264,
	"ge":       0x2265,
	"sub":      0x2282,
	"sup":      0x2283,
	"nsub":     0x2284,
	"sube":     0x2286,
	"supe":     0x2287,
	"oplus":    0x2295,
	"otimes":   0x2297,
	"perp":     0x22A5,
	"sdot":     0x22C5,
	"lceil":    0x2308,
	"rceil":    0x2309,
	"lfloor":   0x230A,
	"rfloor":   0x230B,
	"lang":     0x2329,
	"rang":     0x232A,
	"loz":      0x25CA,
	"spades":   0x2660,
	"clubs":    0x2663,
	"hearts":   0x2665,
	"diams":    0x2666,
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/js_lexer/js_lexer.go | internal/js_lexer/js_lexer.go | package js_lexer
// The lexer converts a source file to a stream of tokens. Unlike many
// compilers, esbuild does not run the lexer to completion before the parser is
// started. Instead, the lexer is called repeatedly by the parser as the parser
// parses the file. This is because many tokens are context-sensitive and need
// high-level information from the parser. Examples are regular expression
// literals and JSX elements.
//
// For efficiency, the text associated with textual tokens is stored in two
// separate ways depending on the token. Identifiers use UTF-8 encoding which
// allows them to be slices of the input file without allocating extra memory.
// Strings use UTF-16 encoding so they can represent unicode surrogates
// accurately.
import (
"fmt"
"strconv"
"strings"
"unicode/utf8"
"github.com/evanw/esbuild/internal/ast"
"github.com/evanw/esbuild/internal/config"
"github.com/evanw/esbuild/internal/helpers"
"github.com/evanw/esbuild/internal/js_ast"
"github.com/evanw/esbuild/internal/logger"
)
// T is the kind of a lexer token. The declaration order below is meaningful:
// IsAssign() relies on the assignment tokens being contiguous, and
// IsIdentifierOrKeyword() relies on identifiers and reserved words coming
// last.
type T uint8

// If you add a new token, remember to add it to "tokenToString" too
const (
	TEndOfFile T = iota
	TSyntaxError

	// "#!/usr/bin/env node"
	THashbang

	// Literals
	TNoSubstitutionTemplateLiteral // Contents are in lexer.StringLiteral ([]uint16)
	TNumericLiteral                // Contents are in lexer.Number (float64)
	TStringLiteral                 // Contents are in lexer.StringLiteral ([]uint16)
	TBigIntegerLiteral             // Contents are in lexer.Identifier (string)

	// Pseudo-literals
	TTemplateHead   // Contents are in lexer.StringLiteral ([]uint16)
	TTemplateMiddle // Contents are in lexer.StringLiteral ([]uint16)
	TTemplateTail   // Contents are in lexer.StringLiteral ([]uint16)

	// Punctuation
	TAmpersand
	TAmpersandAmpersand
	TAsterisk
	TAsteriskAsterisk
	TAt
	TBar
	TBarBar
	TCaret
	TCloseBrace
	TCloseBracket
	TCloseParen
	TColon
	TComma
	TDot
	TDotDotDot
	TEqualsEquals
	TEqualsEqualsEquals
	TEqualsGreaterThan
	TExclamation
	TExclamationEquals
	TExclamationEqualsEquals
	TGreaterThan
	TGreaterThanEquals
	TGreaterThanGreaterThan
	TGreaterThanGreaterThanGreaterThan
	TLessThan
	TLessThanEquals
	TLessThanLessThan
	TMinus
	TMinusMinus
	TOpenBrace
	TOpenBracket
	TOpenParen
	TPercent
	TPlus
	TPlusPlus
	TQuestion
	TQuestionDot
	TQuestionQuestion
	TSemicolon
	TSlash
	TTilde

	// Assignments (keep in sync with IsAssign() below)
	TAmpersandAmpersandEquals
	TAmpersandEquals
	TAsteriskAsteriskEquals
	TAsteriskEquals
	TBarBarEquals
	TBarEquals
	TCaretEquals
	TEquals
	TGreaterThanGreaterThanEquals
	TGreaterThanGreaterThanGreaterThanEquals
	TLessThanLessThanEquals
	TMinusEquals
	TPercentEquals
	TPlusEquals
	TQuestionQuestionEquals
	TSlashEquals

	// Class-private fields and methods
	TPrivateIdentifier

	// Identifiers
	TIdentifier     // Contents are in lexer.Identifier (string)
	TEscapedKeyword // A keyword that has been escaped as an identifier

	// Reserved words
	TBreak
	TCase
	TCatch
	TClass
	TConst
	TContinue
	TDebugger
	TDefault
	TDelete
	TDo
	TElse
	TEnum
	TExport
	TExtends
	TFalse
	TFinally
	TFor
	TFunction
	TIf
	TImport
	TIn
	TInstanceof
	TNew
	TNull
	TReturn
	TSuper
	TSwitch
	TThis
	TThrow
	TTrue
	TTry
	TTypeof
	TVar
	TVoid
	TWhile
	TWith
)
// IsAssign returns true for assignment operator tokens. It relies on the
// assignment tokens being declared contiguously in the constant block above,
// from TAmpersandAmpersandEquals through TSlashEquals.
func (t T) IsAssign() bool {
	return !(t < TAmpersandAmpersandEquals || t > TSlashEquals)
}
// Keywords maps the source text of each JavaScript reserved word to its
// token kind. Identifiers are looked up here to decide whether they should
// be lexed as keywords instead.
var Keywords = map[string]T{
	// Reserved words
	"break":      TBreak,
	"case":       TCase,
	"catch":      TCatch,
	"class":      TClass,
	"const":      TConst,
	"continue":   TContinue,
	"debugger":   TDebugger,
	"default":    TDefault,
	"delete":     TDelete,
	"do":         TDo,
	"else":       TElse,
	"enum":       TEnum,
	"export":     TExport,
	"extends":    TExtends,
	"false":      TFalse,
	"finally":    TFinally,
	"for":        TFor,
	"function":   TFunction,
	"if":         TIf,
	"import":     TImport,
	"in":         TIn,
	"instanceof": TInstanceof,
	"new":        TNew,
	"null":       TNull,
	"return":     TReturn,
	"super":      TSuper,
	"switch":     TSwitch,
	"this":       TThis,
	"throw":      TThrow,
	"true":       TTrue,
	"try":        TTry,
	"typeof":     TTypeof,
	"var":        TVar,
	"void":       TVoid,
	"while":      TWhile,
	"with":       TWith,
}

// StrictModeReservedWords is the set of identifiers that are additionally
// reserved in strict mode code (they lex as plain identifiers but must be
// rejected by the parser in strict contexts).
var StrictModeReservedWords = map[string]bool{
	"implements": true,
	"interface":  true,
	"let":        true,
	"package":    true,
	"private":    true,
	"protected":  true,
	"public":     true,
	"static":     true,
	"yield":      true,
}
// This represents a string that is maybe a substring of the current file's
// "source.Contents" string. The point of doing this is that if it is a
// substring (the common case), then we can represent it more efficiently.
//
// For compactness and performance, the JS AST represents identifiers as a
// symbol reference instead of as a string. However, we need to track the
// string between the first pass and the second pass because the string is only
// resolved to a symbol in the second pass. To avoid allocating extra memory
// to store the string, we instead use an index+length slice of the original JS
// source code. That index is what "Start" represents here. The length is just
// "len(String)".
//
// Set "Start" to invalid (the zero value) if "String" is not a substring of
// "source.Contents". This is the case for escaped identifiers. For example,
// the identifier "fo\u006f" would be "MaybeSubstring{String: "foo"}". It's
// critical that any code changing the "String" also set "Start" to the zero
// value, which is best done by just overwriting the whole "MaybeSubstring".
//
// The substring range used to be recovered automatically from the string but
// that relied on the Go "unsafe" package which can hypothetically break under
// certain Go compiler optimization passes, so it has been removed and replaced
// with this more error-prone approach that doesn't use "unsafe".
type MaybeSubstring struct {
	// The string contents, always valid
	String string
	// Byte offset into "source.Contents", or invalid when not a substring
	Start ast.Index32
}
// Lexer holds all state for tokenizing one source file. The parser drives it
// by calling "Next" (and the JSX/template variants) repeatedly; the current
// token is exposed through exported fields such as "Token" and "Identifier".
type Lexer struct {
	LegalCommentsBeforeToken     []logger.Range
	CommentsBeforeToken          []logger.Range
	AllComments                  []logger.Range
	Identifier                   MaybeSubstring
	log                          logger.Log
	source                       logger.Source
	JSXFactoryPragmaComment      logger.Span
	JSXFragmentPragmaComment     logger.Span
	JSXRuntimePragmaComment      logger.Span
	JSXImportSourcePragmaComment logger.Span
	SourceMappingURL             logger.Span
	BadArrowInTSXSuggestion      string

	// Escape sequences in string literals are decoded lazily because they are
	// not interpreted inside tagged templates, and tagged templates can contain
	// invalid escape sequences. If the decoded array is nil, the encoded value
	// should be passed to "tryToDecodeEscapeSequences" first.
	decodedStringLiteralOrNil []uint16
	encodedStringLiteralText  string

	errorSuffix string
	tracker     logger.LineColumnTracker

	encodedStringLiteralStart int

	Number                          float64
	current                         int
	start                           int
	end                             int
	ApproximateNewlineCount         int
	CouldBeBadArrowInTSX            int
	BadArrowInTSXRange              logger.Range
	LegacyOctalLoc                  logger.Loc
	AwaitKeywordLoc                 logger.Loc
	FnOrArrowStartLoc               logger.Loc
	PreviousBackslashQuoteInJSX     logger.Range
	LegacyHTMLCommentRange          logger.Range
	codePoint                       rune
	prevErrorLoc                    logger.Loc
	json                            JSONFlavor
	Token                           T
	ts                              config.TSOptions
	HasNewlineBefore                bool
	HasCommentBefore                CommentBefore
	IsLegacyOctalLiteral            bool
	PrevTokenWasAwaitKeyword        bool
	rescanCloseBraceAsTemplateToken bool
	forGlobalName                   bool

	// The log is disabled during speculative scans that may backtrack
	IsLogDisabled bool
}

// CommentBefore is a bit set recording which special comment annotations
// (e.g. "/* @__PURE__ */") appeared before the current token.
type CommentBefore uint8

const (
	PureCommentBefore CommentBefore = 1 << iota
	KeyCommentBefore
	NoSideEffectsCommentBefore
)

// LexerPanic is thrown (via panic) after a lexer error has been reported so
// that lexing can be aborted and recovered from at a higher level.
type LexerPanic struct{}
// NewLexer creates a lexer for regular JavaScript/TypeScript source code and
// advances it to the first token.
func NewLexer(log logger.Log, source logger.Source, ts config.TSOptions) Lexer {
	lexer := Lexer{log: log, source: source, ts: ts, json: NotJSON}
	lexer.tracker = logger.MakeLineColumnTracker(&source)
	// A start of -1 means "no location yet"
	lexer.prevErrorLoc = logger.Loc{Start: -1}
	lexer.FnOrArrowStartLoc = logger.Loc{Start: -1}
	lexer.step()
	lexer.Next()
	return lexer
}
// NewLexerGlobalName creates a lexer for parsing a global name expression
// (the "--global-name" setting) and advances it to the first token.
func NewLexerGlobalName(log logger.Log, source logger.Source) Lexer {
	lexer := Lexer{log: log, source: source, forGlobalName: true, json: NotJSON}
	lexer.tracker = logger.MakeLineColumnTracker(&source)
	// A start of -1 means "no location yet"
	lexer.prevErrorLoc = logger.Loc{Start: -1}
	lexer.FnOrArrowStartLoc = logger.Loc{Start: -1}
	lexer.step()
	lexer.Next()
	return lexer
}
// JSONFlavor selects which dialect of JSON the lexer should accept.
type JSONFlavor uint8

const (
	// Specification: https://json.org/
	JSON JSONFlavor = iota

	// TypeScript's JSON superset is not documented but appears to allow:
	// - Comments: https://github.com/microsoft/TypeScript/issues/4987
	// - Trailing commas
	// - Full JS number syntax
	TSConfigJSON

	// This is used by the JavaScript lexer
	NotJSON
)
// NewLexerJSON creates a lexer for a JSON dialect selected by "json" and
// advances it to the first token. The "errorSuffix" string is appended to
// diagnostics (e.g. " in JSON") to give them more context.
func NewLexerJSON(log logger.Log, source logger.Source, json JSONFlavor, errorSuffix string) Lexer {
	lexer := Lexer{log: log, source: source, errorSuffix: errorSuffix, json: json}
	lexer.tracker = logger.MakeLineColumnTracker(&source)
	// A start of -1 means "no location yet"
	lexer.prevErrorLoc = logger.Loc{Start: -1}
	lexer.FnOrArrowStartLoc = logger.Loc{Start: -1}
	lexer.step()
	lexer.Next()
	return lexer
}
// Loc returns the location of the start of the current token.
func (lexer *Lexer) Loc() logger.Loc {
	return logger.Loc{Start: int32(lexer.start)}
}

// Range returns the source range covering the current token.
func (lexer *Lexer) Range() logger.Range {
	return logger.Range{Loc: logger.Loc{Start: int32(lexer.start)}, Len: int32(lexer.end - lexer.start)}
}

// Raw returns the raw source text of the current token.
func (lexer *Lexer) Raw() string {
	return lexer.source.Contents[lexer.start:lexer.end]
}

// rawIdentifier returns the current token's text as a MaybeSubstring that
// remembers its offset into the source file.
func (lexer *Lexer) rawIdentifier() MaybeSubstring {
	return MaybeSubstring{lexer.Raw(), ast.MakeIndex32(uint32(lexer.start))}
}
// StringLiteral returns the current string literal's contents as UTF-16 code
// units. Escape sequences are decoded lazily on first access (they are left
// encoded while lexing because tagged templates may legally contain invalid
// escapes); a decoding failure is reported as a syntax error here.
func (lexer *Lexer) StringLiteral() []uint16 {
	if lexer.decodedStringLiteralOrNil == nil {
		// Lazily decode escape sequences if needed
		if decoded, ok, end := lexer.tryToDecodeEscapeSequences(lexer.encodedStringLiteralStart, lexer.encodedStringLiteralText, true /* reportErrors */); !ok {
			lexer.end = end
			lexer.SyntaxError()
		} else {
			lexer.decodedStringLiteralOrNil = decoded
		}
	}
	return lexer.decodedStringLiteralOrNil
}
// CookedAndRawTemplateContents returns the "cooked" (escape-decoded, may be
// nil on invalid escapes) and "raw" contents of the current template literal
// token, with line terminators normalized per the ECMAScript specification.
func (lexer *Lexer) CookedAndRawTemplateContents() ([]uint16, string) {
	var raw string

	switch lexer.Token {
	case TNoSubstitutionTemplateLiteral, TTemplateTail:
		// "`x`" or "}x`"
		raw = lexer.source.Contents[lexer.start+1 : lexer.end-1]

	case TTemplateHead, TTemplateMiddle:
		// "`x${" or "}x${"
		raw = lexer.source.Contents[lexer.start+1 : lexer.end-2]
	}

	if strings.IndexByte(raw, '\r') != -1 {
		// From the specification:
		//
		// 11.8.6.1 Static Semantics: TV and TRV
		//
		// TV excludes the code units of LineContinuation while TRV includes
		// them. <CR><LF> and <CR> LineTerminatorSequences are normalized to
		// <LF> for both TV and TRV. An explicit EscapeSequence is needed to
		// include a <CR> or <CR><LF> sequence.
		bytes := []byte(raw)
		end := 0
		i := 0

		for i < len(bytes) {
			c := bytes[i]
			i++

			if c == '\r' {
				// Convert '\r\n' into '\n'
				if i < len(bytes) && bytes[i] == '\n' {
					i++
				}

				// Convert '\r' into '\n'
				c = '\n'
			}

			bytes[end] = c
			end++
		}

		raw = string(bytes[:end])
	}

	// This will return nil on failure, which will become "undefined" for the tag
	cooked, _, _ := lexer.tryToDecodeEscapeSequences(lexer.start+1, raw, false /* reportErrors */)
	return cooked, raw
}
// IsIdentifierOrKeyword returns true if the current token is an identifier
// or a reserved word. This relies on identifiers and keywords being declared
// last in the token constant block, starting at TIdentifier.
func (lexer *Lexer) IsIdentifierOrKeyword() bool {
	return lexer.Token >= TIdentifier
}

// IsContextualKeyword returns true if the current token is an identifier
// whose raw text is exactly "text" (e.g. "of", "as", "async").
func (lexer *Lexer) IsContextualKeyword(text string) bool {
	return lexer.Token == TIdentifier && lexer.Raw() == text
}
// ExpectContextualKeyword consumes the current token if it's an identifier
// with exactly the text "text", and reports an error (aborting via panic)
// otherwise.
func (lexer *Lexer) ExpectContextualKeyword(text string) {
	if lexer.IsContextualKeyword(text) {
		lexer.Next()
		return
	}
	lexer.ExpectedString(fmt.Sprintf("%q", text))
}
// SyntaxError reports an error at the current end position describing the
// unexpected character (or the unexpected end of the file), then aborts
// lexing by panicking with LexerPanic.
func (lexer *Lexer) SyntaxError() {
	loc := logger.Loc{Start: int32(lexer.end)}
	message := "Unexpected end of file"
	if lexer.end < len(lexer.source.Contents) {
		c, _ := utf8.DecodeRuneInString(lexer.source.Contents[lexer.end:])
		switch {
		case c < 0x20:
			// Control characters are printed as hex escapes
			message = fmt.Sprintf("Syntax error \"\\x%02X\"", c)
		case c >= 0x80:
			// Non-ASCII characters are printed as unicode escapes
			message = fmt.Sprintf("Syntax error \"\\u{%x}\"", c)
		case c == '"':
			// Use single quotes so the double quote doesn't need escaping
			message = "Syntax error '\"'"
		default:
			message = fmt.Sprintf("Syntax error \"%c\"", c)
		}
	}
	lexer.addRangeError(logger.Range{Loc: loc}, message)
	panic(LexerPanic{})
}
// ExpectedString reports an "Expected X but found Y" error for the current
// token and aborts lexing via panic. As a special case it produces a
// friendlier diagnostic when the real problem is "await" used outside an
// "async" function. When "text" is a quoted string, its unquoted contents
// are attached as a fix-it suggestion.
func (lexer *Lexer) ExpectedString(text string) {
	// Provide a friendly error message about "await" without "async"
	if lexer.PrevTokenWasAwaitKeyword {
		var notes []logger.MsgData
		if lexer.FnOrArrowStartLoc.Start != -1 {
			note := lexer.tracker.MsgData(logger.Range{Loc: lexer.FnOrArrowStartLoc},
				"Consider adding the \"async\" keyword here:")
			note.Location.Suggestion = "async"
			notes = []logger.MsgData{note}
		}
		lexer.AddRangeErrorWithNotes(RangeOfIdentifier(lexer.source, lexer.AwaitKeywordLoc),
			"\"await\" can only be used inside an \"async\" function",
			notes)
		panic(LexerPanic{})
	}
	found := fmt.Sprintf("%q", lexer.Raw())
	if lexer.start == len(lexer.source.Contents) {
		found = "end of file"
	}
	suggestion := ""
	if strings.HasPrefix(text, "\"") && strings.HasSuffix(text, "\"") {
		suggestion = text[1 : len(text)-1]
	}
	lexer.addRangeErrorWithSuggestion(lexer.Range(), fmt.Sprintf("Expected %s%s but found %s", text, lexer.errorSuffix, found), suggestion)
	panic(LexerPanic{})
}
// Expected reports that "token" was expected at the current position and
// aborts lexing via panic. Tokens with no display name fall back to a
// generic "Unexpected ..." diagnostic.
func (lexer *Lexer) Expected(token T) {
	text, ok := tokenToString[token]
	if !ok {
		// Unexpected panics, so control never reaches the call below
		lexer.Unexpected()
	}
	lexer.ExpectedString(text)
}
// Unexpected reports an "Unexpected ..." error for the current token and
// aborts lexing via panic.
func (lexer *Lexer) Unexpected() {
	found := fmt.Sprintf("%q", lexer.Raw())
	if lexer.start == len(lexer.source.Contents) {
		found = "end of file"
	}
	lexer.addRangeError(lexer.Range(), fmt.Sprintf("Unexpected %s%s", found, lexer.errorSuffix))
	panic(LexerPanic{})
}

// Expect consumes the current token if it matches "token" and reports an
// error (aborting via panic) otherwise.
func (lexer *Lexer) Expect(token T) {
	if lexer.Token != token {
		lexer.Expected(token)
	}
	lexer.Next()
}

// ExpectOrInsertSemicolon implements automatic semicolon insertion: a real
// ";" is consumed, while a newline, "}", or end of file lets the statement
// end implicitly. Any other token is a hard error (via Expect).
func (lexer *Lexer) ExpectOrInsertSemicolon() {
	if lexer.Token == TSemicolon || (!lexer.HasNewlineBefore &&
		lexer.Token != TCloseBrace && lexer.Token != TEndOfFile) {
		lexer.Expect(TSemicolon)
	}
}
// This parses a single "<" token. If that is the first part of a longer token,
// this function splits off the first "<" and leaves the remainder of the
// current token as another, smaller token. For example, "<<=" becomes "<=".
//
// The split is done by bumping "lexer.start" past the consumed "<" and
// downgrading "lexer.Token" to the remaining operator, so no rescanning of
// the source is needed.
func (lexer *Lexer) ExpectLessThan(isInsideJSXElement bool) {
	switch lexer.Token {
	case TLessThan:
		if isInsideJSXElement {
			lexer.NextInsideJSXElement()
		} else {
			lexer.Next()
		}

	case TLessThanEquals:
		lexer.Token = TEquals
		lexer.start++
		// The leftover "=" may combine with what follows ("=>", "==", "===")
		lexer.maybeExpandEquals()

	case TLessThanLessThan:
		lexer.Token = TLessThan
		lexer.start++

	case TLessThanLessThanEquals:
		lexer.Token = TLessThanEquals
		lexer.start++

	default:
		lexer.Expected(TLessThan)
	}
}
// This parses a single ">" token. If that is the first part of a longer token,
// this function splits off the first ">" and leaves the remainder of the
// current token as another, smaller token. For example, ">>=" becomes ">=".
//
// As with ExpectLessThan, the split is done by bumping "lexer.start" and
// downgrading "lexer.Token" rather than rescanning the source.
func (lexer *Lexer) ExpectGreaterThan(isInsideJSXElement bool) {
	switch lexer.Token {
	case TGreaterThan:
		if isInsideJSXElement {
			lexer.NextInsideJSXElement()
		} else {
			lexer.Next()
		}

	case TGreaterThanEquals:
		lexer.Token = TEquals
		lexer.start++
		// The leftover "=" may combine with what follows ("=>", "==", "===")
		lexer.maybeExpandEquals()

	case TGreaterThanGreaterThan:
		lexer.Token = TGreaterThan
		lexer.start++

	case TGreaterThanGreaterThanEquals:
		lexer.Token = TGreaterThanEquals
		lexer.start++

	case TGreaterThanGreaterThanGreaterThan:
		lexer.Token = TGreaterThanGreaterThan
		lexer.start++

	case TGreaterThanGreaterThanGreaterThanEquals:
		lexer.Token = TGreaterThanGreaterThanEquals
		lexer.start++

	default:
		lexer.Expected(TGreaterThan)
	}
}
// maybeExpandEquals is called after token splitting leaves a bare "="
// (see ExpectLessThan/ExpectGreaterThan). The "=" may join with the
// following source characters to form a longer operator, so check for
// "=>", "==", and "===".
func (lexer *Lexer) maybeExpandEquals() {
	switch lexer.codePoint {
	case '>':
		// "=" + ">" = "=>"
		lexer.Token = TEqualsGreaterThan
		lexer.step()

	case '=':
		// "=" + "=" = "=="
		lexer.Token = TEqualsEquals
		lexer.step()

		// Fix: the next source character must be checked via "lexer.codePoint",
		// not "lexer.Token". The old comparison "lexer.Token == '='" compared
		// the token enum against the rune '=' (it compiled only because T is an
		// integer type) and never detected the third "=", so "===" was lexed
		// as "==" followed by a stray "=".
		if lexer.codePoint == '=' {
			// "=" + "==" = "==="
			lexer.Token = TEqualsEqualsEquals
			lexer.step()
		}
	}
}
// RangeOfIdentifier returns the source range of the identifier starting at
// "loc", including a leading "#" for private names and any "\u...." escape
// sequences. If the text at "loc" doesn't start like an identifier (which
// can happen when minification turned a string into an identifier), it
// falls back to the range of the string literal at that location.
func RangeOfIdentifier(source logger.Source, loc logger.Loc) logger.Range {
	text := source.Contents[loc.Start:]
	if len(text) == 0 {
		return logger.Range{Loc: loc, Len: 0}
	}
	i := 0
	c, _ := utf8.DecodeRuneInString(text[i:])
	// Handle private names
	if c == '#' {
		i++
		c, _ = utf8.DecodeRuneInString(text[i:])
	}
	if js_ast.IsIdentifierStart(c) || c == '\\' {
		// Search for the end of the identifier
		for i < len(text) {
			c2, width2 := utf8.DecodeRuneInString(text[i:])
			if c2 == '\\' {
				i += width2
				// Skip over bracketed unicode escapes such as "\u{10000}"
				if i+2 < len(text) && text[i] == 'u' && text[i+1] == '{' {
					i += 2
					for i < len(text) {
						if text[i] == '}' {
							i++
							break
						}
						i++
					}
				}
			} else if !js_ast.IsIdentifierContinue(c2) {
				return logger.Range{Loc: loc, Len: int32(i)}
			} else {
				i += width2
			}
		}
	}
	// When minifying, this identifier may have originally been a string
	return source.RangeOfString(loc)
}
// KeyOrValue selects which portion of an import assertion/attribute entry a
// range should cover: just the key, just the value, or the full span.
type KeyOrValue uint8

const (
	KeyRange KeyOrValue = iota
	ValueRange
	KeyAndValueRange
)
// RangeOfImportAssertOrWith returns the source range covering the requested
// part of an import assertion/attribute entry: just the key, just the value,
// or the span from the start of the key through the end of the value string.
func RangeOfImportAssertOrWith(source logger.Source, assertOrWith ast.AssertOrWithEntry, which KeyOrValue) logger.Range {
	switch which {
	case KeyRange:
		return RangeOfIdentifier(source, assertOrWith.KeyLoc)
	case ValueRange:
		return source.RangeOfString(assertOrWith.ValueLoc)
	}

	// KeyAndValueRange: stretch from the key's start to the value's end
	start := RangeOfIdentifier(source, assertOrWith.KeyLoc).Loc
	end := source.RangeOfString(assertOrWith.ValueLoc).End()
	return logger.Range{Loc: start, Len: end - start.Start}
}
// ExpectJSXElementChild reports an error via lexer.Expected unless the
// current token matches "token", then advances using the JSX-child scanner.
func (lexer *Lexer) ExpectJSXElementChild(token T) {
	if token != lexer.Token {
		lexer.Expected(token)
	}
	lexer.NextJSXElementChild()
}
// NextJSXElementChild scans the next token while the parser is inside the
// children of a JSX element (between ">" and the next "<"). Only three things
// can start a distinct token here: "{" (an interpolated expression), "<" (a
// child element or closing tag), and end-of-file. Any other run of characters
// becomes one TStringLiteral token of raw JSX text.
func (lexer *Lexer) NextJSXElementChild() {
	lexer.HasNewlineBefore = false
	originalStart := lexer.end // Where the raw JSX text (if any) begins
	for {
		lexer.start = lexer.end
		lexer.Token = 0
		switch lexer.codePoint {
		case -1: // This indicates the end of the file
			lexer.Token = TEndOfFile
		case '{':
			lexer.step()
			lexer.Token = TOpenBrace
		case '<':
			lexer.step()
			lexer.Token = TLessThan
		default:
			// Anything else starts a run of raw JSX text
			needsFixing := false
		stringLiteral:
			for {
				switch lexer.codePoint {
				case -1, '{', '<':
					// Stop when the string ends
					break stringLiteral
				case '&', '\r', '\n', '\u2028', '\u2029':
					// This needs fixing if it has an entity or if it's a multi-line string
					needsFixing = true
					lexer.step()
				case '}', '>':
					// These technically aren't valid JSX: https://facebook.github.io/jsx/
					//
					// JSXTextCharacter :
					//   * SourceCharacter but not one of {, <, > or }
					//
					var replacement string
					if lexer.codePoint == '}' {
						replacement = "{'}'}"
					} else {
						replacement = "{'>'}"
					}
					msg := logger.Msg{
						Kind: logger.Error,
						Data: lexer.tracker.MsgData(logger.Range{Loc: logger.Loc{Start: int32(lexer.end)}, Len: 1},
							fmt.Sprintf("The character \"%c\" is not valid inside a JSX element", lexer.codePoint)),
					}
					// Attempt to provide a better error message if this looks like an arrow function
					if lexer.CouldBeBadArrowInTSX > 0 && lexer.codePoint == '>' && lexer.source.Contents[lexer.end-1] == '=' {
						msg.Notes = []logger.MsgData{lexer.tracker.MsgData(lexer.BadArrowInTSXRange,
							"TypeScript's TSX syntax interprets arrow functions with a single generic type parameter as an opening JSX element. "+
								"If you want it to be interpreted as an arrow function instead, you need to add a trailing comma after the type parameter to disambiguate:")}
						msg.Notes[0].Location.Suggestion = lexer.BadArrowInTSXSuggestion
					} else {
						msg.Notes = []logger.MsgData{{Text: fmt.Sprintf("Did you mean to escape it as %q instead?", replacement)}}
						msg.Data.Location.Suggestion = replacement
						if !lexer.ts.Parse {
							// TypeScript treats this as an error but Babel doesn't treat this
							// as an error yet, so allow this in JS for now. Babel version 8
							// was supposed to be released in 2021 but was never released. If
							// it's released in the future, this can be changed to an error too.
							//
							// More context:
							// * TypeScript change: https://github.com/microsoft/TypeScript/issues/36341
							// * Babel 8 change: https://github.com/babel/babel/issues/11042
							// * Babel 8 release: https://github.com/babel/babel/issues/10746
							//
							msg.Kind = logger.Warning
						}
					}
					lexer.log.AddMsg(msg)
					lexer.step()
				default:
					// Non-ASCII strings need the slow path
					if lexer.codePoint >= 0x80 {
						needsFixing = true
					}
					lexer.step()
				}
			}
			lexer.Token = TStringLiteral
			text := lexer.source.Contents[originalStart:lexer.end]
			if needsFixing {
				// Slow path: see fixWhitespaceAndDecodeJSXEntities
				lexer.decodedStringLiteralOrNil = fixWhitespaceAndDecodeJSXEntities(text)
			} else {
				// Fast path: text is pure ASCII here (no bytes >= 0x80), so each
				// byte maps 1:1 to a UTF-16 code unit
				n := len(text)
				copy := make([]uint16, n)
				for i := 0; i < n; i++ {
					copy[i] = uint16(text[i])
				}
				lexer.decodedStringLiteralOrNil = copy
			}
		}
		// Note: no branch above uses "continue", so this loop always runs
		// exactly once (the shape mirrors the other Next* scanners)
		break
	}
}
// ExpectInsideJSXElement reports an error via lexer.Expected unless the
// current token matches "token", then advances using the JSX-tag scanner.
func (lexer *Lexer) ExpectInsideJSXElement(token T) {
	if token != lexer.Token {
		lexer.Expected(token)
	}
	lexer.NextInsideJSXElement()
}
// NextInsideJSXElement scans the next token while the parser is inside a JSX
// element's tag (between "<" and the matching ">"). Token rules differ from
// regular JS here: identifiers may also contain "-", and string attribute
// values are taken verbatim except for HTML entities ("&...") — a backslash
// does not escape the closing quote.
func (lexer *Lexer) NextInsideJSXElement() {
	lexer.HasNewlineBefore = false
	for {
		lexer.start = lexer.end
		lexer.Token = 0
		switch lexer.codePoint {
		case -1: // This indicates the end of the file
			lexer.Token = TEndOfFile
		case '\r', '\n', '\u2028', '\u2029':
			lexer.step()
			lexer.HasNewlineBefore = true
			continue
		case '\t', ' ':
			lexer.step()
			continue
		case '.':
			lexer.step()
			lexer.Token = TDot
		case ':':
			lexer.step()
			lexer.Token = TColon
		case '=':
			lexer.step()
			lexer.Token = TEquals
		case '{':
			lexer.step()
			lexer.Token = TOpenBrace
		case '}':
			lexer.step()
			lexer.Token = TCloseBrace
		case '<':
			lexer.step()
			lexer.Token = TLessThan
		case '>':
			lexer.step()
			lexer.Token = TGreaterThan
		case '/':
			// '/' or '//' or '/* ... */'
			lexer.step()
			switch lexer.codePoint {
			case '/':
				// Single-line comment: skip to the end of the line
			singleLineComment:
				for {
					lexer.step()
					switch lexer.codePoint {
					case '\r', '\n', '\u2028', '\u2029':
						break singleLineComment
					case -1: // This indicates the end of the file
						break singleLineComment
					}
				}
				continue
			case '*':
				lexer.step()
				startRange := lexer.Range()
				// Multi-line comment: skip to "*/", reporting an error at EOF
			multiLineComment:
				for {
					switch lexer.codePoint {
					case '*':
						lexer.step()
						if lexer.codePoint == '/' {
							lexer.step()
							break multiLineComment
						}
					case '\r', '\n', '\u2028', '\u2029':
						lexer.step()
						lexer.HasNewlineBefore = true
					case -1: // This indicates the end of the file
						lexer.start = lexer.end
						lexer.AddRangeErrorWithNotes(logger.Range{Loc: lexer.Loc()}, "Expected \"*/\" to terminate multi-line comment",
							[]logger.MsgData{lexer.tracker.MsgData(startRange, "The multi-line comment starts here:")})
						panic(LexerPanic{})
					default:
						lexer.step()
					}
				}
				continue
			default:
				lexer.Token = TSlash
			}
		case '\'', '"':
			var backslash logger.Range
			quote := lexer.codePoint
			needsDecode := false
			lexer.step()
		stringLiteral:
			for {
				switch lexer.codePoint {
				case -1: // This indicates the end of the file
					lexer.SyntaxError()
				case '&':
					// An HTML entity requires the slow decoding path
					needsDecode = true
					lexer.step()
				case '\\':
					// JSX backslashes are not escapes; remember this one's range
					// (presumably consumed elsewhere for a diagnostic — see
					// PreviousBackslashQuoteInJSX below)
					backslash = logger.Range{Loc: logger.Loc{Start: int32(lexer.end)}, Len: 1}
					lexer.step()
					continue
				case quote:
					if backslash.Len > 0 {
						backslash.Len++
						lexer.PreviousBackslashQuoteInJSX = backslash
					}
					lexer.step()
					break stringLiteral
				default:
					// Non-ASCII strings need the slow path
					if lexer.codePoint >= 0x80 {
						needsDecode = true
					}
					lexer.step()
				}
				backslash = logger.Range{} // Only a directly-preceding backslash counts
			}
			lexer.Token = TStringLiteral
			text := lexer.source.Contents[lexer.start+1 : lexer.end-1] // Strip the quotes
			if needsDecode {
				// Slow path
				lexer.decodedStringLiteralOrNil = decodeJSXEntities([]uint16{}, text)
			} else {
				// Fast path: text is pure ASCII here, so each byte maps 1:1 to a
				// UTF-16 code unit
				n := len(text)
				copy := make([]uint16, n)
				for i := 0; i < n; i++ {
					copy[i] = uint16(text[i])
				}
				lexer.decodedStringLiteralOrNil = copy
			}
		default:
			// Check for unusual whitespace characters
			if js_ast.IsWhitespace(lexer.codePoint) {
				lexer.step()
				continue
			}
			if js_ast.IsIdentifierStart(lexer.codePoint) {
				lexer.step()
				// JSX identifiers may also contain "-" (e.g. "data-foo")
				for js_ast.IsIdentifierContinue(lexer.codePoint) || lexer.codePoint == '-' {
					lexer.step()
				}
				lexer.Identifier = lexer.rawIdentifier()
				lexer.Token = TIdentifier
				break
			}
			lexer.end = lexer.current
			lexer.Token = TSyntaxError
		}
		return
	}
}
func (lexer *Lexer) Next() {
lexer.HasNewlineBefore = lexer.end == 0
lexer.HasCommentBefore = 0
lexer.PrevTokenWasAwaitKeyword = false
lexer.LegalCommentsBeforeToken = lexer.LegalCommentsBeforeToken[:0]
lexer.CommentsBeforeToken = lexer.CommentsBeforeToken[:0]
for {
lexer.start = lexer.end
lexer.Token = 0
switch lexer.codePoint {
case -1: // This indicates the end of the file
lexer.Token = TEndOfFile
case '#':
if lexer.start == 0 && strings.HasPrefix(lexer.source.Contents, "#!") {
// "#!/usr/bin/env node"
lexer.Token = THashbang
hashbang:
for {
lexer.step()
switch lexer.codePoint {
case '\r', '\n', '\u2028', '\u2029':
break hashbang
case -1: // This indicates the end of the file
break hashbang
}
}
lexer.Identifier = lexer.rawIdentifier()
} else {
// "#foo"
lexer.step()
if lexer.codePoint == '\\' {
lexer.Identifier, _ = lexer.scanIdentifierWithEscapes(privateIdentifier)
} else {
if !js_ast.IsIdentifierStart(lexer.codePoint) {
lexer.SyntaxError()
}
lexer.step()
for js_ast.IsIdentifierContinue(lexer.codePoint) {
lexer.step()
}
if lexer.codePoint == '\\' {
lexer.Identifier, _ = lexer.scanIdentifierWithEscapes(privateIdentifier)
} else {
lexer.Identifier = lexer.rawIdentifier()
}
}
lexer.Token = TPrivateIdentifier
}
case '\r', '\n', '\u2028', '\u2029':
lexer.step()
lexer.HasNewlineBefore = true
continue
case '\t', ' ':
lexer.step()
continue
case '(':
lexer.step()
lexer.Token = TOpenParen
case ')':
lexer.step()
lexer.Token = TCloseParen
case '[':
lexer.step()
lexer.Token = TOpenBracket
case ']':
lexer.step()
lexer.Token = TCloseBracket
case '{':
lexer.step()
lexer.Token = TOpenBrace
case '}':
lexer.step()
lexer.Token = TCloseBrace
case ',':
lexer.step()
lexer.Token = TComma
case ':':
lexer.step()
lexer.Token = TColon
case ';':
lexer.step()
lexer.Token = TSemicolon
case '@':
lexer.step()
lexer.Token = TAt
case '~':
lexer.step()
lexer.Token = TTilde
case '?':
// '?' or '?.' or '??' or '??='
lexer.step()
switch lexer.codePoint {
case '?':
lexer.step()
switch lexer.codePoint {
case '=':
lexer.step()
lexer.Token = TQuestionQuestionEquals
default:
lexer.Token = TQuestionQuestion
}
case '.':
lexer.Token = TQuestion
current := lexer.current
contents := lexer.source.Contents
// Lookahead to disambiguate with 'a?.1:b'
if current < len(contents) {
c := contents[current]
if c < '0' || c > '9' {
lexer.step()
lexer.Token = TQuestionDot
}
}
default:
lexer.Token = TQuestion
}
case '%':
// '%' or '%='
lexer.step()
switch lexer.codePoint {
case '=':
lexer.step()
lexer.Token = TPercentEquals
default:
lexer.Token = TPercent
}
case '&':
// '&' or '&=' or '&&' or '&&='
lexer.step()
switch lexer.codePoint {
case '=':
lexer.step()
lexer.Token = TAmpersandEquals
case '&':
lexer.step()
switch lexer.codePoint {
case '=':
lexer.step()
lexer.Token = TAmpersandAmpersandEquals
default:
lexer.Token = TAmpersandAmpersand
}
default:
lexer.Token = TAmpersand
}
case '|':
// '|' or '|=' or '||' or '||='
lexer.step()
switch lexer.codePoint {
case '=':
lexer.step()
lexer.Token = TBarEquals
case '|':
lexer.step()
switch lexer.codePoint {
case '=':
lexer.step()
lexer.Token = TBarBarEquals
default:
lexer.Token = TBarBar
}
default:
lexer.Token = TBar
}
case '^':
// '^' or '^='
lexer.step()
switch lexer.codePoint {
case '=':
lexer.step()
lexer.Token = TCaretEquals
default:
lexer.Token = TCaret
}
case '+':
// '+' or '+=' or '++'
lexer.step()
switch lexer.codePoint {
case '=':
lexer.step()
lexer.Token = TPlusEquals
case '+':
lexer.step()
lexer.Token = TPlusPlus
default:
lexer.Token = TPlus
}
case '-':
// '-' or '-=' or '--' or '-->'
lexer.step()
switch lexer.codePoint {
case '=':
lexer.step()
lexer.Token = TMinusEquals
case '-':
lexer.step()
// Handle legacy HTML-style comments
if lexer.codePoint == '>' && lexer.HasNewlineBefore {
lexer.step()
lexer.LegacyHTMLCommentRange = lexer.Range()
lexer.log.AddID(logger.MsgID_JS_HTMLCommentInJS, logger.Warning, &lexer.tracker, lexer.Range(),
"Treating \"-->\" as the start of a legacy HTML single-line comment")
singleLineHTMLCloseComment:
for {
switch lexer.codePoint {
case '\r', '\n', '\u2028', '\u2029':
break singleLineHTMLCloseComment
case -1: // This indicates the end of the file
break singleLineHTMLCloseComment
}
lexer.step()
}
continue
}
lexer.Token = TMinusMinus
default:
lexer.Token = TMinus
if lexer.json == JSON && lexer.codePoint != '.' && (lexer.codePoint < '0' || lexer.codePoint > '9') {
lexer.Unexpected()
}
}
case '*':
// '*' or '*=' or '**' or '**='
lexer.step()
switch lexer.codePoint {
case '=':
lexer.step()
lexer.Token = TAsteriskEquals
case '*':
lexer.step()
switch lexer.codePoint {
case '=':
lexer.step()
lexer.Token = TAsteriskAsteriskEquals
default:
lexer.Token = TAsteriskAsterisk
}
default:
lexer.Token = TAsterisk
}
case '/':
// '/' or '/=' or '//' or '/* ... */'
lexer.step()
if lexer.forGlobalName {
lexer.Token = TSlash
break
}
switch lexer.codePoint {
case '=':
lexer.step()
lexer.Token = TSlashEquals
case '/':
singleLineComment:
for {
lexer.step()
switch lexer.codePoint {
case '\r', '\n', '\u2028', '\u2029':
break singleLineComment
case -1: // This indicates the end of the file
break singleLineComment
}
}
if lexer.json == JSON {
lexer.addRangeError(lexer.Range(), "JSON does not support comments")
}
lexer.scanCommentText()
continue
case '*':
lexer.step()
startRange := lexer.Range()
multiLineComment:
for {
switch lexer.codePoint {
case '*':
lexer.step()
if lexer.codePoint == '/' {
lexer.step()
break multiLineComment
}
case '\r', '\n', '\u2028', '\u2029':
lexer.step()
lexer.HasNewlineBefore = true
case -1: // This indicates the end of the file
lexer.start = lexer.end
lexer.AddRangeErrorWithNotes(logger.Range{Loc: lexer.Loc()}, "Expected \"*/\" to terminate multi-line comment",
[]logger.MsgData{lexer.tracker.MsgData(startRange, "The multi-line comment starts here:")})
panic(LexerPanic{})
default:
lexer.step()
}
}
if lexer.json == JSON {
lexer.addRangeError(lexer.Range(), "JSON does not support comments")
}
lexer.scanCommentText()
continue
default:
lexer.Token = TSlash
}
case '=':
// '=' or '=>' or '==' or '==='
lexer.step()
switch lexer.codePoint {
case '>':
lexer.step()
lexer.Token = TEqualsGreaterThan
case '=':
lexer.step()
switch lexer.codePoint {
case '=':
lexer.step()
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | true |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/js_lexer/js_lexer_test.go | internal/js_lexer/js_lexer_test.go | package js_lexer
import (
"fmt"
"math"
"strings"
"testing"
"unicode/utf8"
"github.com/evanw/esbuild/internal/config"
"github.com/evanw/esbuild/internal/helpers"
"github.com/evanw/esbuild/internal/logger"
"github.com/evanw/esbuild/internal/test"
)
// assertEqualStrings fails the test unless a == b. On failure, both strings
// are printed as sequences of "\u{...}" code points so invisible or
// look-alike characters are distinguishable in the failure message.
func assertEqualStrings(t *testing.T, a string, b string) {
	t.Helper()
	if a == b {
		return
	}

	// Render every rune as an explicit "\u{HEX}" escape
	pretty := func(text string) string {
		var sb strings.Builder
		sb.WriteByte('"')
		for i := 0; i < len(text); {
			r, size := utf8.DecodeRuneInString(text[i:])
			fmt.Fprintf(&sb, "\\u{%X}", r)
			i += size
		}
		sb.WriteByte('"')
		return sb.String()
	}

	t.Fatalf("%s != %s", pretty(a), pretty(b))
}
// lexToken tokenizes the first token of "contents" and returns its type.
// Any diagnostics are discarded; lexer panics are not recovered.
func lexToken(contents string) T {
	source := test.SourceForTest(contents)
	log := logger.NewDeferLog(logger.DeferLogNoVerboseOrDebug, nil)
	return NewLexer(log, source, config.TSOptions{}).Token
}
// expectLexerError runs a subtest that lexes "contents" and asserts that the
// concatenated diagnostic output equals "expected" (use "" for no errors).
// LexerPanic is swallowed so diagnostics can still be inspected; any other
// panic is re-raised.
func expectLexerError(t *testing.T, contents string, expected string) {
	t.Helper()
	t.Run(contents, func(t *testing.T) {
		t.Helper()
		log := logger.NewDeferLog(logger.DeferLogNoVerboseOrDebug, nil)
		func() {
			defer func() {
				if r := recover(); r != nil {
					if _, ok := r.(LexerPanic); !ok {
						panic(r)
					}
				}
			}()
			NewLexer(log, test.SourceForTest(contents), config.TSOptions{})
		}()

		// Collect all logged diagnostics into one string
		var sb strings.Builder
		for _, msg := range log.Done() {
			sb.WriteString(msg.String(logger.OutputOptions{}, logger.TerminalInfo{}))
		}
		test.AssertEqual(t, sb.String(), expected)
	})
}
// TestComment checks that unterminated multi-line comments are errors while
// complete comments produce no diagnostics.
func TestComment(t *testing.T) {
	unterminated := "<stdin>: ERROR: Expected \"*/\" to terminate multi-line comment\n<stdin>: NOTE: The multi-line comment starts here:\n"
	for _, c := range []struct{ contents, expected string }{
		{"/*", unterminated},
		{"/*/", unterminated},
		{"/**/", ""},
		{"//", ""},
	} {
		expectLexerError(t, c.contents, c.expected)
	}
}
// expectHashbang runs a subtest asserting that "contents" lexes to a
// THashbang token whose text is exactly "expected", with no diagnostics.
// LexerPanic is swallowed (leaving a zero-value lexer); other panics re-raise.
func expectHashbang(t *testing.T, contents string, expected string) {
	t.Helper()
	t.Run(contents, func(t *testing.T) {
		t.Helper()
		log := logger.NewDeferLog(logger.DeferLogNoVerboseOrDebug, nil)
		var lexer Lexer
		func() {
			defer func() {
				if r := recover(); r != nil {
					if _, ok := r.(LexerPanic); !ok {
						panic(r)
					}
				}
			}()
			lexer = NewLexer(log, test.SourceForTest(contents), config.TSOptions{})
		}()
		msgs := log.Done()
		test.AssertEqual(t, len(msgs), 0)
		test.AssertEqual(t, lexer.Token, THashbang)
		test.AssertEqual(t, lexer.Identifier.String, expected)
	})
}
// TestHashbang checks that "#!" at the very start of the file is lexed as a
// hashbang token running to the end of the line, and that it's rejected
// anywhere else.
func TestHashbang(t *testing.T) {
	for _, contents := range []string{
		"#!/usr/bin/env node",
		"#!/usr/bin/env node\n",
		"#!/usr/bin/env node\nlet x",
	} {
		expectHashbang(t, contents, "#!/usr/bin/env node")
	}

	// A hashbang is only valid at offset 0
	expectLexerError(t, " #!/usr/bin/env node", "<stdin>: ERROR: Syntax error \"!\"\n")
}
// expectIdentifier runs a subtest asserting that "contents" lexes to a
// TIdentifier token whose decoded text is exactly "expected", with no
// diagnostics. LexerPanic is swallowed; other panics re-raise.
func expectIdentifier(t *testing.T, contents string, expected string) {
	t.Helper()
	t.Run(contents, func(t *testing.T) {
		t.Helper()
		log := logger.NewDeferLog(logger.DeferLogNoVerboseOrDebug, nil)
		var lexer Lexer
		func() {
			defer func() {
				if r := recover(); r != nil {
					if _, ok := r.(LexerPanic); !ok {
						panic(r)
					}
				}
			}()
			lexer = NewLexer(log, test.SourceForTest(contents), config.TSOptions{})
		}()
		msgs := log.Done()
		test.AssertEqual(t, len(msgs), 0)
		test.AssertEqual(t, lexer.Token, TIdentifier)
		test.AssertEqual(t, lexer.Identifier.String, expected)
	})
}
// TestIdentifier covers plain identifiers, "\uXXXX" and "\u{...}" escapes
// inside identifiers, truncated escape sequences (which must be syntax
// errors), and zero-width (non-)joiners U+200C/U+200D, which are accepted as
// identifier continuation characters.
func TestIdentifier(t *testing.T) {
	expectIdentifier(t, "_", "_")
	expectIdentifier(t, "$", "$")
	expectIdentifier(t, "test", "test")
	// Unicode escapes decode into the identifier's text
	expectIdentifier(t, "t\\u0065st", "test")
	expectIdentifier(t, "t\\u{65}st", "test")
	// Truncated or malformed escapes are syntax errors
	expectLexerError(t, "t\\u.", "<stdin>: ERROR: Syntax error \".\"\n")
	expectLexerError(t, "t\\u0.", "<stdin>: ERROR: Syntax error \".\"\n")
	expectLexerError(t, "t\\u00.", "<stdin>: ERROR: Syntax error \".\"\n")
	expectLexerError(t, "t\\u006.", "<stdin>: ERROR: Syntax error \".\"\n")
	expectLexerError(t, "t\\u{.", "<stdin>: ERROR: Syntax error \".\"\n")
	expectLexerError(t, "t\\u{0.", "<stdin>: ERROR: Syntax error \".\"\n")
	// Zero-width non-joiner / joiner may continue an identifier
	expectIdentifier(t, "a\u200C", "a\u200C")
	expectIdentifier(t, "a\u200D", "a\u200D")
	expectIdentifier(t, "a\u200Cb", "a\u200Cb")
	expectIdentifier(t, "a\u200Db", "a\u200Db")
}
// expectNumber runs a subtest asserting that "contents" lexes to a
// TNumericLiteral token whose value is exactly "expected", with no
// diagnostics. LexerPanic is swallowed; other panics re-raise.
func expectNumber(t *testing.T, contents string, expected float64) {
	t.Helper()
	t.Run(contents, func(t *testing.T) {
		t.Helper()
		log := logger.NewDeferLog(logger.DeferLogNoVerboseOrDebug, nil)
		var lexer Lexer
		func() {
			defer func() {
				if r := recover(); r != nil {
					if _, ok := r.(LexerPanic); !ok {
						panic(r)
					}
				}
			}()
			lexer = NewLexer(log, test.SourceForTest(contents), config.TSOptions{})
		}()
		msgs := log.Done()
		test.AssertEqual(t, len(msgs), 0)
		test.AssertEqual(t, lexer.Token, TNumericLiteral)
		test.AssertEqual(t, lexer.Number, expected)
	})
}
// TestNumericLiteral covers decimal, legacy-octal, binary, octal, and hex
// literals, fractional and exponent forms, float64 overflow/underflow at the
// representable boundaries, and numeric separators ("_") in both valid and
// invalid positions.
func TestNumericLiteral(t *testing.T) {
	// Decimal and legacy octal ("0"-prefixed; digits 8/9 force decimal)
	expectNumber(t, "0", 0.0)
	expectNumber(t, "000", 0.0)
	expectNumber(t, "010", 8.0)
	expectNumber(t, "123", 123.0)
	expectNumber(t, "987", 987.0)
	expectNumber(t, "0000", 0.0)
	expectNumber(t, "0123", 83.0)
	expectNumber(t, "0123.4567", 83.0)
	expectNumber(t, "0987", 987.0)
	expectNumber(t, "0987.6543", 987.6543)
	expectNumber(t, "01289", 1289.0)
	expectNumber(t, "01289.345", 1289.0)
	expectNumber(t, "999999999", 999999999.0)
	expectNumber(t, "9999999999", 9999999999.0)
	expectNumber(t, "99999999999", 99999999999.0)
	expectNumber(t, "123456789123456789", 123456789123456780.0)
	expectNumber(t, "123456789123456789"+strings.Repeat("0", 128), 1.2345678912345679e+145)
	// Binary
	expectNumber(t, "0b00101", 5.0)
	expectNumber(t, "0B00101", 5.0)
	expectNumber(t, "0b1011101011101011101011101011101011101", 100352251741.0)
	expectNumber(t, "0B1011101011101011101011101011101011101", 100352251741.0)
	expectLexerError(t, "0b", "<stdin>: ERROR: Unexpected end of file\n")
	expectLexerError(t, "0B", "<stdin>: ERROR: Unexpected end of file\n")
	expectLexerError(t, "0b012", "<stdin>: ERROR: Syntax error \"2\"\n")
	expectLexerError(t, "0b018", "<stdin>: ERROR: Syntax error \"8\"\n")
	expectLexerError(t, "0b01a", "<stdin>: ERROR: Syntax error \"a\"\n")
	expectLexerError(t, "0b01A", "<stdin>: ERROR: Syntax error \"A\"\n")
	// Octal
	expectNumber(t, "0o12345", 5349.0)
	expectNumber(t, "0O12345", 5349.0)
	expectNumber(t, "0o1234567654321", 89755965649.0)
	expectNumber(t, "0O1234567654321", 89755965649.0)
	expectLexerError(t, "0o", "<stdin>: ERROR: Unexpected end of file\n")
	expectLexerError(t, "0O", "<stdin>: ERROR: Unexpected end of file\n")
	expectLexerError(t, "0o018", "<stdin>: ERROR: Syntax error \"8\"\n")
	expectLexerError(t, "0o01a", "<stdin>: ERROR: Syntax error \"a\"\n")
	expectLexerError(t, "0o01A", "<stdin>: ERROR: Syntax error \"A\"\n")
	// Hexadecimal
	expectNumber(t, "0x12345678", float64(0x12345678))
	expectNumber(t, "0xFEDCBA987", float64(0xFEDCBA987))
	expectNumber(t, "0x000012345678", float64(0x12345678))
	expectNumber(t, "0x123456781234", float64(0x123456781234))
	expectLexerError(t, "0x", "<stdin>: ERROR: Unexpected end of file\n")
	expectLexerError(t, "0X", "<stdin>: ERROR: Unexpected end of file\n")
	expectLexerError(t, "0xGFEDCBA", "<stdin>: ERROR: Syntax error \"G\"\n")
	expectLexerError(t, "0xABCDEFG", "<stdin>: ERROR: Syntax error \"G\"\n")
	// Fractions and float64 extremes
	expectNumber(t, "123.", 123.0)
	expectNumber(t, ".0123", 0.0123)
	expectNumber(t, "0.0123", 0.0123)
	expectNumber(t, "2.2250738585072014e-308", 2.2250738585072014e-308)
	expectNumber(t, "1.7976931348623157e+308", 1.7976931348623157e+308)
	// Underflow
	expectNumber(t, "4.9406564584124654417656879286822e-324", 5e-324)
	expectNumber(t, "5e-324", 5e-324)
	expectNumber(t, "1e-325", 0.0)
	// Overflow
	expectNumber(t, "1.797693134862315708145274237317e+308", 1.7976931348623157e+308)
	expectNumber(t, "1.797693134862315808e+308", math.Inf(1))
	expectNumber(t, "1e+309", math.Inf(1))
	// int32
	expectNumber(t, "0x7fff_ffff", 2147483647.0)
	expectNumber(t, "0x8000_0000", 2147483648.0)
	expectNumber(t, "0x8000_0001", 2147483649.0)
	// uint32
	expectNumber(t, "0xffff_ffff", 4294967295.0)
	expectNumber(t, "0x1_0000_0000", 4294967296.0)
	expectNumber(t, "0x1_0000_0001", 4294967297.0)
	// int64
	expectNumber(t, "0x7fff_ffff_ffff_fdff", 9223372036854774784)
	expectNumber(t, "0x8000_0000_0000_0000", 9.223372036854776e+18)
	expectNumber(t, "0x8000_0000_0000_3000", 9.223372036854788e+18)
	// uint64
	expectNumber(t, "0xffff_ffff_ffff_fbff", 1.844674407370955e+19)
	expectNumber(t, "0x1_0000_0000_0000_0000", 1.8446744073709552e+19)
	expectNumber(t, "0x1_0000_0000_0000_1000", 1.8446744073709556e+19)
	// Exponent forms
	expectNumber(t, "1.", 1.0)
	expectNumber(t, ".1", 0.1)
	expectNumber(t, "1.1", 1.1)
	expectNumber(t, "1e1", 10.0)
	expectNumber(t, "1e+1", 10.0)
	expectNumber(t, "1e-1", 0.1)
	expectNumber(t, ".1e1", 1.0)
	expectNumber(t, ".1e+1", 1.0)
	expectNumber(t, ".1e-1", 0.01)
	expectNumber(t, "1.e1", 10.0)
	expectNumber(t, "1.e+1", 10.0)
	expectNumber(t, "1.e-1", 0.1)
	expectNumber(t, "1.1e1", 11.0)
	expectNumber(t, "1.1e+1", 11.0)
	expectNumber(t, "1.1e-1", 0.11)
	// Truncated exponents and invalid suffixes
	expectLexerError(t, "1e", "<stdin>: ERROR: Unexpected end of file\n")
	expectLexerError(t, ".1e", "<stdin>: ERROR: Unexpected end of file\n")
	expectLexerError(t, "1.e", "<stdin>: ERROR: Unexpected end of file\n")
	expectLexerError(t, "1.1e", "<stdin>: ERROR: Unexpected end of file\n")
	expectLexerError(t, "1e+", "<stdin>: ERROR: Unexpected end of file\n")
	expectLexerError(t, ".1e+", "<stdin>: ERROR: Unexpected end of file\n")
	expectLexerError(t, "1.e+", "<stdin>: ERROR: Unexpected end of file\n")
	expectLexerError(t, "1.1e+", "<stdin>: ERROR: Unexpected end of file\n")
	expectLexerError(t, "1e-", "<stdin>: ERROR: Unexpected end of file\n")
	expectLexerError(t, ".1e-", "<stdin>: ERROR: Unexpected end of file\n")
	expectLexerError(t, "1.e-", "<stdin>: ERROR: Unexpected end of file\n")
	expectLexerError(t, "1.1e-", "<stdin>: ERROR: Unexpected end of file\n")
	expectLexerError(t, "1e+-1", "<stdin>: ERROR: Syntax error \"-\"\n")
	expectLexerError(t, "1e-+1", "<stdin>: ERROR: Syntax error \"+\"\n")
	expectLexerError(t, "1z", "<stdin>: ERROR: Syntax error \"z\"\n")
	expectLexerError(t, "1.z", "<stdin>: ERROR: Syntax error \"z\"\n")
	expectLexerError(t, "1.0f", "<stdin>: ERROR: Syntax error \"f\"\n")
	expectLexerError(t, "0b1z", "<stdin>: ERROR: Syntax error \"z\"\n")
	expectLexerError(t, "0o1z", "<stdin>: ERROR: Syntax error \"z\"\n")
	expectLexerError(t, "0x1z", "<stdin>: ERROR: Syntax error \"z\"\n")
	expectLexerError(t, "1e1z", "<stdin>: ERROR: Syntax error \"z\"\n")
	// Numeric separators (valid positions)
	expectNumber(t, "1_2_3", 123)
	expectNumber(t, ".1_2", 0.12)
	expectNumber(t, "1_2.3_4", 12.34)
	expectNumber(t, "1e2_3", 1e23)
	expectNumber(t, "1_2e3_4", 12e34)
	expectNumber(t, "1_2.3_4e5_6", 12.34e56)
	expectNumber(t, "0b1_0", 2)
	expectNumber(t, "0B1_0", 2)
	expectNumber(t, "0o1_2", 10)
	expectNumber(t, "0O1_2", 10)
	expectNumber(t, "0x1_2", 0x12)
	expectNumber(t, "0X1_2", 0x12)
	expectNumber(t, "08.0_1", 8.01)
	expectNumber(t, "09.0_1", 9.01)
	// Numeric separators (invalid: leading, doubled, trailing, or in legacy octal)
	expectLexerError(t, "0_0", "<stdin>: ERROR: Syntax error \"_\"\n")
	expectLexerError(t, "0_1", "<stdin>: ERROR: Syntax error \"_\"\n")
	expectLexerError(t, "0_7", "<stdin>: ERROR: Syntax error \"_\"\n")
	expectLexerError(t, "0_8", "<stdin>: ERROR: Syntax error \"_\"\n")
	expectLexerError(t, "0_9", "<stdin>: ERROR: Syntax error \"_\"\n")
	expectLexerError(t, "00_0", "<stdin>: ERROR: Syntax error \"_\"\n")
	expectLexerError(t, "01_0", "<stdin>: ERROR: Syntax error \"_\"\n")
	expectLexerError(t, "07_0", "<stdin>: ERROR: Syntax error \"_\"\n")
	expectLexerError(t, "08_0", "<stdin>: ERROR: Syntax error \"_\"\n")
	expectLexerError(t, "09_0", "<stdin>: ERROR: Syntax error \"_\"\n")
	expectLexerError(t, "08_0.1", "<stdin>: ERROR: Syntax error \"_\"\n")
	expectLexerError(t, "09_0.1", "<stdin>: ERROR: Syntax error \"_\"\n")
	expectLexerError(t, "1__2", "<stdin>: ERROR: Syntax error \"_\"\n")
	expectLexerError(t, ".1__2", "<stdin>: ERROR: Syntax error \"_\"\n")
	expectLexerError(t, "1e2__3", "<stdin>: ERROR: Syntax error \"_\"\n")
	expectLexerError(t, "0b1__0", "<stdin>: ERROR: Syntax error \"_\"\n")
	expectLexerError(t, "0B1__0", "<stdin>: ERROR: Syntax error \"_\"\n")
	expectLexerError(t, "0o1__2", "<stdin>: ERROR: Syntax error \"_\"\n")
	expectLexerError(t, "0O1__2", "<stdin>: ERROR: Syntax error \"_\"\n")
	expectLexerError(t, "0x1__2", "<stdin>: ERROR: Syntax error \"_\"\n")
	expectLexerError(t, "0X1__2", "<stdin>: ERROR: Syntax error \"_\"\n")
	expectLexerError(t, "1_", "<stdin>: ERROR: Syntax error \"_\"\n")
	expectLexerError(t, "1._", "<stdin>: ERROR: Syntax error \"_\"\n")
	expectLexerError(t, "1_.", "<stdin>: ERROR: Syntax error \"_\"\n")
	expectLexerError(t, ".1_", "<stdin>: ERROR: Syntax error \"_\"\n")
	expectLexerError(t, "1e_", "<stdin>: ERROR: Syntax error \"_\"\n")
	expectLexerError(t, "1e1_", "<stdin>: ERROR: Syntax error \"_\"\n")
	expectLexerError(t, "1_e1", "<stdin>: ERROR: Syntax error \"_\"\n")
	expectLexerError(t, ".1_e1", "<stdin>: ERROR: Syntax error \"_\"\n")
	expectLexerError(t, "1._2", "<stdin>: ERROR: Syntax error \"_\"\n")
	expectLexerError(t, "1_.2", "<stdin>: ERROR: Syntax error \"_\"\n")
	expectLexerError(t, "0b_1", "<stdin>: ERROR: Syntax error \"_\"\n")
	expectLexerError(t, "0B_1", "<stdin>: ERROR: Syntax error \"_\"\n")
	expectLexerError(t, "0o_1", "<stdin>: ERROR: Syntax error \"_\"\n")
	expectLexerError(t, "0O_1", "<stdin>: ERROR: Syntax error \"_\"\n")
	expectLexerError(t, "0x_1", "<stdin>: ERROR: Syntax error \"_\"\n")
	expectLexerError(t, "0X_1", "<stdin>: ERROR: Syntax error \"_\"\n")
	expectLexerError(t, "0b1_", "<stdin>: ERROR: Syntax error \"_\"\n")
	expectLexerError(t, "0B1_", "<stdin>: ERROR: Syntax error \"_\"\n")
	expectLexerError(t, "0o1_", "<stdin>: ERROR: Syntax error \"_\"\n")
	expectLexerError(t, "0O1_", "<stdin>: ERROR: Syntax error \"_\"\n")
	expectLexerError(t, "0x1_", "<stdin>: ERROR: Syntax error \"_\"\n")
	expectLexerError(t, "0X1_", "<stdin>: ERROR: Syntax error \"_\"\n")
}
// expectBigInteger runs a subtest asserting that "contents" lexes to a
// TBigIntegerLiteral token whose digit text (without the "n" suffix) is
// exactly "expected", with no diagnostics. LexerPanic is swallowed; other
// panics re-raise.
func expectBigInteger(t *testing.T, contents string, expected string) {
	t.Helper()
	t.Run(contents, func(t *testing.T) {
		t.Helper()
		log := logger.NewDeferLog(logger.DeferLogNoVerboseOrDebug, nil)
		var lexer Lexer
		func() {
			defer func() {
				if r := recover(); r != nil {
					if _, ok := r.(LexerPanic); !ok {
						panic(r)
					}
				}
			}()
			lexer = NewLexer(log, test.SourceForTest(contents), config.TSOptions{})
		}()
		msgs := log.Done()
		test.AssertEqual(t, len(msgs), 0)
		test.AssertEqual(t, lexer.Token, TBigIntegerLiteral)
		test.AssertEqual(t, lexer.Identifier.String, expected)
	})
}
// TestBigIntegerLiteral covers BigInt literals in all radixes. Note the
// expected values show that numeric separators are stripped from the stored
// text while radix prefixes are preserved, and that fractional, exponent,
// and legacy-octal forms must be rejected.
func TestBigIntegerLiteral(t *testing.T) {
	expectBigInteger(t, "0n", "0")
	expectBigInteger(t, "123n", "123")
	expectBigInteger(t, "9007199254740993n", "9007199254740993") // This can't fit in a float64
	expectBigInteger(t, "0b00101n", "0b00101")
	expectBigInteger(t, "0B00101n", "0B00101")
	expectBigInteger(t, "0b1011101011101011101011101011101011101n", "0b1011101011101011101011101011101011101")
	expectBigInteger(t, "0B1011101011101011101011101011101011101n", "0B1011101011101011101011101011101011101")
	expectBigInteger(t, "0o12345n", "0o12345")
	expectBigInteger(t, "0O12345n", "0O12345")
	expectBigInteger(t, "0o1234567654321n", "0o1234567654321")
	expectBigInteger(t, "0O1234567654321n", "0O1234567654321")
	expectBigInteger(t, "0x12345678n", "0x12345678")
	expectBigInteger(t, "0xFEDCBA987n", "0xFEDCBA987")
	expectBigInteger(t, "0x000012345678n", "0x000012345678")
	expectBigInteger(t, "0x123456781234n", "0x123456781234")
	// Numeric separators are removed from the stored digit text
	expectBigInteger(t, "1_2_3n", "123")
	expectBigInteger(t, "0b1_0_1n", "0b101")
	expectBigInteger(t, "0o1_2_3n", "0o123")
	expectBigInteger(t, "0x1_2_3n", "0x123")
	// Non-integer and legacy-octal forms cannot take the "n" suffix
	expectLexerError(t, "1e2n", "<stdin>: ERROR: Syntax error \"n\"\n")
	expectLexerError(t, "1.0n", "<stdin>: ERROR: Syntax error \"n\"\n")
	expectLexerError(t, ".1n", "<stdin>: ERROR: Syntax error \"n\"\n")
	expectLexerError(t, "000n", "<stdin>: ERROR: Syntax error \"n\"\n")
	expectLexerError(t, "0123n", "<stdin>: ERROR: Syntax error \"n\"\n")
	expectLexerError(t, "089n", "<stdin>: ERROR: Syntax error \"n\"\n")
	expectLexerError(t, "0_1n", "<stdin>: ERROR: Syntax error \"_\"\n")
}
// expectString runs a subtest asserting that "contents" lexes to a
// TStringLiteral token whose decoded UTF-16 content equals "expected", with
// no diagnostics. LexerPanic from construction is swallowed; note that
// StringLiteral() is deliberately called outside the panic guard, exactly as
// the original flow did.
func expectString(t *testing.T, contents string, expected string) {
	t.Helper()
	t.Run(contents, func(t *testing.T) {
		t.Helper()
		log := logger.NewDeferLog(logger.DeferLogNoVerboseOrDebug, nil)
		var lexer Lexer
		func() {
			defer func() {
				if r := recover(); r != nil {
					if _, ok := r.(LexerPanic); !ok {
						panic(r)
					}
				}
			}()
			lexer = NewLexer(log, test.SourceForTest(contents), config.TSOptions{})
		}()
		text := lexer.StringLiteral()
		msgs := log.Done()
		test.AssertEqual(t, len(msgs), 0)
		test.AssertEqual(t, lexer.Token, TStringLiteral)
		assertEqualStrings(t, helpers.UTF16ToString(text), expected)
	})
}
// expectLexerErrorString runs a subtest that lexes "contents" as a string
// literal and asserts that the concatenated diagnostic output equals
// "expected". LexerPanic is swallowed so diagnostics can be inspected; any
// other panic is re-raised.
func expectLexerErrorString(t *testing.T, contents string, expected string) {
	t.Helper()
	t.Run(contents, func(t *testing.T) {
		t.Helper()
		log := logger.NewDeferLog(logger.DeferLogNoVerboseOrDebug, nil)
		func() {
			defer func() {
				if r := recover(); r != nil {
					if _, ok := r.(LexerPanic); !ok {
						panic(r)
					}
				}
			}()
			lexer := NewLexer(log, test.SourceForTest(contents), config.TSOptions{})
			lexer.StringLiteral()
		}()

		// Collect all logged diagnostics into one string
		var sb strings.Builder
		for _, msg := range log.Done() {
			sb.WriteString(msg.String(logger.OutputOptions{}, logger.TerminalInfo{}))
		}
		test.AssertEqual(t, sb.String(), expected)
	})
}
// TestStringLiteral covers escape sequences (single-char, legacy octal, hex,
// "\uXXXX", "\u{...}"), lone surrogates (expected as WTF-8 byte sequences),
// line continuations, and the various unterminated/malformed forms that must
// produce diagnostics.
func TestStringLiteral(t *testing.T) {
	expectString(t, "''", "")
	expectString(t, "'123'", "123")
	expectString(t, "'\"'", "\"")
	expectString(t, "'\\''", "'")
	expectString(t, "'\\\"'", "\"")
	expectString(t, "'\\\\'", "\\")
	expectString(t, "'\\a'", "a")
	expectString(t, "'\\b'", "\b")
	expectString(t, "'\\f'", "\f")
	expectString(t, "'\\n'", "\n")
	expectString(t, "'\\r'", "\r")
	expectString(t, "'\\t'", "\t")
	expectString(t, "'\\v'", "\v")
	// Legacy octal escapes (one to three digits)
	expectString(t, "'\\0'", "\000")
	expectString(t, "'\\1'", "\001")
	expectString(t, "'\\2'", "\002")
	expectString(t, "'\\3'", "\003")
	expectString(t, "'\\4'", "\004")
	expectString(t, "'\\5'", "\005")
	expectString(t, "'\\6'", "\006")
	expectString(t, "'\\7'", "\007")
	expectString(t, "'\\000'", "\000")
	expectString(t, "'\\001'", "\001")
	expectString(t, "'\\002'", "\002")
	expectString(t, "'\\003'", "\003")
	expectString(t, "'\\004'", "\004")
	expectString(t, "'\\005'", "\005")
	expectString(t, "'\\006'", "\006")
	expectString(t, "'\\007'", "\007")
	expectString(t, "'\\000'", "\000")
	expectString(t, "'\\100'", "\100")
	expectString(t, "'\\200'", "\u0080")
	expectString(t, "'\\300'", "\u00C0")
	expectString(t, "'\\377'", "\u00FF")
	// "\377" is the largest octal escape; a following digit is literal text
	expectString(t, "'\\378'", "\0378")
	expectString(t, "'\\400'", "\0400")
	expectString(t, "'\\500'", "\0500")
	expectString(t, "'\\600'", "\0600")
	expectString(t, "'\\700'", "\0700")
	// Hex and unicode escapes (uppercase "\X" and "\U" are not escapes)
	expectString(t, "'\\x00'", "\x00")
	expectString(t, "'\\X11'", "X11")
	expectString(t, "'\\x71'", "\x71")
	expectString(t, "'\\x7f'", "\x7f")
	expectString(t, "'\\x7F'", "\x7F")
	expectString(t, "'\\u0000'", "\u0000")
	expectString(t, "'\\ucafe\\uCAFE\\u7FFF'", "\ucafe\uCAFE\u7FFF")
	// Lone surrogates are preserved (expected bytes are their WTF-8 encoding)
	expectString(t, "'\\uD800'", "\xED\xA0\x80")
	expectString(t, "'\\uDC00'", "\xED\xB0\x80")
	expectString(t, "'\\U0000'", "U0000")
	expectString(t, "'\\u{100000}'", "\U00100000")
	expectString(t, "'\\u{10FFFF}'", "\U0010FFFF")
	expectLexerErrorString(t, "'\\u{110000}'", "<stdin>: ERROR: Unicode escape sequence is out of range\n")
	expectLexerErrorString(t, "'\\u{FFFFFFFF}'", "<stdin>: ERROR: Unicode escape sequence is out of range\n")
	// Line continuation
	expectLexerErrorString(t, "'\n'", "<stdin>: ERROR: Unterminated string literal\n")
	expectLexerErrorString(t, "'\r'", "<stdin>: ERROR: Unterminated string literal\n")
	expectLexerErrorString(t, "\"\n\"", "<stdin>: ERROR: Unterminated string literal\n")
	expectLexerErrorString(t, "\"\r\"", "<stdin>: ERROR: Unterminated string literal\n")
	expectString(t, "'\u2028'", "\u2028")
	expectString(t, "'\u2029'", "\u2029")
	expectString(t, "\"\u2028\"", "\u2028")
	expectString(t, "\"\u2029\"", "\u2029")
	expectString(t, "'1\\\r2'", "12")
	expectString(t, "'1\\\n2'", "12")
	expectString(t, "'1\\\r\n2'", "12")
	expectString(t, "'1\\\u20282'", "12")
	expectString(t, "'1\\\u20292'", "12")
	expectLexerErrorString(t, "'1\\\n\r2'", "<stdin>: ERROR: Unterminated string literal\n")
	// Unterminated strings and truncated escapes
	expectLexerErrorString(t, "\"'", "<stdin>: ERROR: Unterminated string literal\n")
	expectLexerErrorString(t, "'\"", "<stdin>: ERROR: Unterminated string literal\n")
	expectLexerErrorString(t, "'\\", "<stdin>: ERROR: Unterminated string literal\n")
	expectLexerErrorString(t, "'\\'", "<stdin>: ERROR: Unterminated string literal\n")
	expectLexerErrorString(t, "'\\x", "<stdin>: ERROR: Unterminated string literal\n")
	expectLexerErrorString(t, "'\\x'", "<stdin>: ERROR: Syntax error \"'\"\n")
	expectLexerErrorString(t, "'\\xG'", "<stdin>: ERROR: Syntax error \"G\"\n")
	expectLexerErrorString(t, "'\\xF'", "<stdin>: ERROR: Syntax error \"'\"\n")
	expectLexerErrorString(t, "'\\xFG'", "<stdin>: ERROR: Syntax error \"G\"\n")
	expectLexerErrorString(t, "'\\u", "<stdin>: ERROR: Unterminated string literal\n")
	expectLexerErrorString(t, "'\\u'", "<stdin>: ERROR: Syntax error \"'\"\n")
	expectLexerErrorString(t, "'\\u0'", "<stdin>: ERROR: Syntax error \"'\"\n")
	expectLexerErrorString(t, "'\\u00'", "<stdin>: ERROR: Syntax error \"'\"\n")
	expectLexerErrorString(t, "'\\u000'", "<stdin>: ERROR: Syntax error \"'\"\n")
}
// TestTokens verifies that each input string lexes to exactly the expected
// token kind, covering the empty string, a NUL byte, the hashbang prefix,
// punctuation, and every reserved word.
func TestTokens(t *testing.T) {
	cases := []struct {
		contents string
		token    T
	}{
		{"", TEndOfFile},
		{"\x00", TSyntaxError},

		// "#!/usr/bin/env node"
		{"#!", THashbang},

		// Punctuation
		{"(", TOpenParen},
		{")", TCloseParen},
		{"[", TOpenBracket},
		{"]", TCloseBracket},
		{"{", TOpenBrace},
		{"}", TCloseBrace},

		// Reserved words
		{"break", TBreak},
		{"case", TCase},
		{"catch", TCatch},
		{"class", TClass},
		{"const", TConst},
		{"continue", TContinue},
		{"debugger", TDebugger},
		{"default", TDefault},
		{"delete", TDelete},
		{"do", TDo},
		{"else", TElse},
		{"enum", TEnum},
		{"export", TExport},
		{"extends", TExtends},
		{"false", TFalse},
		{"finally", TFinally},
		{"for", TFor},
		{"function", TFunction},
		{"if", TIf},
		{"import", TImport},
		{"in", TIn},
		{"instanceof", TInstanceof},
		{"new", TNew},
		{"null", TNull},
		{"return", TReturn},
		{"super", TSuper},
		{"switch", TSwitch},
		{"this", TThis},
		{"throw", TThrow},
		{"true", TTrue},
		{"try", TTry},
		{"typeof", TTypeof},
		{"var", TVar},
		{"void", TVoid},
		{"while", TWhile},
		{"with", TWith},
	}

	for _, tc := range cases {
		tc := tc // copy for the subtest closure
		t.Run(tc.contents, func(t *testing.T) {
			test.AssertEqual(t, lexToken(tc.contents), tc.token)
		})
	}
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/js_parser/ts_parser.go | internal/js_parser/ts_parser.go | // This file contains code for parsing TypeScript syntax. The parser just skips
// over type expressions as if they are whitespace and doesn't bother generating
// an AST because nothing uses type information.
package js_parser
import (
"fmt"
"strings"
"github.com/evanw/esbuild/internal/ast"
"github.com/evanw/esbuild/internal/compat"
"github.com/evanw/esbuild/internal/helpers"
"github.com/evanw/esbuild/internal/js_ast"
"github.com/evanw/esbuild/internal/js_lexer"
"github.com/evanw/esbuild/internal/logger"
)
// skipTypeScriptBinding skips over a single TypeScript binding target: a
// plain identifier, "this", an array destructuring pattern, or an object
// destructuring pattern (each possibly containing nested bindings and rest
// elements). Syntax errors are reported by the lexer, which panics.
func (p *parser) skipTypeScriptBinding() {
	switch p.lexer.Token {
	case js_lexer.TIdentifier, js_lexer.TThis:
		p.lexer.Next()
	case js_lexer.TOpenBracket:
		p.lexer.Next()
		// "[, , a]"
		for p.lexer.Token == js_lexer.TComma {
			p.lexer.Next()
		}
		// "[a, b]"
		for p.lexer.Token != js_lexer.TCloseBracket {
			// "[...a]"
			if p.lexer.Token == js_lexer.TDotDotDot {
				p.lexer.Next()
			}
			// Bindings may nest arbitrarily: "[[a], {b}]"
			p.skipTypeScriptBinding()
			if p.lexer.Token != js_lexer.TComma {
				break
			}
			p.lexer.Next()
		}
		p.lexer.Expect(js_lexer.TCloseBracket)
	case js_lexer.TOpenBrace:
		p.lexer.Next()
		for p.lexer.Token != js_lexer.TCloseBrace {
			// Tracks whether a shorthand-capable identifier key was seen;
			// non-identifier keys require an explicit ": value" below
			foundIdentifier := false
			switch p.lexer.Token {
			case js_lexer.TDotDotDot:
				p.lexer.Next()
				if p.lexer.Token != js_lexer.TIdentifier {
					p.lexer.Unexpected()
				}
				// "{...x}"
				foundIdentifier = true
				p.lexer.Next()
			case js_lexer.TIdentifier:
				// "{x}"
				// "{x: y}"
				foundIdentifier = true
				p.lexer.Next()
			// "{1: y}"
			// "{'x': y}"
			case js_lexer.TStringLiteral, js_lexer.TNumericLiteral:
				p.lexer.Next()
			default:
				if p.lexer.IsIdentifierOrKeyword() {
					// "{if: x}"
					p.lexer.Next()
				} else {
					p.lexer.Unexpected()
				}
			}
			// A ": value" part is mandatory unless the key was a shorthand
			// identifier ("{x}" is fine, "{'x'}" is not)
			if p.lexer.Token == js_lexer.TColon || !foundIdentifier {
				p.lexer.Expect(js_lexer.TColon)
				p.skipTypeScriptBinding()
			}
			if p.lexer.Token != js_lexer.TComma {
				break
			}
			p.lexer.Next()
		}
		p.lexer.Expect(js_lexer.TCloseBrace)
	default:
		p.lexer.Unexpected()
	}
}
// skipTypeScriptFnArgs skips a parenthesized function argument list in a
// type position, including rest arguments, optional markers, and per-argument
// type annotations: e.g. "(a?: number, ...rest: string[])".
func (p *parser) skipTypeScriptFnArgs() {
	p.lexer.Expect(js_lexer.TOpenParen)
	for p.lexer.Token != js_lexer.TCloseParen {
		// "(...a)"
		if p.lexer.Token == js_lexer.TDotDotDot {
			p.lexer.Next()
		}
		p.skipTypeScriptBinding()
		// "(a?)"
		if p.lexer.Token == js_lexer.TQuestion {
			p.lexer.Next()
		}
		// "(a: any)"
		if p.lexer.Token == js_lexer.TColon {
			p.lexer.Next()
			p.skipTypeScriptType(js_ast.LLowest)
		}
		// "(a, b)"
		if p.lexer.Token != js_lexer.TComma {
			break
		}
		p.lexer.Next()
	}
	p.lexer.Expect(js_lexer.TCloseParen)
}
// This is a spot where the TypeScript grammar is highly ambiguous. Here are
// some cases that are valid:
//
//	let x = (y: any): (() => {}) => { };
//	let x = (y: any): () => {} => { };
//	let x = (y: any): (y) => {} => { };
//	let x = (y: any): (y[]) => {};
//	let x = (y: any): (a | b) => {};
//
// Here are some cases that aren't valid:
//
//	let x = (y: any): (y) => {};
//	let x = (y: any): (y) => {return 0};
//	let x = (y: any): asserts y is (y) => {};
//
// skipTypeScriptParenOrFnType resolves the ambiguity by first attempting
// "(args) => returnType" with lexer backtracking; if that fails it falls
// back to a plain parenthesized type "(T)".
func (p *parser) skipTypeScriptParenOrFnType() {
	if p.trySkipTypeScriptArrowArgsWithBacktracking() {
		p.skipTypeScriptReturnType()
	} else {
		p.lexer.Expect(js_lexer.TOpenParen)
		p.skipTypeScriptType(js_ast.LLowest)
		p.lexer.Expect(js_lexer.TCloseParen)
	}
}
// skipTypeScriptReturnType skips a function return type annotation. The
// isReturnTypeFlag enables return-type-only forms such as "asserts x" and
// "asserts x is T".
func (p *parser) skipTypeScriptReturnType() {
	p.skipTypeScriptTypeWithFlags(js_ast.LLowest, isReturnTypeFlag)
}
// skipTypeScriptType skips a type expression at the given precedence level
// with no contextual flags set.
func (p *parser) skipTypeScriptType(level js_ast.L) {
	p.skipTypeScriptTypeWithFlags(level, 0)
}
// skipTypeFlags is a bit set of contextual hints threaded through the
// type-skipping functions (e.g. "this is a return type position" or
// "tuple labels are allowed here").
type skipTypeFlags uint8

const (
	isReturnTypeFlag skipTypeFlags = 1 << iota
	isIndexSignatureFlag
	allowTupleLabelsFlag
	disallowConditionalTypesFlag
)

// has reports whether any bit of "flag" is set in the receiver.
func (f skipTypeFlags) has(flag skipTypeFlags) bool {
	return (f & flag) != 0
}
// tsTypeIdentifierKind classifies identifiers that have special meaning at
// the start of a TypeScript type expression.
type tsTypeIdentifierKind uint8
const (
	// An ordinary identifier with no special handling
	tsTypeIdentifierNormal tsTypeIdentifierKind = iota
	// "unique" (as in "unique symbol")
	tsTypeIdentifierUnique
	// "abstract" (as in "abstract new () => {}")
	tsTypeIdentifierAbstract
	// "asserts" (return-type assertion predicates)
	tsTypeIdentifierAsserts
	// Prefix operators "keyof" and "readonly"
	tsTypeIdentifierPrefix
	// Built-in primitive type names ("any", "string", ...)
	tsTypeIdentifierPrimitive
	// "infer" (inside conditional types)
	tsTypeIdentifierInfer
)
// Use a map to improve lookup speed
var tsTypeIdentifierMap = map[string]tsTypeIdentifierKind{
	"unique":   tsTypeIdentifierUnique,
	"abstract": tsTypeIdentifierAbstract,
	"asserts":  tsTypeIdentifierAsserts,
	"keyof":    tsTypeIdentifierPrefix,
	"readonly": tsTypeIdentifierPrefix,
	"any":       tsTypeIdentifierPrimitive,
	"never":     tsTypeIdentifierPrimitive,
	"unknown":   tsTypeIdentifierPrimitive,
	"undefined": tsTypeIdentifierPrimitive,
	"object":    tsTypeIdentifierPrimitive,
	"number":    tsTypeIdentifierPrimitive,
	"string":    tsTypeIdentifierPrimitive,
	"boolean":   tsTypeIdentifierPrimitive,
	"bigint":    tsTypeIdentifierPrimitive,
	"symbol":    tsTypeIdentifierPrimitive,
	"infer": tsTypeIdentifierInfer,
}
// skipTypeScriptTypeWithFlags skips a full TypeScript type expression at the
// given precedence level. The labeled loop below consumes one prefix/primary
// type; the second loop afterwards consumes suffixes (union "|", intersection
// "&", postfix "!", member access ".", indexed access "[...]", and
// conditional "extends ? :"). The flags carry contextual restrictions such
// as "return type position" or "tuple labels allowed".
func (p *parser) skipTypeScriptTypeWithFlags(level js_ast.L, flags skipTypeFlags) {
loop:
	for {
		switch p.lexer.Token {
		case js_lexer.TNumericLiteral, js_lexer.TBigIntegerLiteral, js_lexer.TStringLiteral,
			js_lexer.TNoSubstitutionTemplateLiteral, js_lexer.TTrue, js_lexer.TFalse,
			js_lexer.TNull, js_lexer.TVoid:
			p.lexer.Next()
		case js_lexer.TConst:
			r := p.lexer.Range()
			p.lexer.Next()
			// "[const: number]"
			if flags.has(allowTupleLabelsFlag) && p.lexer.Token == js_lexer.TColon {
				p.log.AddError(&p.tracker, r, "Unexpected \"const\"")
			}
		case js_lexer.TThis:
			p.lexer.Next()
			// "function check(): this is boolean"
			if p.lexer.IsContextualKeyword("is") && !p.lexer.HasNewlineBefore {
				p.lexer.Next()
				p.skipTypeScriptType(js_ast.LLowest)
				return
			}
		case js_lexer.TMinus:
			// "-123"
			// "-123n"
			p.lexer.Next()
			if p.lexer.Token == js_lexer.TBigIntegerLiteral {
				p.lexer.Next()
			} else {
				p.lexer.Expect(js_lexer.TNumericLiteral)
			}
		// NOTE(review): this case body is empty, so a leading "&" breaks out
		// of this switch and gets consumed by the suffix loop at the bottom
		// of this function instead of being skipped here like a leading "|".
		// Confirm this asymmetry is intentional (a "fallthrough" here would
		// behave differently).
		case js_lexer.TAmpersand:
		case js_lexer.TBar:
			// Support things like "type Foo = | A | B" and "type Foo = & A & B"
			p.lexer.Next()
			continue
		case js_lexer.TImport:
			// "import('fs')"
			p.lexer.Next()
			// "[import: number]"
			// "[import?: number]"
			if flags.has(allowTupleLabelsFlag) && (p.lexer.Token == js_lexer.TColon || p.lexer.Token == js_lexer.TQuestion) {
				return
			}
			p.lexer.Expect(js_lexer.TOpenParen)
			p.lexer.Expect(js_lexer.TStringLiteral)
			// "import('./foo.json', { assert: { type: 'json' } })"
			if p.lexer.Token == js_lexer.TComma {
				p.lexer.Next()
				p.skipTypeScriptObjectType()
				// "import('./foo.json', { assert: { type: 'json' } }, )"
				if p.lexer.Token == js_lexer.TComma {
					p.lexer.Next()
				}
			}
			p.lexer.Expect(js_lexer.TCloseParen)
		case js_lexer.TNew:
			// "new () => Foo"
			// "new <T>() => Foo<T>"
			p.lexer.Next()
			// "[new: number]"
			// "[new?: number]"
			if flags.has(allowTupleLabelsFlag) && (p.lexer.Token == js_lexer.TColon || p.lexer.Token == js_lexer.TQuestion) {
				return
			}
			p.skipTypeScriptTypeParameters(allowConstModifier)
			p.skipTypeScriptParenOrFnType()
		case js_lexer.TLessThan:
			// "<T>() => Foo<T>"
			p.skipTypeScriptTypeParameters(allowConstModifier)
			p.skipTypeScriptParenOrFnType()
		case js_lexer.TOpenParen:
			// "(number | string)"
			p.skipTypeScriptParenOrFnType()
		case js_lexer.TIdentifier:
			kind := tsTypeIdentifierMap[p.lexer.Identifier.String]
			// Primitive type names never take type arguments, so skip the
			// type-argument check for them below
			checkTypeParameters := true
			switch kind {
			case tsTypeIdentifierPrefix:
				p.lexer.Next()
				// Valid:
				//   "[keyof: string]"
				//   "[keyof?: string]"
				//   "{[keyof: string]: number}"
				//   "{[keyof in string]: number}"
				//
				// Invalid:
				//   "A extends B ? keyof : string"
				//
				if (p.lexer.Token != js_lexer.TColon && p.lexer.Token != js_lexer.TQuestion && p.lexer.Token != js_lexer.TIn) ||
					(!flags.has(isIndexSignatureFlag) && !flags.has(allowTupleLabelsFlag)) {
					p.skipTypeScriptType(js_ast.LPrefix)
				}
				break loop
			case tsTypeIdentifierInfer:
				p.lexer.Next()
				// "type Foo = Bar extends [infer T] ? T : null"
				// "type Foo = Bar extends [infer T extends string] ? T : null"
				// "type Foo = Bar extends [infer T extends string ? infer T : never] ? T : null"
				// "type Foo = { [infer in Bar]: number }"
				// "type Foo = [infer: number]"
				// "type Foo = [infer?: number]"
				if (p.lexer.Token != js_lexer.TColon && p.lexer.Token != js_lexer.TQuestion && p.lexer.Token != js_lexer.TIn) ||
					(!flags.has(isIndexSignatureFlag) && !flags.has(allowTupleLabelsFlag)) {
					p.lexer.Expect(js_lexer.TIdentifier)
					if p.lexer.Token == js_lexer.TExtends {
						p.trySkipTypeScriptConstraintOfInferTypeWithBacktracking(flags)
					}
				}
				break loop
			case tsTypeIdentifierUnique:
				p.lexer.Next()
				// "let foo: unique symbol"
				if p.lexer.IsContextualKeyword("symbol") {
					p.lexer.Next()
					break loop
				}
			case tsTypeIdentifierAbstract:
				p.lexer.Next()
				// "let foo: abstract new () => {}" added in TypeScript 4.2
				if p.lexer.Token == js_lexer.TNew {
					continue
				}
			case tsTypeIdentifierAsserts:
				p.lexer.Next()
				// "function assert(x: boolean): asserts x"
				// "function assert(x: boolean): asserts x is boolean"
				if flags.has(isReturnTypeFlag) && !p.lexer.HasNewlineBefore && (p.lexer.Token == js_lexer.TIdentifier || p.lexer.Token == js_lexer.TThis) {
					p.lexer.Next()
				}
			case tsTypeIdentifierPrimitive:
				p.lexer.Next()
				checkTypeParameters = false
			default:
				p.lexer.Next()
			}
			// "function assert(x: any): x is boolean"
			if p.lexer.IsContextualKeyword("is") && !p.lexer.HasNewlineBefore {
				p.lexer.Next()
				p.skipTypeScriptType(js_ast.LLowest)
				return
			}
			// "let foo: any \n <number>foo" must not become a single type
			if checkTypeParameters && !p.lexer.HasNewlineBefore {
				p.skipTypeScriptTypeArguments(skipTypeScriptTypeArgumentsOpts{})
			}
		case js_lexer.TTypeof:
			p.lexer.Next()
			// "[typeof: number]"
			// "[typeof?: number]"
			if flags.has(allowTupleLabelsFlag) && (p.lexer.Token == js_lexer.TColon || p.lexer.Token == js_lexer.TQuestion) {
				return
			}
			if p.lexer.Token == js_lexer.TImport {
				// "typeof import('fs')"
				continue
			} else {
				// "typeof x"
				if !p.lexer.IsIdentifierOrKeyword() {
					p.lexer.Expected(js_lexer.TIdentifier)
				}
				p.lexer.Next()
				// "typeof x.y"
				// "typeof x.#y"
				for p.lexer.Token == js_lexer.TDot {
					p.lexer.Next()
					if !p.lexer.IsIdentifierOrKeyword() && p.lexer.Token != js_lexer.TPrivateIdentifier {
						p.lexer.Expected(js_lexer.TIdentifier)
					}
					p.lexer.Next()
				}
				if !p.lexer.HasNewlineBefore {
					p.skipTypeScriptTypeArguments(skipTypeScriptTypeArgumentsOpts{})
				}
			}
		case js_lexer.TOpenBracket:
			// "[number, string]"
			// "[first: number, second: string]"
			p.lexer.Next()
			for p.lexer.Token != js_lexer.TCloseBracket {
				if p.lexer.Token == js_lexer.TDotDotDot {
					p.lexer.Next()
				}
				p.skipTypeScriptTypeWithFlags(js_ast.LLowest, allowTupleLabelsFlag)
				if p.lexer.Token == js_lexer.TQuestion {
					p.lexer.Next()
				}
				if p.lexer.Token == js_lexer.TColon {
					p.lexer.Next()
					p.skipTypeScriptType(js_ast.LLowest)
				}
				if p.lexer.Token != js_lexer.TComma {
					break
				}
				p.lexer.Next()
			}
			p.lexer.Expect(js_lexer.TCloseBracket)
		case js_lexer.TOpenBrace:
			p.skipTypeScriptObjectType()
		case js_lexer.TTemplateHead:
			// "`${'a' | 'b'}-${'c' | 'd'}`"
			for {
				p.lexer.Next()
				p.skipTypeScriptType(js_ast.LLowest)
				p.lexer.RescanCloseBraceAsTemplateToken()
				if p.lexer.Token == js_lexer.TTemplateTail {
					p.lexer.Next()
					break
				}
			}
		default:
			// "[function: number]"
			// "[function?: number]"
			if flags.has(allowTupleLabelsFlag) && p.lexer.IsIdentifierOrKeyword() {
				if p.lexer.Token != js_lexer.TFunction {
					p.log.AddError(&p.tracker, p.lexer.Range(), fmt.Sprintf("Unexpected %q", p.lexer.Raw()))
				}
				p.lexer.Next()
				if p.lexer.Token != js_lexer.TColon && p.lexer.Token != js_lexer.TQuestion {
					p.lexer.Expect(js_lexer.TColon)
				}
				return
			}
			p.lexer.Unexpected()
		}
		break
	}
	// Suffix loop: consume operators that may follow the primary type
	for {
		switch p.lexer.Token {
		case js_lexer.TBar:
			if level >= js_ast.LBitwiseOr {
				return
			}
			p.lexer.Next()
			p.skipTypeScriptTypeWithFlags(js_ast.LBitwiseOr, flags)
		case js_lexer.TAmpersand:
			if level >= js_ast.LBitwiseAnd {
				return
			}
			p.lexer.Next()
			p.skipTypeScriptTypeWithFlags(js_ast.LBitwiseAnd, flags)
		case js_lexer.TExclamation:
			// A postfix "!" is allowed in JSDoc types in TypeScript, which are only
			// present in comments. While it's not valid in a non-comment position,
			// it's still parsed and turned into a soft error by the TypeScript
			// compiler. It turns out parsing this is important for correctness for
			// "as" casts because the "!" token must still be consumed.
			if p.lexer.HasNewlineBefore {
				return
			}
			p.lexer.Next()
		case js_lexer.TDot:
			p.lexer.Next()
			if !p.lexer.IsIdentifierOrKeyword() {
				p.lexer.Expect(js_lexer.TIdentifier)
			}
			p.lexer.Next()
			// "{ <A extends B>(): c.d \n <E extends F>(): g.h }" must not become a single type
			if !p.lexer.HasNewlineBefore {
				p.skipTypeScriptTypeArguments(skipTypeScriptTypeArgumentsOpts{})
			}
		case js_lexer.TOpenBracket:
			// "{ ['x']: string \n ['y']: string }" must not become a single type
			if p.lexer.HasNewlineBefore {
				return
			}
			p.lexer.Next()
			if p.lexer.Token != js_lexer.TCloseBracket {
				p.skipTypeScriptType(js_ast.LLowest)
			}
			p.lexer.Expect(js_lexer.TCloseBracket)
		case js_lexer.TExtends:
			// "{ x: number \n extends: boolean }" must not become a single type
			if p.lexer.HasNewlineBefore || flags.has(disallowConditionalTypesFlag) {
				return
			}
			p.lexer.Next()
			// The type following "extends" is not permitted to be another conditional type
			p.skipTypeScriptTypeWithFlags(js_ast.LLowest, disallowConditionalTypesFlag)
			p.lexer.Expect(js_lexer.TQuestion)
			p.skipTypeScriptType(js_ast.LLowest)
			p.lexer.Expect(js_lexer.TColon)
			p.skipTypeScriptType(js_ast.LLowest)
		default:
			return
		}
	}
}
// skipTypeScriptObjectType skips an object type literal "{ ... }" including
// regular properties, method signatures, index signatures, mapped types,
// optional/definite-assignment markers, and variance modifiers.
func (p *parser) skipTypeScriptObjectType() {
	p.lexer.Expect(js_lexer.TOpenBrace)
	for p.lexer.Token != js_lexer.TCloseBrace {
		// "{ -readonly [K in keyof T]: T[K] }"
		// "{ +readonly [K in keyof T]: T[K] }"
		if p.lexer.Token == js_lexer.TPlus || p.lexer.Token == js_lexer.TMinus {
			p.lexer.Next()
		}
		// Skip over modifiers and the property identifier
		foundKey := false
		for p.lexer.IsIdentifierOrKeyword() ||
			p.lexer.Token == js_lexer.TStringLiteral ||
			p.lexer.Token == js_lexer.TNumericLiteral {
			p.lexer.Next()
			foundKey = true
		}
		if p.lexer.Token == js_lexer.TOpenBracket {
			// Index signature or computed property
			p.lexer.Next()
			p.skipTypeScriptTypeWithFlags(js_ast.LLowest, isIndexSignatureFlag)
			// "{ [key: string]: number }"
			// "{ readonly [K in keyof T]: T[K] }"
			if p.lexer.Token == js_lexer.TColon {
				p.lexer.Next()
				p.skipTypeScriptType(js_ast.LLowest)
			} else if p.lexer.Token == js_lexer.TIn {
				p.lexer.Next()
				p.skipTypeScriptType(js_ast.LLowest)
				if p.lexer.IsContextualKeyword("as") {
					// "{ [K in keyof T as `get-${K}`]: T[K] }"
					p.lexer.Next()
					p.skipTypeScriptType(js_ast.LLowest)
				}
			}
			p.lexer.Expect(js_lexer.TCloseBracket)
			// "{ [K in keyof T]+?: T[K] }"
			// "{ [K in keyof T]-?: T[K] }"
			if p.lexer.Token == js_lexer.TPlus || p.lexer.Token == js_lexer.TMinus {
				p.lexer.Next()
			}
			foundKey = true
		}
		// "?" indicates an optional property
		// "!" indicates an initialization assertion
		if foundKey && (p.lexer.Token == js_lexer.TQuestion || p.lexer.Token == js_lexer.TExclamation) {
			p.lexer.Next()
		}
		// Type parameters come right after the optional mark
		p.skipTypeScriptTypeParameters(allowConstModifier)
		switch p.lexer.Token {
		case js_lexer.TColon:
			// Regular property
			if !foundKey {
				p.lexer.Expect(js_lexer.TIdentifier)
			}
			p.lexer.Next()
			p.skipTypeScriptType(js_ast.LLowest)
		case js_lexer.TOpenParen:
			// Method signature
			p.skipTypeScriptFnArgs()
			if p.lexer.Token == js_lexer.TColon {
				p.lexer.Next()
				p.skipTypeScriptReturnType()
			}
		default:
			if !foundKey {
				p.lexer.Unexpected()
			}
		}
		// Members are separated by ",", ";", or a newline
		switch p.lexer.Token {
		case js_lexer.TCloseBrace:
		case js_lexer.TComma, js_lexer.TSemicolon:
			p.lexer.Next()
		default:
			if !p.lexer.HasNewlineBefore {
				p.lexer.Unexpected()
			}
		}
	}
	p.lexer.Expect(js_lexer.TCloseBrace)
}
// typeParameterFlags controls which modifiers are allowed inside a type
// parameter list, depending on the construct it's attached to.
type typeParameterFlags uint8
const (
	// TypeScript 4.7
	allowInOutVarianceAnnotations typeParameterFlags = 1 << iota
	// TypeScript 5.0
	allowConstModifier
	// Allow "<>" without any type parameters
	allowEmptyTypeParameters
)
// skipTypeScriptTypeParametersResult reports how certain the parser is that
// what it skipped was really a type parameter list (as opposed to, say, a
// comparison expression that was misread as one).
type skipTypeScriptTypeParametersResult uint8
const (
	didNotSkipAnything skipTypeScriptTypeParametersResult = iota
	couldBeTypeCast
	definitelyTypeParameters
)
// This is the type parameter declarations that go with other symbol
// declarations (class, function, type, etc.)
//
// skipTypeScriptTypeParameters skips a "<...>" type parameter list if one is
// present, including "in"/"out" variance annotations and "const" modifiers
// (validated against the given flags), plus "extends" constraints and "="
// defaults. It returns how certain the parse was (see the result type).
func (p *parser) skipTypeScriptTypeParameters(flags typeParameterFlags) skipTypeScriptTypeParametersResult {
	if p.lexer.Token != js_lexer.TLessThan {
		return didNotSkipAnything
	}
	p.lexer.Next()
	result := couldBeTypeCast
	if (flags&allowEmptyTypeParameters) != 0 && p.lexer.Token == js_lexer.TGreaterThan {
		p.lexer.Next()
		return definitelyTypeParameters
	}
	for {
		hasIn := false
		hasOut := false
		expectIdentifier := true
		invalidModifierRange := logger.Range{}
		// Scan over a sequence of "in" and "out" modifiers (a.k.a. optional
		// variance annotations) as well as "const" modifiers
		for {
			if p.lexer.Token == js_lexer.TConst {
				if invalidModifierRange.Len == 0 && (flags&allowConstModifier) == 0 {
					// Valid:
					//   "class Foo<const T> {}"
					// Invalid:
					//   "interface Foo<const T> {}"
					invalidModifierRange = p.lexer.Range()
				}
				result = definitelyTypeParameters
				p.lexer.Next()
				expectIdentifier = true
				continue
			}
			if p.lexer.Token == js_lexer.TIn {
				if invalidModifierRange.Len == 0 && ((flags&allowInOutVarianceAnnotations) == 0 || hasIn || hasOut) {
					// Valid:
					//   "type Foo<in T> = T"
					// Invalid:
					//   "type Foo<in in T> = T"
					//   "type Foo<out in T> = T"
					invalidModifierRange = p.lexer.Range()
				}
				p.lexer.Next()
				hasIn = true
				expectIdentifier = true
				continue
			}
			if p.lexer.IsContextualKeyword("out") {
				r := p.lexer.Range()
				if invalidModifierRange.Len == 0 && (flags&allowInOutVarianceAnnotations) == 0 {
					invalidModifierRange = r
				}
				p.lexer.Next()
				if invalidModifierRange.Len == 0 && hasOut && (p.lexer.Token == js_lexer.TIn || p.lexer.Token == js_lexer.TIdentifier) {
					// Valid:
					//   "type Foo<out T> = T"
					//   "type Foo<out out> = T"
					//   "type Foo<out out, T> = T"
					//   "type Foo<out out = T> = T"
					//   "type Foo<out out extends T> = T"
					// Invalid:
					//   "type Foo<out out in T> = T"
					//   "type Foo<out out T> = T"
					invalidModifierRange = r
				}
				hasOut = true
				expectIdentifier = false
				continue
			}
			break
		}
		// Only report an error for the first invalid modifier
		if invalidModifierRange.Len > 0 {
			p.log.AddError(&p.tracker, invalidModifierRange, fmt.Sprintf(
				"The modifier %q is not valid here:", p.source.TextForRange(invalidModifierRange)))
		}
		// expectIdentifier => Mandatory identifier (e.g. after "type Foo <in ___")
		// !expectIdentifier => Optional identifier (e.g. after "type Foo <out ___" since "out" may be the identifier)
		if expectIdentifier || p.lexer.Token == js_lexer.TIdentifier {
			p.lexer.Expect(js_lexer.TIdentifier)
		}
		// "class Foo<T extends number> {}"
		if p.lexer.Token == js_lexer.TExtends {
			result = definitelyTypeParameters
			p.lexer.Next()
			p.skipTypeScriptType(js_ast.LLowest)
		}
		// "class Foo<T = void> {}"
		if p.lexer.Token == js_lexer.TEquals {
			result = definitelyTypeParameters
			p.lexer.Next()
			p.skipTypeScriptType(js_ast.LLowest)
		}
		if p.lexer.Token != js_lexer.TComma {
			break
		}
		p.lexer.Next()
		// A trailing comma before ">" is allowed and proves this was a
		// type parameter list
		if p.lexer.Token == js_lexer.TGreaterThan {
			result = definitelyTypeParameters
			break
		}
	}
	p.lexer.ExpectGreaterThan(false /* isInsideJSXElement */)
	return result
}
// skipTypeScriptTypeArgumentsOpts configures skipTypeScriptTypeArguments:
// whether the parse happens inside a JSX element, and whether to emulate the
// TypeScript compiler's stricter "parseTypeArgumentsInExpression" behavior
// (which only accepts a lone ">" terminator).
type skipTypeScriptTypeArgumentsOpts struct {
	isInsideJSXElement bool
	isParseTypeArgumentsInExpression bool
}
// skipTypeScriptTypeArguments skips a "<...>" type argument list if the
// current token can begin one, returning true if anything was consumed.
// Tokens such as "<<" are accepted as a starting "<" via ExpectLessThan.
func (p *parser) skipTypeScriptTypeArguments(opts skipTypeScriptTypeArgumentsOpts) bool {
	switch p.lexer.Token {
	case js_lexer.TLessThan, js_lexer.TLessThanEquals,
		js_lexer.TLessThanLessThan, js_lexer.TLessThanLessThanEquals:
	default:
		return false
	}
	p.lexer.ExpectLessThan(false /* isInsideJSXElement */)
	for {
		p.skipTypeScriptType(js_ast.LLowest)
		if p.lexer.Token != js_lexer.TComma {
			break
		}
		p.lexer.Next()
	}
	// This type argument list must end with a ">"
	if !opts.isParseTypeArgumentsInExpression {
		// Normally TypeScript allows any token starting with ">". For example,
		// "Array<Array<number>>()" is a type argument list even though there's a
		// ">>" token, because ">>" starts with ">".
		p.lexer.ExpectGreaterThan(opts.isInsideJSXElement)
	} else {
		// However, if we're emulating the TypeScript compiler's function called
		// "parseTypeArgumentsInExpression" function, then we must only allow the
		// ">" token itself. For example, "x < y >= z" is not a type argument list.
		//
		// This doesn't detect ">>" in "Array<Array<number>>()" because the inner
		// type argument list isn't a call to "parseTypeArgumentsInExpression"
		// because it's within a type context, not an expression context. So the
		// token that we see here is ">" in that case because the first ">" has
		// already been stripped off of the ">>" by the inner call.
		if opts.isInsideJSXElement {
			p.lexer.ExpectInsideJSXElement(js_lexer.TGreaterThan)
		} else {
			p.lexer.Expect(js_lexer.TGreaterThan)
		}
	}
	return true
}
// trySkipTypeArgumentsInExpressionWithBacktracking attempts to skip a type
// argument list in an expression position. On success it returns true. If
// the lexer panics, the deferred handler rewinds the lexer to its starting
// state and the function returns the zero value (false).
func (p *parser) trySkipTypeArgumentsInExpressionWithBacktracking() bool {
	snapshot := p.lexer
	p.lexer.IsLogDisabled = true

	// Backtrack on a lexer panic by restoring the saved lexer state; any
	// other panic is re-raised unchanged
	defer func() {
		if r := recover(); r != nil {
			if _, ok := r.(js_lexer.LexerPanic); !ok {
				panic(r)
			}
			p.lexer = snapshot
		}
	}()

	if p.skipTypeScriptTypeArguments(skipTypeScriptTypeArgumentsOpts{isParseTypeArgumentsInExpression: true}) {
		// Backtrack if the token following the type argument list isn't
		// allowed to appear there
		if !p.tsCanFollowTypeArgumentsInExpression() {
			p.lexer.Unexpected()
		}
	}

	// Put the log disabled flag back to whatever it was before this call;
	// it may have already been true
	p.lexer.IsLogDisabled = snapshot.IsLogDisabled
	return true
}
// trySkipTypeScriptTypeParametersThenOpenParenWithBacktracking skips a type
// parameter list that must be immediately followed by "(". If the lexer
// panics, the deferred handler rewinds the lexer and the function returns
// the zero result (didNotSkipAnything).
func (p *parser) trySkipTypeScriptTypeParametersThenOpenParenWithBacktracking() skipTypeScriptTypeParametersResult {
	snapshot := p.lexer
	p.lexer.IsLogDisabled = true

	// Backtrack on a lexer panic by restoring the saved lexer state; any
	// other panic is re-raised unchanged
	defer func() {
		if r := recover(); r != nil {
			if _, ok := r.(js_lexer.LexerPanic); !ok {
				panic(r)
			}
			p.lexer = snapshot
		}
	}()

	result := p.skipTypeScriptTypeParameters(allowConstModifier)
	if p.lexer.Token != js_lexer.TOpenParen {
		p.lexer.Unexpected()
	}

	// Put the log disabled flag back to whatever it was before this call;
	// it may have already been true
	p.lexer.IsLogDisabled = snapshot.IsLogDisabled
	return result
}
// trySkipTypeScriptArrowReturnTypeWithBacktracking skips ": ReturnType" and
// requires that it be immediately followed by "=>". If the lexer panics, the
// deferred handler rewinds the lexer and the function returns false.
func (p *parser) trySkipTypeScriptArrowReturnTypeWithBacktracking() bool {
	snapshot := p.lexer
	p.lexer.IsLogDisabled = true

	// Backtrack on a lexer panic by restoring the saved lexer state; any
	// other panic is re-raised unchanged
	defer func() {
		if r := recover(); r != nil {
			if _, ok := r.(js_lexer.LexerPanic); !ok {
				panic(r)
			}
			p.lexer = snapshot
		}
	}()

	p.lexer.Expect(js_lexer.TColon)
	p.skipTypeScriptReturnType()

	// Backtrack unless the next token is "=>"
	if p.lexer.Token != js_lexer.TEqualsGreaterThan {
		p.lexer.Unexpected()
	}

	// Put the log disabled flag back to whatever it was before this call;
	// it may have already been true
	p.lexer.IsLogDisabled = snapshot.IsLogDisabled
	return true
}
// This is a very specific function that determines whether a colon token is a
// TypeScript arrow function return type in the case where the arrow function
// is the middle expression of a JavaScript ternary operator (i.e. is between
// the "?" and ":" tokens). It's separate from the other function above called
// "trySkipTypeScriptArrowReturnTypeWithBacktracking" because it's much more
// expensive, and likely not as robust.
//
// Returns true if the speculative parse succeeds; if the throwaway parser's
// lexer panics, the deferred handler swallows the panic and the function
// returns the zero value (false). The original parser's state is never
// mutated by this function.
func (originalParser *parser) isTypeScriptArrowReturnTypeAfterQuestionAndBeforeColon(await awaitOrYield) bool {
	// Implement "backtracking" by swallowing lexer errors on a temporary parser
	defer func() {
		r := recover()
		if _, isLexerPanic := r.(js_lexer.LexerPanic); isLexerPanic {
			return // Swallow this error
		} else if r != nil {
			panic(r)
		}
	}()
	// THIS IS A GROSS HACK. Some context:
	//
	// JavaScript is designed to not require a backtracking parser. Generally a
	// backtracking parser is not regarded as a good thing and you try to avoid
	// having one if it's not necessary.
	//
	// However, TypeScript's parser does do backtracking in an (admittedly noble)
	// effort to retrofit nice type syntax onto JavaScript. Up until this edge
	// case was discovered, this backtracking was limited to type syntax so
	// esbuild could deal with it by using a backtracking lexer without needing a
	// backtracking parser.
	//
	// This edge case requires a backtracking parser. The TypeScript compiler's
	// algorithm for parsing this is to try to parse the entire arrow function
	// body and then reset all the way back to the colon for the arrow function
	// return type if the token following the arrow function body is not another
	// colon. For example:
	//
	//   x = a ? (b) : c => d;
	//   y = a ? (b) : c => d : e;
	//
	// The first colon of "x" pairs with the "?" because the arrow function
	// "(b) : c => d" is not followed by a colon. However, the first colon of "y"
	// starts a return type because the arrow function "(b) : c => d" is followed
	// by a colon. In other words, the first ":" before the arrow function body
	// must pair with the "?" unless there is another ":" to pair with it after
	// the function body.
	//
	// I'm not going to rewrite esbuild's parser to support backtracking for this
	// one edge case. So instead, esbuild tries to parse the arrow function body
	// using a rough copy of the parser and then always throws the result away.
	// So arrow function bodies will always be parsed twice for this edge case.
	//
	// This is a hack instead of a good solution because the parser isn't designed
	// for this, and doing this is not going to have good test coverage given that
	// it's an edge case. We can't prevent parser code (either currently or in the
	// future) from accidentally depending on some parser state that isn't cloned
	// here. That could result in a parser panic when parsing a more complex
	// version of this edge case.
	p := newParser(logger.NewDeferLog(logger.DeferLogNoVerboseOrDebug, nil), originalParser.source, originalParser.lexer, &originalParser.options)
	// Clone all state that the parser needs to parse this arrow function body
	p.allowIn = originalParser.allowIn
	p.lexer.IsLogDisabled = true
	p.pushScopeForParsePass(js_ast.ScopeEntry, logger.Loc{Start: 0})
	p.pushScopeForParsePass(js_ast.ScopeFunctionArgs, logger.Loc{Start: 1})
	// Parse the return type
	p.lexer.Expect(js_lexer.TColon)
	p.skipTypeScriptReturnType()
	// Parse the body and throw it out (with the side effect of maybe throwing an error)
	_ = p.parseArrowBody([]js_ast.Arg{}, fnOrArrowDataParse{await: await})
	// There must be a colon following the arrow function body to pair with the leading "?"
	p.lexer.Expect(js_lexer.TColon)
	// Parsing was successful if we get here
	return true
}
// trySkipTypeScriptArrowArgsWithBacktracking skips "(args)" and requires
// that it be immediately followed by "=>". If the lexer panics, the deferred
// handler rewinds the lexer and the function returns false.
func (p *parser) trySkipTypeScriptArrowArgsWithBacktracking() bool {
	snapshot := p.lexer
	p.lexer.IsLogDisabled = true

	// Backtrack on a lexer panic by restoring the saved lexer state; any
	// other panic is re-raised unchanged
	defer func() {
		if r := recover(); r != nil {
			if _, ok := r.(js_lexer.LexerPanic); !ok {
				panic(r)
			}
			p.lexer = snapshot
		}
	}()

	p.skipTypeScriptFnArgs()
	p.lexer.Expect(js_lexer.TEqualsGreaterThan)

	// Put the log disabled flag back to whatever it was before this call;
	// it may have already been true
	p.lexer.IsLogDisabled = snapshot.IsLogDisabled
	return true
}
// trySkipTypeScriptConstraintOfInferTypeWithBacktracking skips the
// "extends Constraint" clause of an "infer T extends ..." type. The
// constraint itself must not be a conditional type, and if conditional types
// are allowed in the surrounding context then a following "?" forces a
// backtrack (so the "extends" binds to the outer conditional instead). If
// the lexer panics, the deferred handler rewinds the lexer and the function
// returns false.
func (p *parser) trySkipTypeScriptConstraintOfInferTypeWithBacktracking(flags skipTypeFlags) bool {
	snapshot := p.lexer
	p.lexer.IsLogDisabled = true

	// Backtrack on a lexer panic by restoring the saved lexer state; any
	// other panic is re-raised unchanged
	defer func() {
		if r := recover(); r != nil {
			if _, ok := r.(js_lexer.LexerPanic); !ok {
				panic(r)
			}
			p.lexer = snapshot
		}
	}()

	p.lexer.Expect(js_lexer.TExtends)
	p.skipTypeScriptTypeWithFlags(js_ast.LPrefix, disallowConditionalTypesFlag)
	if !flags.has(disallowConditionalTypesFlag) && p.lexer.Token == js_lexer.TQuestion {
		p.lexer.Unexpected()
	}

	// Put the log disabled flag back to whatever it was before this call;
	// it may have already been true
	p.lexer.IsLogDisabled = snapshot.IsLogDisabled
	return true
}
// isTSArrowFnJSX reports whether the current "<" token should be treated as
// the start of an arrow function under TypeScript's disambiguation rules for
// files containing JSX syntax (e.g. "<T,>(x: T) => x" or
// "<T extends X>(x: T) => x"). It looks ahead with the lexer and then always
// restores the lexer to its original state before returning.
func (p *parser) isTSArrowFnJSX() (isTSArrowFn bool) {
	snapshot := p.lexer
	p.lexer.Next()

	// "<const T>" (TypeScript 5.0 const type parameter modifier)
	if p.lexer.Token == js_lexer.TConst {
		p.lexer.Next()
	}

	if p.lexer.Token == js_lexer.TIdentifier {
		p.lexer.Next()
		switch p.lexer.Token {
		case js_lexer.TComma, js_lexer.TEquals:
			// "<T,>" or "<T = X>" can only be type parameters
			isTSArrowFn = true
		case js_lexer.TExtends:
			// "<T extends X>" is type parameters unless what follows makes
			// it look like a JSX element instead
			p.lexer.Next()
			isTSArrowFn = p.lexer.Token != js_lexer.TEquals && p.lexer.Token != js_lexer.TGreaterThan && p.lexer.Token != js_lexer.TSlash
		}
	}

	// Rewind the lexer to where it was before the lookahead
	p.lexer = snapshot
	return
}
// This function is taken from the official TypeScript compiler source code:
// https://github.com/microsoft/TypeScript/blob/master/src/compiler/parser.ts
//
// This function is pretty inefficient as written, and could be collapsed into
// a single switch statement. But that would make it harder to keep this in
// sync with the TypeScript compiler's source code, so we keep doing it the
// slow way.
//
// tsCanFollowTypeArgumentsInExpression reports whether the current token is
// allowed to appear immediately after a type argument list in an expression
// position (used to decide whether a speculative "<...>" parse should stick).
func (p *parser) tsCanFollowTypeArgumentsInExpression() bool {
	switch p.lexer.Token {
	case
		// These tokens can follow a type argument list in a call expression.
		js_lexer.TOpenParen, // foo<x>(
		js_lexer.TNoSubstitutionTemplateLiteral, // foo<T> `...`
		js_lexer.TTemplateHead: // foo<T> `...${100}...`
		return true
	// A type argument list followed by `<` never makes sense, and a type argument list followed
	// by `>` is ambiguous with a (re-scanned) `>>` operator, so we disqualify both. Also, in
	// this context, `+` and `-` are unary operators, not binary operators.
	case js_lexer.TLessThan,
		js_lexer.TGreaterThan,
		js_lexer.TPlus,
		js_lexer.TMinus,
		// TypeScript always sees "TGreaterThan" instead of these tokens since
		// their scanner works a little differently than our lexer. So since
		// "TGreaterThan" is forbidden above, we also forbid these too.
		js_lexer.TGreaterThanEquals,
		js_lexer.TGreaterThanGreaterThan,
		js_lexer.TGreaterThanGreaterThanEquals,
		js_lexer.TGreaterThanGreaterThanGreaterThan,
		js_lexer.TGreaterThanGreaterThanGreaterThanEquals:
		return false
	}
	// We favor the type argument list interpretation when it is immediately followed by
	// a line break, a binary operator, or something that can't start an expression.
	return p.lexer.HasNewlineBefore || p.tsIsBinaryOperator() || !p.tsIsStartOfExpression()
}
// This function is taken from the official TypeScript compiler source code:
// https://github.com/microsoft/TypeScript/blob/master/src/compiler/parser.ts
func (p *parser) tsIsBinaryOperator() bool {
switch p.lexer.Token {
case js_lexer.TIn:
return p.allowIn
case
js_lexer.TQuestionQuestion,
js_lexer.TBarBar,
js_lexer.TAmpersandAmpersand,
js_lexer.TBar,
js_lexer.TCaret,
js_lexer.TAmpersand,
js_lexer.TEqualsEquals,
js_lexer.TExclamationEquals,
js_lexer.TEqualsEqualsEquals,
js_lexer.TExclamationEqualsEquals,
js_lexer.TLessThan,
js_lexer.TGreaterThan,
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | true |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/js_parser/js_parser_lower.go | internal/js_parser/js_parser_lower.go | // This file contains code for "lowering" syntax, which means converting it to
// older JavaScript. For example, "a ** b" becomes a call to "Math.pow(a, b)"
// when lowered. Which syntax is lowered is determined by the language target.
package js_parser
import (
"fmt"
"github.com/evanw/esbuild/internal/ast"
"github.com/evanw/esbuild/internal/compat"
"github.com/evanw/esbuild/internal/config"
"github.com/evanw/esbuild/internal/helpers"
"github.com/evanw/esbuild/internal/js_ast"
"github.com/evanw/esbuild/internal/logger"
)
// markSyntaxFeature generates a diagnostic when "feature" cannot be used with
// the configured target environment. It returns true when an error was
// emitted (so callers know lowering failed).
func (p *parser) markSyntaxFeature(feature compat.JSFeature, r logger.Range) (didGenerateError bool) {
	if !p.options.unsupportedJSFeatures.Has(feature) {
		// Even when the feature itself is supported, top-level await also
		// requires an output format that keeps ESM import/export syntax
		if feature == compat.TopLevelAwait && !p.options.outputFormat.KeepESMImportExportSyntax() {
			p.log.AddError(&p.tracker, r, fmt.Sprintf(
				"Top-level await is currently not supported with the %q output format", p.options.outputFormat.String()))
			return true
		}
		return false
	}

	where := config.PrettyPrintTargetEnvironment(p.options.originalTargetEnv, p.options.unsupportedJSFeatureOverridesMask)

	// Features with custom diagnostics are handled up front
	switch feature {
	case compat.ImportAttributes:
		p.log.AddError(&p.tracker, r, fmt.Sprintf(
			"Using an arbitrary value as the second argument to \"import()\" is not possible in %s", where))
		return true

	case compat.TopLevelAwait:
		p.log.AddError(&p.tracker, r, fmt.Sprintf(
			"Top-level await is not available in %s", where))
		return true

	case compat.ImportDefer:
		p.log.AddError(&p.tracker, r, fmt.Sprintf(
			"Deferred imports are not available in %s", where))
		return true

	case compat.ImportSource:
		p.log.AddError(&p.tracker, r, fmt.Sprintf(
			"Source phase imports are not available in %s", where))
		return true

	case compat.Bigint:
		// This can't be polyfilled, so only warn (demoted to a debug message
		// inside "try" blocks or when weird-code warnings are suppressed)
		kind := logger.Warning
		if p.suppressWarningsAboutWeirdCode || p.fnOrArrowDataVisit.tryBodyCount > 0 {
			kind = logger.Debug
		}
		p.log.AddID(logger.MsgID_JS_BigInt, kind, &p.tracker, r, fmt.Sprintf(
			"Big integer literals are not available in %s and may crash at run-time", where))
		return true

	case compat.ImportMeta:
		// This can't be polyfilled either, so warn the same way as above
		kind := logger.Warning
		if p.suppressWarningsAboutWeirdCode || p.fnOrArrowDataVisit.tryBodyCount > 0 {
			kind = logger.Debug
		}
		p.log.AddID(logger.MsgID_JS_EmptyImportMeta, kind, &p.tracker, r, fmt.Sprintf(
			"\"import.meta\" is not available in %s and will be empty", where))
		return true
	}

	// All remaining features share the generic "not supported yet" message
	var name string
	switch feature {
	case compat.DefaultArgument:
		name = "default arguments"
	case compat.RestArgument:
		name = "rest arguments"
	case compat.ArraySpread:
		name = "array spread"
	case compat.ForOf:
		name = "for-of loops"
	case compat.ObjectAccessors:
		name = "object accessors"
	case compat.ObjectExtensions:
		name = "object literal extensions"
	case compat.Destructuring:
		name = "destructuring"
	case compat.NewTarget:
		name = "new.target"
	case compat.ConstAndLet:
		// Use the original "const"/"let" keyword text in the message
		name = p.source.TextForRange(r)
	case compat.Class:
		name = "class syntax"
	case compat.Generator:
		name = "generator functions"
	case compat.AsyncAwait:
		name = "async functions"
	case compat.AsyncGenerator:
		name = "async generator functions"
	case compat.ForAwait:
		name = "for-await loops"
	case compat.NestedRestBinding:
		name = "non-identifier array rest patterns"
	default:
		p.log.AddError(&p.tracker, r, fmt.Sprintf(
			"This feature is not available in %s", where))
		return true
	}

	p.log.AddError(&p.tracker, r, fmt.Sprintf(
		"Transforming %s to %s is not supported yet", name, where))
	return true
}
// isStrictMode reports whether the scope currently being visited is in strict
// mode (any mode other than sloppy mode counts).
func (p *parser) isStrictMode() bool {
	return js_ast.SloppyMode != p.currentScope.StrictMode
}
// isStrictModeOutputFormat reports whether the configured output format
// itself implies strict mode (only the ESM format does).
func (p *parser) isStrictModeOutputFormat() bool {
	return config.FormatESModule == p.options.outputFormat
}
// A strictModeFeature identifies a syntax construct that is forbidden or
// restricted in strict mode. It selects the diagnostic message used by
// "markStrictModeFeature" below.
type strictModeFeature uint8

const (
	withStatement                   strictModeFeature = iota // "with" statements
	deleteBareName                                           // "delete" of a bare identifier
	forInVarInit                                             // variable initializers inside for-in loops (can be transformed away)
	evalOrArguments                                          // declarations named "eval" or "arguments" (name passed via "detail")
	reservedWord                                             // use of a strict-mode reserved word (word passed via "detail")
	legacyOctalLiteral                                       // legacy octal numeric literals
	legacyOctalEscape                                        // legacy octal escape sequences in strings
	ifElseFunctionStmt                                       // function declarations directly inside "if" statements
	labelFunctionStmt                                        // function declarations directly inside labels
	duplicateLexicallyDeclaredNames                          // two lexical declarations of the same name
)
// markStrictModeFeature reports a diagnostic for syntax that strict mode
// forbids. "detail" supplies the identifier text for the features whose
// message includes a name. An error is generated if the code is already in
// strict mode, or if the "esm" output format would force strict mode and the
// construct can't be transformed away.
func (p *parser) markStrictModeFeature(feature strictModeFeature, r logger.Range, detail string) {
	canBeTransformed := false
	var text string

	switch feature {
	case withStatement:
		text = "With statements"
	case deleteBareName:
		text = "Delete of a bare identifier"
	case forInVarInit:
		text = "Variable initializers inside for-in loops"
		canBeTransformed = true
	case evalOrArguments:
		text = fmt.Sprintf("Declarations with the name %q", detail)
	case reservedWord:
		text = fmt.Sprintf("%q is a reserved word and", detail)
	case legacyOctalLiteral:
		text = "Legacy octal literals"
	case legacyOctalEscape:
		text = "Legacy octal escape sequences"
	case ifElseFunctionStmt:
		text = "Function declarations inside if statements"
	case labelFunctionStmt:
		text = "Function declarations inside labels"
	case duplicateLexicallyDeclaredNames:
		text = "Duplicate lexically-declared names"
	default:
		text = "This feature"
	}

	if p.isStrictMode() {
		// Explain why this scope is in strict mode in the error message
		where, notes := p.whyStrictMode(p.currentScope)
		p.log.AddErrorWithNotes(&p.tracker, r,
			fmt.Sprintf("%s cannot be used %s", text, where), notes)
		return
	}

	if p.isStrictModeOutputFormat() && !canBeTransformed {
		p.log.AddError(&p.tracker, r,
			fmt.Sprintf("%s cannot be used with the \"esm\" output format due to strict mode", text))
	}
}
// whyStrictMode explains why the given scope is in strict mode. It returns a
// phrase describing the context (normally "in strict mode", or "in an
// ECMAScript module" for implicitly-strict ESM files) together with notes
// that point at the source of the strictness, suitable for attaching to a
// strict-mode diagnostic.
//
// Fix: the JSX note previously read "for the corresponding the JSX helper
// function" (a doubled article); the message now reads correctly.
func (p *parser) whyStrictMode(scope *js_ast.Scope) (where string, notes []logger.MsgData) {
	where = "in strict mode"

	switch scope.StrictMode {
	case js_ast.ImplicitStrictModeClass:
		notes = []logger.MsgData{p.tracker.MsgData(p.enclosingClassKeyword,
			"All code inside a class is implicitly in strict mode")}

	case js_ast.ImplicitStrictModeTSAlwaysStrict:
		tsAlwaysStrict := p.options.tsAlwaysStrict
		// The setting may come from another file, so use that file's tracker
		t := logger.MakeLineColumnTracker(&tsAlwaysStrict.Source)
		notes = []logger.MsgData{t.MsgData(tsAlwaysStrict.Range, fmt.Sprintf(
			"TypeScript's %q setting was enabled here:", tsAlwaysStrict.Name))}

	case js_ast.ImplicitStrictModeJSXAutomaticRuntime:
		notes = []logger.MsgData{p.tracker.MsgData(logger.Range{Loc: p.firstJSXElementLoc, Len: 1},
			"This file is implicitly in strict mode due to the JSX element here:"),
			{Text: "When React's \"automatic\" JSX transform is enabled, using a JSX element automatically inserts " +
				"an \"import\" statement at the top of the file for the corresponding JSX helper function. " +
				"This means the file is considered an ECMAScript module, and all ECMAScript modules use strict mode."}}

	case js_ast.ExplicitStrictMode:
		notes = []logger.MsgData{p.tracker.MsgData(p.source.RangeOfString(scope.UseStrictLoc),
			"Strict mode is triggered by the \"use strict\" directive here:")}

	case js_ast.ImplicitStrictModeESM:
		_, notes = p.whyESModule()
		where = "in an ECMAScript module"
	}

	return
}
// markAsyncFn reports an error if an async function (or async generator when
// "isGenerator" is true) cannot be used with the configured target. It
// returns true when an error was generated.
func (p *parser) markAsyncFn(asyncRange logger.Range, isGenerator bool) (didGenerateError bool) {
	// Lowered async functions are implemented in terms of generators. So if
	// generators aren't supported, async functions aren't supported either.
	// But if generators are supported, then async functions are unconditionally
	// supported because we can use generators to implement them.
	if !p.options.unsupportedJSFeatures.Has(compat.Generator) {
		return false
	}

	if isGenerator {
		return p.markSyntaxFeature(compat.AsyncGenerator, asyncRange)
	}
	return p.markSyntaxFeature(compat.AsyncAwait, asyncRange)
}
// captureThis returns (and records a usage of) the symbol that captures the
// enclosing function's "this" value, creating the "_this" symbol on first use.
func (p *parser) captureThis() ast.Ref {
	capture := p.fnOnlyDataVisit.thisCaptureRef
	if capture == nil {
		newRef := p.newSymbol(ast.SymbolHoisted, "_this")
		p.fnOnlyDataVisit.thisCaptureRef = &newRef
		capture = &newRef
	}
	p.recordUsage(*capture)
	return *capture
}
// captureArguments returns (and records a usage of) the symbol that captures
// the enclosing function's "arguments" object, creating the "_arguments"
// symbol on first use.
func (p *parser) captureArguments() ast.Ref {
	capture := p.fnOnlyDataVisit.argumentsCaptureRef
	if capture == nil {
		newRef := p.newSymbol(ast.SymbolHoisted, "_arguments")
		p.fnOnlyDataVisit.argumentsCaptureRef = &newRef
		capture = &newRef
	}
	p.recordUsage(*capture)
	return *capture
}
// lowerFunction rewrites a function in place so that it avoids syntax the
// configured target doesn't support:
//
//   - When object rest/spread is unsupported, object rest binding patterns in
//     the arguments are replaced with temporaries and a "var" declaration is
//     prepended to the body.
//
//   - When async (or async generator) syntax is unsupported, the body is
//     moved into a nested generator function passed to the "__async" (or
//     "__asyncGenerator") runtime helper, and the async/generator flags are
//     cleared.
//
// The pointer parameters are in/out: "isAsync", "isGenerator", "args",
// "preferExpr", and "hasRestArg" may all be mutated. "isGenerator" and
// "preferExpr" may be nil (they are checked before use). "isArrow" selects
// the arrow-specific handling of "this" and "arguments".
func (p *parser) lowerFunction(
	isAsync *bool,
	isGenerator *bool,
	args *[]js_ast.Arg,
	bodyLoc logger.Loc,
	bodyBlock *js_ast.SBlock,
	preferExpr *bool,
	hasRestArg *bool,
	isArrow bool,
) {
	// Lower object rest binding patterns in function arguments
	if p.options.unsupportedJSFeatures.Has(compat.ObjectRestSpread) {
		var prefixStmts []js_ast.Stmt

		// Lower each argument individually instead of lowering all arguments
		// together. There is a correctness tradeoff here around default values
		// for function arguments, with no right answer.
		//
		// Lowering all arguments together will preserve the order of side effects
		// for default values, but will mess up their scope:
		//
		//   // Side effect order: a(), b(), c()
		//   function foo([{[a()]: w, ...x}, y = b()], z = c()) {}
		//
		//   // Side effect order is correct but scope is wrong
		//   function foo(_a, _b) {
		//     var [[{[a()]: w, ...x}, y = b()], z = c()] = [_a, _b]
		//   }
		//
		// Lowering each argument individually will preserve the scope for default
		// values that don't contain object rest binding patterns, but will mess up
		// the side effect order:
		//
		//   // Side effect order: a(), b(), c()
		//   function foo([{[a()]: w, ...x}, y = b()], z = c()) {}
		//
		//   // Side effect order is wrong but scope for c() is correct
		//   function foo(_a, z = c()) {
		//     var [{[a()]: w, ...x}, y = b()] = _a
		//   }
		//
		// This transform chooses to lower each argument individually with the
		// thinking that perhaps scope matters more in real-world code than side
		// effect order.
		for i, arg := range *args {
			if bindingHasObjectRest(arg.Binding) {
				ref := p.generateTempRef(tempRefNoDeclare, "")
				target := js_ast.ConvertBindingToExpr(arg.Binding, nil)
				init := js_ast.Expr{Loc: arg.Binding.Loc, Data: &js_ast.EIdentifier{Ref: ref}}
				p.recordUsage(ref)

				if decls, ok := p.lowerObjectRestToDecls(target, init, nil); ok {
					// Replace the binding but leave the default value intact
					(*args)[i].Binding.Data = &js_ast.BIdentifier{Ref: ref}

					// Append a variable declaration to the function body
					prefixStmts = append(prefixStmts, js_ast.Stmt{Loc: arg.Binding.Loc,
						Data: &js_ast.SLocal{Kind: js_ast.LocalVar, Decls: decls}})
				}
			}
		}

		if len(prefixStmts) > 0 {
			bodyBlock.Stmts = append(prefixStmts, bodyBlock.Stmts...)
		}
	}

	// Lower async functions and async generator functions
	if *isAsync && (p.options.unsupportedJSFeatures.Has(compat.AsyncAwait) || (isGenerator != nil && *isGenerator && p.options.unsupportedJSFeatures.Has(compat.AsyncGenerator))) {
		// Use the shortened form if we're an arrow function
		if preferExpr != nil {
			*preferExpr = true
		}

		// Determine the value for "this"
		thisValue, hasThisValue := p.valueForThis(
			bodyLoc,
			false, /* shouldWarn */
			js_ast.AssignTargetNone,
			false, /* isCallTarget */
			false, /* isDeleteTarget */
		)
		if isArrow && !p.fnOnlyDataVisit.hasThisUsage {
			// An arrow function that never uses "this" can pass "null" instead
			thisValue = js_ast.Expr{Loc: bodyLoc, Data: js_ast.ENullShared}
		} else if !hasThisValue {
			thisValue = js_ast.Expr{Loc: bodyLoc, Data: js_ast.EThisShared}
		}

		// Move the code into a nested generator function
		fn := js_ast.Fn{
			IsGenerator: true,
			Body:        js_ast.FnBody{Loc: bodyLoc, Block: *bodyBlock},
		}
		bodyBlock.Stmts = nil

		// Errors thrown during argument evaluation must reject the
		// resulting promise, which needs more complex code to handle
		couldThrowErrors := false
		for _, arg := range *args {
			if _, ok := arg.Binding.Data.(*js_ast.BIdentifier); !ok ||
				(arg.DefaultOrNil.Data != nil && couldPotentiallyThrow(arg.DefaultOrNil.Data)) {
				couldThrowErrors = true
				break
			}
		}

		// Forward the arguments to the wrapper function
		usesArgumentsRef := !isArrow && p.fnOnlyDataVisit.argumentsRef != nil &&
			p.symbolUses[*p.fnOnlyDataVisit.argumentsRef].CountEstimate > 0
		var forwardedArgs js_ast.Expr
		if !couldThrowErrors && !usesArgumentsRef {
			// Simple case: the arguments can stay on the outer function. It's
			// worth separating out the simple case because it's the common case
			// and it generates smaller code.
			forwardedArgs = js_ast.Expr{Loc: bodyLoc, Data: js_ast.ENullShared}
		} else {
			// If code uses "arguments" then we must move the arguments to the inner
			// function. This is because you can modify arguments by assigning to
			// elements in the "arguments" object:
			//
			//   async function foo(x) {
			//     arguments[0] = 1;
			//     // "x" must be 1 here
			//   }
			//
			// Complex case: the arguments must be moved to the inner function
			fn.Args = *args
			fn.HasRestArg = *hasRestArg
			*args = nil
			*hasRestArg = false

			// Make sure to not change the value of the "length" property. This is
			// done by generating dummy arguments for the outer function equal to
			// the expected length of the function:
			//
			//   async function foo(a, b, c = d, ...e) {
			//   }
			//
			// This turns into:
			//
			//   function foo(_0, _1) {
			//     return __async(this, arguments, function* (a, b, c = d, ...e) {
			//     });
			//   }
			//
			// The "_0" and "_1" are dummy variables to ensure "foo.length" is 2.
			for i, arg := range fn.Args {
				if arg.DefaultOrNil.Data != nil || fn.HasRestArg && i+1 == len(fn.Args) {
					// Arguments from here on don't add to the "length"
					break
				}

				// Generate a dummy variable
				argRef := p.newSymbol(ast.SymbolOther, fmt.Sprintf("_%d", i))
				p.currentScope.Generated = append(p.currentScope.Generated, argRef)
				*args = append(*args, js_ast.Arg{Binding: js_ast.Binding{Loc: arg.Binding.Loc, Data: &js_ast.BIdentifier{Ref: argRef}}})
			}

			// Forward all arguments from the outer function to the inner function
			if !isArrow {
				// Normal functions can just use "arguments" to forward everything
				forwardedArgs = js_ast.Expr{Loc: bodyLoc, Data: &js_ast.EIdentifier{Ref: *p.fnOnlyDataVisit.argumentsRef}}
			} else {
				// Arrow functions can't use "arguments", so we need to forward
				// the arguments manually.
				//
				// Note that if the arrow function references "arguments" in its body
				// (even if it's inside another nested arrow function), that reference
				// to "arguments" will have to be substituted with a captured variable.
				// This is because we're changing the arrow function into a generator
				// function, which introduces a variable named "arguments". This is
				// handled separately during symbol resolution instead of being handled
				// here so we don't need to re-traverse the arrow function body.

				// If we need to forward more than the current number of arguments,
				// add a rest argument to the set of forwarding variables. This is the
				// case if the arrow function has rest or default arguments.
				if len(*args) < len(fn.Args) {
					argRef := p.newSymbol(ast.SymbolOther, fmt.Sprintf("_%d", len(*args)))
					p.currentScope.Generated = append(p.currentScope.Generated, argRef)
					*args = append(*args, js_ast.Arg{Binding: js_ast.Binding{Loc: bodyLoc, Data: &js_ast.BIdentifier{Ref: argRef}}})
					*hasRestArg = true
				}

				// Forward all of the arguments
				items := make([]js_ast.Expr, 0, len(*args))
				for i, arg := range *args {
					id := arg.Binding.Data.(*js_ast.BIdentifier)
					item := js_ast.Expr{Loc: arg.Binding.Loc, Data: &js_ast.EIdentifier{Ref: id.Ref}}
					if *hasRestArg && i+1 == len(*args) {
						// The last forwarding variable becomes a spread element
						item.Data = &js_ast.ESpread{Value: item}
					}
					items = append(items, item)
				}
				forwardedArgs = js_ast.Expr{Loc: bodyLoc, Data: &js_ast.EArray{Items: items, IsSingleLine: true}}
			}
		}

		var name string
		if isGenerator != nil && *isGenerator {
			// "async function* foo(a, b) { stmts }" => "function foo(a, b) { return __asyncGenerator(this, null, function* () { stmts }) }"
			name = "__asyncGenerator"
			*isGenerator = false
		} else {
			// "async function foo(a, b) { stmts }" => "function foo(a, b) { return __async(this, null, function* () { stmts }) }"
			name = "__async"
		}
		*isAsync = false
		callAsync := p.callRuntime(bodyLoc, name, []js_ast.Expr{
			thisValue,
			forwardedArgs,
			{Loc: bodyLoc, Data: &js_ast.EFunction{Fn: fn}},
		})
		bodyBlock.Stmts = []js_ast.Stmt{{Loc: bodyLoc, Data: &js_ast.SReturn{ValueOrNil: callAsync}}}
	}
}
// lowerOptionalChain converts optional chaining syntax ("a?.b", "a?.[b]",
// "a?.()", and "delete a?.b") into conditional expressions for targets that
// don't support it. Chains containing private names that must be lowered are
// lowered here too, even when the target supports optional chaining, because
// the private-name shims can't be combined with "?." syntax.
//
// The algorithm proceeds in five steps: flatten the chain from the outside in
// (step 1), capture the value for "this" if the chain starts with a call off
// a property access (step 2), capture the starting value (step 3), rebuild
// the chain from the inside out (step 4), and wrap everything in an
// "== null" conditional (step 5).
func (p *parser) lowerOptionalChain(expr js_ast.Expr, in exprIn, childOut exprOut) (js_ast.Expr, exprOut) {
	valueWhenUndefined := js_ast.Expr{Loc: expr.Loc, Data: js_ast.EUndefinedShared}
	endsWithPropertyAccess := false
	containsPrivateName := false
	startsWithCall := false
	originalExpr := expr
	chain := []js_ast.Expr{}
	loc := expr.Loc

	// Step 1: Get an array of all expressions in the chain. We're traversing the
	// chain from the outside in, so the array will be filled in "backwards".
flatten:
	for {
		chain = append(chain, expr)

		switch e := expr.Data.(type) {
		case *js_ast.EDot:
			expr = e.Target
			if len(chain) == 1 {
				endsWithPropertyAccess = true
			}
			if e.OptionalChain == js_ast.OptionalChainStart {
				break flatten
			}

		case *js_ast.EIndex:
			expr = e.Target
			if len(chain) == 1 {
				endsWithPropertyAccess = true
			}

			// If this is a private name that needs to be lowered, the entire chain
			// itself will have to be lowered even if the language target supports
			// optional chaining. This is because there's no way to use our shim
			// function for private names with optional chaining syntax.
			if private, ok := e.Index.Data.(*js_ast.EPrivateIdentifier); ok && p.privateSymbolNeedsToBeLowered(private) {
				containsPrivateName = true
			}

			if e.OptionalChain == js_ast.OptionalChainStart {
				break flatten
			}

		case *js_ast.ECall:
			expr = e.Target
			if e.OptionalChain == js_ast.OptionalChainStart {
				startsWithCall = true
				break flatten
			}

		case *js_ast.EUnary: // UnOpDelete
			// "delete a?.b" evaluates to "true" when the chain short-circuits
			valueWhenUndefined = js_ast.Expr{Loc: loc, Data: &js_ast.EBoolean{Value: true}}
			expr = e.Value

		default:
			panic("Internal error")
		}
	}

	// Stop now if we can strip the whole chain as dead code. Since the chain is
	// lazily evaluated, it's safe to just drop the code entirely.
	if p.options.minifySyntax {
		if isNullOrUndefined, sideEffects, ok := js_ast.ToNullOrUndefinedWithSideEffects(expr.Data); ok && isNullOrUndefined {
			if sideEffects == js_ast.CouldHaveSideEffects {
				return js_ast.JoinWithComma(p.astHelpers.SimplifyUnusedExpr(expr, p.options.unsupportedJSFeatures), valueWhenUndefined), exprOut{}
			}
			return valueWhenUndefined, exprOut{}
		}
	} else {
		switch expr.Data.(type) {
		case *js_ast.ENull, *js_ast.EUndefined:
			return valueWhenUndefined, exprOut{}
		}
	}

	// We need to lower this if this is an optional call off of a private name
	// such as "foo.#bar?.()" because the value of "this" must be captured.
	if _, _, private := p.extractPrivateIndex(expr); private != nil {
		containsPrivateName = true
	}

	// Don't lower this if we don't need to. This check must be done here instead
	// of earlier so we can do the dead code elimination above when the target is
	// null or undefined.
	if !p.options.unsupportedJSFeatures.Has(compat.OptionalChain) && !containsPrivateName {
		return originalExpr, exprOut{}
	}

	// Step 2: Figure out if we need to capture the value for "this" for the
	// initial ECall. This will be passed to ".call(this, ...args)" later.
	var thisArg js_ast.Expr
	var targetWrapFunc func(js_ast.Expr) js_ast.Expr
	if startsWithCall {
		if childOut.thisArgFunc != nil {
			// The initial value is a nested optional chain that ended in a property
			// access. The nested chain was processed first and has saved the
			// appropriate value for "this". The callback here will return a
			// reference to that saved location.
			thisArg = childOut.thisArgFunc()
		} else {
			// The initial value is a normal expression. If it's a property access,
			// strip the property off and save the target of the property access to
			// be used as the value for "this".
			switch e := expr.Data.(type) {
			case *js_ast.EDot:
				if _, ok := e.Target.Data.(*js_ast.ESuper); ok {
					// Lower "super.prop" if necessary
					if p.shouldLowerSuperPropertyAccess(e.Target) {
						key := js_ast.Expr{Loc: e.NameLoc, Data: &js_ast.EString{Value: helpers.StringToUTF16(e.Name)}}
						expr = p.lowerSuperPropertyGet(expr.Loc, key)
					}

					// Special-case "super.foo?.()" to avoid a syntax error. Without this,
					// we would generate:
					//
					//   (_b = (_a = super).foo) == null ? void 0 : _b.call(_a)
					//
					// which is a syntax error. Now we generate this instead:
					//
					//   (_a = super.foo) == null ? void 0 : _a.call(this)
					//
					thisArg = js_ast.Expr{Loc: loc, Data: js_ast.EThisShared}
				} else {
					targetFunc, wrapFunc := p.captureValueWithPossibleSideEffects(loc, 2, e.Target, valueDefinitelyNotMutated)
					expr = js_ast.Expr{Loc: loc, Data: &js_ast.EDot{
						Target:  targetFunc(),
						Name:    e.Name,
						NameLoc: e.NameLoc,
					}}
					thisArg = targetFunc()
					targetWrapFunc = wrapFunc
				}

			case *js_ast.EIndex:
				if _, ok := e.Target.Data.(*js_ast.ESuper); ok {
					// Lower "super[prop]" if necessary
					if p.shouldLowerSuperPropertyAccess(e.Target) {
						expr = p.lowerSuperPropertyGet(expr.Loc, e.Index)
					}

					// See the comment above about a similar special case for EDot
					thisArg = js_ast.Expr{Loc: loc, Data: js_ast.EThisShared}
				} else {
					targetFunc, wrapFunc := p.captureValueWithPossibleSideEffects(loc, 2, e.Target, valueDefinitelyNotMutated)
					targetWrapFunc = wrapFunc

					// Capture the value of "this" if the target of the starting call
					// expression is a private property access
					if private, ok := e.Index.Data.(*js_ast.EPrivateIdentifier); ok && p.privateSymbolNeedsToBeLowered(private) {
						// "foo().#bar?.()" must capture "foo()" for "this"
						expr = p.lowerPrivateGet(targetFunc(), e.Index.Loc, private)
						thisArg = targetFunc()
						break
					}

					expr = js_ast.Expr{Loc: loc, Data: &js_ast.EIndex{
						Target: targetFunc(),
						Index:  e.Index,
					}}
					thisArg = targetFunc()
				}
			}
		}
	}

	// Step 3: Figure out if we need to capture the starting value. We don't need
	// to capture it if it doesn't have any side effects (e.g. it's just a bare
	// identifier). Skipping the capture reduces code size and matches the output
	// of the TypeScript compiler.
	exprFunc, exprWrapFunc := p.captureValueWithPossibleSideEffects(loc, 2, expr, valueDefinitelyNotMutated)
	expr = exprFunc()
	result := exprFunc()

	// Step 4: Wrap the starting value by each expression in the chain. We
	// traverse the chain in reverse because we want to go from the inside out
	// and the chain was built from the outside in.
	var parentThisArgFunc func() js_ast.Expr
	var parentThisArgWrapFunc func(js_ast.Expr) js_ast.Expr
	var privateThisFunc func() js_ast.Expr
	var privateThisWrapFunc func(js_ast.Expr) js_ast.Expr
	for i := len(chain) - 1; i >= 0; i-- {
		// Save a reference to the value of "this" for our parent ECall
		if i == 0 && in.storeThisArgForParentOptionalChain && endsWithPropertyAccess {
			parentThisArgFunc, parentThisArgWrapFunc = p.captureValueWithPossibleSideEffects(result.Loc, 2, result, valueDefinitelyNotMutated)
			result = parentThisArgFunc()
		}

		switch e := chain[i].Data.(type) {
		case *js_ast.EDot:
			result = js_ast.Expr{Loc: loc, Data: &js_ast.EDot{
				Target:  result,
				Name:    e.Name,
				NameLoc: e.NameLoc,
			}}

		case *js_ast.EIndex:
			if private, ok := e.Index.Data.(*js_ast.EPrivateIdentifier); ok && p.privateSymbolNeedsToBeLowered(private) {
				// If this is private name property access inside a call expression and
				// the call expression is part of this chain, then the call expression
				// is going to need a copy of the property access target as the value
				// for "this" for the call. Example for this case: "foo.#bar?.()"
				if i > 0 {
					if _, ok := chain[i-1].Data.(*js_ast.ECall); ok {
						privateThisFunc, privateThisWrapFunc = p.captureValueWithPossibleSideEffects(loc, 2, result, valueDefinitelyNotMutated)
						result = privateThisFunc()
					}
				}

				result = p.lowerPrivateGet(result, e.Index.Loc, private)
				continue
			}

			result = js_ast.Expr{Loc: loc, Data: &js_ast.EIndex{
				Target: result,
				Index:  e.Index,
			}}

		case *js_ast.ECall:
			// If this is the initial ECall in the chain and it's being called off of
			// a property access, invoke the function using ".call(this, ...args)" to
			// explicitly provide the value for "this".
			if i == len(chain)-1 && thisArg.Data != nil {
				result = js_ast.Expr{Loc: loc, Data: &js_ast.ECall{
					Target: js_ast.Expr{Loc: loc, Data: &js_ast.EDot{
						Target:  result,
						Name:    "call",
						NameLoc: loc,
					}},
					Args:                   append([]js_ast.Expr{thisArg}, e.Args...),
					CanBeUnwrappedIfUnused: e.CanBeUnwrappedIfUnused,
					IsMultiLine:            e.IsMultiLine,
					Kind:                   js_ast.TargetWasOriginallyPropertyAccess,
				}}
				break
			}

			// If the target of this call expression is a private name property
			// access that's also part of this chain, then we must use the copy of
			// the property access target that was stashed away earlier as the value
			// for "this" for the call. Example for this case: "foo.#bar?.()"
			if privateThisFunc != nil {
				result = privateThisWrapFunc(js_ast.Expr{Loc: loc, Data: &js_ast.ECall{
					Target: js_ast.Expr{Loc: loc, Data: &js_ast.EDot{
						Target:  result,
						Name:    "call",
						NameLoc: loc,
					}},
					Args:                   append([]js_ast.Expr{privateThisFunc()}, e.Args...),
					CanBeUnwrappedIfUnused: e.CanBeUnwrappedIfUnused,
					IsMultiLine:            e.IsMultiLine,
					Kind:                   js_ast.TargetWasOriginallyPropertyAccess,
				}})
				privateThisFunc = nil
				break
			}

			result = js_ast.Expr{Loc: loc, Data: &js_ast.ECall{
				Target:                 result,
				Args:                   e.Args,
				CanBeUnwrappedIfUnused: e.CanBeUnwrappedIfUnused,
				IsMultiLine:            e.IsMultiLine,
				Kind:                   e.Kind,
			}}

		case *js_ast.EUnary:
			result = js_ast.Expr{Loc: loc, Data: &js_ast.EUnary{
				Op:    js_ast.UnOpDelete,
				Value: result,

				// If a delete of an optional chain takes place, it behaves as if the
				// optional chain isn't there with regard to the "delete" semantics.
				WasOriginallyDeleteOfIdentifierOrPropertyAccess: e.WasOriginallyDeleteOfIdentifierOrPropertyAccess,
			}}

		default:
			panic("Internal error")
		}
	}

	// Step 5: Wrap it all in a conditional that returns the chain or the default
	// value if the initial value is null/undefined. The default value is usually
	// "undefined" but is "true" if the chain ends in a "delete" operator.
	//
	//   "x?.y"     => "x == null ? void 0 : x.y"
	//   "x()?.y()" => "(_a = x()) == null ? void 0 : _a.y()"
	result = js_ast.Expr{Loc: loc, Data: &js_ast.EIf{
		Test: js_ast.Expr{Loc: loc, Data: &js_ast.EBinary{
			Op:    js_ast.BinOpLooseEq,
			Left:  expr,
			Right: js_ast.Expr{Loc: loc, Data: js_ast.ENullShared},
		}},
		Yes: valueWhenUndefined,
		No:  result,
	}}
	if exprWrapFunc != nil {
		result = exprWrapFunc(result)
	}
	if targetWrapFunc != nil {
		result = targetWrapFunc(result)
	}
	if childOut.thisArgWrapFunc != nil {
		result = childOut.thisArgWrapFunc(result)
	}
	return result, exprOut{
		thisArgFunc:     parentThisArgFunc,
		thisArgWrapFunc: parentThisArgWrapFunc,
	}
}
// lowerParenthesizedOptionalChain lowers a call whose target is a
// parenthesized optional chain, e.g. "(a?.b)()". The chain was lowered first
// and stashed the property access target in "childOut"; the call is rewritten
// to ".call(thisArg, ...)" so that target is used as "this".
func (p *parser) lowerParenthesizedOptionalChain(loc logger.Loc, e *js_ast.ECall, childOut exprOut) js_ast.Expr {
	// Prepend the saved "this" value to the original argument list
	callArgs := make([]js_ast.Expr, 0, len(e.Args)+1)
	callArgs = append(callArgs, childOut.thisArgFunc())
	callArgs = append(callArgs, e.Args...)

	call := &js_ast.ECall{
		Target: js_ast.Expr{Loc: loc, Data: &js_ast.EDot{
			Target:  e.Target,
			Name:    "call",
			NameLoc: loc,
		}},
		Args:        callArgs,
		IsMultiLine: e.IsMultiLine,
		Kind:        js_ast.TargetWasOriginallyPropertyAccess,
	}
	return childOut.thisArgWrapFunc(js_ast.Expr{Loc: loc, Data: call})
}
// lowerAssignmentOperator invokes "callback" with two expressions that both
// refer to the assignment target "value", capturing side effects in the
// target (and, for computed accesses, the index) so they only happen once.
// Based on how callers use it, the first expression is the one assigned to
// and the second is the one read; for a plain identifier both refer to the
// same symbol. The callback's result is returned wrapped in any captured
// side-effect expressions.
func (p *parser) lowerAssignmentOperator(value js_ast.Expr, callback func(js_ast.Expr, js_ast.Expr) js_ast.Expr) js_ast.Expr {
	switch left := value.Data.(type) {
	case *js_ast.EDot:
		if left.OptionalChain == js_ast.OptionalChainNone {
			// Capture the target so "a().b op= c" only evaluates "a()" once
			referenceFunc, wrapFunc := p.captureValueWithPossibleSideEffects(value.Loc, 2, left.Target, valueDefinitelyNotMutated)
			return wrapFunc(callback(
				js_ast.Expr{Loc: value.Loc, Data: &js_ast.EDot{
					Target:  referenceFunc(),
					Name:    left.Name,
					NameLoc: left.NameLoc,
				}},
				js_ast.Expr{Loc: value.Loc, Data: &js_ast.EDot{
					Target:  referenceFunc(),
					Name:    left.Name,
					NameLoc: left.NameLoc,
				}},
			))
		}

	case *js_ast.EIndex:
		if left.OptionalChain == js_ast.OptionalChainNone {
			// Capture both the target and the index expression
			targetFunc, targetWrapFunc := p.captureValueWithPossibleSideEffects(value.Loc, 2, left.Target, valueDefinitelyNotMutated)
			indexFunc, indexWrapFunc := p.captureValueWithPossibleSideEffects(value.Loc, 2, left.Index, valueDefinitelyNotMutated)
			return targetWrapFunc(indexWrapFunc(callback(
				js_ast.Expr{Loc: value.Loc, Data: &js_ast.EIndex{
					Target: targetFunc(),
					Index:  indexFunc(),
				}},
				js_ast.Expr{Loc: value.Loc, Data: &js_ast.EIndex{
					Target: targetFunc(),
					Index:  indexFunc(),
				}},
			)))
		}

	case *js_ast.EIdentifier:
		return callback(
			js_ast.Expr{Loc: value.Loc, Data: &js_ast.EIdentifier{Ref: left.Ref}},
			value,
		)
	}

	// We shouldn't get here with valid syntax? Just let this through for now
	// since there's currently no assignment target validation. Garbage in,
	// garbage out.
	return value
}
// lowerExponentiationAssignmentOperator lowers "**=" by rewriting it in terms
// of the "__pow" runtime helper.
func (p *parser) lowerExponentiationAssignmentOperator(loc logger.Loc, e *js_ast.EBinary) js_ast.Expr {
	if target, privateLoc, private := p.extractPrivateIndex(e.Left); private != nil {
		// "a.#b **= c" => "__privateSet(a, #b, __pow(__privateGet(a, #b), c))"
		targetFunc, targetWrapFunc := p.captureValueWithPossibleSideEffects(loc, 2, target, valueDefinitelyNotMutated)

		// Note: the set target is materialized before the get target to keep
		// the same left-to-right evaluation order as the nested original
		setTarget := targetFunc()
		newValue := p.callRuntime(loc, "__pow", []js_ast.Expr{
			p.lowerPrivateGet(targetFunc(), privateLoc, private),
			e.Right,
		})
		return targetWrapFunc(p.lowerPrivateSet(setTarget, privateLoc, private, newValue))
	}

	// "a **= b" => "a = __pow(a, b)"
	return p.lowerAssignmentOperator(e.Left, func(writeRef js_ast.Expr, readRef js_ast.Expr) js_ast.Expr {
		return js_ast.Assign(writeRef, p.callRuntime(loc, "__pow", []js_ast.Expr{readRef, e.Right}))
	})
}
// lowerNullishCoalescingAssignmentOperator lowers "??=" when the target is a
// private name that must be lowered, or when logical assignment syntax is
// unsupported. It returns the lowered expression and true, or a zero Expr
// and false when no lowering is needed.
func (p *parser) lowerNullishCoalescingAssignmentOperator(loc logger.Loc, e *js_ast.EBinary) (js_ast.Expr, bool) {
	if target, privateLoc, private := p.extractPrivateIndex(e.Left); private != nil {
		if p.options.unsupportedJSFeatures.Has(compat.NullishCoalescing) {
			// "a.#b ??= c" => "(_a = __privateGet(a, #b)) != null ? _a : __privateSet(a, #b, c)"
			targetFunc, targetWrapFunc := p.captureValueWithPossibleSideEffects(loc, 2, target, valueDefinitelyNotMutated)
			left := p.lowerPrivateGet(targetFunc(), privateLoc, private)
			right := p.lowerPrivateSet(targetFunc(), privateLoc, private, e.Right)
			return targetWrapFunc(p.lowerNullishCoalescing(loc, left, right)), true
		}

		// "a.#b ??= c" => "__privateGet(a, #b) ?? __privateSet(a, #b, c)"
		targetFunc, targetWrapFunc := p.captureValueWithPossibleSideEffects(loc, 2, target, valueDefinitelyNotMutated)
		return targetWrapFunc(js_ast.Expr{Loc: loc, Data: &js_ast.EBinary{
			Op:    js_ast.BinOpNullishCoalescing,
			Left:  p.lowerPrivateGet(targetFunc(), privateLoc, private),
			Right: p.lowerPrivateSet(targetFunc(), privateLoc, private, e.Right),
		}}), true
	}

	if p.options.unsupportedJSFeatures.Has(compat.LogicalAssignment) {
		return p.lowerAssignmentOperator(e.Left, func(a js_ast.Expr, b js_ast.Expr) js_ast.Expr {
			if p.options.unsupportedJSFeatures.Has(compat.NullishCoalescing) {
				// "a ??= b" => "(_a = a) != null ? _a : a = b"
				return p.lowerNullishCoalescing(loc, a, js_ast.Assign(b, e.Right))
			}

			// "a ??= b" => "a ?? (a = b)"
			return js_ast.Expr{Loc: loc, Data: &js_ast.EBinary{
				Op:    js_ast.BinOpNullishCoalescing,
				Left:  a,
				Right: js_ast.Assign(b, e.Right),
			}}
		}), true
	}

	return js_ast.Expr{}, false
}
func (p *parser) lowerLogicalAssignmentOperator(loc logger.Loc, e *js_ast.EBinary, op js_ast.OpCode) (js_ast.Expr, bool) {
if target, privateLoc, private := p.extractPrivateIndex(e.Left); private != nil {
// "a.#b &&= c" => "__privateGet(a, #b) && __privateSet(a, #b, c)"
// "a.#b ||= c" => "__privateGet(a, #b) || __privateSet(a, #b, c)"
targetFunc, targetWrapFunc := p.captureValueWithPossibleSideEffects(loc, 2, target, valueDefinitelyNotMutated)
return targetWrapFunc(js_ast.Expr{Loc: loc, Data: &js_ast.EBinary{
Op: op,
Left: p.lowerPrivateGet(targetFunc(), privateLoc, private),
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | true |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/js_parser/sourcemap_parser.go | internal/js_parser/sourcemap_parser.go | package js_parser
import (
"fmt"
"net/url"
"sort"
"strings"
"github.com/evanw/esbuild/internal/ast"
"github.com/evanw/esbuild/internal/helpers"
"github.com/evanw/esbuild/internal/js_ast"
"github.com/evanw/esbuild/internal/logger"
"github.com/evanw/esbuild/internal/sourcemap"
)
// New specification: https://tc39.es/ecma426/
// Old specification: https://sourcemaps.info/spec.html
// ParseSourceMap parses the JSON source map in "source" and returns it, or
// nil if the map is invalid or effectively empty. Both plain source maps and
// indexed source maps (the "sections" format) are handled; indexed maps are
// flattened into a single aggregated list of sources, names, and mappings.
// Shape errors are reported to "log"; bad "mappings" data produces a warning.
func ParseSourceMap(log logger.Log, source logger.Source) *sourcemap.SourceMap {
	expr, ok := ParseJSON(log, source, JSONOptions{ErrorSuffix: " in source map"})
	if !ok {
		return nil
	}

	obj, ok := expr.Data.(*js_ast.EObject)
	tracker := logger.MakeLineColumnTracker(&source)
	if !ok {
		log.AddError(&tracker, logger.Range{Loc: expr.Loc}, "Invalid source map")
		return nil
	}

	// An indexed source map stores each sub-map together with the generated
	// line/column offset where that sub-map starts to apply
	type sourceMapSection struct {
		lineOffset   int32
		columnOffset int32
		sourceMap    *js_ast.EObject
	}
	var sections []sourceMapSection
	hasSections := false

	// Check for the indexed "sections" format first
	for _, prop := range obj.Properties {
		if !helpers.UTF16EqualsString(prop.Key.Data.(*js_ast.EString).Value, "sections") {
			continue
		}
		if value, ok := prop.ValueOrNil.Data.(*js_ast.EArray); ok {
			for _, item := range value.Items {
				if element, ok := item.Data.(*js_ast.EObject); ok {
					var sectionLineOffset int32
					var sectionColumnOffset int32
					var sectionSourceMap *js_ast.EObject
					for _, sectionProp := range element.Properties {
						switch helpers.UTF16ToString(sectionProp.Key.Data.(*js_ast.EString).Value) {
						case "offset":
							if offsetValue, ok := sectionProp.ValueOrNil.Data.(*js_ast.EObject); ok {
								for _, offsetProp := range offsetValue.Properties {
									switch helpers.UTF16ToString(offsetProp.Key.Data.(*js_ast.EString).Value) {
									case "line":
										if lineValue, ok := offsetProp.ValueOrNil.Data.(*js_ast.ENumber); ok {
											sectionLineOffset = int32(lineValue.Value)
										}
									case "column":
										if columnValue, ok := offsetProp.ValueOrNil.Data.(*js_ast.ENumber); ok {
											sectionColumnOffset = int32(columnValue.Value)
										}
									}
								}
							} else {
								log.AddError(&tracker, logger.Range{Loc: sectionProp.ValueOrNil.Loc}, "Expected \"offset\" to be an object")
								return nil
							}

						case "map":
							if mapValue, ok := sectionProp.ValueOrNil.Data.(*js_ast.EObject); ok {
								sectionSourceMap = mapValue
							} else {
								log.AddError(&tracker, logger.Range{Loc: sectionProp.ValueOrNil.Loc}, "Expected \"map\" to be an object")
								return nil
							}
						}
					}
					if sectionSourceMap != nil {
						sections = append(sections, sourceMapSection{
							lineOffset:   sectionLineOffset,
							columnOffset: sectionColumnOffset,
							sourceMap:    sectionSourceMap,
						})
					}
				}
			}
		} else {
			log.AddError(&tracker, logger.Range{Loc: prop.ValueOrNil.Loc}, "Expected \"sections\" to be an array")
			return nil
		}
		hasSections = true
		break
	}

	// A plain source map is handled as a single section with a zero offset
	if !hasSections {
		sections = append(sections, sourceMapSection{
			sourceMap: obj,
		})
	}

	// Aggregated output across all sections
	var sources []string
	var sourcesContent []sourcemap.SourceContent
	var names []string
	var mappings mappingArray
	var generatedLine int32
	var generatedColumn int32
	needSort := false

	for _, section := range sections {
		var sourcesArray []js_ast.Expr
		var sourcesContentArray []js_ast.Expr
		var namesArray []js_ast.Expr
		var mappingsRaw []uint16
		var mappingsStart int32
		var sourceRoot string
		hasVersion := false

		for _, prop := range section.sourceMap.Properties {
			switch helpers.UTF16ToString(prop.Key.Data.(*js_ast.EString).Value) {
			case "version":
				if value, ok := prop.ValueOrNil.Data.(*js_ast.ENumber); ok && value.Value == 3 {
					hasVersion = true
				}

			case "mappings":
				if value, ok := prop.ValueOrNil.Data.(*js_ast.EString); ok {
					mappingsRaw = value.Value
					// "+ 1" skips the opening quote of the JSON string, so
					// error locations can index directly into the raw text
					mappingsStart = prop.ValueOrNil.Loc.Start + 1
				}

			case "sourceRoot":
				if value, ok := prop.ValueOrNil.Data.(*js_ast.EString); ok {
					sourceRoot = helpers.UTF16ToString(value.Value)
				}

			case "sources":
				if value, ok := prop.ValueOrNil.Data.(*js_ast.EArray); ok {
					sourcesArray = value.Items
				}

			case "sourcesContent":
				if value, ok := prop.ValueOrNil.Data.(*js_ast.EArray); ok {
					sourcesContentArray = value.Items
				}

			case "names":
				if value, ok := prop.ValueOrNil.Data.(*js_ast.EArray); ok {
					namesArray = value.Items
				}
			}
		}

		// Silently ignore the section if the version was missing or incorrect
		if !hasVersion {
			continue
		}

		mappingsLen := len(mappingsRaw)
		sourcesLen := len(sourcesArray)
		namesLen := len(namesArray)

		// Silently ignore the section if the source map is pointless (i.e. empty)
		if mappingsLen == 0 || sourcesLen == 0 {
			continue
		}

		// Sections that start before the end of the previous section force a
		// final sort of the aggregated mappings
		if section.lineOffset < generatedLine || (section.lineOffset == generatedLine && section.columnOffset < generatedColumn) {
			needSort = true
		}

		lineOffset := section.lineOffset
		columnOffset := section.columnOffset
		sourceOffset := int32(len(sources))
		nameOffset := int32(len(names))
		generatedLine = lineOffset
		generatedColumn = columnOffset
		sourceIndex := sourceOffset
		var originalLine int32
		var originalColumn int32
		originalName := nameOffset
		current := 0
		errorText := ""
		errorLen := 0

		// Parse the mappings (runs of VLQ-encoded deltas separated by ","
		// within a line and ";" between generated lines)
		for current < mappingsLen {
			// Handle a line break
			if mappingsRaw[current] == ';' {
				generatedLine++
				generatedColumn = 0
				current++
				continue
			}

			// Read the generated column
			generatedColumnDelta, i, ok := sourcemap.DecodeVLQUTF16(mappingsRaw[current:])
			if !ok {
				errorText = "Missing generated column"
				errorLen = i
				break
			}
			if generatedColumnDelta < 0 {
				// This would mess up binary search
				needSort = true
			}
			generatedColumn += generatedColumnDelta
			if (generatedLine == lineOffset && generatedColumn < columnOffset) || generatedColumn < 0 {
				errorText = fmt.Sprintf("Invalid generated column value: %d", generatedColumn)
				errorLen = i
				break
			}
			current += i

			// According to the specification, it's valid for a mapping to have 1,
			// 4, or 5 variable-length fields. Having one field means there's no
			// original location information, which is pretty useless. Just ignore
			// those entries.
			if current == mappingsLen {
				break
			}
			switch mappingsRaw[current] {
			case ',':
				current++
				continue
			case ';':
				continue
			}

			// Read the original source
			sourceIndexDelta, i, ok := sourcemap.DecodeVLQUTF16(mappingsRaw[current:])
			if !ok {
				errorText = "Missing source index"
				errorLen = i
				break
			}
			sourceIndex += sourceIndexDelta
			if sourceIndex < sourceOffset || sourceIndex >= sourceOffset+int32(sourcesLen) {
				errorText = fmt.Sprintf("Invalid source index value: %d", sourceIndex)
				errorLen = i
				break
			}
			current += i

			// Read the original line
			originalLineDelta, i, ok := sourcemap.DecodeVLQUTF16(mappingsRaw[current:])
			if !ok {
				errorText = "Missing original line"
				errorLen = i
				break
			}
			originalLine += originalLineDelta
			if originalLine < 0 {
				errorText = fmt.Sprintf("Invalid original line value: %d", originalLine)
				errorLen = i
				break
			}
			current += i

			// Read the original column
			originalColumnDelta, i, ok := sourcemap.DecodeVLQUTF16(mappingsRaw[current:])
			if !ok {
				errorText = "Missing original column"
				errorLen = i
				break
			}
			originalColumn += originalColumnDelta
			if originalColumn < 0 {
				errorText = fmt.Sprintf("Invalid original column value: %d", originalColumn)
				errorLen = i
				break
			}
			current += i

			// Read the original name (the optional fifth field)
			var optionalName ast.Index32
			if originalNameDelta, i, ok := sourcemap.DecodeVLQUTF16(mappingsRaw[current:]); ok {
				originalName += originalNameDelta
				if originalName < nameOffset || originalName >= nameOffset+int32(namesLen) {
					errorText = fmt.Sprintf("Invalid name index value: %d", originalName)
					errorLen = i
					break
				}
				optionalName = ast.MakeIndex32(uint32(originalName))
				current += i
			}

			// Handle the next character
			if current < mappingsLen {
				if c := mappingsRaw[current]; c == ',' {
					current++
				} else if c != ';' {
					errorText = fmt.Sprintf("Invalid character after mapping: %q",
						helpers.UTF16ToString(mappingsRaw[current:current+1]))
					errorLen = 1
					break
				}
			}

			mappings = append(mappings, sourcemap.Mapping{
				GeneratedLine:   generatedLine,
				GeneratedColumn: generatedColumn,
				SourceIndex:     sourceIndex,
				OriginalLine:    originalLine,
				OriginalColumn:  originalColumn,
				OriginalName:    optionalName,
			})
		}

		if errorText != "" {
			r := logger.Range{Loc: logger.Loc{Start: mappingsStart + int32(current)}, Len: int32(errorLen)}
			log.AddID(logger.MsgID_SourceMap_InvalidSourceMappings, logger.Warning, &tracker, r,
				fmt.Sprintf("Bad \"mappings\" data in source map at character %d: %s", current, errorText))
			return nil
		}

		// Try resolving relative source URLs into absolute source URLs.
		// See https://tc39.es/ecma426/#resolving-sources for details.
		var sourceURLPrefix string
		var baseURL *url.URL
		if sourceRoot != "" {
			if index := strings.LastIndexByte(sourceRoot, '/'); index != -1 {
				sourceURLPrefix = sourceRoot[:index+1]
			} else {
				sourceURLPrefix = sourceRoot + "/"
			}
		}
		if source.KeyPath.Namespace == "file" {
			baseURL = helpers.FileURLFromFilePath(source.KeyPath.Text)
		}
		for _, item := range sourcesArray {
			if element, ok := item.Data.(*js_ast.EString); ok {
				sourcePath := sourceURLPrefix + helpers.UTF16ToString(element.Value)
				sourceURL, err := url.Parse(sourcePath)

				// Ignore URL parse errors (such as "%XY" being an invalid escape)
				if err != nil {
					sources = append(sources, sourcePath)
					continue
				}

				// Resolve this URL relative to the enclosing directory
				if baseURL != nil {
					sourceURL = baseURL.ResolveReference(sourceURL)
				}
				sources = append(sources, sourceURL.String())
			} else {
				sources = append(sources, "")
			}
		}

		if len(sourcesContentArray) > 0 {
			// It's possible that one of the source maps inside "sections" has
			// different lengths for the "sources" and "sourcesContent" arrays.
			// This is bad because we need to use a single index to get the name
			// of the source from "sources[i]" and the content of the source
			// from "sourcesContent[i]".
			//
			// So if a previous source map had a shorter "sourcesContent" array
			// than its "sources" array (or if the previous source map just had
			// no "sourcesContent" array), expand our aggregated array to the
			// right length by padding it out with empty entries.
			sourcesContent = append(sourcesContent, make([]sourcemap.SourceContent, int(sourceOffset)-len(sourcesContent))...)

			for i, item := range sourcesContentArray {
				// Make sure we don't ever record more "sourcesContent" entries
				// than there are "sources" entries, which is possible because
				// these are two separate arrays in the source map JSON. We need
				// to avoid this because that would mess up our shared indexing
				// of the "sources" and "sourcesContent" arrays. See the above
				// comment for more details.
				if i == sourcesLen {
					break
				}
				if element, ok := item.Data.(*js_ast.EString); ok {
					sourcesContent = append(sourcesContent, sourcemap.SourceContent{
						Value:  element.Value,
						Quoted: source.TextForRange(source.RangeOfString(item.Loc)),
					})
				} else {
					sourcesContent = append(sourcesContent, sourcemap.SourceContent{})
				}
			}
		}

		for _, item := range namesArray {
			if element, ok := item.Data.(*js_ast.EString); ok {
				names = append(names, helpers.UTF16ToString(element.Value))
			} else {
				names = append(names, "")
			}
		}
	}

	// Silently fail if the source map is pointless (i.e. empty)
	if len(sources) == 0 || len(mappings) == 0 {
		return nil
	}

	if needSort {
		// If we get here, some mappings are out of order. Lines can't be out of
		// order by construction but columns can. This is a pretty rare situation
		// because almost all source map generators always write out mappings in
		// order as they write the output instead of scrambling the order.
		sort.Stable(mappings)
	}

	return &sourcemap.SourceMap{
		Sources:        sources,
		SourcesContent: sourcesContent,
		Mappings:       mappings,
		Names:          names,
	}
}
// This type is just so we can use Go's native sort function
type mappingArray []sourcemap.Mapping

func (a mappingArray) Len() int          { return len(a) }
func (a mappingArray) Swap(i int, j int) { a[i], a[j] = a[j], a[i] }

// Less orders mappings by generated position. Note that this must be a
// strict ordering ("<", not "<="): with "<=", two mappings at the same
// generated position would each compare "less than" the other, violating
// the transitivity contract that "sort.Interface" requires and defeating
// the stability guarantee of the "sort.Stable" call above.
func (a mappingArray) Less(i int, j int) bool {
	ai := a[i]
	aj := a[j]
	return ai.GeneratedLine < aj.GeneratedLine || (ai.GeneratedLine == aj.GeneratedLine && ai.GeneratedColumn < aj.GeneratedColumn)
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/js_parser/js_parser_lower_class.go | internal/js_parser/js_parser_lower_class.go | package js_parser
import (
"fmt"
"github.com/evanw/esbuild/internal/ast"
"github.com/evanw/esbuild/internal/compat"
"github.com/evanw/esbuild/internal/config"
"github.com/evanw/esbuild/internal/helpers"
"github.com/evanw/esbuild/internal/js_ast"
"github.com/evanw/esbuild/internal/logger"
)
// privateSymbolNeedsToBeLowered reports whether a private name must be
// compiled away: either the symbol was explicitly flagged for lowering, or
// the target environment doesn't support this kind of private member.
func (p *parser) privateSymbolNeedsToBeLowered(private *js_ast.EPrivateIdentifier) bool {
	sym := &p.symbols[private.Ref.InnerIndex]
	if sym.Flags.Has(ast.PrivateSymbolMustBeLowered) {
		return true
	}
	return p.options.unsupportedJSFeatures.Has(compat.SymbolFeature(sym.Kind))
}
// lowerPrivateBrandCheck lowers an "in" brand check against a private name:
// "#field in this" => "__privateIn(#field, this)"
func (p *parser) lowerPrivateBrandCheck(target js_ast.Expr, loc logger.Loc, private *js_ast.EPrivateIdentifier) js_ast.Expr {
	privateExpr := js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: private.Ref}}
	return p.callRuntime(loc, "__privateIn", []js_ast.Expr{privateExpr, target})
}
// lowerPrivateGet lowers a read of "target.#private" into a runtime call.
// The shape of the call depends on the private symbol's kind: methods pass
// the bound method function, accessor pairs pass the getter, and plain
// fields use the two-argument form.
func (p *parser) lowerPrivateGet(target js_ast.Expr, loc logger.Loc, private *js_ast.EPrivateIdentifier) js_ast.Expr {
	switch p.symbols[private.Ref.InnerIndex].Kind {
	case ast.SymbolPrivateMethod, ast.SymbolPrivateStaticMethod:
		// "this.#method" => "__privateMethod(this, #method, method_fn)"
		fnRef := p.privateGetters[private.Ref]
		p.recordUsage(fnRef)
		return p.callRuntime(target.Loc, "__privateMethod", []js_ast.Expr{
			target,
			{Loc: loc, Data: &js_ast.EIdentifier{Ref: private.Ref}},
			{Loc: loc, Data: &js_ast.EIdentifier{Ref: fnRef}},
		})

	case ast.SymbolPrivateGet, ast.SymbolPrivateStaticGet,
		ast.SymbolPrivateGetSetPair, ast.SymbolPrivateStaticGetSetPair:
		// "this.#getter" => "__privateGet(this, #getter, getter_get)"
		fnRef := p.privateGetters[private.Ref]
		p.recordUsage(fnRef)
		return p.callRuntime(target.Loc, "__privateGet", []js_ast.Expr{
			target,
			{Loc: loc, Data: &js_ast.EIdentifier{Ref: private.Ref}},
			{Loc: loc, Data: &js_ast.EIdentifier{Ref: fnRef}},
		})

	default:
		// "this.#field" => "__privateGet(this, #field)"
		return p.callRuntime(target.Loc, "__privateGet", []js_ast.Expr{
			target,
			{Loc: loc, Data: &js_ast.EIdentifier{Ref: private.Ref}},
		})
	}
}
// lowerPrivateSet lowers a write of "value" to "target.#private" into a
// "__privateSet" runtime call. When the private symbol has a setter, the
// setter function is passed as an extra argument.
func (p *parser) lowerPrivateSet(
	target js_ast.Expr,
	loc logger.Loc,
	private *js_ast.EPrivateIdentifier,
	value js_ast.Expr,
) js_ast.Expr {
	switch p.symbols[private.Ref.InnerIndex].Kind {
	case ast.SymbolPrivateSet, ast.SymbolPrivateStaticSet,
		ast.SymbolPrivateGetSetPair, ast.SymbolPrivateStaticGetSetPair:
		// "this.#setter = 123" => "__privateSet(this, #setter, 123, setter_set)"
		fnRef := p.privateSetters[private.Ref]
		p.recordUsage(fnRef)
		return p.callRuntime(target.Loc, "__privateSet", []js_ast.Expr{
			target,
			{Loc: loc, Data: &js_ast.EIdentifier{Ref: private.Ref}},
			value,
			{Loc: loc, Data: &js_ast.EIdentifier{Ref: fnRef}},
		})

	default:
		// "this.#field = 123" => "__privateSet(this, #field, 123)"
		return p.callRuntime(target.Loc, "__privateSet", []js_ast.Expr{
			target,
			{Loc: loc, Data: &js_ast.EIdentifier{Ref: private.Ref}},
			value,
		})
	}
}
// lowerPrivateSetUnOp lowers a unary update of a private member:
// "target.#private++" => "__privateWrapper(target, #private, private_set, private_get)._++"
// Only the accessor functions that exist for this symbol's kind are passed
// to the runtime helper; "null" stands in for a missing setter when only a
// getter must be forwarded.
func (p *parser) lowerPrivateSetUnOp(target js_ast.Expr, loc logger.Loc, private *js_ast.EPrivateIdentifier, op js_ast.OpCode) js_ast.Expr {
	kind := p.symbols[private.Ref.InnerIndex].Kind

	// Determine the setter, if any
	var setter js_ast.Expr
	switch kind {
	case ast.SymbolPrivateSet, ast.SymbolPrivateStaticSet,
		ast.SymbolPrivateGetSetPair, ast.SymbolPrivateStaticGetSetPair:
		ref := p.privateSetters[private.Ref]
		p.recordUsage(ref)
		setter = js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: ref}}
	}

	// Determine the getter, if any
	var getter js_ast.Expr
	switch kind {
	case ast.SymbolPrivateGet, ast.SymbolPrivateStaticGet,
		ast.SymbolPrivateGetSetPair, ast.SymbolPrivateStaticGetSetPair:
		ref := p.privateGetters[private.Ref]
		p.recordUsage(ref)
		getter = js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: ref}}
	}

	// Only include necessary arguments
	args := []js_ast.Expr{
		target,
		{Loc: loc, Data: &js_ast.EIdentifier{Ref: private.Ref}},
	}
	if setter.Data != nil {
		args = append(args, setter)
	}
	if getter.Data != nil {
		if setter.Data == nil {
			args = append(args, js_ast.Expr{Loc: loc, Data: js_ast.ENullShared})
		}
		args = append(args, getter)
	}

	// "target.#private++" => "__privateWrapper(target, #private, private_set, private_get)._++"
	return js_ast.Expr{Loc: loc, Data: &js_ast.EUnary{
		Op: op,
		Value: js_ast.Expr{Loc: target.Loc, Data: &js_ast.EDot{
			Target:  p.callRuntime(target.Loc, "__privateWrapper", args),
			NameLoc: target.Loc,
			Name:    "_",
		}},
	}}
}
// lowerPrivateSetBinOp lowers a compound assignment to a private member. The
// target expression is captured first so its side effects only run once:
// "target.#private += 123" => "__privateSet(target, #private, __privateGet(target, #private) + 123)"
func (p *parser) lowerPrivateSetBinOp(target js_ast.Expr, loc logger.Loc, private *js_ast.EPrivateIdentifier, op js_ast.OpCode, value js_ast.Expr) js_ast.Expr {
	targetFunc, targetWrapFunc := p.captureValueWithPossibleSideEffects(target.Loc, 2, target, valueDefinitelyNotMutated)
	return targetWrapFunc(p.lowerPrivateSet(targetFunc(), loc, private, js_ast.Expr{Loc: value.Loc, Data: &js_ast.EBinary{
		Op:    op,
		Left:  p.lowerPrivateGet(targetFunc(), loc, private),
		Right: value,
	}}))
}
// extractPrivateIndex matches expressions of the form "foo.#bar" whose
// private name must be lowered for the configured language target. On a
// match it returns the receiver expression, the location of the private
// name, and the private identifier; otherwise all results are zero values.
func (p *parser) extractPrivateIndex(target js_ast.Expr) (js_ast.Expr, logger.Loc, *js_ast.EPrivateIdentifier) {
	index, ok := target.Data.(*js_ast.EIndex)
	if !ok {
		return js_ast.Expr{}, logger.Loc{}, nil
	}
	private, ok := index.Index.Data.(*js_ast.EPrivateIdentifier)
	if !ok || !p.privateSymbolNeedsToBeLowered(private) {
		return js_ast.Expr{}, logger.Loc{}, nil
	}
	return index.Target, index.Index.Loc, private
}
// extractSuperProperty matches "super.bar" or "super[bar]" in a context
// where super property access must be lowered. On a match it returns the
// property key as an expression (a dotted name becomes a string literal);
// otherwise it returns a zero Expr.
func (p *parser) extractSuperProperty(target js_ast.Expr) js_ast.Expr {
	if dot, ok := target.Data.(*js_ast.EDot); ok {
		if p.shouldLowerSuperPropertyAccess(dot.Target) {
			return js_ast.Expr{Loc: dot.NameLoc, Data: &js_ast.EString{Value: helpers.StringToUTF16(dot.Name)}}
		}
	} else if index, ok := target.Data.(*js_ast.EIndex); ok {
		if p.shouldLowerSuperPropertyAccess(index.Target) {
			return index.Index
		}
	}
	return js_ast.Expr{}
}
// lowerSuperPropertyOrPrivateInAssign rewrites assignment targets inside a
// destructuring pattern that use lowered "super" properties or lowered
// private names into "__superWrapper(...)._" / "__privateWrapper(...)._"
// forms, recursing through spread elements, arrays, and object patterns.
// The second result reports whether anything was rewritten. Note that the
// AST is mutated in place where possible.
func (p *parser) lowerSuperPropertyOrPrivateInAssign(expr js_ast.Expr) (js_ast.Expr, bool) {
	didLower := false

	switch e := expr.Data.(type) {
	case *js_ast.ESpread:
		if value, ok := p.lowerSuperPropertyOrPrivateInAssign(e.Value); ok {
			e.Value = value
			didLower = true
		}

	case *js_ast.EDot:
		// "[super.foo] = [bar]" => "[__superWrapper(this, 'foo')._] = [bar]"
		if p.shouldLowerSuperPropertyAccess(e.Target) {
			key := js_ast.Expr{Loc: e.NameLoc, Data: &js_ast.EString{Value: helpers.StringToUTF16(e.Name)}}
			expr = p.callSuperPropertyWrapper(expr.Loc, key)
			didLower = true
		}

	case *js_ast.EIndex:
		// "[super[foo]] = [bar]" => "[__superWrapper(this, foo)._] = [bar]"
		if p.shouldLowerSuperPropertyAccess(e.Target) {
			expr = p.callSuperPropertyWrapper(expr.Loc, e.Index)
			didLower = true
			break
		}

		// "[a.#b] = [c]" => "[__privateWrapper(a, #b)._] = [c]"
		if private, ok := e.Index.Data.(*js_ast.EPrivateIdentifier); ok && p.privateSymbolNeedsToBeLowered(private) {
			var target js_ast.Expr

			switch p.symbols[private.Ref.InnerIndex].Kind {
			case ast.SymbolPrivateSet, ast.SymbolPrivateStaticSet,
				ast.SymbolPrivateGetSetPair, ast.SymbolPrivateStaticGetSetPair:
				// "this.#setter" => "__privateWrapper(this, #setter, setter_set)"
				fnRef := p.privateSetters[private.Ref]
				p.recordUsage(fnRef)
				target = p.callRuntime(expr.Loc, "__privateWrapper", []js_ast.Expr{
					e.Target,
					{Loc: expr.Loc, Data: &js_ast.EIdentifier{Ref: private.Ref}},
					{Loc: expr.Loc, Data: &js_ast.EIdentifier{Ref: fnRef}},
				})

			default:
				// "this.#field" => "__privateWrapper(this, #field)"
				target = p.callRuntime(expr.Loc, "__privateWrapper", []js_ast.Expr{
					e.Target,
					{Loc: expr.Loc, Data: &js_ast.EIdentifier{Ref: private.Ref}},
				})
			}

			// "__privateWrapper(this, #field)" => "__privateWrapper(this, #field)._"
			expr.Data = &js_ast.EDot{Target: target, Name: "_", NameLoc: expr.Loc}
			didLower = true
		}

	case *js_ast.EArray:
		for i, item := range e.Items {
			if item, ok := p.lowerSuperPropertyOrPrivateInAssign(item); ok {
				e.Items[i] = item
				didLower = true
			}
		}

	case *js_ast.EObject:
		for i, property := range e.Properties {
			if property.ValueOrNil.Data != nil {
				if value, ok := p.lowerSuperPropertyOrPrivateInAssign(property.ValueOrNil); ok {
					e.Properties[i].ValueOrNil = value
					didLower = true
				}
			}
		}
	}

	return expr, didLower
}
// shouldLowerSuperPropertyAccess reports whether "expr" is a "super"
// reference inside a function context where super property accesses must
// be lowered.
func (p *parser) shouldLowerSuperPropertyAccess(expr js_ast.Expr) bool {
	if !p.fnOrArrowDataVisit.shouldLowerSuperPropertyAccess {
		return false
	}
	_, isSuper := expr.Data.(*js_ast.ESuper)
	return isSuper
}
// callSuperPropertyWrapper builds "__superWrapper(home, this, key)._", a
// property reference usable as an assignment target in place of a lowered
// "super.key" / "super[key]". The home object is the captured inner class
// name in static contexts and "Class.prototype" otherwise.
func (p *parser) callSuperPropertyWrapper(loc logger.Loc, key js_ast.Expr) js_ast.Expr {
	ref := *p.fnOnlyDataVisit.innerClassNameRef
	p.recordUsage(ref)
	class := js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: ref}}
	this := js_ast.Expr{Loc: loc, Data: js_ast.EThisShared}

	// Handle "this" in lowered static class field initializers
	if p.fnOnlyDataVisit.shouldReplaceThisWithInnerClassNameRef {
		p.recordUsage(ref)
		this.Data = &js_ast.EIdentifier{Ref: ref}
	}

	if !p.fnOnlyDataVisit.isInStaticClassContext {
		// "super.foo" => "__superWrapper(Class.prototype, this, 'foo')._"
		// "super[foo]" => "__superWrapper(Class.prototype, this, foo)._"
		class.Data = &js_ast.EDot{Target: class, NameLoc: loc, Name: "prototype"}
	}

	return js_ast.Expr{Loc: loc, Data: &js_ast.EDot{Target: p.callRuntime(loc, "__superWrapper", []js_ast.Expr{
		class,
		this,
		key,
	}), Name: "_", NameLoc: loc}}
}
// lowerSuperPropertyGet lowers a read of "super.key" / "super[key]" into a
// "__superGet" runtime call. The home object is the captured inner class
// name in static contexts and "Class.prototype" otherwise.
func (p *parser) lowerSuperPropertyGet(loc logger.Loc, key js_ast.Expr) js_ast.Expr {
	ref := *p.fnOnlyDataVisit.innerClassNameRef
	p.recordUsage(ref)
	class := js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: ref}}
	this := js_ast.Expr{Loc: loc, Data: js_ast.EThisShared}

	// Handle "this" in lowered static class field initializers
	if p.fnOnlyDataVisit.shouldReplaceThisWithInnerClassNameRef {
		p.recordUsage(ref)
		this.Data = &js_ast.EIdentifier{Ref: ref}
	}

	if !p.fnOnlyDataVisit.isInStaticClassContext {
		// "super.foo" => "__superGet(Class.prototype, this, 'foo')"
		// "super[foo]" => "__superGet(Class.prototype, this, foo)"
		class.Data = &js_ast.EDot{Target: class, NameLoc: loc, Name: "prototype"}
	}

	return p.callRuntime(loc, "__superGet", []js_ast.Expr{
		class,
		this,
		key,
	})
}
// lowerSuperPropertySet lowers a write of "value" to "super.key" /
// "super[key]" into a "__superSet" runtime call:
// "super.foo = bar" => "__superSet(Class, this, 'foo', bar)"
// "super[foo] = bar" => "__superSet(Class, this, foo, bar)"
func (p *parser) lowerSuperPropertySet(loc logger.Loc, key js_ast.Expr, value js_ast.Expr) js_ast.Expr {
	ref := *p.fnOnlyDataVisit.innerClassNameRef
	p.recordUsage(ref)
	class := js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: ref}}
	this := js_ast.Expr{Loc: loc, Data: js_ast.EThisShared}

	// Handle "this" in lowered static class field initializers
	if p.fnOnlyDataVisit.shouldReplaceThisWithInnerClassNameRef {
		p.recordUsage(ref)
		this.Data = &js_ast.EIdentifier{Ref: ref}
	}

	if !p.fnOnlyDataVisit.isInStaticClassContext {
		// "super.foo = bar" => "__superSet(Class.prototype, this, 'foo', bar)"
		// "super[foo] = bar" => "__superSet(Class.prototype, this, foo, bar)"
		class.Data = &js_ast.EDot{Target: class, NameLoc: loc, Name: "prototype"}
	}

	return p.callRuntime(loc, "__superSet", []js_ast.Expr{
		class,
		this,
		key,
		value,
	})
}
// lowerSuperPropertySetBinOp lowers a compound assignment to a "super"
// property. The key is captured first so any side effects only run once:
// "super.foo += bar" => "__superSet(Class, this, 'foo', __superGet(Class, this, 'foo') + bar)"
// "super[foo] += bar" => "__superSet(Class, this, foo, __superGet(Class, this, foo) + bar)"
// "super[foo()] += bar" => "__superSet(Class, this, _a = foo(), __superGet(Class, this, _a) + bar)"
func (p *parser) lowerSuperPropertySetBinOp(loc logger.Loc, property js_ast.Expr, op js_ast.OpCode, value js_ast.Expr) js_ast.Expr {
	targetFunc, targetWrapFunc := p.captureValueWithPossibleSideEffects(property.Loc, 2, property, valueDefinitelyNotMutated)
	return targetWrapFunc(p.lowerSuperPropertySet(loc, targetFunc(), js_ast.Expr{Loc: value.Loc, Data: &js_ast.EBinary{
		Op:    op,
		Left:  p.lowerSuperPropertyGet(loc, targetFunc()),
		Right: value,
	}}))
}
// maybeLowerSuperPropertyGetInsideCall rewrites calls whose target is a
// lowered "super" property, preserving the original "this" binding by
// routing the call through ".call(this, ...)". Calls with other targets are
// left untouched.
func (p *parser) maybeLowerSuperPropertyGetInsideCall(call *js_ast.ECall) {
	var key js_ast.Expr

	switch e := call.Target.Data.(type) {
	case *js_ast.EDot:
		// Lower "super.prop" if necessary
		if !p.shouldLowerSuperPropertyAccess(e.Target) {
			return
		}
		key = js_ast.Expr{Loc: e.NameLoc, Data: &js_ast.EString{Value: helpers.StringToUTF16(e.Name)}}

	case *js_ast.EIndex:
		// Lower "super[prop]" if necessary
		if !p.shouldLowerSuperPropertyAccess(e.Target) {
			return
		}
		key = e.Index

	default:
		return
	}

	// "super.foo(a, b)" => "__superGet(Class, this, 'foo').call(this, a, b)"
	call.Target.Data = &js_ast.EDot{
		Target:  p.lowerSuperPropertyGet(call.Target.Loc, key),
		NameLoc: key.Loc,
		Name:    "call",
	}
	thisExpr := js_ast.Expr{Loc: call.Target.Loc, Data: js_ast.EThisShared}
	call.Args = append([]js_ast.Expr{thisExpr}, call.Args...)
}
// classLoweringInfo describes which parts of a class must be lowered, as
// computed by "computeClassLoweringInfo" below.
type classLoweringInfo struct {
	lowerAllInstanceFields bool // Move all instance field initializers into the constructor
	lowerAllStaticFields   bool // Move all static field initializers out of the class body
	shimSuperCtorCalls     bool // Wrap "super()" so lowered code that uses "this" runs after it
}
// computeClassLoweringInfo decides, up front, which categories of class
// members must be lowered for the configured target and settings. The
// decisions are deliberately conservative: lowering one member of a
// category (instance or static) generally forces lowering the whole
// category to preserve evaluation order.
func (p *parser) computeClassLoweringInfo(class *js_ast.Class) (result classLoweringInfo) {
	// Name keeping for classes is implemented with a static block. So we need to
	// lower all static fields if static blocks are unsupported so that the name
	// keeping comes first before other static initializers.
	if p.options.keepNames && p.options.unsupportedJSFeatures.Has(compat.ClassStaticBlocks) {
		result.lowerAllStaticFields = true
	}

	// TypeScript's "experimentalDecorators" feature replaces all references of
	// the class name with the decorated class after class decorators have run.
	// This cannot be done by only reassigning to the class symbol in JavaScript
	// because it's shadowed by the class name within the class body. Instead,
	// we need to hoist all code in static contexts out of the class body so
	// that it's no longer shadowed:
	//
	//   const decorate = x => ({ x })
	//   @decorate
	//   class Foo {
	//     static oldFoo = Foo
	//     static newFoo = () => Foo
	//   }
	//   console.log('This must be false:', Foo.x.oldFoo === Foo.x.newFoo())
	//
	if p.options.ts.Parse && p.options.ts.Config.ExperimentalDecorators == config.True && len(class.Decorators) > 0 {
		result.lowerAllStaticFields = true
	}

	// If something has decorators, just lower everything for now. It's possible
	// that we could avoid lowering in certain cases, but doing so is very tricky
	// due to the complexity of the decorator specification. The specification is
	// also still evolving so trying to optimize it now is also potentially
	// premature.
	if class.ShouldLowerStandardDecorators {
		for _, prop := range class.Properties {
			if len(prop.Decorators) > 0 {
				for _, prop := range class.Properties {
					if private, ok := prop.Key.Data.(*js_ast.EPrivateIdentifier); ok {
						p.symbols[private.Ref.InnerIndex].Flags |= ast.PrivateSymbolMustBeLowered
					}
				}
				result.lowerAllStaticFields = true
				result.lowerAllInstanceFields = true
				break
			}
		}
	}

	// Conservatively lower fields of a given type (instance or static) when any
	// member of that type needs to be lowered. This must be done to preserve
	// evaluation order. For example:
	//
	//   class Foo {
	//     #foo = 123
	//     bar = this.#foo
	//   }
	//
	// It would be bad if we transformed that into something like this:
	//
	//   var _foo;
	//   class Foo {
	//     constructor() {
	//       _foo.set(this, 123);
	//     }
	//     bar = __privateGet(this, _foo);
	//   }
	//   _foo = new WeakMap();
	//
	// That evaluates "bar" then "foo" instead of "foo" then "bar" like the
	// original code. We need to do this instead:
	//
	//   var _foo;
	//   class Foo {
	//     constructor() {
	//       _foo.set(this, 123);
	//       __publicField(this, "bar", __privateGet(this, _foo));
	//     }
	//   }
	//   _foo = new WeakMap();
	//
	for _, prop := range class.Properties {
		if prop.Kind == js_ast.PropertyClassStaticBlock {
			if p.options.unsupportedJSFeatures.Has(compat.ClassStaticBlocks) {
				result.lowerAllStaticFields = true
			}
			continue
		}

		if private, ok := prop.Key.Data.(*js_ast.EPrivateIdentifier); ok {
			if prop.Flags.Has(js_ast.PropertyIsStatic) {
				if p.privateSymbolNeedsToBeLowered(private) {
					result.lowerAllStaticFields = true
				}
			} else {
				if p.privateSymbolNeedsToBeLowered(private) {
					result.lowerAllInstanceFields = true

					// We can't transform this:
					//
					//   class Foo {
					//     #foo = 123
					//     static bar = new Foo().#foo
					//   }
					//
					// into this:
					//
					//   var _foo;
					//   const _Foo = class {
					//     constructor() {
					//       _foo.set(this, 123);
					//     }
					//     static bar = __privateGet(new _Foo(), _foo);
					//   };
					//   let Foo = _Foo;
					//   _foo = new WeakMap();
					//
					// because "_Foo" won't be initialized in the initializer for "bar".
					// So we currently lower all static fields in this case too. This
					// isn't great and it would be good to find a way to avoid this.
					// The inner class name symbol substitution mechanism should probably
					// be rethought.
					result.lowerAllStaticFields = true
				}
			}
			continue
		}

		if prop.Kind == js_ast.PropertyAutoAccessor {
			if prop.Flags.Has(js_ast.PropertyIsStatic) {
				if p.options.unsupportedJSFeatures.Has(compat.ClassPrivateStaticField) {
					result.lowerAllStaticFields = true
				}
			} else {
				if p.options.unsupportedJSFeatures.Has(compat.ClassPrivateField) {
					result.lowerAllInstanceFields = true
					result.lowerAllStaticFields = true
				}
			}
			continue
		}

		// This doesn't come before the private member check above because
		// unsupported private methods must also trigger field lowering:
		//
		//   class Foo {
		//     bar = this.#foo()
		//     #foo() {}
		//   }
		//
		// It would be bad if we transformed that to something like this:
		//
		//   var _foo, foo_fn;
		//   class Foo {
		//     constructor() {
		//       _foo.add(this);
		//     }
		//     bar = __privateMethod(this, _foo, foo_fn).call(this);
		//   }
		//   _foo = new WeakSet();
		//   foo_fn = function() {
		//   };
		//
		// In that case the initializer of "bar" would fail to call "#foo" because
		// it's only added to the instance in the body of the constructor.
		if prop.Kind.IsMethodDefinition() {
			// We need to shim "super()" inside the constructor if this is a derived
			// class and the constructor has any parameter properties, since those
			// use "this" and we can only access "this" after "super()" is called
			if class.ExtendsOrNil.Data != nil {
				if key, ok := prop.Key.Data.(*js_ast.EString); ok && helpers.UTF16EqualsString(key.Value, "constructor") {
					if fn, ok := prop.ValueOrNil.Data.(*js_ast.EFunction); ok {
						for _, arg := range fn.Fn.Args {
							if arg.IsTypeScriptCtorField {
								result.shimSuperCtorCalls = true
								break
							}
						}
					}
				}
			}
			continue
		}

		if prop.Flags.Has(js_ast.PropertyIsStatic) {
			// Static fields must be lowered if the target doesn't support them
			if p.options.unsupportedJSFeatures.Has(compat.ClassStaticField) {
				result.lowerAllStaticFields = true
			}

			// Convert static fields to assignment statements if the TypeScript
			// setting for this is enabled. I don't think this matters for private
			// fields because there's no way for this to call a setter in the base
			// class, so this isn't done for private fields.
			//
			// If class static blocks are supported, then we can do this inline
			// without needing to move the initializers outside of the class body.
			// Otherwise, we need to lower all static class fields.
			if p.options.ts.Parse && !class.UseDefineForClassFields && p.options.unsupportedJSFeatures.Has(compat.ClassStaticBlocks) {
				result.lowerAllStaticFields = true
			}
		} else {
			if p.options.ts.Parse && !class.UseDefineForClassFields {
				// Convert instance fields to assignment statements if the TypeScript
				// setting for this is enabled. I don't think this matters for private
				// fields because there's no way for this to call a setter in the base
				// class, so this isn't done for private fields.
				if prop.InitializerOrNil.Data != nil {
					// We can skip lowering all instance fields if all instance fields
					// disappear completely when lowered. This happens when
					// "useDefineForClassFields" is false and there is no initializer.
					result.lowerAllInstanceFields = true
				}
			} else if p.options.unsupportedJSFeatures.Has(compat.ClassField) {
				// Instance fields must be lowered if the target doesn't support them
				result.lowerAllInstanceFields = true
			}
		}
	}

	// We need to shim "super()" inside the constructor if this is a derived
	// class and there are any instance fields that need to be lowered, since
	// those use "this" and we can only access "this" after "super()" is called
	if result.lowerAllInstanceFields && class.ExtendsOrNil.Data != nil {
		result.shimSuperCtorCalls = true
	}

	return
}
// classKind describes the syntactic position of the class being lowered:
// an expression, a plain statement, an exported statement, or the class
// inside an "export default" statement. The distinction matters because
// generated initializer code must be inserted differently in each case.
type classKind uint8

const (
	classKindExpr              classKind = 0 // "(class {})"
	classKindStmt              classKind = 1 // "class Foo {}"
	classKindExportStmt        classKind = 2 // "export class Foo {}"
	classKindExportDefaultStmt classKind = 3 // "export default class {}"
)
// lowerClassContext holds all of the state needed to lower a single class.
// It is populated by "lowerClass" and then threaded through the helper
// methods (e.g. "enableNameCapture", "lowerField") so that code generated
// for different parts of the class ends up in the right output position.
type lowerClassContext struct {
	nameToKeep string      // original class name, preserved for name-keeping (e.g. "--keep-names")
	kind       classKind   // whether this is an expression, statement, or export statement
	class      *js_ast.Class // the class being lowered; may be re-pointed if the expression is captured
	classLoc   logger.Loc  // source location of the class itself
	classExpr  js_ast.Expr // Only for "kind == classKindExpr", may be replaced by "nameFunc()"
	defaultName ast.LocRef // name binding for "export default class" (classKindExportDefaultStmt only)

	ctor            *js_ast.EFunction // the constructor, if one exists or has been synthesized
	extendsRef      ast.Ref           // temporary holding the "extends" clause value, if captured
	parameterFields []js_ast.Stmt     // assignments generated from TypeScript constructor parameter properties
	instanceMembers []js_ast.Stmt     // lowered instance field initializers, destined for the constructor
	instancePrivateMethods []js_ast.Stmt // lowered private instance method bindings, destined for the constructor
	autoAccessorCount int              // number of "accessor" fields seen so far (used for temp naming)

	// These expressions are generated after the class body, in this order
	computedPropertyChain js_ast.Expr   // side effects of computed property keys, evaluated in source order
	privateMembers        []js_ast.Expr // e.g. "WeakMap"/"WeakSet" setup for lowered private members
	staticMembers         []js_ast.Expr // lowered static field initializers
	staticPrivateMethods  []js_ast.Expr // lowered private static method bindings

	// These contain calls to "__decorateClass" for TypeScript experimental decorators
	instanceExperimentalDecorators []js_ast.Expr
	staticExperimentalDecorators   []js_ast.Expr

	// These are used for implementing JavaScript decorators
	decoratorContextRef                          ast.Ref     // temporary holding the shared decorator context object
	decoratorClassDecorators                     js_ast.Expr // evaluated class-level decorators
	decoratorPropertyToInitializerMap            map[int]int // maps property index to its initializer slot
	decoratorCallInstanceMethodExtraInitializers bool        // whether instance extra initializers must be invoked
	decoratorCallStaticMethodExtraInitializers   bool        // whether static extra initializers must be invoked
	decoratorStaticNonFieldElements              []js_ast.Expr
	decoratorInstanceNonFieldElements            []js_ast.Expr
	decoratorStaticFieldElements                 []js_ast.Expr
	decoratorInstanceFieldElements               []js_ast.Expr

	// These are used by "lowerMethod"
	privateInstanceMethodRef ast.Ref
	privateStaticMethodRef   ast.Ref

	// These are only for class expressions that need to be captured
	nameFunc            func() js_ast.Expr            // returns an expression referencing the class (see "enableNameCapture")
	wrapFunc            func(js_ast.Expr) js_ast.Expr // wraps the final expression with the capture assignment
	didCaptureClassExpr bool                          // true once the class expression has been stored in a temporary
}
// Apply all relevant transforms to a class object (either a statement or an
// expression) including:
//
//   - Transforming class fields for older environments
//   - Transforming static blocks for older environments
//   - Transforming TypeScript experimental decorators into JavaScript
//   - Transforming TypeScript class fields into assignments for "useDefineForClassFields"
//
// Note that this doesn't transform any nested AST subtrees inside the class
// body (e.g. the contents of initializers, methods, and static blocks). Those
// have already been transformed by "visitClass" by this point. It's done that
// way for performance so that we don't need to do another AST pass.
//
// Exactly one of "stmt" or "expr" is provided: "stmt.Data == nil" means the
// class is an expression. The returned statements replace the original
// statement (or, for expressions, the returned expression replaces "expr").
func (p *parser) lowerClass(stmt js_ast.Stmt, expr js_ast.Expr, result visitClassResult, nameToKeep string) ([]js_ast.Stmt, js_ast.Expr) {
	// All temporary refs start out invalid and are only allocated on demand
	ctx := lowerClassContext{
		nameToKeep:               nameToKeep,
		extendsRef:               ast.InvalidRef,
		decoratorContextRef:      ast.InvalidRef,
		privateInstanceMethodRef: ast.InvalidRef,
		privateStaticMethodRef:   ast.InvalidRef,
	}

	// Unpack the class from the statement or expression
	if stmt.Data == nil {
		// This is a class expression
		e, _ := expr.Data.(*js_ast.EClass)
		ctx.class = &e.Class
		ctx.classExpr = expr
		ctx.kind = classKindExpr
		if ctx.class.Name != nil {
			symbol := &p.symbols[ctx.class.Name.Ref.InnerIndex]
			ctx.nameToKeep = symbol.OriginalName

			// The inner class name inside the class expression should be the same as
			// the class expression name itself
			if result.innerClassNameRef != ast.InvalidRef {
				p.mergeSymbols(result.innerClassNameRef, ctx.class.Name.Ref)
			}

			// Remove unused class names when minifying. Check this after we merge in
			// the inner class name above since that will adjust the use count.
			if p.options.minifySyntax && symbol.UseCountEstimate == 0 {
				ctx.class.Name = nil
			}
		}
	} else if s, ok := stmt.Data.(*js_ast.SClass); ok {
		// This is a class statement, possibly an exported one
		ctx.class = &s.Class
		if ctx.class.Name != nil {
			ctx.nameToKeep = p.symbols[ctx.class.Name.Ref.InnerIndex].OriginalName
		}
		if s.IsExport {
			ctx.kind = classKindExportStmt
		} else {
			ctx.kind = classKindStmt
		}
	} else {
		// Otherwise this must be an "export default class" statement
		s, _ := stmt.Data.(*js_ast.SExportDefault)
		s2, _ := s.Value.Data.(*js_ast.SClass)
		ctx.class = &s2.Class
		if ctx.class.Name != nil {
			ctx.nameToKeep = p.symbols[ctx.class.Name.Ref.InnerIndex].OriginalName
		}
		ctx.defaultName = s.DefaultName
		ctx.kind = classKindExportDefaultStmt
	}

	// Record the source location of the class for generated code
	if stmt.Data == nil {
		ctx.classLoc = expr.Loc
	} else {
		ctx.classLoc = stmt.Loc
	}

	// Run the lowering pipeline: decide what must be lowered, set up name
	// capture, process each property, move initializers into the constructor,
	// and finally assemble the output statements/expression
	classLoweringInfo := p.computeClassLoweringInfo(ctx.class)
	ctx.enableNameCapture(p, result)
	ctx.processProperties(p, classLoweringInfo, result)
	ctx.insertInitializersIntoConstructor(p, classLoweringInfo, result)
	return ctx.finishAndGenerateCode(p, result)
}
// enableNameCapture installs "ctx.nameFunc", a callback that returns an
// expression referencing the class itself. For class expressions, the first
// call captures the class into a temporary variable (via
// "captureValueWithPossibleSideEffects") and merges the class name symbol
// into that temporary. For class statements, it returns a reference to the
// inner class name if one was used, or to the outer class name (generating
// one first if the class is anonymous).
func (ctx *lowerClassContext) enableNameCapture(p *parser, result visitClassResult) {
	// Class statements can be missing a name if they are in an
	// "export default" statement:
	//
	//   export default class {
	//     static foo = 123
	//   }
	//
	ctx.nameFunc = func() js_ast.Expr {
		if ctx.kind == classKindExpr {
			// If this is a class expression, capture and store it. We have to
			// do this even if it has a name since the name isn't exposed
			// outside the class body.
			classExpr := &js_ast.EClass{Class: *ctx.class}
			ctx.class = &classExpr.Class
			ctx.nameFunc, ctx.wrapFunc = p.captureValueWithPossibleSideEffects(ctx.classLoc, 2, js_ast.Expr{Loc: ctx.classLoc, Data: classExpr}, valueDefinitelyNotMutated)
			ctx.classExpr = ctx.nameFunc()
			ctx.didCaptureClassExpr = true
			name := ctx.nameFunc()

			// If we're storing the class expression in a variable, remove the class
			// name and rewrite all references to the class name with references to
			// the temporary variable holding the class expression. This ensures that
			// references to the class expression by name in any expressions that end
			// up being pulled outside of the class body still work. For example:
			//
			//   let Bar = class Foo {
			//     static foo = 123
			//     static bar = Foo.foo
			//   }
			//
			// This might be converted into the following:
			//
			//   var _a;
			//   let Bar = (_a = class {
			//   }, _a.foo = 123, _a.bar = _a.foo, _a);
			//
			if ctx.class.Name != nil {
				p.mergeSymbols(ctx.class.Name.Ref, name.Data.(*js_ast.EIdentifier).Ref)
				ctx.class.Name = nil
			}
			return name
		} else {
			// If anything referenced the inner class name, then we should use that
			// name for any automatically-generated initialization code, since it
			// will come before the outer class name is initialized.
			if result.innerClassNameRef != ast.InvalidRef {
				p.recordUsage(result.innerClassNameRef)
				return js_ast.Expr{Loc: ctx.class.Name.Loc, Data: &js_ast.EIdentifier{Ref: result.innerClassNameRef}}
			}

			// Otherwise we should just use the outer class name. Generate a name
			// first if the class is anonymous ("export default class" reuses its
			// default name binding).
			if ctx.class.Name == nil {
				if ctx.kind == classKindExportDefaultStmt {
					ctx.class.Name = &ctx.defaultName
				} else {
					ctx.class.Name = &ast.LocRef{Loc: ctx.classLoc, Ref: p.generateTempRef(tempRefNoDeclare, "")}
				}
			}
			p.recordUsage(ctx.class.Name.Ref)
			return js_ast.Expr{Loc: ctx.class.Name.Loc, Data: &js_ast.EIdentifier{Ref: ctx.class.Name.Ref}}
		}
	}
}
// Handle lowering of instance and static fields. Move their initializers
// from the class body to either the constructor (instance fields) or after
// the class (static fields).
//
// If this returns true, the return property should be added to the class
// body. Otherwise the property should be omitted from the class body.
func (ctx *lowerClassContext) lowerField(
p *parser,
prop js_ast.Property,
private *js_ast.EPrivateIdentifier,
shouldOmitFieldInitializer bool,
staticFieldToBlockAssign bool,
initializerIndex int,
) (js_ast.Property, ast.Ref, bool) {
mustLowerPrivate := private != nil && p.privateSymbolNeedsToBeLowered(private)
ref := ast.InvalidRef
// The TypeScript compiler doesn't follow the JavaScript spec for
// uninitialized fields. They are supposed to be set to undefined but the
// TypeScript compiler just omits them entirely.
if !shouldOmitFieldInitializer {
loc := prop.Loc
// Determine where to store the field
var target js_ast.Expr
if prop.Flags.Has(js_ast.PropertyIsStatic) && !staticFieldToBlockAssign {
target = ctx.nameFunc()
} else {
target = js_ast.Expr{Loc: loc, Data: js_ast.EThisShared}
}
// Generate the assignment initializer
var init js_ast.Expr
if prop.InitializerOrNil.Data != nil {
init = prop.InitializerOrNil
} else {
init = js_ast.Expr{Loc: loc, Data: js_ast.EUndefinedShared}
}
// Optionally call registered decorator initializers
if initializerIndex != -1 {
var value js_ast.Expr
if prop.Flags.Has(js_ast.PropertyIsStatic) {
value = ctx.nameFunc()
} else {
value = js_ast.Expr{Loc: loc, Data: js_ast.EThisShared}
}
args := []js_ast.Expr{
{Loc: loc, Data: &js_ast.EIdentifier{Ref: ctx.decoratorContextRef}},
{Loc: loc, Data: &js_ast.ENumber{Value: float64((4 + 2*initializerIndex) << 1)}},
value,
}
if _, ok := init.Data.(*js_ast.EUndefined); !ok {
args = append(args, init)
}
init = p.callRuntime(init.Loc, "__runInitializers", args)
p.recordUsage(ctx.decoratorContextRef)
}
// Generate the assignment target
var memberExpr js_ast.Expr
if mustLowerPrivate {
// Generate a new symbol for this private field
ref = p.generateTempRef(tempRefNeedsDeclare, "_"+p.symbols[private.Ref.InnerIndex].OriginalName[1:])
p.symbols[private.Ref.InnerIndex].Link = ref
// Initialize the private field to a new WeakMap
if p.weakMapRef == ast.InvalidRef {
p.weakMapRef = p.newSymbol(ast.SymbolUnbound, "WeakMap")
p.moduleScope.Generated = append(p.moduleScope.Generated, p.weakMapRef)
}
ctx.privateMembers = append(ctx.privateMembers, js_ast.Assign(
js_ast.Expr{Loc: prop.Key.Loc, Data: &js_ast.EIdentifier{Ref: ref}},
js_ast.Expr{Loc: prop.Key.Loc, Data: &js_ast.ENew{Target: js_ast.Expr{Loc: prop.Key.Loc, Data: &js_ast.EIdentifier{Ref: p.weakMapRef}}}},
))
p.recordUsage(ref)
// Add every newly-constructed instance into this map
key := js_ast.Expr{Loc: prop.Key.Loc, Data: &js_ast.EIdentifier{Ref: ref}}
args := []js_ast.Expr{target, key}
if _, ok := init.Data.(*js_ast.EUndefined); !ok {
args = append(args, init)
}
memberExpr = p.callRuntime(loc, "__privateAdd", args)
p.recordUsage(ref)
} else if private == nil && ctx.class.UseDefineForClassFields {
if p.shouldAddKeyComment {
if str, ok := prop.Key.Data.(*js_ast.EString); ok {
str.HasPropertyKeyComment = true
}
}
args := []js_ast.Expr{target, prop.Key}
if _, ok := init.Data.(*js_ast.EUndefined); !ok {
args = append(args, init)
}
memberExpr = js_ast.Expr{Loc: loc, Data: &js_ast.ECall{
Target: p.importFromRuntime(loc, "__publicField"),
Args: args,
}}
} else {
if key, ok := prop.Key.Data.(*js_ast.EString); ok && !prop.Flags.Has(js_ast.PropertyIsComputed) && !prop.Flags.Has(js_ast.PropertyPreferQuotedKey) {
target = js_ast.Expr{Loc: loc, Data: &js_ast.EDot{
Target: target,
Name: helpers.UTF16ToString(key.Value),
NameLoc: prop.Key.Loc,
}}
} else {
target = js_ast.Expr{Loc: loc, Data: &js_ast.EIndex{
Target: target,
Index: prop.Key,
}}
}
memberExpr = js_ast.Assign(target, init)
}
// Run extra initializers
if initializerIndex != -1 {
var value js_ast.Expr
package js_parser
import (
"fmt"
"testing"
"github.com/evanw/esbuild/internal/compat"
)
// TestLowerFunctionArgumentScope checks that expressions which require
// temporary variables (optional chaining, "??", lowered static fields) are
// wrapped in an IIFE when they appear inside a function's argument scope
// (default values, computed destructuring keys), since temporaries cannot
// be hoisted into the parameter list itself.
func TestLowerFunctionArgumentScope(t *testing.T) {
	// Each template places "%s" in a different argument-scope position
	templates := []string{
		"(x = %s) => {\n};\n",
		"(function(x = %s) {\n});\n",
		"function foo(x = %s) {\n}\n",
		"({ [%s]: x }) => {\n};\n",
		"(function({ [%s]: x }) {\n});\n",
		"function foo({ [%s]: x }) {\n}\n",
		"({ x = %s }) => {\n};\n",
		"(function({ x = %s }) {\n});\n",
		"function foo({ x = %s }) {\n}\n",
	}

	// Each case pairs an input expression with its expected lowered form
	cases := []struct{ before, after string }{
		{"a() ?? b", "((_a) => (_a = a()) != null ? _a : b)()"},
		{"a()?.b", "((_a) => (_a = a()) == null ? void 0 : _a.b)()"},
		{"a?.b?.()", "((_a) => (_a = a == null ? void 0 : a.b) == null ? void 0 : _a.call(a))()"},
		{"a.b.c?.()", "((_a) => ((_b) => (_b = (_a = a.b).c) == null ? void 0 : _b.call(_a))())()"},
		{"class { static a }", "((_a) => (_a = class {\n}, __publicField(_a, \"a\"), _a))()"},
	}

	for _, template := range templates {
		for _, c := range cases {
			expectPrintedTarget(t, 2015, fmt.Sprintf(template, c.before), fmt.Sprintf(template, c.after))
		}
	}
}
// TestLowerArrowFunction checks that arrow functions lowered to ES5 capture
// "this" and "arguments" from the enclosing function scope, while plain
// function expressions keep their own bindings untouched.
func TestLowerArrowFunction(t *testing.T) {
	cases := []struct{ input, expected string }{
		// Arrows capture the enclosing "this" via a "_this" temporary
		{
			"function foo(a) { arr.forEach(e => this.foo(e)) }",
			"function foo(a) {\n  var _this = this;\n  arr.forEach(function(e) {\n    return _this.foo(e);\n  });\n}\n",
		},
		// Arrows capture the enclosing "arguments" via an "_arguments" temporary
		{
			"function foo(a) { return () => arguments[0] }",
			"function foo(a) {\n  var _arguments = arguments;\n  return function() {\n    return _arguments[0];\n  };\n}\n",
		},
		// Plain function expressions keep their own "this"
		{
			"function foo(a) { arr.forEach(function(e) { return this.foo(e) }) }",
			"function foo(a) {\n  arr.forEach(function(e) {\n    return this.foo(e);\n  });\n}\n",
		},
		// Plain function expressions keep their own "arguments"
		{
			"function foo(a) { return function() { return arguments[0] } }",
			"function foo(a) {\n  return function() {\n    return arguments[0];\n  };\n}\n",
		},
		// Handling this case isn't implemented yet
		{
			"var foo = () => this",
			"var foo = function() {\n  return this;\n};\n",
		},
	}

	for _, c := range cases {
		expectPrintedTarget(t, 5, c.input, c.expected)
	}
}
// TestLowerNullishCoalescing covers both the parse-time restriction on
// mixing "??" with "&&"/"||" without parentheses, and the lowering of "??"
// to a "!= null" check for targets before ES2020.
func TestLowerNullishCoalescing(t *testing.T) {
	// Mixing "??" with "&&" or "||" without parentheses is a syntax error
	expectParseError(t, "a ?? b && c",
		"<stdin>: ERROR: Cannot use \"&&\" with \"??\" without parentheses\n"+
			"NOTE: Expressions of the form \"x ?? y && z\" are not allowed in JavaScript. "+
			"You must disambiguate between \"(x ?? y) && z\" and \"x ?? (y && z)\" by adding parentheses.\n")
	expectParseError(t, "a ?? b || c",
		"<stdin>: ERROR: Cannot use \"||\" with \"??\" without parentheses\n"+
			"NOTE: Expressions of the form \"x ?? y || z\" are not allowed in JavaScript. "+
			"You must disambiguate between \"(x ?? y) || z\" and \"x ?? (y || z)\" by adding parentheses.\n")
	// Both "&&" and "||" after "??" each produce their own error
	expectParseError(t, "a ?? b && c || d",
		"<stdin>: ERROR: Cannot use \"&&\" with \"??\" without parentheses\n"+
			"NOTE: Expressions of the form \"x ?? y && z\" are not allowed in JavaScript. "+
			"You must disambiguate between \"(x ?? y) && z\" and \"x ?? (y && z)\" by adding parentheses.\n"+
			"<stdin>: ERROR: Cannot use \"||\" with \"??\" without parentheses\n"+
			"NOTE: Expressions of the form \"x ?? y || z\" are not allowed in JavaScript. "+
			"You must disambiguate between \"(x ?? y) || z\" and \"x ?? (y || z)\" by adding parentheses.\n")
	expectParseError(t, "a ?? b || c && d",
		"<stdin>: ERROR: Cannot use \"||\" with \"??\" without parentheses\n"+
			"NOTE: Expressions of the form \"x ?? y || z\" are not allowed in JavaScript. "+
			"You must disambiguate between \"(x ?? y) || z\" and \"x ?? (y || z)\" by adding parentheses.\n")
	// The restriction also applies with "??" on the right-hand side
	expectParseError(t, "a && b ?? c",
		"<stdin>: ERROR: Cannot use \"??\" with \"&&\" without parentheses\n"+
			"NOTE: Expressions of the form \"x && y ?? z\" are not allowed in JavaScript. "+
			"You must disambiguate between \"(x && y) ?? z\" and \"x && (y ?? z)\" by adding parentheses.\n")
	expectParseError(t, "a || b ?? c",
		"<stdin>: ERROR: Cannot use \"??\" with \"||\" without parentheses\n"+
			"NOTE: Expressions of the form \"x || y ?? z\" are not allowed in JavaScript. "+
			"You must disambiguate between \"(x || y) ?? z\" and \"x || (y ?? z)\" by adding parentheses.\n")
	expectParseError(t, "a && b || c ?? c",
		"<stdin>: ERROR: Cannot use \"??\" with \"||\" without parentheses\n"+
			"NOTE: Expressions of the form \"x || y ?? z\" are not allowed in JavaScript. "+
			"You must disambiguate between \"(x || y) ?? z\" and \"x || (y ?? z)\" by adding parentheses.\n")
	expectParseError(t, "a || b && c ?? d",
		"<stdin>: ERROR: Cannot use \"??\" with \"||\" without parentheses\n"+
			"NOTE: Expressions of the form \"x || y ?? z\" are not allowed in JavaScript. "+
			"You must disambiguate between \"(x || y) ?? z\" and \"x || (y ?? z)\" by adding parentheses.\n")

	// Separate comma-joined expressions are fine since nothing is ambiguous
	expectPrinted(t, "a ?? b, b && c", "a ?? b, b && c;\n")
	expectPrinted(t, "a ?? b, b || c", "a ?? b, b || c;\n")
	expectPrinted(t, "a && b, b ?? c", "a && b, b ?? c;\n")
	expectPrinted(t, "a || b, b ?? c", "a || b, b ?? c;\n")

	// "??" passes through for ES2020+ and is lowered to "!= null" before that,
	// with a temporary only when the left side may have side effects
	expectPrintedTarget(t, 2020, "a ?? b", "a ?? b;\n")
	expectPrintedTarget(t, 2019, "a ?? b", "a != null ? a : b;\n")
	expectPrintedTarget(t, 2019, "a() ?? b()", "var _a;\n(_a = a()) != null ? _a : b();\n")
	expectPrintedTarget(t, 2019, "function foo() { if (x) { a() ?? b() ?? c() } }",
		"function foo() {\n  var _a, _b;\n  if (x) {\n    (_b = (_a = a()) != null ? _a : b()) != null ? _b : c();\n  }\n}\n")
	expectPrintedTarget(t, 2019, "() => a ?? b", "() => a != null ? a : b;\n")
	expectPrintedTarget(t, 2019, "() => a() ?? b()", "() => {\n  var _a;\n  return (_a = a()) != null ? _a : b();\n};\n")

	// Temporary variables should not come before "use strict"
	expectPrintedTarget(t, 2019, "function f() { /*! @license */ 'use strict'; a = b.c ?? d }",
		"function f() {\n  /*! @license */\n  \"use strict\";\n  var _a;\n  a = (_a = b.c) != null ? _a : d;\n}\n")
}
// TestLowerNullishCoalescingAssign checks the lowering of "??=": passed
// through for ES2021+, lowered to "??" plus an assignment for ES2020, and
// to a "!= null" check for older targets, with temporaries introduced only
// where the target expression has side effects.
func TestLowerNullishCoalescingAssign(t *testing.T) {
	expectPrinted(t, "a ??= b", "a ??= b;\n")

	// ES2019: "??" itself must be lowered to a "!= null" check
	expectPrintedTarget(t, 2019, "a ??= b", "a != null ? a : a = b;\n")
	expectPrintedTarget(t, 2019, "a.b ??= c", "var _a;\n(_a = a.b) != null ? _a : a.b = c;\n")
	expectPrintedTarget(t, 2019, "a().b ??= c", "var _a, _b;\n(_b = (_a = a()).b) != null ? _b : _a.b = c;\n")
	expectPrintedTarget(t, 2019, "a[b] ??= c", "var _a;\n(_a = a[b]) != null ? _a : a[b] = c;\n")
	expectPrintedTarget(t, 2019, "a()[b()] ??= c", "var _a, _b, _c;\n(_c = (_a = a())[_b = b()]) != null ? _c : _a[_b] = c;\n")
	expectPrintedTarget(t, 2019, "class Foo { #x; constructor() { this.#x ??= 2 } }", `var _x;
class Foo {
  constructor() {
    __privateAdd(this, _x);
    var _a;
    (_a = __privateGet(this, _x)) != null ? _a : __privateSet(this, _x, 2);
  }
}
_x = new WeakMap();
`)

	// ES2020: "??" is supported, only the compound assignment is lowered
	expectPrintedTarget(t, 2020, "a ??= b", "a ?? (a = b);\n")
	expectPrintedTarget(t, 2020, "a.b ??= c", "a.b ?? (a.b = c);\n")
	expectPrintedTarget(t, 2020, "a().b ??= c", "var _a;\n(_a = a()).b ?? (_a.b = c);\n")
	expectPrintedTarget(t, 2020, "a[b] ??= c", "a[b] ?? (a[b] = c);\n")
	expectPrintedTarget(t, 2020, "a()[b()] ??= c", "var _a, _b;\n(_a = a())[_b = b()] ?? (_a[_b] = c);\n")
	expectPrintedTarget(t, 2020, "class Foo { #x; constructor() { this.#x ??= 2 } }", `var _x;
class Foo {
  constructor() {
    __privateAdd(this, _x);
    __privateGet(this, _x) ?? __privateSet(this, _x, 2);
  }
}
_x = new WeakMap();
`)

	// ES2021: "??=" is passed through (private fields still get lowered here)
	expectPrintedTarget(t, 2021, "a ??= b", "a ??= b;\n")
	expectPrintedTarget(t, 2021, "a.b ??= c", "a.b ??= c;\n")
	expectPrintedTarget(t, 2021, "a().b ??= c", "a().b ??= c;\n")
	expectPrintedTarget(t, 2021, "a[b] ??= c", "a[b] ??= c;\n")
	expectPrintedTarget(t, 2021, "a()[b()] ??= c", "a()[b()] ??= c;\n")
	expectPrintedTarget(t, 2021, "class Foo { #x; constructor() { this.#x ??= 2 } }", `var _x;
class Foo {
  constructor() {
    __privateAdd(this, _x);
    __privateGet(this, _x) ?? __privateSet(this, _x, 2);
  }
}
_x = new WeakMap();
`)

	// Temporary variables should not come before "use strict"
	expectPrintedTarget(t, 2019, "function f() { /*! @license */ 'use strict'; a.b ??= c.d }",
		"function f() {\n  /*! @license */\n  \"use strict\";\n  var _a;\n  (_a = a.b) != null ? _a : a.b = c.d;\n}\n")
}
// TestLowerLogicalAssign checks the lowering of "&&=" and "||=": passed
// through for ES2021+ and lowered to "&&"/"||" plus an assignment for older
// targets, with temporaries introduced only where the target has side effects.
func TestLowerLogicalAssign(t *testing.T) {
	expectPrinted(t, "a &&= b", "a &&= b;\n")
	expectPrinted(t, "a ||= b", "a ||= b;\n")

	// ES2020: "&&=" is lowered to "&&" plus an assignment
	expectPrintedTarget(t, 2020, "a &&= b", "a && (a = b);\n")
	expectPrintedTarget(t, 2020, "a.b &&= c", "a.b && (a.b = c);\n")
	expectPrintedTarget(t, 2020, "a().b &&= c", "var _a;\n(_a = a()).b && (_a.b = c);\n")
	expectPrintedTarget(t, 2020, "a[b] &&= c", "a[b] && (a[b] = c);\n")
	expectPrintedTarget(t, 2020, "a()[b()] &&= c", "var _a, _b;\n(_a = a())[_b = b()] && (_a[_b] = c);\n")
	expectPrintedTarget(t, 2020, "class Foo { #x; constructor() { this.#x &&= 2 } }", `var _x;
class Foo {
  constructor() {
    __privateAdd(this, _x);
    __privateGet(this, _x) && __privateSet(this, _x, 2);
  }
}
_x = new WeakMap();
`)

	// ES2021: "&&=" is passed through (private fields still get lowered here)
	expectPrintedTarget(t, 2021, "a &&= b", "a &&= b;\n")
	expectPrintedTarget(t, 2021, "a.b &&= c", "a.b &&= c;\n")
	expectPrintedTarget(t, 2021, "a().b &&= c", "a().b &&= c;\n")
	expectPrintedTarget(t, 2021, "a[b] &&= c", "a[b] &&= c;\n")
	expectPrintedTarget(t, 2021, "a()[b()] &&= c", "a()[b()] &&= c;\n")
	expectPrintedTarget(t, 2021, "class Foo { #x; constructor() { this.#x &&= 2 } }", `var _x;
class Foo {
  constructor() {
    __privateAdd(this, _x);
    __privateGet(this, _x) && __privateSet(this, _x, 2);
  }
}
_x = new WeakMap();
`)

	// ES2020: "||=" is lowered to "||" plus an assignment
	expectPrintedTarget(t, 2020, "a ||= b", "a || (a = b);\n")
	expectPrintedTarget(t, 2020, "a.b ||= c", "a.b || (a.b = c);\n")
	expectPrintedTarget(t, 2020, "a().b ||= c", "var _a;\n(_a = a()).b || (_a.b = c);\n")
	expectPrintedTarget(t, 2020, "a[b] ||= c", "a[b] || (a[b] = c);\n")
	expectPrintedTarget(t, 2020, "a()[b()] ||= c", "var _a, _b;\n(_a = a())[_b = b()] || (_a[_b] = c);\n")
	expectPrintedTarget(t, 2020, "class Foo { #x; constructor() { this.#x ||= 2 } }", `var _x;
class Foo {
  constructor() {
    __privateAdd(this, _x);
    __privateGet(this, _x) || __privateSet(this, _x, 2);
  }
}
_x = new WeakMap();
`)

	// ES2021: "||=" is passed through (private fields still get lowered here)
	expectPrintedTarget(t, 2021, "a ||= b", "a ||= b;\n")
	expectPrintedTarget(t, 2021, "a.b ||= c", "a.b ||= c;\n")
	expectPrintedTarget(t, 2021, "a().b ||= c", "a().b ||= c;\n")
	expectPrintedTarget(t, 2021, "a[b] ||= c", "a[b] ||= c;\n")
	expectPrintedTarget(t, 2021, "a()[b()] ||= c", "a()[b()] ||= c;\n")
	expectPrintedTarget(t, 2021, "class Foo { #x; constructor() { this.#x ||= 2 } }", `var _x;
class Foo {
  constructor() {
    __privateAdd(this, _x);
    __privateGet(this, _x) || __privateSet(this, _x, 2);
  }
}
_x = new WeakMap();
`)
}
// TestLowerAsyncFunctions checks that async functions lowered to ES2015 are
// rewritten to generator functions wrapped in the "__async" runtime helper,
// and that argument forwarding is only generated when necessary.
func TestLowerAsyncFunctions(t *testing.T) {
	// Lowered non-arrow functions with argument evaluations should merely use
	// "arguments" rather than allocating a new array when forwarding arguments
	expectPrintedTarget(t, 2015, "async function foo(a, b = couldThrowErrors()) {console.log(a, b);}", `function foo(_0) {
  return __async(this, arguments, function* (a, b = couldThrowErrors()) {
    console.log(a, b);
  });
}
`)
	// Skip forwarding altogether when parameter evaluation obviously cannot throw
	expectPrintedTarget(t, 2015, "async (a, b = 123) => {console.log(a, b);}", `(a, b = 123) => __async(null, null, function* () {
  console.log(a, b);
});
`)
}
// TestLowerClassSideEffectOrder checks that when class fields are lowered,
// the evaluation order of computed property key expressions (which may have
// side effects) is preserved by capturing them into temporaries in source
// order, even though the lowered initializers end up in the constructor or
// after the class body.
func TestLowerClassSideEffectOrder(t *testing.T) {
	// The order of computed property side effects must not change
	expectPrintedTarget(t, 2015, `class Foo {
	[a()]() {}
	[b()];
	[c()] = 1;
	[d()]() {}
	static [e()];
	static [f()] = 1;
	static [g()]() {}
	[h()];
}
`, `var _a, _b, _c, _d, _e, _f;
class Foo {
  constructor() {
    __publicField(this, _f);
    __publicField(this, _e, 1);
    __publicField(this, _a);
  }
  [a()]() {
  }
  [(_f = b(), _e = c(), d())]() {
  }
  static [(_d = e(), _c = f(), _b = g(), _a = h(), _b)]() {
  }
}
__publicField(Foo, _d);
__publicField(Foo, _c, 1);
`)
}
// TestLowerClassInstance checks the lowering of instance class fields to
// ES2015: initializers move into the constructor as "__publicField" calls,
// computed keys are captured into temporaries, and a constructor (with the
// required "super()" call for derived classes) is synthesized when missing.
func TestLowerClassInstance(t *testing.T) {
	// Class statements: fields become "__publicField" calls in the constructor
	expectPrintedTarget(t, 2015, "class Foo {}", "class Foo {\n}\n")
	expectPrintedTarget(t, 2015, "class Foo { foo }", "class Foo {\n  constructor() {\n    __publicField(this, \"foo\");\n  }\n}\n")
	expectPrintedTarget(t, 2015, "class Foo { foo = null }", "class Foo {\n  constructor() {\n    __publicField(this, \"foo\", null);\n  }\n}\n")
	expectPrintedTarget(t, 2015, "class Foo { 123 }", "class Foo {\n  constructor() {\n    __publicField(this, 123);\n  }\n}\n")
	expectPrintedTarget(t, 2015, "class Foo { 123 = null }", "class Foo {\n  constructor() {\n    __publicField(this, 123, null);\n  }\n}\n")
	expectPrintedTarget(t, 2015, "class Foo { [foo] }", "var _a;\n_a = foo;\nclass Foo {\n  constructor() {\n    __publicField(this, _a);\n  }\n}\n")
	expectPrintedTarget(t, 2015, "class Foo { [foo] = null }", "var _a;\n_a = foo;\nclass Foo {\n  constructor() {\n    __publicField(this, _a, null);\n  }\n}\n")

	// Class expressions get the same treatment
	expectPrintedTarget(t, 2015, "(class {})", "(class {\n});\n")
	expectPrintedTarget(t, 2015, "(class { foo })", "(class {\n  constructor() {\n    __publicField(this, \"foo\");\n  }\n});\n")
	expectPrintedTarget(t, 2015, "(class { foo = null })", "(class {\n  constructor() {\n    __publicField(this, \"foo\", null);\n  }\n});\n")
	expectPrintedTarget(t, 2015, "(class { 123 })", "(class {\n  constructor() {\n    __publicField(this, 123);\n  }\n});\n")
	expectPrintedTarget(t, 2015, "(class { 123 = null })", "(class {\n  constructor() {\n    __publicField(this, 123, null);\n  }\n});\n")
	expectPrintedTarget(t, 2015, "(class { [foo] })", "var _a;\n_a = foo, class {\n  constructor() {\n    __publicField(this, _a);\n  }\n};\n")
	expectPrintedTarget(t, 2015, "(class { [foo] = null })", "var _a;\n_a = foo, class {\n  constructor() {\n    __publicField(this, _a, null);\n  }\n};\n")

	// Derived classes: a synthesized constructor must call "super()" first,
	// forwarding arguments when there was no explicit constructor
	expectPrintedTarget(t, 2015, "class Foo extends Bar {}", `class Foo extends Bar {
}
`)
	expectPrintedTarget(t, 2015, "class Foo extends Bar { bar() {} constructor() { super() } }", `class Foo extends Bar {
  bar() {
  }
  constructor() {
    super();
  }
}
`)
	expectPrintedTarget(t, 2015, "class Foo extends Bar { bar() {} foo }", `class Foo extends Bar {
  constructor() {
    super(...arguments);
    __publicField(this, "foo");
  }
  bar() {
  }
}
`)
	expectPrintedTarget(t, 2015, "class Foo extends Bar { bar() {} foo; constructor() { super() } }", `class Foo extends Bar {
  constructor() {
    super();
    __publicField(this, "foo");
  }
  bar() {
  }
}
`)
	// Lowered rest-pattern arguments are initialized before "super()" is called
	expectPrintedTarget(t, 2015, "class Foo extends Bar { bar() {} foo; constructor({ ...args }) { super() } }", `class Foo extends Bar {
  constructor(_a) {
    var args = __objRest(_a, []);
    super();
    __publicField(this, "foo");
  }
  bar() {
  }
}
`)
}
// TestLowerClassStatic checks the lowering of static class members to
// ES2015: static fields become "__publicField" calls after the class body,
// computed keys are captured into temporaries, anonymous classes that need
// a reference get one ("stdin_default" for "export default", "_a" for class
// expressions), and static blocks become IIFEs after the class.
func TestLowerClassStatic(t *testing.T) {
	// Class statements: static fields move out, static methods stay in place
	expectPrintedTarget(t, 2015, "class Foo { static foo }", "class Foo {\n}\n__publicField(Foo, \"foo\");\n")
	expectPrintedTarget(t, 2015, "class Foo { static foo = null }", "class Foo {\n}\n__publicField(Foo, \"foo\", null);\n")
	expectPrintedTarget(t, 2015, "class Foo { static foo(a, b) {} }", "class Foo {\n  static foo(a, b) {\n  }\n}\n")
	expectPrintedTarget(t, 2015, "class Foo { static get foo() {} }", "class Foo {\n  static get foo() {\n  }\n}\n")
	expectPrintedTarget(t, 2015, "class Foo { static set foo(a) {} }", "class Foo {\n  static set foo(a) {\n  }\n}\n")
	expectPrintedTarget(t, 2015, "class Foo { static 123 }", "class Foo {\n}\n__publicField(Foo, 123);\n")
	expectPrintedTarget(t, 2015, "class Foo { static 123 = null }", "class Foo {\n}\n__publicField(Foo, 123, null);\n")
	expectPrintedTarget(t, 2015, "class Foo { static 123(a, b) {} }", "class Foo {\n  static 123(a, b) {\n  }\n}\n")
	expectPrintedTarget(t, 2015, "class Foo { static get 123() {} }", "class Foo {\n  static get 123() {\n  }\n}\n")
	expectPrintedTarget(t, 2015, "class Foo { static set 123(a) {} }", "class Foo {\n  static set 123(a) {\n  }\n}\n")
	expectPrintedTarget(t, 2015, "class Foo { static [foo] }", "var _a;\n_a = foo;\nclass Foo {\n}\n__publicField(Foo, _a);\n")
	expectPrintedTarget(t, 2015, "class Foo { static [foo] = null }", "var _a;\n_a = foo;\nclass Foo {\n}\n__publicField(Foo, _a, null);\n")
	expectPrintedTarget(t, 2015, "class Foo { static [foo](a, b) {} }", "class Foo {\n  static [foo](a, b) {\n  }\n}\n")
	expectPrintedTarget(t, 2015, "class Foo { static get [foo]() {} }", "class Foo {\n  static get [foo]() {\n  }\n}\n")
	expectPrintedTarget(t, 2015, "class Foo { static set [foo](a) {} }", "class Foo {\n  static set [foo](a) {\n  }\n}\n")

	// "export default class" with a name keeps the name for the field target
	expectPrintedTarget(t, 2015, "export default class Foo { static foo }", "export default class Foo {\n}\n__publicField(Foo, \"foo\");\n")
	expectPrintedTarget(t, 2015, "export default class Foo { static foo = null }", "export default class Foo {\n}\n__publicField(Foo, \"foo\", null);\n")
	expectPrintedTarget(t, 2015, "export default class Foo { static foo(a, b) {} }", "export default class Foo {\n  static foo(a, b) {\n  }\n}\n")
	expectPrintedTarget(t, 2015, "export default class Foo { static get foo() {} }", "export default class Foo {\n  static get foo() {\n  }\n}\n")
	expectPrintedTarget(t, 2015, "export default class Foo { static set foo(a) {} }", "export default class Foo {\n  static set foo(a) {\n  }\n}\n")
	expectPrintedTarget(t, 2015, "export default class Foo { static 123 }", "export default class Foo {\n}\n__publicField(Foo, 123);\n")
	expectPrintedTarget(t, 2015, "export default class Foo { static 123 = null }", "export default class Foo {\n}\n__publicField(Foo, 123, null);\n")
	expectPrintedTarget(t, 2015, "export default class Foo { static 123(a, b) {} }", "export default class Foo {\n  static 123(a, b) {\n  }\n}\n")
	expectPrintedTarget(t, 2015, "export default class Foo { static get 123() {} }", "export default class Foo {\n  static get 123() {\n  }\n}\n")
	expectPrintedTarget(t, 2015, "export default class Foo { static set 123(a) {} }", "export default class Foo {\n  static set 123(a) {\n  }\n}\n")
	expectPrintedTarget(t, 2015, "export default class Foo { static [foo] }", "var _a;\n_a = foo;\nexport default class Foo {\n}\n__publicField(Foo, _a);\n")
	expectPrintedTarget(t, 2015, "export default class Foo { static [foo] = null }", "var _a;\n_a = foo;\nexport default class Foo {\n}\n__publicField(Foo, _a, null);\n")
	expectPrintedTarget(t, 2015, "export default class Foo { static [foo](a, b) {} }", "export default class Foo {\n  static [foo](a, b) {\n  }\n}\n")
	expectPrintedTarget(t, 2015, "export default class Foo { static get [foo]() {} }", "export default class Foo {\n  static get [foo]() {\n  }\n}\n")
	expectPrintedTarget(t, 2015, "export default class Foo { static set [foo](a) {} }", "export default class Foo {\n  static set [foo](a) {\n  }\n}\n")

	// Anonymous "export default class" gets the "stdin_default" name when a
	// reference to the class is needed for a lowered static field
	expectPrintedTarget(t, 2015, "export default class { static foo }",
		"export default class stdin_default {\n}\n__publicField(stdin_default, \"foo\");\n")
	expectPrintedTarget(t, 2015, "export default class { static foo = null }",
		"export default class stdin_default {\n}\n__publicField(stdin_default, \"foo\", null);\n")
	expectPrintedTarget(t, 2015, "export default class { static foo(a, b) {} }", "export default class {\n  static foo(a, b) {\n  }\n}\n")
	expectPrintedTarget(t, 2015, "export default class { static get foo() {} }", "export default class {\n  static get foo() {\n  }\n}\n")
	expectPrintedTarget(t, 2015, "export default class { static set foo(a) {} }", "export default class {\n  static set foo(a) {\n  }\n}\n")
	expectPrintedTarget(t, 2015, "export default class { static 123 }",
		"export default class stdin_default {\n}\n__publicField(stdin_default, 123);\n")
	expectPrintedTarget(t, 2015, "export default class { static 123 = null }",
		"export default class stdin_default {\n}\n__publicField(stdin_default, 123, null);\n")
	expectPrintedTarget(t, 2015, "export default class { static 123(a, b) {} }", "export default class {\n  static 123(a, b) {\n  }\n}\n")
	expectPrintedTarget(t, 2015, "export default class { static get 123() {} }", "export default class {\n  static get 123() {\n  }\n}\n")
	expectPrintedTarget(t, 2015, "export default class { static set 123(a) {} }", "export default class {\n  static set 123(a) {\n  }\n}\n")
	expectPrintedTarget(t, 2015, "export default class { static [foo] }",
		"var _a;\n_a = foo;\nexport default class stdin_default {\n}\n__publicField(stdin_default, _a);\n")
	expectPrintedTarget(t, 2015, "export default class { static [foo] = null }",
		"var _a;\n_a = foo;\nexport default class stdin_default {\n}\n__publicField(stdin_default, _a, null);\n")
	expectPrintedTarget(t, 2015, "export default class { static [foo](a, b) {} }", "export default class {\n  static [foo](a, b) {\n  }\n}\n")
	expectPrintedTarget(t, 2015, "export default class { static get [foo]() {} }", "export default class {\n  static get [foo]() {\n  }\n}\n")
	expectPrintedTarget(t, 2015, "export default class { static set [foo](a) {} }", "export default class {\n  static set [foo](a) {\n  }\n}\n")

	// Named class expressions are captured into a temporary when a lowered
	// static field needs to reference the class (the name itself is dropped)
	expectPrintedTarget(t, 2015, "(class Foo { static foo })", "var _a;\n_a = class {\n}, __publicField(_a, \"foo\"), _a;\n")
	expectPrintedTarget(t, 2015, "(class Foo { static foo = null })", "var _a;\n_a = class {\n}, __publicField(_a, \"foo\", null), _a;\n")
	expectPrintedTarget(t, 2015, "(class Foo { static foo(a, b) {} })", "(class Foo {\n  static foo(a, b) {\n  }\n});\n")
	expectPrintedTarget(t, 2015, "(class Foo { static get foo() {} })", "(class Foo {\n  static get foo() {\n  }\n});\n")
	expectPrintedTarget(t, 2015, "(class Foo { static set foo(a) {} })", "(class Foo {\n  static set foo(a) {\n  }\n});\n")
	expectPrintedTarget(t, 2015, "(class Foo { static 123 })", "var _a;\n_a = class {\n}, __publicField(_a, 123), _a;\n")
	expectPrintedTarget(t, 2015, "(class Foo { static 123 = null })", "var _a;\n_a = class {\n}, __publicField(_a, 123, null), _a;\n")
	expectPrintedTarget(t, 2015, "(class Foo { static 123(a, b) {} })", "(class Foo {\n  static 123(a, b) {\n  }\n});\n")
	expectPrintedTarget(t, 2015, "(class Foo { static get 123() {} })", "(class Foo {\n  static get 123() {\n  }\n});\n")
	expectPrintedTarget(t, 2015, "(class Foo { static set 123(a) {} })", "(class Foo {\n  static set 123(a) {\n  }\n});\n")
	expectPrintedTarget(t, 2015, "(class Foo { static [foo] })", "var _a, _b;\n_a = foo, _b = class {\n}, __publicField(_b, _a), _b;\n")
	expectPrintedTarget(t, 2015, "(class Foo { static [foo] = null })", "var _a, _b;\n_a = foo, _b = class {\n}, __publicField(_b, _a, null), _b;\n")
	expectPrintedTarget(t, 2015, "(class Foo { static [foo](a, b) {} })", "(class Foo {\n  static [foo](a, b) {\n  }\n});\n")
	expectPrintedTarget(t, 2015, "(class Foo { static get [foo]() {} })", "(class Foo {\n  static get [foo]() {\n  }\n});\n")
	expectPrintedTarget(t, 2015, "(class Foo { static set [foo](a) {} })", "(class Foo {\n  static set [foo](a) {\n  }\n});\n")

	// Anonymous class expressions behave the same way
	expectPrintedTarget(t, 2015, "(class { static foo })", "var _a;\n_a = class {\n}, __publicField(_a, \"foo\"), _a;\n")
	expectPrintedTarget(t, 2015, "(class { static foo = null })", "var _a;\n_a = class {\n}, __publicField(_a, \"foo\", null), _a;\n")
	expectPrintedTarget(t, 2015, "(class { static foo(a, b) {} })", "(class {\n  static foo(a, b) {\n  }\n});\n")
	expectPrintedTarget(t, 2015, "(class { static get foo() {} })", "(class {\n  static get foo() {\n  }\n});\n")
	expectPrintedTarget(t, 2015, "(class { static set foo(a) {} })", "(class {\n  static set foo(a) {\n  }\n});\n")
	expectPrintedTarget(t, 2015, "(class { static 123 })", "var _a;\n_a = class {\n}, __publicField(_a, 123), _a;\n")
	expectPrintedTarget(t, 2015, "(class { static 123 = null })", "var _a;\n_a = class {\n}, __publicField(_a, 123, null), _a;\n")
	expectPrintedTarget(t, 2015, "(class { static 123(a, b) {} })", "(class {\n  static 123(a, b) {\n  }\n});\n")
	expectPrintedTarget(t, 2015, "(class { static get 123() {} })", "(class {\n  static get 123() {\n  }\n});\n")
	expectPrintedTarget(t, 2015, "(class { static set 123(a) {} })", "(class {\n  static set 123(a) {\n  }\n});\n")
	expectPrintedTarget(t, 2015, "(class { static [foo] })", "var _a, _b;\n_a = foo, _b = class {\n}, __publicField(_b, _a), _b;\n")
	expectPrintedTarget(t, 2015, "(class { static [foo] = null })", "var _a, _b;\n_a = foo, _b = class {\n}, __publicField(_b, _a, null), _b;\n")
	expectPrintedTarget(t, 2015, "(class { static [foo](a, b) {} })", "(class {\n  static [foo](a, b) {\n  }\n});\n")
	expectPrintedTarget(t, 2015, "(class { static get [foo]() {} })", "(class {\n  static get [foo]() {\n  }\n});\n")
	expectPrintedTarget(t, 2015, "(class { static set [foo](a) {} })", "(class {\n  static set [foo](a) {\n  }\n});\n")
	expectPrintedTarget(t, 2015, "(class {})", "(class {\n});\n")
	expectPrintedTarget(t, 2015, "class Foo {}", "class Foo {\n}\n")
	expectPrintedTarget(t, 2015, "(class Foo {})", "(class Foo {\n});\n")

	// Static field with initializers that access the class expression name must
	// still work when they are pulled outside of the class body
	expectPrintedTarget(t, 2015, `
		let Bar = class Foo {
			static foo = 123
			static bar = Foo.foo
		}
	`, `var _a;
let Bar = (_a = class {
}, __publicField(_a, "foo", 123), __publicField(_a, "bar", _a.foo), _a);
`)

	// Generated IIFEs for static class blocks should be appropriately annotated
	expectPrintedTarget(t, 2015, "class Foo { static { try {} finally { impureCall() } } }",
		"class Foo {\n}\n(() => {\n  try {\n  } finally {\n    impureCall();\n  }\n})();\n")
	expectPrintedTarget(t, 2015, "(class Foo { static { try {} finally { impureCall() } } })",
		"var _a;\n_a = class {\n}, (() => {\n  try {\n  } finally {\n    impureCall();\n  }\n})(), _a;\n")
	expectPrintedTarget(t, 2015, "class Foo { static { try {} finally { /* @__PURE__ */ pureCall() } } }",
		"class Foo {\n}\n/* @__PURE__ */ (() => {\n  try {\n  } finally {\n    /* @__PURE__ */ pureCall();\n  }\n})();\n")
	expectPrintedTarget(t, 2015, "(class Foo { static { try {} finally { /* @__PURE__ */ pureCall() } } })",
		"var _a;\n_a = class {\n}, /* @__PURE__ */ (() => {\n  try {\n  } finally {\n    /* @__PURE__ */ pureCall();\n  }\n})(), _a;\n")
}
func TestLowerClassStaticThis(t *testing.T) {
expectPrinted(t, "class Foo { x = this }", "class Foo {\n x = this;\n}\n")
expectPrinted(t, "class Foo { static x = this }", "class Foo {\n static x = this;\n}\n")
expectPrinted(t, "class Foo { static x = () => this }", "class Foo {\n static x = () => this;\n}\n")
expectPrinted(t, "class Foo { static x = function() { return this } }", "class Foo {\n static x = function() {\n return this;\n };\n}\n")
expectPrinted(t, "class Foo { static [this.x] }", "class Foo {\n static [this.x];\n}\n")
expectPrinted(t, "class Foo { static x = class { y = this } }", "class Foo {\n static x = class {\n y = this;\n };\n}\n")
expectPrinted(t, "class Foo { static x = class { [this.y] } }", "class Foo {\n static x = class {\n [this.y];\n };\n}\n")
expectPrinted(t, "class Foo { static x = class extends this {} }", "class Foo {\n static x = class extends this {\n };\n}\n")
expectPrinted(t, "x = class Foo { x = this }", "x = class Foo {\n x = this;\n};\n")
expectPrinted(t, "x = class Foo { static x = this }", "x = class Foo {\n static x = this;\n};\n")
expectPrinted(t, "x = class Foo { static x = () => this }", "x = class Foo {\n static x = () => this;\n};\n")
expectPrinted(t, "x = class Foo { static x = function() { return this } }", "x = class Foo {\n static x = function() {\n return this;\n };\n};\n")
expectPrinted(t, "x = class Foo { static [this.x] }", "x = class Foo {\n static [this.x];\n};\n")
expectPrinted(t, "x = class Foo { static x = class { y = this } }", "x = class Foo {\n static x = class {\n y = this;\n };\n};\n")
expectPrinted(t, "x = class Foo { static x = class { [this.y] } }", "x = class Foo {\n static x = class {\n [this.y];\n };\n};\n")
expectPrinted(t, "x = class Foo { static x = class extends this {} }", "x = class Foo {\n static x = class extends this {\n };\n};\n")
expectPrinted(t, "x = class { x = this }", "x = class {\n x = this;\n};\n")
expectPrinted(t, "x = class { static x = this }", "x = class {\n static x = this;\n};\n")
expectPrinted(t, "x = class { static x = () => this }", "x = class {\n static x = () => this;\n};\n")
expectPrinted(t, "x = class { static x = function() { return this } }", "x = class {\n static x = function() {\n return this;\n };\n};\n")
expectPrinted(t, "x = class { static [this.x] }", "x = class {\n static [this.x];\n};\n")
expectPrinted(t, "x = class { static x = class { y = this } }", "x = class {\n static x = class {\n y = this;\n };\n};\n")
expectPrinted(t, "x = class { static x = class { [this.y] } }", "x = class {\n static x = class {\n [this.y];\n };\n};\n")
expectPrinted(t, "x = class { static x = class extends this {} }", "x = class {\n static x = class extends this {\n };\n};\n")
expectPrintedTarget(t, 2015, "class Foo { x = this }",
"class Foo {\n constructor() {\n __publicField(this, \"x\", this);\n }\n}\n")
expectPrintedTarget(t, 2015, "class Foo { [this.x] }",
"var _a;\n_a = this.x;\nclass Foo {\n constructor() {\n __publicField(this, _a);\n }\n}\n")
expectPrintedTarget(t, 2015, "class Foo { static x = this }",
"const _Foo = class _Foo {\n};\n__publicField(_Foo, \"x\", _Foo);\nlet Foo = _Foo;\n")
expectPrintedTarget(t, 2015, "class Foo { static x = () => this }",
"const _Foo = class _Foo {\n};\n__publicField(_Foo, \"x\", () => _Foo);\nlet Foo = _Foo;\n")
expectPrintedTarget(t, 2015, "class Foo { static x = function() { return this } }",
"class Foo {\n}\n__publicField(Foo, \"x\", function() {\n return this;\n});\n")
expectPrintedTarget(t, 2015, "class Foo { static [this.x] }",
"var _a;\n_a = this.x;\nclass Foo {\n}\n__publicField(Foo, _a);\n")
expectPrintedTarget(t, 2015, "class Foo { static x = class { y = this } }",
"class Foo {\n}\n__publicField(Foo, \"x\", class {\n constructor() {\n __publicField(this, \"y\", this);\n }\n});\n")
expectPrintedTarget(t, 2015, "class Foo { static x = class { [this.y] } }",
"var _a;\nconst _Foo = class _Foo {\n};\n__publicField(_Foo, \"x\", (_a = _Foo.y, class {\n constructor() {\n __publicField(this, _a);\n }\n}));\nlet Foo = _Foo;\n")
expectPrintedTarget(t, 2015, "class Foo { static x = class extends this {} }",
"const _Foo = class _Foo {\n};\n__publicField(_Foo, \"x\", class extends _Foo {\n});\nlet Foo = _Foo;\n")
expectPrintedTarget(t, 2015, "x = class Foo { x = this }",
"x = class Foo {\n constructor() {\n __publicField(this, \"x\", this);\n }\n};\n")
expectPrintedTarget(t, 2015, "x = class Foo { [this.x] }",
"var _a;\nx = (_a = this.x, class Foo {\n constructor() {\n __publicField(this, _a);\n }\n});\n")
expectPrintedTarget(t, 2015, "x = class Foo { static x = this }",
"var _a;\nx = (_a = class {\n}, __publicField(_a, \"x\", _a), _a);\n")
expectPrintedTarget(t, 2015, "x = class Foo { static x = () => this }",
"var _a;\nx = (_a = class {\n}, __publicField(_a, \"x\", () => _a), _a);\n")
expectPrintedTarget(t, 2015, "x = class Foo { static x = function() { return this } }",
"var _a;\nx = (_a = class {\n}, __publicField(_a, \"x\", function() {\n return this;\n}), _a);\n")
expectPrintedTarget(t, 2015, "x = class Foo { static [this.x] }",
"var _a, _b;\nx = (_a = this.x, _b = class {\n}, __publicField(_b, _a), _b);\n")
expectPrintedTarget(t, 2015, "x = class Foo { static x = class { y = this } }",
"var _a;\nx = (_a = class {\n}, __publicField(_a, \"x\", class {\n constructor() {\n __publicField(this, \"y\", this);\n }\n}), _a);\n")
expectPrintedTarget(t, 2015, "x = class Foo { static x = class { [this.y] } }",
"var _a, _b;\nx = (_b = class {\n}, __publicField(_b, \"x\", (_a = _b.y, class {\n constructor() {\n __publicField(this, _a);\n }\n})), _b);\n")
expectPrintedTarget(t, 2015, "x = class Foo { static x = class extends this {} }",
"var _a;\nx = (_a = class {\n}, __publicField(_a, \"x\", class extends _a {\n}), _a);\n")
expectPrintedTarget(t, 2015, "x = class { x = this }",
"x = class {\n constructor() {\n __publicField(this, \"x\", this);\n }\n};\n")
expectPrintedTarget(t, 2015, "x = class { [this.x] }",
"var _a;\nx = (_a = this.x, class {\n constructor() {\n __publicField(this, _a);\n }\n});\n")
expectPrintedTarget(t, 2015, "x = class { static x = this }",
"var _a;\nx = (_a = class {\n}, __publicField(_a, \"x\", _a), _a);\n")
expectPrintedTarget(t, 2015, "x = class { static x = () => this }",
"var _a;\nx = (_a = class {\n}, __publicField(_a, \"x\", () => _a), _a);\n")
expectPrintedTarget(t, 2015, "x = class { static x = function() { return this } }",
"var _a;\nx = (_a = class {\n}, __publicField(_a, \"x\", function() {\n return this;\n}), _a);\n")
expectPrintedTarget(t, 2015, "x = class { static [this.x] }",
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | true |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/js_parser/json_parser.go | internal/js_parser/json_parser.go | package js_parser
import (
"fmt"
"github.com/evanw/esbuild/internal/compat"
"github.com/evanw/esbuild/internal/helpers"
"github.com/evanw/esbuild/internal/js_ast"
"github.com/evanw/esbuild/internal/js_lexer"
"github.com/evanw/esbuild/internal/logger"
)
// jsonParser holds the state for parsing one JSON source file.
type jsonParser struct {
	log     logger.Log
	source  logger.Source
	tracker logger.LineColumnTracker
	lexer   js_lexer.Lexer
	options JSONOptions
	// Set for files inside "node_modules" (see ParseJSON); suppresses the
	// duplicate-object-key warning in parseExpr
	suppressWarningsAboutWeirdCode bool
}
// parseMaybeTrailingComma consumes a comma and reports whether another
// element follows it. A comma that sits immediately before closeToken is a
// trailing comma: the function returns false and, in strict JSON mode,
// records an error for it.
func (p *jsonParser) parseMaybeTrailingComma(closeToken js_lexer.T) bool {
	r := p.lexer.Range()
	p.lexer.Expect(js_lexer.TComma)
	if p.lexer.Token != closeToken {
		return true
	}
	// The comma turned out to be trailing; strict JSON forbids that
	if p.options.Flavor == js_lexer.JSON {
		p.log.AddError(&p.tracker, r, "JSON does not support trailing commas")
	}
	return false
}
// parseExpr parses a single JSON value and returns it as a JavaScript AST
// expression. Syntax errors are reported through the lexer, which panics
// with js_lexer.LexerPanic (recovered in ParseJSON).
func (p *jsonParser) parseExpr() js_ast.Expr {
	loc := p.lexer.Loc()
	switch p.lexer.Token {
	case js_lexer.TFalse:
		p.lexer.Next()
		return js_ast.Expr{Loc: loc, Data: &js_ast.EBoolean{Value: false}}
	case js_lexer.TTrue:
		p.lexer.Next()
		return js_ast.Expr{Loc: loc, Data: &js_ast.EBoolean{Value: true}}
	case js_lexer.TNull:
		p.lexer.Next()
		return js_ast.Expr{Loc: loc, Data: js_ast.ENullShared}
	case js_lexer.TStringLiteral:
		value := p.lexer.StringLiteral()
		p.lexer.Next()
		return js_ast.Expr{Loc: loc, Data: &js_ast.EString{Value: value}}
	case js_lexer.TNumericLiteral:
		value := p.lexer.Number
		p.lexer.Next()
		return js_ast.Expr{Loc: loc, Data: &js_ast.ENumber{Value: value}}
	case js_lexer.TMinus:
		// A leading minus sign must be followed directly by a number literal
		p.lexer.Next()
		value := p.lexer.Number
		p.lexer.Expect(js_lexer.TNumericLiteral)
		return js_ast.Expr{Loc: loc, Data: &js_ast.ENumber{Value: -value}}
	case js_lexer.TOpenBracket:
		// Array literal. "IsSingleLine" is cleared as soon as any newline is
		// seen between tokens so the printer can preserve the layout.
		p.lexer.Next()
		isSingleLine := !p.lexer.HasNewlineBefore
		items := []js_ast.Expr{}
		for p.lexer.Token != js_lexer.TCloseBracket {
			if len(items) > 0 {
				if p.lexer.HasNewlineBefore {
					isSingleLine = false
				}
				if !p.parseMaybeTrailingComma(js_lexer.TCloseBracket) {
					break
				}
				if p.lexer.HasNewlineBefore {
					isSingleLine = false
				}
			}
			item := p.parseExpr()
			items = append(items, item)
		}
		if p.lexer.HasNewlineBefore {
			isSingleLine = false
		}
		closeBracketLoc := p.lexer.Loc()
		p.lexer.Expect(js_lexer.TCloseBracket)
		return js_ast.Expr{Loc: loc, Data: &js_ast.EArray{
			Items:           items,
			IsSingleLine:    isSingleLine,
			CloseBracketLoc: closeBracketLoc,
		}}
	case js_lexer.TOpenBrace:
		// Object literal. Keys must be string literals.
		p.lexer.Next()
		isSingleLine := !p.lexer.HasNewlineBefore
		properties := []js_ast.Property{}
		duplicates := make(map[string]logger.Range)
		for p.lexer.Token != js_lexer.TCloseBrace {
			if len(properties) > 0 {
				if p.lexer.HasNewlineBefore {
					isSingleLine = false
				}
				if !p.parseMaybeTrailingComma(js_lexer.TCloseBrace) {
					break
				}
				if p.lexer.HasNewlineBefore {
					isSingleLine = false
				}
			}
			keyString := p.lexer.StringLiteral()
			keyRange := p.lexer.Range()
			key := js_ast.Expr{Loc: keyRange.Loc, Data: &js_ast.EString{Value: keyString}}
			p.lexer.Expect(js_lexer.TStringLiteral)
			// Warn about duplicate keys (skipped inside "node_modules")
			if !p.suppressWarningsAboutWeirdCode {
				keyText := helpers.UTF16ToString(keyString)
				if prevRange, ok := duplicates[keyText]; ok {
					p.log.AddIDWithNotes(logger.MsgID_JS_DuplicateObjectKey, logger.Warning, &p.tracker, keyRange,
						fmt.Sprintf("Duplicate key %q in object literal", keyText),
						[]logger.MsgData{p.tracker.MsgData(prevRange, fmt.Sprintf("The original key %q is here:", keyText))})
				} else {
					duplicates[keyText] = keyRange
				}
			}
			p.lexer.Expect(js_lexer.TColon)
			value := p.parseExpr()
			property := js_ast.Property{
				Kind:       js_ast.PropertyField,
				Loc:        keyRange.Loc,
				Key:        key,
				ValueOrNil: value,
			}
			// The key "__proto__" must not be a string literal in JavaScript because
			// that actually modifies the prototype of the object. This can be
			// avoided by using a computed property key instead of a string literal.
			if helpers.UTF16EqualsString(keyString, "__proto__") && !p.options.UnsupportedJSFeatures.Has(compat.ObjectExtensions) {
				property.Flags |= js_ast.PropertyIsComputed
			}
			properties = append(properties, property)
		}
		if p.lexer.HasNewlineBefore {
			isSingleLine = false
		}
		closeBraceLoc := p.lexer.Loc()
		p.lexer.Expect(js_lexer.TCloseBrace)
		return js_ast.Expr{Loc: loc, Data: &js_ast.EObject{
			Properties:    properties,
			IsSingleLine:  isSingleLine,
			CloseBraceLoc: closeBraceLoc,
		}}
	case js_lexer.TBigIntegerLiteral:
		// BigInt literals are only accepted when parsing "define" values
		if !p.options.IsForDefine {
			p.lexer.Unexpected()
		}
		value := p.lexer.Identifier
		p.lexer.Next()
		return js_ast.Expr{Loc: loc, Data: &js_ast.EBigInt{Value: value.String}}
	default:
		p.lexer.Unexpected()
		return js_ast.Expr{}
	}
}
// JSONOptions controls how JSON parsing behaves.
type JSONOptions struct {
	// Features that must be avoided in the output; used in parseExpr to decide
	// whether "__proto__" keys need the computed-property workaround
	UnsupportedJSFeatures compat.JSFeature
	// The JSON dialect; the strict js_lexer.JSON flavor rejects trailing commas
	Flavor js_lexer.JSONFlavor
	// Appended to error messages; ParseJSON defaults this to " in JSON"
	ErrorSuffix string
	// When true, BigInt literals are allowed (used for "define" values)
	IsForDefine bool
}
// ParseJSON parses an entire JSON source file into a JavaScript AST
// expression. It returns ok == false when the lexer reported a syntax error
// (which it signals internally via a js_lexer.LexerPanic panic). Any other
// panic is re-raised unchanged.
func ParseJSON(log logger.Log, source logger.Source, options JSONOptions) (result js_ast.Expr, ok bool) {
	ok = true
	defer func() {
		// The lexer reports errors by panicking; convert that into ok == false
		r := recover()
		if _, isLexerPanic := r.(js_lexer.LexerPanic); isLexerPanic {
			ok = false
		} else if r != nil {
			panic(r)
		}
	}()
	if options.ErrorSuffix == "" {
		options.ErrorSuffix = " in JSON"
	}
	p := &jsonParser{
		log:     log,
		source:  source,
		tracker: logger.MakeLineColumnTracker(&source),
		options: options,
		lexer:   js_lexer.NewLexerJSON(log, source, options.Flavor, options.ErrorSuffix),
		// Don't warn about weird code (e.g. duplicate keys) in third-party
		// files inside "node_modules"
		suppressWarningsAboutWeirdCode: helpers.IsInsideNodeModules(source.KeyPath.Text),
	}
	result = p.parseExpr()
	// The whole file must consist of exactly one JSON value
	p.lexer.Expect(js_lexer.TEndOfFile)
	return
}
// isValidJSON reports whether the given AST expression is representable as
// JSON: null, booleans, strings, numbers, arrays of valid JSON values, and
// objects whose properties are plain non-computed string-keyed fields with
// valid JSON values.
func isValidJSON(value js_ast.Expr) bool {
	switch data := value.Data.(type) {
	case *js_ast.ENull, *js_ast.EBoolean, *js_ast.EString, *js_ast.ENumber:
		return true
	case *js_ast.EArray:
		// Every element must itself be valid JSON
		for _, element := range data.Items {
			if !isValidJSON(element) {
				return false
			}
		}
		return true
	case *js_ast.EObject:
		for _, prop := range data.Properties {
			// Only plain fields qualify (no getters/setters/spread)
			if prop.Kind != js_ast.PropertyField || prop.Flags.Has(js_ast.PropertyIsComputed) {
				return false
			}
			// Keys must be string literals
			if _, isString := prop.Key.Data.(*js_ast.EString); !isString {
				return false
			}
			if !isValidJSON(prop.ValueOrNil) {
				return false
			}
		}
		return true
	default:
		return false
	}
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/js_parser/js_parser_test.go | internal/js_parser/js_parser_test.go | package js_parser
import (
"fmt"
"strings"
"testing"
"github.com/evanw/esbuild/internal/ast"
"github.com/evanw/esbuild/internal/compat"
"github.com/evanw/esbuild/internal/config"
"github.com/evanw/esbuild/internal/helpers"
"github.com/evanw/esbuild/internal/js_ast"
"github.com/evanw/esbuild/internal/js_printer"
"github.com/evanw/esbuild/internal/logger"
"github.com/evanw/esbuild/internal/renamer"
"github.com/evanw/esbuild/internal/test"
)
// expectParseErrorCommon parses "contents" with the given options inside a
// subtest and asserts that the concatenated log output matches "expected"
// exactly.
func expectParseErrorCommon(t *testing.T, contents string, expected string, options config.Options) {
	t.Helper()
	t.Run(contents, func(t *testing.T) {
		t.Helper()
		log := logger.NewDeferLog(logger.DeferLogNoVerboseOrDebug, nil)
		Parse(log, test.SourceForTest(contents), OptionsFromConfig(&options))
		var sb strings.Builder
		for _, msg := range log.Done() {
			sb.WriteString(msg.String(logger.OutputOptions{}, logger.TerminalInfo{}))
		}
		test.AssertEqualWithDiff(t, sb.String(), expected)
	})
}
func expectParseError(t *testing.T, contents string, expected string) {
t.Helper()
expectParseErrorCommon(t, contents, expected, config.Options{})
}
func expectParseErrorTarget(t *testing.T, esVersion int, contents string, expected string) {
t.Helper()
expectParseErrorCommon(t, contents, expected, config.Options{
UnsupportedJSFeatures: compat.UnsupportedJSFeatures(map[compat.Engine]compat.Semver{
compat.ES: {Parts: []int{esVersion}},
}),
})
}
// expectPrintedWithUnsupportedFeatures asserts the printed output when the
// given JS features must be avoided (i.e. lowered).
func expectPrintedWithUnsupportedFeatures(t *testing.T, unsupportedJSFeatures compat.JSFeature, contents string, expected string) {
	t.Helper()
	options := config.Options{UnsupportedJSFeatures: unsupportedJSFeatures}
	expectPrintedCommon(t, contents, expected, options)
}
// expectParseErrorWithUnsupportedFeatures asserts the log output when the
// given JS features must be avoided.
func expectParseErrorWithUnsupportedFeatures(t *testing.T, unsupportedJSFeatures compat.JSFeature, contents string, expected string) {
	t.Helper()
	options := config.Options{UnsupportedJSFeatures: unsupportedJSFeatures}
	expectParseErrorCommon(t, contents, expected, options)
}
// expectPrintedCommon parses "contents" with the given options inside a
// subtest, fails if any non-warning log messages were produced or parsing
// failed, then prints the AST and compares the output against "expected".
func expectPrintedCommon(t *testing.T, contents string, expected string, options config.Options) {
	t.Helper()
	t.Run(contents, func(t *testing.T) {
		t.Helper()
		log := logger.NewDeferLog(logger.DeferLogNoVerboseOrDebug, nil)
		// Tests compare against output without the injected runtime library
		options.OmitRuntimeForTests = true
		tree, ok := Parse(log, test.SourceForTest(contents), OptionsFromConfig(&options))
		msgs := log.Done()
		var text strings.Builder
		for _, msg := range msgs {
			// Warnings are tolerated here; only errors fail the test
			if msg.Kind != logger.Warning {
				text.WriteString(msg.String(logger.OutputOptions{}, logger.TerminalInfo{}))
			}
		}
		test.AssertEqualWithDiff(t, text.String(), "")
		if !ok {
			t.Fatal("Parse error")
		}
		// Print with a no-op renamer so symbol names stay as written
		symbols := ast.NewSymbolMap(1)
		symbols.SymbolsForSource[0] = tree.Symbols
		r := renamer.NewNoOpRenamer(symbols)
		js := js_printer.Print(tree, symbols, r, js_printer.Options{
			UnsupportedFeatures: options.UnsupportedJSFeatures,
			ASCIIOnly:           options.ASCIIOnly,
		}).JS
		test.AssertEqualWithDiff(t, string(js), expected)
	})
}
func expectPrinted(t *testing.T, contents string, expected string) {
t.Helper()
expectPrintedCommon(t, contents, expected, config.Options{})
}
func expectPrintedMangle(t *testing.T, contents string, expected string) {
t.Helper()
expectPrintedCommon(t, contents, expected, config.Options{
MinifySyntax: true,
})
}
// expectPrintedNormalAndMangle checks the printed output both without and
// with syntax minification enabled.
func expectPrintedNormalAndMangle(t *testing.T, contents string, normal string, mangle string) {
	// Mark as a helper so failures point at the caller, consistent with
	// every other expect* helper in this file
	t.Helper()
	expectPrinted(t, contents, normal)
	expectPrintedMangle(t, contents, mangle)
}
func expectPrintedTarget(t *testing.T, esVersion int, contents string, expected string) {
t.Helper()
expectPrintedCommon(t, contents, expected, config.Options{
UnsupportedJSFeatures: compat.UnsupportedJSFeatures(map[compat.Engine]compat.Semver{
compat.ES: {Parts: []int{esVersion}},
}),
})
}
func expectPrintedMangleTarget(t *testing.T, esVersion int, contents string, expected string) {
t.Helper()
expectPrintedCommon(t, contents, expected, config.Options{
UnsupportedJSFeatures: compat.UnsupportedJSFeatures(map[compat.Engine]compat.Semver{
compat.ES: {Parts: []int{esVersion}},
}),
MinifySyntax: true,
})
}
func expectPrintedASCII(t *testing.T, contents string, expected string) {
t.Helper()
expectPrintedCommon(t, contents, expected, config.Options{
ASCIIOnly: true,
})
}
func expectPrintedTargetASCII(t *testing.T, esVersion int, contents string, expected string) {
t.Helper()
expectPrintedCommon(t, contents, expected, config.Options{
UnsupportedJSFeatures: compat.UnsupportedJSFeatures(map[compat.Engine]compat.Semver{
compat.ES: {Parts: []int{esVersion}},
}),
ASCIIOnly: true,
})
}
func expectParseErrorTargetASCII(t *testing.T, esVersion int, contents string, expected string) {
t.Helper()
expectParseErrorCommon(t, contents, expected, config.Options{
UnsupportedJSFeatures: compat.UnsupportedJSFeatures(map[compat.Engine]compat.Semver{
compat.ES: {Parts: []int{esVersion}},
}),
ASCIIOnly: true,
})
}
func expectParseErrorJSX(t *testing.T, contents string, expected string) {
t.Helper()
expectParseErrorCommon(t, contents, expected, config.Options{
JSX: config.JSXOptions{
Parse: true,
},
})
}
func expectPrintedJSX(t *testing.T, contents string, expectedPreserve string, expectedTransform string) {
t.Helper()
expectPrintedCommon(t, contents, expectedPreserve, config.Options{
JSX: config.JSXOptions{
Parse: true,
Preserve: true,
},
})
expectPrintedCommon(t, contents, expectedTransform, config.Options{
JSX: config.JSXOptions{
Parse: true,
},
})
}
func expectPrintedJSXSideEffects(t *testing.T, contents string, expected string) {
t.Helper()
expectPrintedCommon(t, contents, expected, config.Options{
JSX: config.JSXOptions{
Parse: true,
SideEffects: true,
},
})
}
func expectPrintedMangleJSX(t *testing.T, contents string, expected string) {
t.Helper()
expectPrintedCommon(t, contents, expected, config.Options{
MinifySyntax: true,
JSX: config.JSXOptions{
Parse: true,
},
})
}
// JSXAutomaticTestOptions bundles the JSX "automatic runtime" settings that
// vary between tests; see expectPrintedJSXAutomatic for how they map onto
// config.JSXOptions.
type JSXAutomaticTestOptions struct {
	Development            bool
	ImportSource           string
	OmitJSXRuntimeForTests bool
	SideEffects            bool
}
// expectParseErrorJSXAutomatic asserts the log output with the JSX
// "automatic runtime" enabled, configured via the given test options.
func expectParseErrorJSXAutomatic(t *testing.T, options JSXAutomaticTestOptions, contents string, expected string) {
	t.Helper()
	jsx := config.JSXOptions{
		AutomaticRuntime: true,
		Parse:            true,
		Development:      options.Development,
		ImportSource:     options.ImportSource,
		SideEffects:      options.SideEffects,
	}
	expectParseErrorCommon(t, contents, expected, config.Options{
		OmitJSXRuntimeForTests: options.OmitJSXRuntimeForTests,
		JSX:                    jsx,
	})
}
// expectPrintedJSXAutomatic asserts the printed output with the JSX
// "automatic runtime" enabled, configured via the given test options.
func expectPrintedJSXAutomatic(t *testing.T, options JSXAutomaticTestOptions, contents string, expected string) {
	t.Helper()
	jsx := config.JSXOptions{
		AutomaticRuntime: true,
		Parse:            true,
		Development:      options.Development,
		ImportSource:     options.ImportSource,
		SideEffects:      options.SideEffects,
	}
	expectPrintedCommon(t, contents, expected, config.Options{
		OmitJSXRuntimeForTests: options.OmitJSXRuntimeForTests,
		JSX:                    jsx,
	})
}
// TestUnOp checks that an unused "void 0" statement is dropped while
// "void x" (whose operand may matter) is kept in the output.
func TestUnOp(t *testing.T) {
	// This was important to someone for a very obscure reason. See
	// https://github.com/evanw/esbuild/issues/4041 for more info.
	expectPrinted(t, "let x; void 0; x", "let x;\nx;\n")
	expectPrinted(t, "let x; void x; x", "let x;\nvoid x;\nx;\n")
}
// TestBinOp checks operator associativity in the printer for every entry in
// the operator table: parentheses that match an operator's natural grouping
// are dropped while parentheses that override it are preserved.
func TestBinOp(t *testing.T) {
	for code, entry := range js_ast.OpTable {
		opCode := js_ast.OpCode(code)
		if opCode.IsLeftAssociative() {
			op := entry.Text
			// "a op b op c" groups as "(a op b) op c", so only the
			// right-hand parentheses are significant
			expectPrinted(t, "a "+op+" b "+op+" c", "a "+op+" b "+op+" c;\n")
			expectPrinted(t, "(a "+op+" b) "+op+" c", "a "+op+" b "+op+" c;\n")
			expectPrinted(t, "a "+op+" (b "+op+" c)", "a "+op+" (b "+op+" c);\n")
		}
		if opCode.IsRightAssociative() {
			op := entry.Text
			expectPrinted(t, "a "+op+" b "+op+" c", "a "+op+" b "+op+" c;\n")
			// Avoid errors about invalid assignment targets
			if opCode.BinaryAssignTarget() == js_ast.AssignTargetNone {
				expectPrinted(t, "(a "+op+" b) "+op+" c", "(a "+op+" b) "+op+" c;\n")
			}
			expectPrinted(t, "a "+op+" (b "+op+" c)", "a "+op+" b "+op+" c;\n")
		}
	}
}
// TestComments checks comment handling around newline-sensitive keywords
// ("throw"/"return") and the legacy HTML comment syntax ("<!--" and "-->").
func TestComments(t *testing.T) {
	// A newline after "throw" is an error, and comments containing a newline
	// count as one
	expectParseError(t, "throw //\n x", "<stdin>: ERROR: Unexpected newline after \"throw\"\n")
	expectParseError(t, "throw /**/\n x", "<stdin>: ERROR: Unexpected newline after \"throw\"\n")
	expectParseError(t, "throw <!--\n x",
		`<stdin>: ERROR: Unexpected newline after "throw"
<stdin>: WARNING: Treating "<!--" as the start of a legacy HTML single-line comment
`)
	expectParseError(t, "throw -->\n x", "<stdin>: ERROR: Unexpected \">\"\n")
	// Legacy HTML comments are not allowed in ECMAScript modules
	expectParseError(t, "export {}\n<!--", `<stdin>: ERROR: Legacy HTML single-line comments are not allowed in ECMAScript modules
<stdin>: NOTE: This file is considered to be an ECMAScript module because of the "export" keyword here:
<stdin>: WARNING: Treating "<!--" as the start of a legacy HTML single-line comment
`)
	expectParseError(t, "export {}\n-->", `<stdin>: ERROR: Legacy HTML single-line comments are not allowed in ECMAScript modules
<stdin>: NOTE: This file is considered to be an ECMAScript module because of the "export" keyword here:
<stdin>: WARNING: Treating "-->" as the start of a legacy HTML single-line comment
`)
	// Unlike "throw", "return" gets an automatically-inserted semicolon
	expectPrinted(t, "return //\n x", "return;\nx;\n")
	expectPrinted(t, "return /**/\n x", "return;\nx;\n")
	expectPrinted(t, "return <!--\n x", "return;\nx;\n")
	expectPrinted(t, "-->\nx", "x;\n")
	expectPrinted(t, "x\n-->\ny", "x;\ny;\n")
	expectPrinted(t, "x\n -->\ny", "x;\ny;\n")
	expectPrinted(t, "x\n/**/-->\ny", "x;\ny;\n")
	expectPrinted(t, "x/*\n*/-->\ny", "x;\ny;\n")
	expectPrinted(t, "x\n/**/ /**/-->\ny", "x;\ny;\n")
	// "x-->y" lexes as "x-- > y", not as a comment
	expectPrinted(t, "if(x-->y)z", "if (x-- > y) z;\n")
}
// TestStrictMode checks the parser's enforcement of strict-mode-only rules.
// Strict mode can be triggered explicitly by a "use strict" directive, or
// implicitly by ECMAScript module syntax ("import"/"export"/"import.meta"/
// top-level "await") or by a class body. Each error is expected to carry a
// note explaining exactly which construct triggered strict mode.
func TestStrictMode(t *testing.T) {
	useStrict := "<stdin>: NOTE: Strict mode is triggered by the \"use strict\" directive here:\n"
	// Directive detection: a template literal is not a directive, and legal
	// ("//!"/"/*!") comments before the directive must not hide it
	expectPrinted(t, "'use strict'", "\"use strict\";\n")
	expectPrinted(t, "`use strict`", "`use strict`;\n")
	expectPrinted(t, "//! @legal comment\n 'use strict'", "\"use strict\";\n//! @legal comment\n")
	expectPrinted(t, "/*! @legal comment */ 'use strict'", "\"use strict\";\n/*! @legal comment */\n")
	expectPrinted(t, "function f() { //! @legal comment\n 'use strict' }", "function f() {\n  //! @legal comment\n  \"use strict\";\n}\n")
	expectPrinted(t, "function f() { /*! @legal comment */ 'use strict' }", "function f() {\n  /*! @legal comment */\n  \"use strict\";\n}\n")
	expectParseError(t, "//! @legal comment\n 'use strict'", "")
	expectParseError(t, "/*! @legal comment */ 'use strict'", "")
	expectParseError(t, "function f() { //! @legal comment\n 'use strict' }", "")
	expectParseError(t, "function f() { /*! @legal comment */ 'use strict' }", "")
	// A "use strict" directive is disallowed in any function whose parameter
	// list is non-simple (destructuring, defaults, or rest)
	nonSimple := "<stdin>: ERROR: Cannot use a \"use strict\" directive in a function with a non-simple parameter list\n"
	expectParseError(t, "function f() { 'use strict' }", "")
	expectParseError(t, "function f(x) { 'use strict' }", "")
	expectParseError(t, "function f([x]) { 'use strict' }", nonSimple)
	expectParseError(t, "function f({x}) { 'use strict' }", nonSimple)
	expectParseError(t, "function f(x = 1) { 'use strict' }", nonSimple)
	expectParseError(t, "function f(x, ...y) { 'use strict' }", nonSimple)
	expectParseError(t, "(function() { 'use strict' })", "")
	expectParseError(t, "(function(x) { 'use strict' })", "")
	expectParseError(t, "(function([x]) { 'use strict' })", nonSimple)
	expectParseError(t, "(function({x}) { 'use strict' })", nonSimple)
	expectParseError(t, "(function(x = 1) { 'use strict' })", nonSimple)
	expectParseError(t, "(function(x, ...y) { 'use strict' })", nonSimple)
	expectParseError(t, "() => { 'use strict' }", "")
	expectParseError(t, "(x) => { 'use strict' }", "")
	expectParseError(t, "([x]) => { 'use strict' }", nonSimple)
	expectParseError(t, "({x}) => { 'use strict' }", nonSimple)
	expectParseError(t, "(x = 1) => { 'use strict' }", nonSimple)
	expectParseError(t, "(x, ...y) => { 'use strict' }", nonSimple)
	expectParseError(t, "(x, ...y) => { //! @license comment\n 'use strict' }", nonSimple)
	// Legacy octal escape sequences in strings: allowed in sloppy mode,
	// rejected in strict mode and in modules (with different notes)
	why := "<stdin>: NOTE: This file is considered to be an ECMAScript module because of the \"export\" keyword here:\n"
	expectPrinted(t, "let x = '\\0'", "let x = \"\\0\";\n")
	expectPrinted(t, "let x = '\\00'", "let x = \"\\0\";\n")
	expectPrinted(t, "'use strict'; let x = '\\0'", "\"use strict\";\nlet x = \"\\0\";\n")
	expectPrinted(t, "let x = '\\0'; export {}", "let x = \"\\0\";\nexport {};\n")
	expectParseError(t, "'use strict'; let x = '\\00'", "<stdin>: ERROR: Legacy octal escape sequences cannot be used in strict mode\n"+useStrict)
	expectParseError(t, "'use strict'; let x = '\\08'", "<stdin>: ERROR: Legacy octal escape sequences cannot be used in strict mode\n"+useStrict)
	expectParseError(t, "'use strict'; let x = '\\008'", "<stdin>: ERROR: Legacy octal escape sequences cannot be used in strict mode\n"+useStrict)
	expectParseError(t, "let x = '\\00'; export {}", "<stdin>: ERROR: Legacy octal escape sequences cannot be used in an ECMAScript module\n"+why)
	expectParseError(t, "let x = '\\09'; export {}", "<stdin>: ERROR: Legacy octal escape sequences cannot be used in an ECMAScript module\n"+why)
	expectParseError(t, "let x = '\\009'; export {}", "<stdin>: ERROR: Legacy octal escape sequences cannot be used in an ECMAScript module\n"+why)
	// The same rules apply when the string appears in directive position
	expectPrinted(t, "'\\0'", "\"\\0\";\n")
	expectPrinted(t, "'\\00'", "\"\\0\";\n")
	expectPrinted(t, "'use strict'; '\\0'", "\"use strict\";\n\"\\0\";\n")
	expectParseError(t, "'use strict'; '\\00'", "<stdin>: ERROR: Legacy octal escape sequences cannot be used in strict mode\n"+useStrict)
	expectParseError(t, "'use strict'; '\\08'", "<stdin>: ERROR: Legacy octal escape sequences cannot be used in strict mode\n"+useStrict)
	expectParseError(t, "'use strict'; '\\008'", "<stdin>: ERROR: Legacy octal escape sequences cannot be used in strict mode\n"+useStrict)
	// Note: the directive applies retroactively to earlier directives too
	expectParseError(t, "'\\00'; 'use strict';", "<stdin>: ERROR: Legacy octal escape sequences cannot be used in strict mode\n"+useStrict)
	expectParseError(t, "'\\08'; 'use strict';", "<stdin>: ERROR: Legacy octal escape sequences cannot be used in strict mode\n"+useStrict)
	expectParseError(t, "'\\008'; 'use strict';", "<stdin>: ERROR: Legacy octal escape sequences cannot be used in strict mode\n"+useStrict)
	expectParseError(t, "'\\00'; export {}", "<stdin>: ERROR: Legacy octal escape sequences cannot be used in an ECMAScript module\n"+why)
	expectParseError(t, "'\\09'; export {}", "<stdin>: ERROR: Legacy octal escape sequences cannot be used in an ECMAScript module\n"+why)
	expectParseError(t, "'\\009'; export {}", "<stdin>: ERROR: Legacy octal escape sequences cannot be used in an ECMAScript module\n"+why)
	// "with" statements are forbidden in strict mode and modules
	expectPrinted(t, "with (x) y", "with (x) y;\n")
	expectParseError(t, "'use strict'; with (x) y", "<stdin>: ERROR: With statements cannot be used in strict mode\n"+useStrict)
	expectParseError(t, "with (x) y; export {}", "<stdin>: ERROR: With statements cannot be used in an ECMAScript module\n"+why)
	// "delete" of a bare identifier is forbidden in strict mode and modules
	expectPrinted(t, "delete x", "delete x;\n")
	expectParseError(t, "'use strict'; delete x", "<stdin>: ERROR: Delete of a bare identifier cannot be used in strict mode\n"+useStrict)
	expectParseError(t, "delete x; export {}", "<stdin>: ERROR: Delete of a bare identifier cannot be used in an ECMAScript module\n"+why)
	// Variable initializers in for-in heads (legacy Annex B) are forbidden in
	// strict mode and modules; in sloppy mode esbuild hoists the assignment
	expectPrinted(t, "for (var x = y in z) ;", "x = y;\nfor (var x in z) ;\n")
	expectParseError(t, "'use strict'; for (var x = y in z) ;",
		"<stdin>: ERROR: Variable initializers inside for-in loops cannot be used in strict mode\n"+useStrict)
	expectParseError(t, "for (var x = y in z) ; export {}",
		"<stdin>: ERROR: Variable initializers inside for-in loops cannot be used in an ECMAScript module\n"+why)
	// Duplicate parameter names: allowed for sloppy non-arrow functions with
	// simple parameter lists, forbidden everywhere else
	expectPrinted(t, "function f(a, a) {}", "function f(a, a) {\n}\n")
	expectPrinted(t, "(function(a, a) {})", "(function(a, a) {\n});\n")
	expectPrinted(t, "({ f: function(a, a) {} })", "({ f: function(a, a) {\n} });\n")
	expectPrinted(t, "({ f: function*(a, a) {} })", "({ f: function* (a, a) {\n} });\n")
	expectPrinted(t, "({ f: async function(a, a) {} })", "({ f: async function(a, a) {\n} });\n")
	bindingError := "<stdin>: ERROR: \"a\" cannot be bound multiple times in the same parameter list\n" +
		"<stdin>: NOTE: The name \"a\" was originally bound here:\n"
	expectParseError(t, "function f(a, a) { 'use strict' }", bindingError)
	expectParseError(t, "function *f(a, a) { 'use strict' }", bindingError)
	expectParseError(t, "async function f(a, a) { 'use strict' }", bindingError)
	expectParseError(t, "(function(a, a) { 'use strict' })", bindingError)
	expectParseError(t, "(function*(a, a) { 'use strict' })", bindingError)
	expectParseError(t, "(async function(a, a) { 'use strict' })", bindingError)
	expectParseError(t, "function f(a, [a]) {}", bindingError)
	expectParseError(t, "function f([a], a) {}", bindingError)
	expectParseError(t, "'use strict'; function f(a, a) {}", bindingError)
	expectParseError(t, "'use strict'; (function(a, a) {})", bindingError)
	expectParseError(t, "'use strict'; ((a, a) => {})", bindingError)
	expectParseError(t, "function f(a, a) {}; export {}", bindingError)
	expectParseError(t, "(function(a, a) {}); export {}", bindingError)
	expectParseError(t, "(function(a, [a]) {})", bindingError)
	// Object methods and arrows are always strict about duplicates
	expectParseError(t, "({ f(a, a) {} })", bindingError)
	expectParseError(t, "({ *f(a, a) {} })", bindingError)
	expectParseError(t, "({ async f(a, a) {} })", bindingError)
	expectParseError(t, "(a, a) => {}", bindingError)
	// Function declarations nested in "if" bodies or labels (Annex B) are
	// forbidden in strict mode and modules
	expectParseError(t, "'use strict'; if (0) function f() {}",
		"<stdin>: ERROR: Function declarations inside if statements cannot be used in strict mode\n"+useStrict)
	expectParseError(t, "'use strict'; if (0) ; else function f() {}",
		"<stdin>: ERROR: Function declarations inside if statements cannot be used in strict mode\n"+useStrict)
	expectParseError(t, "'use strict'; x: function f() {}",
		"<stdin>: ERROR: Function declarations inside labels cannot be used in strict mode\n"+useStrict)
	expectParseError(t, "if (0) function f() {} export {}",
		"<stdin>: ERROR: Function declarations inside if statements cannot be used in an ECMAScript module\n"+why)
	expectParseError(t, "if (0) ; else function f() {} export {}",
		"<stdin>: ERROR: Function declarations inside if statements cannot be used in an ECMAScript module\n"+why)
	expectParseError(t, "x: function f() {} export {}",
		"<stdin>: ERROR: Function declarations inside labels cannot be used in an ECMAScript module\n"+why)
	// "eval" and "arguments" cannot be assignment targets in strict mode
	expectPrinted(t, "eval++", "eval++;\n")
	expectPrinted(t, "eval = 0", "eval = 0;\n")
	expectPrinted(t, "eval += 0", "eval += 0;\n")
	expectPrinted(t, "[eval] = 0", "[eval] = 0;\n")
	expectPrinted(t, "arguments++", "arguments++;\n")
	expectPrinted(t, "arguments = 0", "arguments = 0;\n")
	expectPrinted(t, "arguments += 0", "arguments += 0;\n")
	expectPrinted(t, "[arguments] = 0", "[arguments] = 0;\n")
	expectParseError(t, "'use strict'; eval++", "<stdin>: ERROR: Invalid assignment target\n")
	expectParseError(t, "'use strict'; eval = 0", "<stdin>: ERROR: Invalid assignment target\n")
	expectParseError(t, "'use strict'; eval += 0", "<stdin>: ERROR: Invalid assignment target\n")
	expectParseError(t, "'use strict'; [eval] = 0", "<stdin>: ERROR: Invalid assignment target\n")
	expectParseError(t, "'use strict'; arguments++", "<stdin>: ERROR: Invalid assignment target\n")
	expectParseError(t, "'use strict'; arguments = 0", "<stdin>: ERROR: Invalid assignment target\n")
	expectParseError(t, "'use strict'; arguments += 0", "<stdin>: ERROR: Invalid assignment target\n")
	expectParseError(t, "'use strict'; [arguments] = 0", "<stdin>: ERROR: Invalid assignment target\n")
	// "eval" and "arguments" cannot be declared in strict mode (note that the
	// directive inside the function body also applies to its own name/params)
	evalDecl := "<stdin>: ERROR: Declarations with the name \"eval\" cannot be used in strict mode\n" + useStrict
	argsDecl := "<stdin>: ERROR: Declarations with the name \"arguments\" cannot be used in strict mode\n" + useStrict
	expectPrinted(t, "function eval() {}", "function eval() {\n}\n")
	expectPrinted(t, "function arguments() {}", "function arguments() {\n}\n")
	expectPrinted(t, "function f(eval) {}", "function f(eval) {\n}\n")
	expectPrinted(t, "function f(arguments) {}", "function f(arguments) {\n}\n")
	expectPrinted(t, "({ f(eval) {} })", "({ f(eval) {\n} });\n")
	expectPrinted(t, "({ f(arguments) {} })", "({ f(arguments) {\n} });\n")
	expectParseError(t, "'use strict'; function eval() {}", evalDecl)
	expectParseError(t, "'use strict'; function arguments() {}", argsDecl)
	expectParseError(t, "'use strict'; function f(eval) {}", evalDecl)
	expectParseError(t, "'use strict'; function f(arguments) {}", argsDecl)
	expectParseError(t, "function eval() { 'use strict' }", evalDecl)
	expectParseError(t, "function arguments() { 'use strict' }", argsDecl)
	expectParseError(t, "function f(eval) { 'use strict' }", evalDecl)
	expectParseError(t, "function f(arguments) { 'use strict' }", argsDecl)
	expectParseError(t, "({ f(eval) { 'use strict' } })", evalDecl)
	expectParseError(t, "({ f(arguments) { 'use strict' } })", argsDecl)
	expectParseError(t, "'use strict'; class eval {}", evalDecl)
	expectParseError(t, "'use strict'; class arguments {}", argsDecl)
	// Future reserved words like "protected" (also checked via the escaped
	// spelling "protecte\u0064", which must be treated the same)
	expectPrinted(t, "let protected", "let protected;\n")
	expectPrinted(t, "let protecte\\u0064", "let protected;\n")
	expectPrinted(t, "let x = protected", "let x = protected;\n")
	expectPrinted(t, "let x = protecte\\u0064", "let x = protected;\n")
	expectParseError(t, "'use strict'; let protected",
		"<stdin>: ERROR: \"protected\" is a reserved word and cannot be used in strict mode\n"+useStrict)
	expectParseError(t, "'use strict'; let protecte\\u0064",
		"<stdin>: ERROR: \"protected\" is a reserved word and cannot be used in strict mode\n"+useStrict)
	expectParseError(t, "'use strict'; let x = protected",
		"<stdin>: ERROR: \"protected\" is a reserved word and cannot be used in strict mode\n"+useStrict)
	expectParseError(t, "'use strict'; let x = protecte\\u0064",
		"<stdin>: ERROR: \"protected\" is a reserved word and cannot be used in strict mode\n"+useStrict)
	expectParseError(t, "'use strict'; protected: 0",
		"<stdin>: ERROR: \"protected\" is a reserved word and cannot be used in strict mode\n"+useStrict)
	expectParseError(t, "'use strict'; protecte\\u0064: 0",
		"<stdin>: ERROR: \"protected\" is a reserved word and cannot be used in strict mode\n"+useStrict)
	expectParseError(t, "'use strict'; function protected() {}",
		"<stdin>: ERROR: \"protected\" is a reserved word and cannot be used in strict mode\n"+useStrict)
	expectParseError(t, "'use strict'; function protecte\\u0064() {}",
		"<stdin>: ERROR: \"protected\" is a reserved word and cannot be used in strict mode\n"+useStrict)
	expectParseError(t, "'use strict'; (function protected() {})",
		"<stdin>: ERROR: \"protected\" is a reserved word and cannot be used in strict mode\n"+useStrict)
	expectParseError(t, "'use strict'; (function protecte\\u0064() {})",
		"<stdin>: ERROR: \"protected\" is a reserved word and cannot be used in strict mode\n"+useStrict)
	// Legacy octal number literals (including "08"-style) are forbidden in
	// strict mode
	expectPrinted(t, "0123", "83;\n")
	expectPrinted(t, "({0123: 4})", "({ 83: 4 });\n")
	expectPrinted(t, "let {0123: x} = y", "let { 83: x } = y;\n")
	expectParseError(t, "'use strict'; 0123",
		"<stdin>: ERROR: Legacy octal literals cannot be used in strict mode\n"+useStrict)
	expectParseError(t, "'use strict'; ({0123: 4})",
		"<stdin>: ERROR: Legacy octal literals cannot be used in strict mode\n"+useStrict)
	expectParseError(t, "'use strict'; let {0123: x} = y",
		"<stdin>: ERROR: Legacy octal literals cannot be used in strict mode\n"+useStrict)
	expectParseError(t, "'use strict'; 08",
		"<stdin>: ERROR: Legacy octal literals cannot be used in strict mode\n"+useStrict)
	expectParseError(t, "'use strict'; ({08: 4})",
		"<stdin>: ERROR: Legacy octal literals cannot be used in strict mode\n"+useStrict)
	expectParseError(t, "'use strict'; let {08: x} = y",
		"<stdin>: ERROR: Legacy octal literals cannot be used in strict mode\n"+useStrict)
	// Strict mode respects scope boundaries: a strict function or class body
	// must not leak strictness into the surrounding sloppy code, and a
	// non-directive block-level "use strict" string has no effect
	classNote := "<stdin>: NOTE: All code inside a class is implicitly in strict mode\n"
	expectPrinted(t, "function f() { 'use strict' } with (x) y", "function f() {\n  \"use strict\";\n}\nwith (x) y;\n")
	expectPrinted(t, "with (x) y; function f() { 'use strict' }", "with (x) y;\nfunction f() {\n  \"use strict\";\n}\n")
	expectPrinted(t, "class f {} with (x) y", "class f {\n}\nwith (x) y;\n")
	expectPrinted(t, "with (x) y; class f {}", "with (x) y;\nclass f {\n}\n")
	expectPrinted(t, "`use strict`; with (x) y", "`use strict`;\nwith (x) y;\n")
	expectPrinted(t, "{ 'use strict'; with (x) y }", "{\n  \"use strict\";\n  with (x) y;\n}\n")
	expectPrinted(t, "if (0) { 'use strict'; with (x) y }", "if (0) {\n  \"use strict\";\n  with (x) y;\n}\n")
	expectPrinted(t, "while (0) { 'use strict'; with (x) y }", "while (0) {\n  \"use strict\";\n  with (x) y;\n}\n")
	expectPrinted(t, "try { 'use strict'; with (x) y } catch {}", "try {\n  \"use strict\";\n  with (x) y;\n} catch {\n}\n")
	expectPrinted(t, "try {} catch { 'use strict'; with (x) y }", "try {\n} catch {\n  \"use strict\";\n  with (x) y;\n}\n")
	expectPrinted(t, "try {} finally { 'use strict'; with (x) y }", "try {\n} finally {\n  \"use strict\";\n  with (x) y;\n}\n")
	expectParseError(t, "\"use strict\"; with (x) y", "<stdin>: ERROR: With statements cannot be used in strict mode\n"+useStrict)
	expectParseError(t, "function f() { 'use strict'; with (x) y }", "<stdin>: ERROR: With statements cannot be used in strict mode\n"+useStrict)
	expectParseError(t, "function f() { 'use strict'; function y() { with (x) y } }", "<stdin>: ERROR: With statements cannot be used in strict mode\n"+useStrict)
	expectParseError(t, "class f { x() { with (x) y } }", "<stdin>: ERROR: With statements cannot be used in strict mode\n"+classNote)
	expectParseError(t, "class f { x() { function y() { with (x) y } } }", "<stdin>: ERROR: With statements cannot be used in strict mode\n"+classNote)
	expectParseError(t, "class f { x() { function protected() {} } }", "<stdin>: ERROR: \"protected\" is a reserved word and cannot be used in strict mode\n"+classNote)
	// Reserved words are also errors in modules, with the module note
	reservedWordExport := "<stdin>: ERROR: \"protected\" is a reserved word and cannot be used in an ECMAScript module\n" +
		why
	expectParseError(t, "var protected; export {}", reservedWordExport)
	expectParseError(t, "class protected {} export {}", reservedWordExport)
	expectParseError(t, "(class protected {}); export {}", reservedWordExport)
	expectParseError(t, "function protected() {} export {}", reservedWordExport)
	expectParseError(t, "(function protected() {}); export {}", reservedWordExport)
	// Each module-mode trigger has its own explanatory note; a dynamic
	// "import()" call alone does NOT make the file a module
	importMeta := "<stdin>: ERROR: With statements cannot be used in an ECMAScript module\n" +
		"<stdin>: NOTE: This file is considered to be an ECMAScript module because of the use of \"import.meta\" here:\n"
	importStatement := "<stdin>: ERROR: With statements cannot be used in an ECMAScript module\n" +
		"<stdin>: NOTE: This file is considered to be an ECMAScript module because of the \"import\" keyword here:\n"
	exportKeyword := "<stdin>: ERROR: With statements cannot be used in an ECMAScript module\n" +
		"<stdin>: NOTE: This file is considered to be an ECMAScript module because of the \"export\" keyword here:\n"
	tlaKeyword := "<stdin>: ERROR: With statements cannot be used in an ECMAScript module\n" +
		"<stdin>: NOTE: This file is considered to be an ECMAScript module because of the top-level \"await\" keyword here:\n"
	expectPrinted(t, "import(x); with (y) z", "import(x);\nwith (y) z;\n")
	expectPrinted(t, "import('x'); with (y) z", "import(\"x\");\nwith (y) z;\n")
	expectPrinted(t, "with (y) z; import(x)", "with (y) z;\nimport(x);\n")
	expectPrinted(t, "with (y) z; import('x')", "with (y) z;\nimport(\"x\");\n")
	expectPrinted(t, "(import(x)); with (y) z", "import(x);\nwith (y) z;\n")
	expectPrinted(t, "(import('x')); with (y) z", "import(\"x\");\nwith (y) z;\n")
	expectPrinted(t, "with (y) z; (import(x))", "with (y) z;\nimport(x);\n")
	expectPrinted(t, "with (y) z; (import('x'))", "with (y) z;\nimport(\"x\");\n")
	expectParseError(t, "import.meta; with (y) z", importMeta)
	expectParseError(t, "with (y) z; import.meta", importMeta)
	expectParseError(t, "(import.meta); with (y) z", importMeta)
	expectParseError(t, "with (y) z; (import.meta)", importMeta)
	expectParseError(t, "import 'x'; with (y) z", importStatement)
	expectParseError(t, "import * as x from 'x'; with (y) z", importStatement)
	expectParseError(t, "import x from 'x'; with (y) z", importStatement)
	expectParseError(t, "import {x} from 'x'; with (y) z", importStatement)
	expectParseError(t, "export {}; with (y) z", exportKeyword)
	expectParseError(t, "export let x; with (y) z", exportKeyword)
	expectParseError(t, "export function x() {} with (y) z", exportKeyword)
	expectParseError(t, "export class x {} with (y) z", exportKeyword)
	expectParseError(t, "await 0; with (y) z", tlaKeyword)
	expectParseError(t, "with (y) z; await 0", tlaKeyword)
	expectParseError(t, "for await (x of y); with (y) z", tlaKeyword)
	expectParseError(t, "with (y) z; for await (x of y);", tlaKeyword)
	expectParseError(t, "await using x = _; with (y) z", tlaKeyword)
	expectParseError(t, "with (y) z; await using x = _", tlaKeyword)
	expectParseError(t, "for (await using x of _) ; with (y) z", tlaKeyword)
	expectParseError(t, "with (y) z; for (await using x of _) ;", tlaKeyword)
	// Duplicate function declarations: fine at the top level of a sloppy
	// script, but errors in nested blocks (strict mode) and at the top level
	// of a module
	fAlreadyDeclaredError := "<stdin>: ERROR: The symbol \"f\" has already been declared\n" +
		"<stdin>: NOTE: The symbol \"f\" was originally declared here:\n"
	nestedNote := "<stdin>: NOTE: Duplicate function declarations are not allowed in nested blocks"
	moduleNote := "<stdin>: NOTE: Duplicate top-level function declarations are not allowed in an ECMAScript module. " +
		"This file is considered to be an ECMAScript module because of the \"export\" keyword here:\n"
	cases := []string{
		"function f() {} function f() {}",
		"function f() {} function *f() {}",
		"function *f() {} function f() {}",
		"function f() {} async function f() {}",
		"async function f() {} function f() {}",
		"function f() {} async function *f() {}",
		"async function *f() {} function f() {}",
	}
	for _, c := range cases {
		expectParseError(t, c, "")
		expectParseError(t, "'use strict'; "+c, "")
		expectParseError(t, "function foo() { 'use strict'; "+c+" }", "")
	}
	expectParseError(t, "function f() {} function f() {} export {}", fAlreadyDeclaredError+moduleNote)
	expectParseError(t, "function f() {} function *f() {} export {}", fAlreadyDeclaredError+moduleNote)
	expectParseError(t, "function f() {} async function f() {} export {}", fAlreadyDeclaredError+moduleNote)
	expectParseError(t, "function *f() {} function f() {} export {}", fAlreadyDeclaredError+moduleNote)
	expectParseError(t, "async function f() {} function f() {} export {}", fAlreadyDeclaredError+moduleNote)
	expectParseError(t, "'use strict'; { function f() {} function f() {} }",
		fAlreadyDeclaredError+nestedNote+" in strict mode. Strict mode is triggered by the \"use strict\" directive here:\n")
	expectParseError(t, "'use strict'; switch (0) { case 1: function f() {} default: function f() {} }",
		fAlreadyDeclaredError+nestedNote+" in strict mode. Strict mode is triggered by the \"use strict\" directive here:\n")
	expectParseError(t, "function foo() { 'use strict'; { function f() {} function f() {} } }",
		fAlreadyDeclaredError+nestedNote+" in strict mode. Strict mode is triggered by the \"use strict\" directive here:\n")
	expectParseError(t, "function foo() { 'use strict'; switch (0) { case 1: function f() {} default: function f() {} } }",
		fAlreadyDeclaredError+nestedNote+" in strict mode. Strict mode is triggered by the \"use strict\" directive here:\n")
	expectParseError(t, "{ function f() {} function f() {} } export {}",
		fAlreadyDeclaredError+nestedNote+" in an ECMAScript module. This file is considered to be an ECMAScript module because of the \"export\" keyword here:\n")
	expectParseError(t, "switch (0) { case 1: function f() {} default: function f() {} } export {}",
		fAlreadyDeclaredError+nestedNote+" in an ECMAScript module. This file is considered to be an ECMAScript module because of the \"export\" keyword here:\n")
	// Duplicate "var" declarations are always allowed
	expectParseError(t, "var x; var x", "")
	expectParseError(t, "'use strict'; var x; var x", "")
	expectParseError(t, "var x; var x; export {}", "")
}
func TestExponentiation(t *testing.T) {
expectPrinted(t, "--x ** 2", "--x ** 2;\n")
expectPrinted(t, "++x ** 2", "++x ** 2;\n")
expectPrinted(t, "x-- ** 2", "x-- ** 2;\n")
expectPrinted(t, "x++ ** 2", "x++ ** 2;\n")
expectPrinted(t, "(-x) ** 2", "(-x) ** 2;\n")
expectPrinted(t, "(+x) ** 2", "(+x) ** 2;\n")
expectPrinted(t, "(~x) ** 2", "(~x) ** 2;\n")
expectPrinted(t, "(!x) ** 2", "(!x) ** 2;\n")
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | true |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/js_parser/global_name_parser.go | internal/js_parser/global_name_parser.go | package js_parser
import (
"github.com/evanw/esbuild/internal/helpers"
"github.com/evanw/esbuild/internal/js_lexer"
"github.com/evanw/esbuild/internal/logger"
)
// ParseGlobalName parses a dotted and/or index-style global name expression
// such as "foo.bar", "this.x", "import.meta.y", or "foo['not an identifier']"
// into its list of name parts. It returns ok=false if the source fails to
// lex or doesn't match this grammar.
func ParseGlobalName(log logger.Log, source logger.Source) (result []string, ok bool) {
	ok = true
	defer func() {
		// The lexer reports errors by panicking with a LexerPanic value.
		// Convert that into ok=false, and re-throw any other panic.
		r := recover()
		if _, isLexerPanic := r.(js_lexer.LexerPanic); isLexerPanic {
			ok = false
		} else if r != nil {
			panic(r)
		}
	}()
	lexer := js_lexer.NewLexerGlobalName(log, source)
	// Start off with an identifier or a keyword that results in an object.
	// NOTE(review): this appends before validating the token, which looks
	// like it relies on the lexer populating Identifier with the raw token
	// text for keywords like "this" too — confirm against the lexer.
	result = append(result, lexer.Identifier.String)
	switch lexer.Token {
	case js_lexer.TThis:
		lexer.Next()
	case js_lexer.TImport:
		// Handle "import.meta"
		lexer.Next()
		lexer.Expect(js_lexer.TDot)
		result = append(result, lexer.Identifier.String)
		lexer.ExpectContextualKeyword("meta")
	default:
		// Anything else must be a plain identifier (Expect panics otherwise)
		lexer.Expect(js_lexer.TIdentifier)
	}
	// Follow with dot or index expressions
	for lexer.Token != js_lexer.TEndOfFile {
		switch lexer.Token {
		case js_lexer.TDot:
			// ".name" — keywords are allowed as property names here
			lexer.Next()
			if !lexer.IsIdentifierOrKeyword() {
				lexer.Expect(js_lexer.TIdentifier)
			}
			result = append(result, lexer.Identifier.String)
			lexer.Next()
		case js_lexer.TOpenBracket:
			// "['name']" — for parts that aren't valid identifiers
			lexer.Next()
			result = append(result, helpers.UTF16ToString(lexer.StringLiteral()))
			lexer.Expect(js_lexer.TStringLiteral)
			lexer.Expect(js_lexer.TCloseBracket)
		default:
			// Unexpected token: report it as a missing "." (panics via Expect)
			lexer.Expect(js_lexer.TDot)
		}
	}
	return
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/js_parser/json_parser_test.go | internal/js_parser/json_parser_test.go | package js_parser
import (
"fmt"
"strings"
"testing"
"github.com/evanw/esbuild/internal/ast"
"github.com/evanw/esbuild/internal/js_ast"
"github.com/evanw/esbuild/internal/js_printer"
"github.com/evanw/esbuild/internal/logger"
"github.com/evanw/esbuild/internal/test"
)
// expectParseErrorJSON asserts that parsing "contents" as JSON produces
// exactly the log output "expected" (pass "" to assert no diagnostics).
func expectParseErrorJSON(t *testing.T, contents string, expected string) {
	t.Helper()
	t.Run(contents, func(t *testing.T) {
		t.Helper()
		log := logger.NewDeferLog(logger.DeferLogNoVerboseOrDebug, nil)
		// The parse result itself is irrelevant here; only the diagnostics
		// that end up in the log are checked
		ParseJSON(log, test.SourceForTest(contents), JSONOptions{})
		var b strings.Builder
		for _, msg := range log.Done() {
			b.WriteString(msg.String(logger.OutputOptions{}, logger.TerminalInfo{}))
		}
		test.AssertEqualWithDiff(t, b.String(), expected)
	})
}
// Note: The input is parsed as JSON but printed as JS. This means the printed
// code may not be valid JSON. That's ok because esbuild always outputs JS
// bundles, not JSON bundles.
//
// expectPrintedJSON is the common case of expectPrintedJSONWithWarning where
// no warnings are expected.
func expectPrintedJSON(t *testing.T, contents string, expected string) {
	t.Helper()
	expectPrintedJSONWithWarning(t, contents, "", expected)
}
// expectPrintedJSONWithWarning parses "contents" as JSON, asserts that the
// emitted diagnostics are exactly "warning", then prints the parsed value as
// minified JS and asserts the output matches "expected".
func expectPrintedJSONWithWarning(t *testing.T, contents string, warning string, expected string) {
	t.Helper()
	t.Run(contents, func(t *testing.T) {
		t.Helper()
		log := logger.NewDeferLog(logger.DeferLogNoVerboseOrDebug, nil)
		expr, ok := ParseJSON(log, test.SourceForTest(contents), JSONOptions{})
		// Check the diagnostics first so a parse failure still reports them
		var b strings.Builder
		for _, msg := range log.Done() {
			b.WriteString(msg.String(logger.OutputOptions{}, logger.TerminalInfo{}))
		}
		test.AssertEqualWithDiff(t, b.String(), warning)
		if !ok {
			t.Fatal("Parse error")
		}
		// Wrap the expression in a statement so the printer can handle it
		stmt := js_ast.Stmt{Data: &js_ast.SExpr{Value: expr}}
		tree := js_ast.AST{
			Parts: []js_ast.Part{{Stmts: []js_ast.Stmt{stmt}}},
		}
		js := js_printer.Print(tree, ast.SymbolMap{}, nil, js_printer.Options{
			MinifyWhitespace: true,
		}).JS
		// Drop the trailing semicolon the statement printer adds (but keep a
		// lone ";" intact, hence the n > 1 guard)
		if n := len(js); n > 1 && js[n-1] == ';' {
			js = js[:n-1]
		}
		test.AssertEqualWithDiff(t, string(js), expected)
	})
}
// TestJSONAtom checks the three JSON keyword literals. "undefined" is a
// JS-only value and must be rejected by the JSON parser.
func TestJSONAtom(t *testing.T) {
	for _, atom := range []string{"false", "true", "null"} {
		expectPrintedJSON(t, atom, atom)
	}
	expectParseErrorJSON(t, "undefined", "<stdin>: ERROR: Unexpected \"undefined\" in JSON\n")
}
// TestJSONString checks JSON string lexing: double quotes only, no raw
// newlines or control characters, and exactly the escape sequences that the
// JSON grammar allows (which is a subset of what JS allows).
func TestJSONString(t *testing.T) {
	expectPrinted(t, "\"x\"", "\"x\"")
	expectParseErrorJSON(t, "'x'", "<stdin>: ERROR: JSON strings must use double quotes\n")
	expectParseErrorJSON(t, "`x`", "<stdin>: ERROR: Unexpected \"`x`\" in JSON\n")
	// Newlines: the JS line terminators U+2028/U+2029 are fine in JSON
	// strings (and get re-escaped on output), but \r and \n are not
	expectPrintedJSON(t, "\"\u2028\"", "\"\\u2028\"")
	expectPrintedJSON(t, "\"\u2029\"", "\"\\u2029\"")
	expectParseErrorJSON(t, "\"\r\"", "<stdin>: ERROR: Unterminated string literal\n")
	expectParseErrorJSON(t, "\"\n\"", "<stdin>: ERROR: Unterminated string literal\n")
	// Control characters below U+0020 must be escaped in JSON
	for c := 0; c < 0x20; c++ {
		if c != '\r' && c != '\n' {
			expectParseErrorJSON(t, fmt.Sprintf("\"%c\"", c),
				fmt.Sprintf("<stdin>: ERROR: Syntax error \"\\x%02X\"\n", c))
		}
	}
	// Valid escapes (note: lone surrogates are allowed and preserved)
	expectPrintedJSON(t, "\"\\\"\"", "'\"'")
	expectPrintedJSON(t, "\"\\\\\"", "\"\\\\\"")
	expectPrintedJSON(t, "\"\\/\"", "\"/\"")
	expectPrintedJSON(t, "\"\\b\"", "\"\\b\"")
	expectPrintedJSON(t, "\"\\f\"", "\"\\f\"")
	expectPrintedJSON(t, "\"\\n\"", "\"\\n\"")
	expectPrintedJSON(t, "\"\\r\"", "\"\\r\"")
	expectPrintedJSON(t, "\"\\t\"", "\"\t\"")
	expectPrintedJSON(t, "\"\\u0000\"", "\"\\0\"")
	expectPrintedJSON(t, "\"\\u0078\"", "\"x\"")
	expectPrintedJSON(t, "\"\\u1234\"", "\"\u1234\"")
	expectPrintedJSON(t, "\"\\uD800\"", "\"\\uD800\"")
	expectPrintedJSON(t, "\"\\uDC00\"", "\"\\uDC00\"")
	// Invalid escapes: everything JS allows beyond the JSON set (octal, \v,
	// \x, \u{...}) must be rejected, as must malformed \u sequences
	expectParseErrorJSON(t, "\"\\", "<stdin>: ERROR: Unterminated string literal\n")
	expectParseErrorJSON(t, "\"\\0\"", "<stdin>: ERROR: Syntax error \"0\"\n")
	expectParseErrorJSON(t, "\"\\1\"", "<stdin>: ERROR: Syntax error \"1\"\n")
	expectParseErrorJSON(t, "\"\\'\"", "<stdin>: ERROR: Syntax error \"'\"\n")
	expectParseErrorJSON(t, "\"\\a\"", "<stdin>: ERROR: Syntax error \"a\"\n")
	expectParseErrorJSON(t, "\"\\v\"", "<stdin>: ERROR: Syntax error \"v\"\n")
	expectParseErrorJSON(t, "\"\\\n\"", "<stdin>: ERROR: Syntax error \"\\x0A\"\n")
	expectParseErrorJSON(t, "\"\\x78\"", "<stdin>: ERROR: Syntax error \"x\"\n")
	expectParseErrorJSON(t, "\"\\u{1234}\"", "<stdin>: ERROR: Syntax error \"{\"\n")
	expectParseErrorJSON(t, "\"\\uG\"", "<stdin>: ERROR: Syntax error \"G\"\n")
	expectParseErrorJSON(t, "\"\\uDG\"", "<stdin>: ERROR: Syntax error \"G\"\n")
	expectParseErrorJSON(t, "\"\\uDEG\"", "<stdin>: ERROR: Syntax error \"G\"\n")
	expectParseErrorJSON(t, "\"\\uDEFG\"", "<stdin>: ERROR: Syntax error \"G\"\n")
	expectParseErrorJSON(t, "\"\\u\"", "<stdin>: ERROR: Syntax error '\"'\n")
	expectParseErrorJSON(t, "\"\\uD\"", "<stdin>: ERROR: Syntax error '\"'\n")
	expectParseErrorJSON(t, "\"\\uDE\"", "<stdin>: ERROR: Syntax error '\"'\n")
	expectParseErrorJSON(t, "\"\\uDEF\"", "<stdin>: ERROR: Syntax error '\"'\n")
}
// TestJSONNumber checks JSON number lexing: decimal only, optional leading
// minus, no leading zeros, and none of the JS-only number syntaxes.
func TestJSONNumber(t *testing.T) {
	expectPrintedJSON(t, "0", "0")
	expectPrintedJSON(t, "-0", "-0")
	expectPrintedJSON(t, "123", "123")
	expectPrintedJSON(t, "123.456", "123.456")
	expectPrintedJSON(t, "123e20", "123e20")
	expectPrintedJSON(t, "123e-20", "123e-20")
	// A fraction requires digits on both sides of the "." in JSON
	expectParseErrorJSON(t, "123.", "<stdin>: ERROR: Unexpected \"123.\" in JSON\n")
	expectParseErrorJSON(t, "-123.", "<stdin>: ERROR: Unexpected \"123.\" in JSON\n")
	expectParseErrorJSON(t, ".123", "<stdin>: ERROR: Unexpected \".123\" in JSON\n")
	expectParseErrorJSON(t, "-.123", "<stdin>: ERROR: Unexpected \".123\" in JSON\n")
	// JS-only values and prefixes must all be rejected
	expectParseErrorJSON(t, "NaN", "<stdin>: ERROR: Unexpected \"NaN\" in JSON\n")
	expectParseErrorJSON(t, "Infinity", "<stdin>: ERROR: Unexpected \"Infinity\" in JSON\n")
	expectParseErrorJSON(t, "-Infinity", "<stdin>: ERROR: Unexpected \"-\" in JSON\n")
	expectParseErrorJSON(t, "+1", "<stdin>: ERROR: Unexpected \"+\" in JSON\n")
	expectParseErrorJSON(t, "- 1", "<stdin>: ERROR: Unexpected \"-\" in JSON\n")
	// Leading zeros, binary/octal/hex literals, and BigInt are JS-only
	expectParseErrorJSON(t, "01", "<stdin>: ERROR: Unexpected \"01\" in JSON\n")
	expectParseErrorJSON(t, "0b1", "<stdin>: ERROR: Unexpected \"0b1\" in JSON\n")
	expectParseErrorJSON(t, "0o1", "<stdin>: ERROR: Unexpected \"0o1\" in JSON\n")
	expectParseErrorJSON(t, "0x1", "<stdin>: ERROR: Unexpected \"0x1\" in JSON\n")
	expectParseErrorJSON(t, "0n", "<stdin>: ERROR: Unexpected \"0n\" in JSON\n")
	expectParseErrorJSON(t, "-01", "<stdin>: ERROR: Unexpected \"01\" in JSON\n")
	expectParseErrorJSON(t, "-0b1", "<stdin>: ERROR: Unexpected \"0b1\" in JSON\n")
	expectParseErrorJSON(t, "-0o1", "<stdin>: ERROR: Unexpected \"0o1\" in JSON\n")
	expectParseErrorJSON(t, "-0x1", "<stdin>: ERROR: Unexpected \"0x1\" in JSON\n")
	expectParseErrorJSON(t, "-0n", "<stdin>: ERROR: Expected number in JSON but found \"0n\"\n")
	// Numeric separators and "1.e2" are also JS-only
	expectParseErrorJSON(t, "1_2", "<stdin>: ERROR: Unexpected \"1_2\" in JSON\n")
	expectParseErrorJSON(t, "1.e2", "<stdin>: ERROR: Unexpected \"1.e2\" in JSON\n")
}
// TestJSONObject checks object literal parsing: keys must be double-quoted
// strings, trailing commas are rejected, and duplicate keys produce a
// warning rather than an error (duplicates are valid JSON).
func TestJSONObject(t *testing.T) {
	expectPrintedJSON(t, "{\"x\":0}", "({x:0})")
	expectPrintedJSON(t, "{\"x\":0,\"y\":1}", "({x:0,y:1})")
	// Duplicate keys: warning + note pointing at the original key
	expectPrintedJSONWithWarning(t,
		"{\"x\":0,\"x\":1}",
		"<stdin>: WARNING: Duplicate key \"x\" in object literal\n<stdin>: NOTE: The original key \"x\" is here:\n",
		"({x:0,x:1})")
	expectParseErrorJSON(t, "{\"x\":0,}", "<stdin>: ERROR: JSON does not support trailing commas\n")
	// Identifier, numeric, and computed keys are JS-only
	expectParseErrorJSON(t, "{x:0}", "<stdin>: ERROR: Expected string in JSON but found \"x\"\n")
	expectParseErrorJSON(t, "{1:0}", "<stdin>: ERROR: Expected string in JSON but found \"1\"\n")
	expectParseErrorJSON(t, "{[\"x\"]:0}", "<stdin>: ERROR: Expected string in JSON but found \"[\"\n")
}
// TestJSONArray checks array handling in JSON mode: valid arrays round-trip,
// while holes and trailing commas are parse errors.
func TestJSONArray(t *testing.T) {
	// Valid arrays print back unchanged
	for _, valid := range [][2]string{
		{"[]", "[]"},
		{"[1]", "[1]"},
		{"[1,2]", "[1,2]"},
	} {
		expectPrintedJSON(t, valid[0], valid[1])
	}

	// Holes and trailing commas are rejected
	expectParseErrorJSON(t, "[,]", "<stdin>: ERROR: Unexpected \",\" in JSON\n")
	expectParseErrorJSON(t, "[,1]", "<stdin>: ERROR: Unexpected \",\" in JSON\n")
	expectParseErrorJSON(t, "[1,]", "<stdin>: ERROR: JSON does not support trailing commas\n")
	expectParseErrorJSON(t, "[1,,2]", "<stdin>: ERROR: Unexpected \",\" in JSON\n")
}
// TestJSONInvalid checks that JS-only syntax (parentheses, hashbangs, and
// multiple top-level values) is rejected in JSON mode.
func TestJSONInvalid(t *testing.T) {
	expectParseErrorJSON(t, `({"x":0})`, "<stdin>: ERROR: Unexpected \"(\" in JSON\n")
	expectParseErrorJSON(t, `{"x":(0)}`, "<stdin>: ERROR: Unexpected \"(\" in JSON\n")
	expectParseErrorJSON(t, "#!/usr/bin/env node\n{}", "<stdin>: ERROR: Unexpected \"#!/usr/bin/env node\" in JSON\n")
	expectParseErrorJSON(t, `{"x":0}{"y":1}`, "<stdin>: ERROR: Expected end of file in JSON but found \"{\"\n")
}
// TestJSONComments checks that both comment forms are rejected in strict JSON
// mode regardless of where they appear relative to the top-level value.
func TestJSONComments(t *testing.T) {
	// Every case produces the same diagnostic
	const noComments = "<stdin>: ERROR: JSON does not support comments\n"
	expectParseErrorJSON(t, "/*comment*/{}", noComments)
	expectParseErrorJSON(t, "//comment\n{}", noComments)
	expectParseErrorJSON(t, "{/*comment*/}", noComments)
	expectParseErrorJSON(t, "{//comment\n}", noComments)
	expectParseErrorJSON(t, "{}/*comment*/", noComments)
	expectParseErrorJSON(t, "{}//comment\n", noComments)
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/js_parser/ts_parser_test.go | internal/js_parser/ts_parser_test.go | package js_parser
import (
"testing"
"github.com/evanw/esbuild/internal/compat"
"github.com/evanw/esbuild/internal/config"
)
func expectParseErrorTS(t *testing.T, contents string, expected string) {
t.Helper()
expectParseErrorCommon(t, contents, expected, config.Options{
TS: config.TSOptions{
Parse: true,
},
})
}
func expectParseErrorExperimentalDecoratorTS(t *testing.T, contents string, expected string) {
t.Helper()
expectParseErrorCommon(t, contents, expected, config.Options{
TS: config.TSOptions{
Parse: true,
Config: config.TSConfig{
ExperimentalDecorators: config.True,
},
},
})
}
// expectPrintedWithUnsupportedFeaturesTS asserts that "contents" parses as
// TypeScript and prints as "expected" when the given JS features must be
// lowered away.
func expectPrintedWithUnsupportedFeaturesTS(t *testing.T, unsupportedJSFeatures compat.JSFeature, contents string, expected string) {
	t.Helper()
	var opts config.Options
	opts.TS.Parse = true
	opts.UnsupportedJSFeatures = unsupportedJSFeatures
	expectPrintedCommon(t, contents, expected, opts)
}
func expectParseErrorTargetTS(t *testing.T, esVersion int, contents string, expected string) {
t.Helper()
expectParseErrorCommon(t, contents, expected, config.Options{
TS: config.TSOptions{
Parse: true,
},
UnsupportedJSFeatures: compat.UnsupportedJSFeatures(map[compat.Engine]compat.Semver{
compat.ES: {Parts: []int{esVersion}},
}),
})
}
func expectPrintedTS(t *testing.T, contents string, expected string) {
t.Helper()
expectPrintedCommon(t, contents, expected, config.Options{
TS: config.TSOptions{
Parse: true,
},
})
}
func expectPrintedAssignSemanticsTS(t *testing.T, contents string, expected string) {
t.Helper()
expectPrintedCommon(t, contents, expected, config.Options{
TS: config.TSOptions{
Parse: true,
Config: config.TSConfig{
UseDefineForClassFields: config.False,
},
},
})
}
func expectPrintedAssignSemanticsTargetTS(t *testing.T, esVersion int, contents string, expected string) {
t.Helper()
expectPrintedCommon(t, contents, expected, config.Options{
TS: config.TSOptions{
Parse: true,
Config: config.TSConfig{
UseDefineForClassFields: config.False,
},
},
UnsupportedJSFeatures: compat.UnsupportedJSFeatures(map[compat.Engine]compat.Semver{
compat.ES: {Parts: []int{esVersion}},
}),
})
}
func expectPrintedExperimentalDecoratorTS(t *testing.T, contents string, expected string) {
t.Helper()
expectPrintedCommon(t, contents, expected, config.Options{
TS: config.TSOptions{
Parse: true,
Config: config.TSConfig{
ExperimentalDecorators: config.True,
},
},
})
}
func expectPrintedMangleTS(t *testing.T, contents string, expected string) {
t.Helper()
expectPrintedCommon(t, contents, expected, config.Options{
TS: config.TSOptions{
Parse: true,
},
MinifySyntax: true,
})
}
func expectPrintedMangleAssignSemanticsTS(t *testing.T, contents string, expected string) {
t.Helper()
expectPrintedCommon(t, contents, expected, config.Options{
TS: config.TSOptions{
Parse: true,
Config: config.TSConfig{
UseDefineForClassFields: config.False,
},
},
MinifySyntax: true,
})
}
func expectPrintedTargetTS(t *testing.T, esVersion int, contents string, expected string) {
t.Helper()
expectPrintedCommon(t, contents, expected, config.Options{
TS: config.TSOptions{
Parse: true,
},
UnsupportedJSFeatures: compat.UnsupportedJSFeatures(map[compat.Engine]compat.Semver{
compat.ES: {Parts: []int{esVersion}},
}),
})
}
func expectPrintedTargetExperimentalDecoratorTS(t *testing.T, esVersion int, contents string, expected string) {
t.Helper()
expectPrintedCommon(t, contents, expected, config.Options{
TS: config.TSOptions{
Parse: true,
Config: config.TSConfig{
ExperimentalDecorators: config.True,
},
},
UnsupportedJSFeatures: compat.UnsupportedJSFeatures(map[compat.Engine]compat.Semver{
compat.ES: {Parts: []int{esVersion}},
}),
})
}
func expectParseErrorTSNoAmbiguousLessThan(t *testing.T, contents string, expected string) {
t.Helper()
expectParseErrorCommon(t, contents, expected, config.Options{
TS: config.TSOptions{
Parse: true,
NoAmbiguousLessThan: true,
},
})
}
func expectPrintedTSNoAmbiguousLessThan(t *testing.T, contents string, expected string) {
t.Helper()
expectPrintedCommon(t, contents, expected, config.Options{
TS: config.TSOptions{
Parse: true,
NoAmbiguousLessThan: true,
},
})
}
func expectParseErrorTSX(t *testing.T, contents string, expected string) {
t.Helper()
expectParseErrorCommon(t, contents, expected, config.Options{
TS: config.TSOptions{
Parse: true,
},
JSX: config.JSXOptions{
Parse: true,
},
})
}
func expectPrintedTSX(t *testing.T, contents string, expected string) {
t.Helper()
expectPrintedCommon(t, contents, expected, config.Options{
TS: config.TSOptions{
Parse: true,
},
JSX: config.JSXOptions{
Parse: true,
},
})
}
func TestTSTypes(t *testing.T) {
expectPrintedTS(t, "let x: T extends number\n ? T\n : number", "let x;\n")
expectPrintedTS(t, "let x: {y: T extends number ? T : number}", "let x;\n")
expectPrintedTS(t, "let x: {y: T \n extends: number}", "let x;\n")
expectPrintedTS(t, "let x: {y: T \n extends?: number}", "let x;\n")
expectPrintedTS(t, "let x: (number | string)[]", "let x;\n")
expectPrintedTS(t, "let x: [string[]?]", "let x;\n")
expectPrintedTS(t, "let x: [number?, string?]", "let x;\n")
expectPrintedTS(t, "let x: [a: number, b?: string, ...c: number[]]", "let x;\n")
expectPrintedTS(t, "type x =\n A\n | B\n C", "C;\n")
expectPrintedTS(t, "type x =\n | A\n | B\n C", "C;\n")
expectPrintedTS(t, "type x =\n A\n & B\n C", "C;\n")
expectPrintedTS(t, "type x =\n & A\n & B\n C", "C;\n")
expectPrintedTS(t, "type x = [-1, 0, 1]\n[]", "[];\n")
expectPrintedTS(t, "type x = [-1n, 0n, 1n]\n[]", "[];\n")
expectPrintedTS(t, "type x = {0: number, readonly 1: boolean}\n[]", "[];\n")
expectPrintedTS(t, "type x = {'a': number, readonly 'b': boolean}\n[]", "[];\n")
expectPrintedTS(t, "type\nFoo = {}", "type;\nFoo = {};\n")
expectPrintedTS(t, "export type\n{ Foo } \n x", "x;\n")
expectPrintedTS(t, "export type\n* from 'foo' \n x", "x;\n")
expectPrintedTS(t, "export type\n* as ns from 'foo' \n x", "x;\n")
expectParseErrorTS(t, "export type\nFoo = {}", "<stdin>: ERROR: Unexpected newline after \"type\"\n")
expectPrintedTS(t, "let x: {x: 'a', y: false, z: null}", "let x;\n")
expectPrintedTS(t, "let x: {foo(): void}", "let x;\n")
expectPrintedTS(t, "let x: {['x']: number}", "let x;\n")
expectPrintedTS(t, "let x: {['x'](): void}", "let x;\n")
expectPrintedTS(t, "let x: {[key: string]: number}", "let x;\n")
expectPrintedTS(t, "let x: {[keyof: string]: number}", "let x;\n")
expectPrintedTS(t, "let x: {[readonly: string]: number}", "let x;\n")
expectPrintedTS(t, "let x: {[infer: string]: number}", "let x;\n")
expectPrintedTS(t, "let x: [keyof: string]", "let x;\n")
expectPrintedTS(t, "let x: [readonly: string]", "let x;\n")
expectPrintedTS(t, "let x: [infer: string]", "let x;\n")
expectParseErrorTS(t, "let x: A extends B ? keyof : string", "<stdin>: ERROR: Unexpected \":\"\n")
expectParseErrorTS(t, "let x: A extends B ? readonly : string", "<stdin>: ERROR: Unexpected \":\"\n")
expectParseErrorTS(t, "let x: A extends B ? infer : string", "<stdin>: ERROR: Expected identifier but found \":\"\n")
expectParseErrorTS(t, "let x: {[new: string]: number}", "<stdin>: ERROR: Expected \"(\" but found \":\"\n")
expectParseErrorTS(t, "let x: {[import: string]: number}", "<stdin>: ERROR: Expected \"(\" but found \":\"\n")
expectParseErrorTS(t, "let x: {[typeof: string]: number}", "<stdin>: ERROR: Expected identifier but found \":\"\n")
expectPrintedTS(t, "let x: () => void = Foo", "let x = Foo;\n")
expectPrintedTS(t, "let x: new () => void = Foo", "let x = Foo;\n")
expectPrintedTS(t, "let x = 'x' as keyof T", "let x = \"x\";\n")
expectPrintedTS(t, "let x = [1] as readonly [number]", "let x = [1];\n")
expectPrintedTS(t, "let x = 'x' as keyof typeof Foo", "let x = \"x\";\n")
expectPrintedTS(t, "let fs: typeof import('fs') = require('fs')", "let fs = require(\"fs\");\n")
expectPrintedTS(t, "let fs: typeof import('fs').exists = require('fs').exists", "let fs = require(\"fs\").exists;\n")
expectPrintedTS(t, "let fs: typeof import('fs', { assert: { type: 'json' } }) = require('fs')", "let fs = require(\"fs\");\n")
expectPrintedTS(t, "let fs: typeof import('fs', { assert: { 'resolution-mode': 'import' } }) = require('fs')", "let fs = require(\"fs\");\n")
expectPrintedTS(t, "let x: <T>() => Foo<T>", "let x;\n")
expectPrintedTS(t, "let x: new <T>() => Foo<T>", "let x;\n")
expectPrintedTS(t, "let x: <T extends object>() => Foo<T>", "let x;\n")
expectPrintedTS(t, "let x: new <T extends object>() => Foo<T>", "let x;\n")
expectPrintedTS(t, "type Foo<T> = {[P in keyof T]?: T[P]}", "")
expectPrintedTS(t, "type Foo<T> = {[P in keyof T]+?: T[P]}", "")
expectPrintedTS(t, "type Foo<T> = {[P in keyof T]-?: T[P]}", "")
expectPrintedTS(t, "type Foo<T> = {readonly [P in keyof T]: T[P]}", "")
expectPrintedTS(t, "type Foo<T> = {-readonly [P in keyof T]: T[P]}", "")
expectPrintedTS(t, "type Foo<T> = {+readonly [P in keyof T]: T[P]}", "")
expectPrintedTS(t, "type Foo<T> = {[infer in T]?: Foo}", "")
expectPrintedTS(t, "type Foo<T> = {[keyof in T]?: Foo}", "")
expectPrintedTS(t, "type Foo<T> = {[asserts in T]?: Foo}", "")
expectPrintedTS(t, "type Foo<T> = {[abstract in T]?: Foo}", "")
expectPrintedTS(t, "type Foo<T> = {[readonly in T]?: Foo}", "")
expectPrintedTS(t, "type Foo<T> = {[satisfies in T]?: Foo}", "")
expectPrintedTS(t, "let x: number! = y", "let x = y;\n")
expectPrintedTS(t, "let x: number \n !y", "let x;\n!y;\n")
expectPrintedTS(t, "const x: unique = y", "const x = y;\n")
expectPrintedTS(t, "const x: unique<T> = y", "const x = y;\n")
expectPrintedTS(t, "const x: unique\nsymbol = y", "const x = y;\n")
expectPrintedTS(t, "let x: typeof a = y", "let x = y;\n")
expectPrintedTS(t, "let x: typeof a.b = y", "let x = y;\n")
expectPrintedTS(t, "let x: typeof a.if = y", "let x = y;\n")
expectPrintedTS(t, "let x: typeof if.a = y", "let x = y;\n")
expectPrintedTS(t, "let x: typeof readonly = y", "let x = y;\n")
expectParseErrorTS(t, "let x: typeof readonly Array", "<stdin>: ERROR: Expected \";\" but found \"Array\"\n")
expectPrintedTS(t, "let x: `y`", "let x;\n")
expectParseErrorTS(t, "let x: tag`y`", "<stdin>: ERROR: Expected \";\" but found \"`y`\"\n")
expectPrintedTS(t, "let x: { <A extends B>(): c.d \n <E extends F>(): g.h }", "let x;\n")
expectPrintedTSX(t, "type x = a.b \n <c></c>", "/* @__PURE__ */ React.createElement(\"c\", null);\n")
expectPrintedTS(t, "type Foo = a.b \n | c.d", "")
expectPrintedTS(t, "type Foo = a.b \n & c.d", "")
expectPrintedTS(t, "type Foo = \n | a.b \n | c.d", "")
expectPrintedTS(t, "type Foo = \n & a.b \n & c.d", "")
expectPrintedTS(t, "type Foo = Bar extends [infer T] ? T : null", "")
expectPrintedTS(t, "type Foo = Bar extends [infer T extends string] ? T : null", "")
expectPrintedTS(t, "type Foo = {} extends infer T extends {} ? A<T> : never", "")
expectPrintedTS(t, "type Foo = {} extends (infer T extends {}) ? A<T> : never", "")
expectPrintedTS(t, "type Foo<T> = T extends { a: infer U extends number } | { b: infer U extends number } ? U : never", "")
expectPrintedTS(t, "type Foo<T> = T extends { a: infer U extends number } & { b: infer U extends number } ? U : never", "")
expectPrintedTS(t, "type Foo<T> = T extends { a: infer U extends number } | infer U extends number ? U : never", "")
expectPrintedTS(t, "type Foo<T> = T extends { a: infer U extends number } & infer U extends number ? U : never", "")
expectPrintedTS(t, "let x: A extends B<infer C extends D> ? D : never", "let x;\n")
expectPrintedTS(t, "let x: A extends B<infer C extends D ? infer C : never> ? D : never", "let x;\n")
expectPrintedTS(t, "let x: ([e1, e2, ...es]: any) => any", "let x;\n")
expectPrintedTS(t, "let x: (...[e1, e2, es]: any) => any", "let x;\n")
expectPrintedTS(t, "let x: (...[e1, e2, ...es]: any) => any", "let x;\n")
expectPrintedTS(t, "let x: (y, [e1, e2, ...es]: any) => any", "let x;\n")
expectPrintedTS(t, "let x: (y, ...[e1, e2, es]: any) => any", "let x;\n")
expectPrintedTS(t, "let x: (y, ...[e1, e2, ...es]: any) => any", "let x;\n")
expectPrintedTS(t, "let x: A.B<X.Y>", "let x;\n")
expectPrintedTS(t, "let x: A.B<X.Y>=2", "let x = 2;\n")
expectPrintedTS(t, "let x: A.B<X.Y<Z>>", "let x;\n")
expectPrintedTS(t, "let x: A.B<X.Y<Z>>=2", "let x = 2;\n")
expectPrintedTS(t, "let x: A.B<X.Y<Z<T>>>", "let x;\n")
expectPrintedTS(t, "let x: A.B<X.Y<Z<T>>>=2", "let x = 2;\n")
expectPrintedTS(t, "(): A<T>=> 0", "() => 0;\n")
expectPrintedTS(t, "(): A<B<T>>=> 0", "() => 0;\n")
expectPrintedTS(t, "(): A<B<C<T>>>=> 0", "() => 0;\n")
expectPrintedTS(t, "let foo: any\n<x>y", "let foo;\ny;\n")
expectPrintedTSX(t, "let foo: any\n<x>y</x>", "let foo;\n/* @__PURE__ */ React.createElement(\"x\", null, \"y\");\n")
expectParseErrorTS(t, "let foo: (any\n<x>y)", "<stdin>: ERROR: Expected \")\" but found \"<\"\n")
expectPrintedTS(t, "let foo = bar as (null)", "let foo = bar;\n")
expectPrintedTS(t, "let foo = bar\nas (null)", "let foo = bar;\nas(null);\n")
expectParseErrorTS(t, "let foo = (bar\nas (null))", "<stdin>: ERROR: Expected \")\" but found \"as\"\n")
expectPrintedTS(t, "a as any ? b : c;", "a ? b : c;\n")
expectPrintedTS(t, "a as any ? async () => b : c;", "a ? async () => b : c;\n")
expectPrintedTS(t, "foo as number extends Object ? any : any;", "foo;\n")
expectPrintedTS(t, "foo as number extends Object ? () => void : any;", "foo;\n")
expectPrintedTS(t, "let a = b ? c : d as T extends T ? T extends T ? T : never : never ? e : f;", "let a = b ? c : d ? e : f;\n")
expectParseErrorTS(t, "type a = b extends c", "<stdin>: ERROR: Expected \"?\" but found end of file\n")
expectParseErrorTS(t, "type a = b extends c extends d", "<stdin>: ERROR: Expected \"?\" but found \"extends\"\n")
expectParseErrorTS(t, "type a = b ? c : d", "<stdin>: ERROR: Expected \";\" but found \"?\"\n")
expectPrintedTS(t, "let foo: keyof Object = 'toString'", "let foo = \"toString\";\n")
expectPrintedTS(t, "let foo: keyof\nObject = 'toString'", "let foo = \"toString\";\n")
expectPrintedTS(t, "let foo: (keyof\nObject) = 'toString'", "let foo = \"toString\";\n")
expectPrintedTS(t, "type Foo = Array<<T>(x: T) => T>\n x", "x;\n")
expectPrintedTSX(t, "<Foo<<T>(x: T) => T>/>", "/* @__PURE__ */ React.createElement(Foo, null);\n")
expectPrintedTS(t, "interface Foo<> {}", "")
expectPrintedTSX(t, "interface Foo<> {}", "")
expectPrintedTS(t, "type Foo<> = {}", "")
expectPrintedTSX(t, "type Foo<> = {}", "")
expectParseErrorTS(t, "class Foo<> {}", "<stdin>: ERROR: Expected identifier but found \">\"\n")
expectParseErrorTSX(t, "class Foo<> {}", "<stdin>: ERROR: Expected identifier but found \">\"\n")
expectParseErrorTS(t, "class Foo { foo<>() {} }", "<stdin>: ERROR: Expected identifier but found \">\"\n")
expectParseErrorTSX(t, "class Foo { foo<>() {} }", "<stdin>: ERROR: Expected identifier but found \">\"\n")
expectParseErrorTS(t, "type Foo = { foo<>(): void }", "<stdin>: ERROR: Expected identifier but found \">\"\n")
expectParseErrorTSX(t, "type Foo = { foo<>(): void }", "<stdin>: ERROR: Expected identifier but found \">\"\n")
expectParseErrorTS(t, "type Foo = <>() => {}", "<stdin>: ERROR: Expected identifier but found \">\"\n")
expectParseErrorTSX(t, "type Foo = <>() => {}", "<stdin>: ERROR: Expected identifier but found \">\"\n")
expectParseErrorTS(t, "let Foo = <>() => {}", "<stdin>: ERROR: Unexpected \">\"\n")
expectParseErrorTSX(t, "let Foo = <>() => {}",
"<stdin>: ERROR: The character \">\" is not valid inside a JSX element\nNOTE: Did you mean to escape it as \"{'>'}\" instead?\n"+
"<stdin>: ERROR: Unexpected end of file before a closing fragment tag\n<stdin>: NOTE: The opening fragment tag is here:\n")
// Certain built-in types do not accept type parameters
expectPrintedTS(t, "x as 1 < 1", "x < 1;\n")
expectPrintedTS(t, "x as 1n < 1", "x < 1;\n")
expectPrintedTS(t, "x as -1 < 1", "x < 1;\n")
expectPrintedTS(t, "x as -1n < 1", "x < 1;\n")
expectPrintedTS(t, "x as '' < 1", "x < 1;\n")
expectPrintedTS(t, "x as `` < 1", "x < 1;\n")
expectPrintedTS(t, "x as any < 1", "x < 1;\n")
expectPrintedTS(t, "x as bigint < 1", "x < 1;\n")
expectPrintedTS(t, "x as false < 1", "x < 1;\n")
expectPrintedTS(t, "x as never < 1", "x < 1;\n")
expectPrintedTS(t, "x as null < 1", "x < 1;\n")
expectPrintedTS(t, "x as number < 1", "x < 1;\n")
expectPrintedTS(t, "x as object < 1", "x < 1;\n")
expectPrintedTS(t, "x as string < 1", "x < 1;\n")
expectPrintedTS(t, "x as symbol < 1", "x < 1;\n")
expectPrintedTS(t, "x as this < 1", "x < 1;\n")
expectPrintedTS(t, "x as true < 1", "x < 1;\n")
expectPrintedTS(t, "x as undefined < 1", "x < 1;\n")
expectPrintedTS(t, "x as unique symbol < 1", "x < 1;\n")
expectPrintedTS(t, "x as unknown < 1", "x < 1;\n")
expectPrintedTS(t, "x as void < 1", "x < 1;\n")
expectParseErrorTS(t, "x as Foo < 1", "<stdin>: ERROR: Expected \">\" but found end of file\n")
// These keywords are valid tuple labels
expectPrintedTS(t, "type _any = [any: string]", "")
expectPrintedTS(t, "type _asserts = [asserts: string]", "")
expectPrintedTS(t, "type _bigint = [bigint: string]", "")
expectPrintedTS(t, "type _boolean = [boolean: string]", "")
expectPrintedTS(t, "type _false = [false: string]", "")
expectPrintedTS(t, "type _function = [function: string]", "")
expectPrintedTS(t, "type _import = [import: string]", "")
expectPrintedTS(t, "type _infer = [infer: string]", "")
expectPrintedTS(t, "type _never = [never: string]", "")
expectPrintedTS(t, "type _new = [new: string]", "")
expectPrintedTS(t, "type _null = [null: string]", "")
expectPrintedTS(t, "type _number = [number: string]", "")
expectPrintedTS(t, "type _object = [object: string]", "")
expectPrintedTS(t, "type _readonly = [readonly: string]", "")
expectPrintedTS(t, "type _string = [string: string]", "")
expectPrintedTS(t, "type _symbol = [symbol: string]", "")
expectPrintedTS(t, "type _this = [this: string]", "")
expectPrintedTS(t, "type _true = [true: string]", "")
expectPrintedTS(t, "type _typeof = [typeof: string]", "")
expectPrintedTS(t, "type _undefined = [undefined: string]", "")
expectPrintedTS(t, "type _unique = [unique: string]", "")
expectPrintedTS(t, "type _unknown = [unknown: string]", "")
expectPrintedTS(t, "type _void = [void: string]", "")
// Also check tuple labels with a question mark
expectPrintedTS(t, "type _any = [any?: string]", "")
expectPrintedTS(t, "type _asserts = [asserts?: string]", "")
expectPrintedTS(t, "type _bigint = [bigint?: string]", "")
expectPrintedTS(t, "type _boolean = [boolean?: string]", "")
expectPrintedTS(t, "type _false = [false?: string]", "")
expectPrintedTS(t, "type _function = [function?: string]", "")
expectPrintedTS(t, "type _import = [import?: string]", "")
expectPrintedTS(t, "type _infer = [infer?: string]", "")
expectPrintedTS(t, "type _never = [never?: string]", "")
expectPrintedTS(t, "type _new = [new?: string]", "")
expectPrintedTS(t, "type _null = [null?: string]", "")
expectPrintedTS(t, "type _number = [number?: string]", "")
expectPrintedTS(t, "type _object = [object?: string]", "")
expectPrintedTS(t, "type _readonly = [readonly?: string]", "")
expectPrintedTS(t, "type _string = [string?: string]", "")
expectPrintedTS(t, "type _symbol = [symbol?: string]", "")
expectPrintedTS(t, "type _this = [this?: string]", "")
expectPrintedTS(t, "type _true = [true?: string]", "")
expectPrintedTS(t, "type _typeof = [typeof?: string]", "")
expectPrintedTS(t, "type _undefined = [undefined?: string]", "")
expectPrintedTS(t, "type _unique = [unique?: string]", "")
expectPrintedTS(t, "type _unknown = [unknown?: string]", "")
expectPrintedTS(t, "type _void = [void?: string]", "")
// These keywords are invalid tuple labels
expectParseErrorTS(t, "type _break = [break: string]", "<stdin>: ERROR: Unexpected \"break\"\n")
expectParseErrorTS(t, "type _case = [case: string]", "<stdin>: ERROR: Unexpected \"case\"\n")
expectParseErrorTS(t, "type _catch = [catch: string]", "<stdin>: ERROR: Unexpected \"catch\"\n")
expectParseErrorTS(t, "type _class = [class: string]", "<stdin>: ERROR: Unexpected \"class\"\n")
expectParseErrorTS(t, "type _const = [const: string]", "<stdin>: ERROR: Unexpected \"const\"\n")
expectParseErrorTS(t, "type _continue = [continue: string]", "<stdin>: ERROR: Unexpected \"continue\"\n")
expectParseErrorTS(t, "type _debugger = [debugger: string]", "<stdin>: ERROR: Unexpected \"debugger\"\n")
expectParseErrorTS(t, "type _default = [default: string]", "<stdin>: ERROR: Unexpected \"default\"\n")
expectParseErrorTS(t, "type _delete = [delete: string]", "<stdin>: ERROR: Unexpected \"delete\"\n")
expectParseErrorTS(t, "type _do = [do: string]", "<stdin>: ERROR: Unexpected \"do\"\n")
expectParseErrorTS(t, "type _else = [else: string]", "<stdin>: ERROR: Unexpected \"else\"\n")
expectParseErrorTS(t, "type _enum = [enum: string]", "<stdin>: ERROR: Unexpected \"enum\"\n")
expectParseErrorTS(t, "type _export = [export: string]", "<stdin>: ERROR: Unexpected \"export\"\n")
expectParseErrorTS(t, "type _extends = [extends: string]", "<stdin>: ERROR: Unexpected \"extends\"\n")
expectParseErrorTS(t, "type _finally = [finally: string]", "<stdin>: ERROR: Unexpected \"finally\"\n")
expectParseErrorTS(t, "type _for = [for: string]", "<stdin>: ERROR: Unexpected \"for\"\n")
expectParseErrorTS(t, "type _if = [if: string]", "<stdin>: ERROR: Unexpected \"if\"\n")
expectParseErrorTS(t, "type _in = [in: string]", "<stdin>: ERROR: Unexpected \"in\"\n")
expectParseErrorTS(t, "type _instanceof = [instanceof: string]", "<stdin>: ERROR: Unexpected \"instanceof\"\n")
expectParseErrorTS(t, "type _return = [return: string]", "<stdin>: ERROR: Unexpected \"return\"\n")
expectParseErrorTS(t, "type _super = [super: string]", "<stdin>: ERROR: Unexpected \"super\"\n")
expectParseErrorTS(t, "type _switch = [switch: string]", "<stdin>: ERROR: Unexpected \"switch\"\n")
expectParseErrorTS(t, "type _throw = [throw: string]", "<stdin>: ERROR: Unexpected \"throw\"\n")
expectParseErrorTS(t, "type _try = [try: string]", "<stdin>: ERROR: Unexpected \"try\"\n")
expectParseErrorTS(t, "type _var = [var: string]", "<stdin>: ERROR: Unexpected \"var\"\n")
expectParseErrorTS(t, "type _while = [while: string]", "<stdin>: ERROR: Unexpected \"while\"\n")
expectParseErrorTS(t, "type _with = [with: string]", "<stdin>: ERROR: Unexpected \"with\"\n")
// TypeScript 4.1
expectPrintedTS(t, "let foo: `${'a' | 'b'}-${'c' | 'd'}` = 'a-c'", "let foo = \"a-c\";\n")
// TypeScript 4.2
expectPrintedTS(t, "let x: abstract new () => void = Foo", "let x = Foo;\n")
expectPrintedTS(t, "let x: abstract new <T>() => Foo<T>", "let x;\n")
expectPrintedTS(t, "let x: abstract new <T extends object>() => Foo<T>", "let x;\n")
expectParseErrorTS(t, "let x: abstract () => void = Foo", "<stdin>: ERROR: Expected \";\" but found \"(\"\n")
expectParseErrorTS(t, "let x: abstract <T>() => Foo<T>", "<stdin>: ERROR: Expected \";\" but found \"(\"\n")
expectParseErrorTS(t, "let x: abstract <T extends object>() => Foo<T>", "<stdin>: ERROR: Expected \"?\" but found \">\"\n")
// TypeScript 4.7
jsxErrorArrow := "<stdin>: ERROR: The character \">\" is not valid inside a JSX element\n" +
"NOTE: Did you mean to escape it as \"{'>'}\" instead?\n"
expectPrintedTS(t, "type Foo<in T> = T", "")
expectPrintedTS(t, "type Foo<out T> = T", "")
expectPrintedTS(t, "type Foo<in out> = T", "")
expectPrintedTS(t, "type Foo<out out> = T", "")
expectPrintedTS(t, "type Foo<in out out> = T", "")
expectPrintedTS(t, "type Foo<in X, out Y> = [X, Y]", "")
expectPrintedTS(t, "type Foo<out X, in Y> = [X, Y]", "")
expectPrintedTS(t, "type Foo<out X, out Y extends keyof X> = [X, Y]", "")
expectParseErrorTS(t, "type Foo<i\\u006E T> = T", "<stdin>: ERROR: Expected identifier but found \"i\\\\u006E\"\n")
expectParseErrorTS(t, "type Foo<ou\\u0074 T> = T", "<stdin>: ERROR: Expected \">\" but found \"T\"\n")
expectParseErrorTS(t, "type Foo<in in> = T", "<stdin>: ERROR: The modifier \"in\" is not valid here:\n<stdin>: ERROR: Expected identifier but found \">\"\n")
expectParseErrorTS(t, "type Foo<out in> = T", "<stdin>: ERROR: The modifier \"in\" is not valid here:\n<stdin>: ERROR: Expected identifier but found \">\"\n")
expectParseErrorTS(t, "type Foo<out in T> = T", "<stdin>: ERROR: The modifier \"in\" is not valid here:\n")
expectParseErrorTS(t, "type Foo<public T> = T", "<stdin>: ERROR: Expected \">\" but found \"T\"\n")
expectParseErrorTS(t, "type Foo<in out in T> = T", "<stdin>: ERROR: The modifier \"in\" is not valid here:\n")
expectParseErrorTS(t, "type Foo<in out out T> = T", "<stdin>: ERROR: The modifier \"out\" is not valid here:\n")
expectPrintedTS(t, "class Foo<in T> {}", "class Foo {\n}\n")
expectPrintedTS(t, "class Foo<out T> {}", "class Foo {\n}\n")
expectPrintedTS(t, "export default class Foo<in T> {}", "export default class Foo {\n}\n")
expectPrintedTS(t, "export default class Foo<out T> {}", "export default class Foo {\n}\n")
expectPrintedTS(t, "export default class <in T> {}", "export default class {\n}\n")
expectPrintedTS(t, "export default class <out T> {}", "export default class {\n}\n")
expectPrintedTS(t, "interface Foo<in T> {}", "")
expectPrintedTS(t, "interface Foo<out T> {}", "")
expectPrintedTS(t, "declare class Foo<in T> {}", "")
expectPrintedTS(t, "declare class Foo<out T> {}", "")
expectPrintedTS(t, "declare interface Foo<in T> {}", "")
expectPrintedTS(t, "declare interface Foo<out T> {}", "")
expectParseErrorTS(t, "function foo<in T>() {}", "<stdin>: ERROR: The modifier \"in\" is not valid here:\n")
expectParseErrorTS(t, "function foo<out T>() {}", "<stdin>: ERROR: The modifier \"out\" is not valid here:\n")
expectParseErrorTS(t, "export default function foo<in T>() {}", "<stdin>: ERROR: The modifier \"in\" is not valid here:\n")
expectParseErrorTS(t, "export default function foo<out T>() {}", "<stdin>: ERROR: The modifier \"out\" is not valid here:\n")
expectParseErrorTS(t, "export default function <in T>() {}", "<stdin>: ERROR: The modifier \"in\" is not valid here:\n")
expectParseErrorTS(t, "export default function <out T>() {}", "<stdin>: ERROR: The modifier \"out\" is not valid here:\n")
expectParseErrorTS(t, "let foo: Foo<in T>", "<stdin>: ERROR: Unexpected \"in\"\n")
expectParseErrorTS(t, "let foo: Foo<out T>", "<stdin>: ERROR: Expected \">\" but found \"T\"\n")
expectParseErrorTS(t, "declare function foo<in T>()", "<stdin>: ERROR: The modifier \"in\" is not valid here:\n")
expectParseErrorTS(t, "declare function foo<out T>()", "<stdin>: ERROR: The modifier \"out\" is not valid here:\n")
expectParseErrorTS(t, "declare let foo: Foo<in T>", "<stdin>: ERROR: Unexpected \"in\"\n")
expectParseErrorTS(t, "declare let foo: Foo<out T>", "<stdin>: ERROR: Expected \">\" but found \"T\"\n")
expectPrintedTS(t, "Foo = class <in T> {}", "Foo = class {\n};\n")
expectPrintedTS(t, "Foo = class <out T> {}", "Foo = class {\n};\n")
expectPrintedTS(t, "Foo = class Bar<in T> {}", "Foo = class Bar {\n};\n")
expectPrintedTS(t, "Foo = class Bar<out T> {}", "Foo = class Bar {\n};\n")
expectParseErrorTS(t, "foo = function <in T>() {}", "<stdin>: ERROR: The modifier \"in\" is not valid here:\n")
expectParseErrorTS(t, "foo = function <out T>() {}", "<stdin>: ERROR: The modifier \"out\" is not valid here:\n")
expectParseErrorTS(t, "class Foo { foo<in T>(): T {} }", "<stdin>: ERROR: The modifier \"in\" is not valid here:\n")
expectParseErrorTS(t, "class Foo { foo<out T>(): T {} }", "<stdin>: ERROR: The modifier \"out\" is not valid here:\n")
expectParseErrorTS(t, "foo = { foo<in T>(): T {} }", "<stdin>: ERROR: The modifier \"in\" is not valid here:\n")
expectParseErrorTS(t, "foo = { foo<out T>(): T {} }", "<stdin>: ERROR: The modifier \"out\" is not valid here:\n")
expectParseErrorTS(t, "<in T>() => {}", "<stdin>: ERROR: The modifier \"in\" is not valid here:\n")
expectParseErrorTS(t, "<out T>() => {}", "<stdin>: ERROR: The modifier \"out\" is not valid here:\n")
expectParseErrorTS(t, "<in T, out T>() => {}", "<stdin>: ERROR: The modifier \"in\" is not valid here:\n<stdin>: ERROR: The modifier \"out\" is not valid here:\n")
expectParseErrorTS(t, "let x: <in T>() => {}", "<stdin>: ERROR: The modifier \"in\" is not valid here:\n")
expectParseErrorTS(t, "let x: <out T>() => {}", "<stdin>: ERROR: The modifier \"out\" is not valid here:\n")
expectParseErrorTS(t, "let x: <in T, out T>() => {}", "<stdin>: ERROR: The modifier \"in\" is not valid here:\n<stdin>: ERROR: The modifier \"out\" is not valid here:\n")
expectParseErrorTS(t, "let x: new <in T>() => {}", "<stdin>: ERROR: The modifier \"in\" is not valid here:\n")
expectParseErrorTS(t, "let x: new <out T>() => {}", "<stdin>: ERROR: The modifier \"out\" is not valid here:\n")
expectParseErrorTS(t, "let x: new <in T, out T>() => {}", "<stdin>: ERROR: The modifier \"in\" is not valid here:\n<stdin>: ERROR: The modifier \"out\" is not valid here:\n")
expectParseErrorTS(t, "let x: { y<in T>(): any }", "<stdin>: ERROR: The modifier \"in\" is not valid here:\n")
expectParseErrorTS(t, "let x: { y<out T>(): any }", "<stdin>: ERROR: The modifier \"out\" is not valid here:\n")
expectParseErrorTS(t, "let x: { y<in T, out T>(): any }", "<stdin>: ERROR: The modifier \"in\" is not valid here:\n<stdin>: ERROR: The modifier \"out\" is not valid here:\n")
expectPrintedTSX(t, "<in T></in>", "/* @__PURE__ */ React.createElement(\"in\", { T: true });\n")
expectPrintedTSX(t, "<out T></out>", "/* @__PURE__ */ React.createElement(\"out\", { T: true });\n")
expectPrintedTSX(t, "<in out T></in>", "/* @__PURE__ */ React.createElement(\"in\", { out: true, T: true });\n")
expectPrintedTSX(t, "<out in T></out>", "/* @__PURE__ */ React.createElement(\"out\", { in: true, T: true });\n")
expectPrintedTSX(t, "<in T extends={true}></in>", "/* @__PURE__ */ React.createElement(\"in\", { T: true, extends: true });\n")
expectPrintedTSX(t, "<out T extends={true}></out>", "/* @__PURE__ */ React.createElement(\"out\", { T: true, extends: true });\n")
expectPrintedTSX(t, "<in out T extends={true}></in>", "/* @__PURE__ */ React.createElement(\"in\", { out: true, T: true, extends: true });\n")
expectParseErrorTSX(t, "<in T,>() => {}", "<stdin>: ERROR: Expected \">\" but found \",\"\n")
expectParseErrorTSX(t, "<out T,>() => {}", "<stdin>: ERROR: Expected \">\" but found \",\"\n")
expectParseErrorTSX(t, "<in out T,>() => {}", "<stdin>: ERROR: Expected \">\" but found \",\"\n")
expectParseErrorTSX(t, "<in T extends any>() => {}", jsxErrorArrow+"<stdin>: ERROR: Unexpected end of file before a closing \"in\" tag\n<stdin>: NOTE: The opening \"in\" tag is here:\n")
expectParseErrorTSX(t, "<out T extends any>() => {}", jsxErrorArrow+"<stdin>: ERROR: Unexpected end of file before a closing \"out\" tag\n<stdin>: NOTE: The opening \"out\" tag is here:\n")
expectParseErrorTSX(t, "<in out T extends any>() => {}", jsxErrorArrow+"<stdin>: ERROR: Unexpected end of file before a closing \"in\" tag\n<stdin>: NOTE: The opening \"in\" tag is here:\n")
expectPrintedTS(t, "class Container { get data(): typeof this.#data {} }", "class Container {\n get data() {\n }\n}\n")
expectPrintedTS(t, "const a: typeof this.#a = 1;", "const a = 1;\n")
expectParseErrorTS(t, "const a: typeof #a = 1;", "<stdin>: ERROR: Expected identifier but found \"#a\"\n")
// TypeScript 5.0
expectPrintedTS(t, "class Foo<const T> {}", "class Foo {\n}\n")
expectPrintedTS(t, "class Foo<const T extends X> {}", "class Foo {\n}\n")
expectPrintedTS(t, "Foo = class <const T> {}", "Foo = class {\n};\n")
expectPrintedTS(t, "Foo = class Bar<const T> {}", "Foo = class Bar {\n};\n")
expectPrintedTS(t, "function foo<const T>() {}", "function foo() {\n}\n")
expectPrintedTS(t, "foo = function <const T>() {}", "foo = function() {\n};\n")
expectPrintedTS(t, "foo = function bar<const T>() {}", "foo = function bar() {\n};\n")
expectPrintedTS(t, "class Foo { bar<const T>() {} }", "class Foo {\n bar() {\n }\n}\n")
expectPrintedTS(t, "interface Foo { bar<const T>(): T }", "")
expectPrintedTS(t, "interface Foo { new bar<const T>(): T }", "")
expectPrintedTS(t, "let x: { bar<const T>(): T }", "let x;\n")
expectPrintedTS(t, "let x: { new bar<const T>(): T }", "let x;\n")
expectPrintedTS(t, "foo = { bar<const T>() {} }", "foo = { bar() {\n} };\n")
expectPrintedTS(t, "x = <const>(y)", "x = y;\n")
expectPrintedTS(t, "<const T>() => {}", "() => {\n};\n")
expectPrintedTS(t, "<const const T>() => {}", "() => {\n};\n")
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | true |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/js_parser/js_parser.go | internal/js_parser/js_parser.go | package js_parser
import (
"fmt"
"math"
"math/big"
"regexp"
"sort"
"strings"
"unicode/utf8"
"github.com/evanw/esbuild/internal/ast"
"github.com/evanw/esbuild/internal/compat"
"github.com/evanw/esbuild/internal/config"
"github.com/evanw/esbuild/internal/helpers"
"github.com/evanw/esbuild/internal/js_ast"
"github.com/evanw/esbuild/internal/js_lexer"
"github.com/evanw/esbuild/internal/logger"
"github.com/evanw/esbuild/internal/renamer"
"github.com/evanw/esbuild/internal/runtime"
)
// This parser does two passes:
//
// 1. Parse the source into an AST, create the scope tree, and declare symbols.
//
// 2. Visit each node in the AST, bind identifiers to declared symbols, do
// constant folding, substitute compile-time variable definitions, and
// lower certain syntactic constructs as appropriate given the language
// target.
//
// So many things have been put in so few passes because we want to minimize
// the number of full-tree passes to improve performance. However, we need
// to have at least two separate passes to handle variable hoisting. See the
// comment about scopesInOrder below for more information.
type parser struct {
	// The parse configuration, log output, and the source file being parsed
	options Options
	log logger.Log
	source logger.Source
	tracker logger.LineColumnTracker
	fnOrArrowDataParse fnOrArrowDataParse
	fnOnlyDataVisit fnOnlyDataVisit
	allocatedNames []string
	currentScope *js_ast.Scope
	scopesForCurrentPart []*js_ast.Scope
	symbols []ast.Symbol
	astHelpers js_ast.HelperContext
	tsUseCounts []uint32
	injectedDefineSymbols []ast.Ref
	injectedSymbolSources map[ast.Ref]injectedSymbolSource
	injectedDotNames map[string][]injectedDotName
	dropLabelsMap map[string]struct{}
	exprComments map[logger.Loc][]string
	mangledProps map[string]ast.Ref
	reservedProps map[string]bool
	symbolUses map[ast.Ref]js_ast.SymbolUse
	importSymbolPropertyUses map[ast.Ref]map[string]js_ast.SymbolUse
	symbolCallUses map[ast.Ref]js_ast.SymbolCallUse
	declaredSymbols []js_ast.DeclaredSymbol
	globPatternImports []globPatternImport
	runtimeImports map[string]ast.LocRef
	duplicateCaseChecker duplicateCaseChecker
	unrepresentableIdentifiers map[string]bool
	legacyOctalLiterals map[js_ast.E]logger.Range
	scopesInOrderForEnum map[logger.Loc][]scopeOrder
	binaryExprStack []binaryExprVisitor

	// For strict mode handling
	hoistedRefForSloppyModeBlockFn map[ast.Ref]ast.Ref

	// For lowering private methods
	privateGetters map[ast.Ref]ast.Ref
	privateSetters map[ast.Ref]ast.Ref

	// These are for TypeScript
	//
	// We build up enough information about the TypeScript namespace hierarchy to
	// be able to resolve scope lookups and property accesses for TypeScript enum
	// and namespace features. Each JavaScript scope object inside a namespace
	// has a reference to a map of exported namespace members from sibling scopes.
	//
	// In addition, there is a map from each relevant symbol reference to the data
	// associated with that namespace or namespace member: "refToTSNamespaceMemberData".
	// This gives enough info to be able to resolve queries into the namespace.
	//
	// When visiting expressions, namespace metadata is associated with the most
	// recently visited node. If namespace metadata is present, "tsNamespaceTarget"
	// will be set to the most recently visited node (as a way to mark that this
	// node has metadata) and "tsNamespaceMemberData" will be set to the metadata.
	refToTSNamespaceMemberData map[ast.Ref]js_ast.TSNamespaceMemberData
	tsNamespaceTarget js_ast.E
	tsNamespaceMemberData js_ast.TSNamespaceMemberData
	emittedNamespaceVars map[ast.Ref]bool
	isExportedInsideNamespace map[ast.Ref]ast.Ref
	localTypeNames map[string]bool
	tsEnums map[ast.Ref]map[string]js_ast.TSEnumValue
	constValues map[ast.Ref]js_ast.ConstValue
	propDerivedCtorValue js_ast.E
	propMethodDecoratorScope *js_ast.Scope

	// This is the reference to the generated function argument for the namespace,
	// which is different than the reference to the namespace itself:
	//
	//   namespace ns {
	//   }
	//
	// The code above is transformed into something like this:
	//
	//   var ns1;
	//   (function(ns2) {
	//   })(ns1 || (ns1 = {}));
	//
	// This variable is "ns2" not "ns1". It is only used during the second
	// "visit" pass.
	enclosingNamespaceArgRef *ast.Ref

	// Imports (both ES6 and CommonJS) are tracked at the top level
	importRecords []ast.ImportRecord
	importRecordsForCurrentPart []uint32
	exportStarImportRecords []uint32

	// These are for handling ES6 imports and exports
	importItemsForNamespace map[ast.Ref]namespaceImportItems
	isImportItem map[ast.Ref]bool
	namedImports map[ast.Ref]js_ast.NamedImport
	namedExports map[string]js_ast.NamedExport
	topLevelSymbolToParts map[ast.Ref][]uint32
	importNamespaceCCMap map[importNamespaceCall]bool

	// The parser does two passes and we need to pass the scope tree information
	// from the first pass to the second pass. That's done by tracking the calls
	// to pushScopeForParsePass() and popScope() during the first pass in
	// scopesInOrder.
	//
	// Then, when the second pass calls pushScopeForVisitPass() and popScope(),
	// we consume entries from scopesInOrder and make sure they are in the same
	// order. This way the second pass can efficiently use the same scope tree
	// as the first pass without having to attach the scope tree to the AST.
	//
	// We need to split this into two passes because the pass that declares the
	// symbols must be separate from the pass that binds identifiers to declared
	// symbols to handle declaring a hoisted "var" symbol in a nested scope and
	// binding a name to it in a parent or sibling scope.
	scopesInOrder []scopeOrder

	// These propagate the name from the parent context into an anonymous child
	// expression. For example:
	//
	//   let foo = function() {}
	//   assert.strictEqual(foo.name, 'foo')
	//
	nameToKeep string
	nameToKeepIsFor js_ast.E

	// These properties are for the visit pass, which runs after the parse pass.
	// The visit pass binds identifiers to declared symbols, does constant
	// folding, substitutes compile-time variable definitions, and lowers certain
	// syntactic constructs as appropriate.
	stmtExprValue js_ast.E
	callTarget js_ast.E
	dotOrIndexTarget js_ast.E
	templateTag js_ast.E
	deleteTarget js_ast.E
	loopBody js_ast.S
	suspiciousLogicalOperatorInsideArrow js_ast.E
	moduleScope *js_ast.Scope

	// This is internal-only data used for the implementation of Yarn PnP
	manifestForYarnPnP js_ast.Expr
	stringLocalsForYarnPnP map[ast.Ref]stringLocalForYarnPnP

	// This helps recognize the "await import()" pattern. When this is present,
	// warnings about non-string import paths will be omitted inside try blocks.
	awaitTarget js_ast.E

	// This helps recognize the "import().catch()" pattern. We also try to avoid
	// warning about this just like the "try { await import() }" pattern.
	thenCatchChain thenCatchChain

	// When bundling, hoisted top-level local variables declared with "var" in
	// nested scopes are moved up to be declared in the top-level scope instead.
	// The old "var" statements are turned into regular assignments instead. This
	// makes it easier to quickly scan the top-level statements for "var" locals
	// with the guarantee that all will be found.
	relocatedTopLevelVars []ast.LocRef

	// We need to lower private names such as "#foo" if they are used in a brand
	// check such as "#foo in x" even if the private name syntax would otherwise
	// be supported. This is because private names are a newly-added feature.
	//
	// However, this parser operates in only two passes for speed. The first pass
	// parses things and declares variables, and the second pass lowers things and
	// resolves references to declared variables. So the existence of a "#foo in x"
	// expression for a specific "#foo" cannot be used to decide to lower "#foo"
	// because it's too late by that point. There may be another expression such
	// as "x.#foo" before that point and that must be lowered as well even though
	// it has already been visited.
	//
	// Instead what we do is track just the names of fields used in private brand
	// checks during the first pass. This tracks the names themselves, not symbol
	// references. Then, during the second pass when we are about to enter into
	// a class, we conservatively decide to lower all private names in that class
	// which are used in a brand check anywhere in the file.
	lowerAllOfThesePrivateNames map[string]bool

	// Temporary variables used for lowering
	tempLetsToDeclare []ast.Ref
	tempRefsToDeclare []tempRef
	topLevelTempRefsToDeclare []tempRef

	lexer js_lexer.Lexer

	// Private field access in a decorator lowers all private fields in that class
	parseExperimentalDecoratorNesting int

	// Temporary variables used for lowering
	tempRefCount int
	topLevelTempRefCount int

	// We need to scan over the source contents to recover the line and column offsets
	jsxSourceLoc int
	jsxSourceLine int
	jsxSourceColumn int

	// References to special symbols such as "exports", "require", and "module"
	exportsRef ast.Ref
	requireRef ast.Ref
	moduleRef ast.Ref
	importMetaRef ast.Ref
	promiseRef ast.Ref
	regExpRef ast.Ref
	bigIntRef ast.Ref
	superCtorRef ast.Ref

	// Imports from "react/jsx-runtime" and "react", respectively.
	// (Or whatever was specified in the "importSource" option)
	jsxRuntimeImports map[string]ast.LocRef
	jsxLegacyImports map[string]ast.LocRef

	// For lowering private methods
	weakMapRef ast.Ref
	weakSetRef ast.Ref

	// Source ranges of notable syntax encountered during parsing
	esmImportStatementKeyword logger.Range
	esmImportMeta logger.Range
	esmExportKeyword logger.Range
	enclosingClassKeyword logger.Range
	topLevelAwaitKeyword logger.Range
	liveTopLevelAwaitKeyword logger.Range

	// Source locations of notable syntax encountered during parsing
	latestArrowArgLoc logger.Loc
	forbidSuffixAfterAsLoc logger.Loc
	firstJSXElementLoc logger.Loc

	fnOrArrowDataVisit fnOrArrowDataVisit

	singleStmtDepth int

	// ArrowFunction is a special case in the grammar. Although it appears to be
	// a PrimaryExpression, it's actually an AssignmentExpression. This means if
	// a AssignmentExpression ends up producing an ArrowFunction then nothing can
	// come after it other than the comma operator, since the comma operator is
	// the only thing above AssignmentExpression under the Expression rule:
	//
	//   AssignmentExpression:
	//     ArrowFunction
	//     ConditionalExpression
	//     LeftHandSideExpression = AssignmentExpression
	//     LeftHandSideExpression AssignmentOperator AssignmentExpression
	//
	//   Expression:
	//     AssignmentExpression
	//     Expression , AssignmentExpression
	//
	afterArrowBodyLoc logger.Loc

	// Setting this to true disables warnings about code that is very likely to
	// be a bug. This is used to ignore issues inside "node_modules" directories.
	// This has caught real issues in the past. However, it's not esbuild's job
	// to find bugs in other libraries, and these warnings are problematic for
	// people using these libraries with esbuild. The only fix is to either
	// disable all esbuild warnings and not get warnings about your own code, or
	// to try to get the warning fixed in the affected library. This is
	// especially annoying if the warning is a false positive as was the case in
	// https://github.com/firebase/firebase-js-sdk/issues/3814. So these warnings
	// are now disabled for code inside "node_modules" directories.
	suppressWarningsAboutWeirdCode bool

	// A file is considered to be an ECMAScript module if it has any of the
	// features of one (e.g. the "export" keyword), otherwise it's considered
	// a CommonJS module.
	//
	// However, we have a single exception: a file where the only ESM feature
	// is the "import" keyword is allowed to have CommonJS exports. This feature
	// is necessary to be able to synchronously import ESM code into CommonJS,
	// which we need to enable in a few important cases. Some examples are:
	// our runtime code, injected files (the "inject" feature is ESM-only),
	// and certain automatically-generated virtual modules from plugins.
	isFileConsideredToHaveESMExports bool // Use only for export-related stuff
	isFileConsideredESM bool // Use for all other stuff

	// Inside a TypeScript namespace, an "export declare" statement can be used
	// to cause a namespace to be emitted even though it has no other observable
	// effect. This flag is used to implement this feature.
	//
	// Specifically, namespaces should be generated for all of the following
	// namespaces below except for "f", which should not be generated:
	//
	//   namespace a { export declare const a }
	//   namespace b { export declare let [[b]] }
	//   namespace c { export declare function c() }
	//   namespace d { export declare class d {} }
	//   namespace e { export declare enum e {} }
	//   namespace f { export declare namespace f {} }
	//
	// The TypeScript compiler compiles this into the following code (notice "f"
	// is missing):
	//
	//   var a; (function (a_1) {})(a || (a = {}));
	//   var b; (function (b_1) {})(b || (b = {}));
	//   var c; (function (c_1) {})(c || (c = {}));
	//   var d; (function (d_1) {})(d || (d = {}));
	//   var e; (function (e_1) {})(e || (e = {}));
	//
	// Note that this should not be implemented by declaring symbols for "export
	// declare" statements because the TypeScript compiler doesn't generate any
	// code for these statements, so these statements are actually references to
	// global variables. There is one exception, which is that local variables
	// *should* be declared as symbols because they are replaced with. This seems
	// like very arbitrary behavior but it's what the TypeScript compiler does,
	// so we try to match it.
	//
	// Specifically, in the following code below "a" and "b" should be declared
	// and should be substituted with "ns.a" and "ns.b" but the other symbols
	// shouldn't. References to the other symbols actually refer to global
	// variables instead of to symbols that are exported from the namespace.
	// This is the case as of TypeScript 4.3. I assume this is a TypeScript bug:
	//
	//   namespace ns {
	//     export declare const a
	//     export declare let [[b]]
	//     export declare function c()
	//     export declare class d { }
	//     export declare enum e { }
	//     console.log(a, b, c, d, e)
	//   }
	//
	// The TypeScript compiler compiles this into the following code:
	//
	//   var ns;
	//   (function (ns) {
	//     console.log(ns.a, ns.b, c, d, e);
	//   })(ns || (ns = {}));
	//
	// Relevant issue: https://github.com/evanw/esbuild/issues/1158
	hasNonLocalExportDeclareInsideNamespace bool

	// When this flag is enabled, we attempt to fold all expressions that
	// TypeScript would consider to be "constant expressions". This flag is
	// enabled inside each enum body block since TypeScript requires numeric
	// constant folding in enum definitions.
	//
	// We also enable this flag in certain cases in JavaScript files such as when
	// parsing "const" declarations at the top of a non-ESM file, but we still
	// reuse TypeScript's notion of "constant expressions" for our own convenience.
	//
	// As of TypeScript 5.0, a "constant expression" is defined as follows:
	//
	//   An expression is considered a constant expression if it is
	//
	//   * a number or string literal,
	//   * a unary +, -, or ~ applied to a numeric constant expression,
	//   * a binary +, -, *, /, %, **, <<, >>, >>>, |, &, ^ applied to two numeric constant expressions,
	//   * a binary + applied to two constant expressions whereof at least one is a string,
	//   * a template expression where each substitution expression is a constant expression,
	//   * a parenthesized constant expression,
	//   * a dotted name (e.g. x.y.z) that references a const variable with a constant expression initializer and no type annotation,
	//   * a dotted name that references an enum member with an enum literal type, or
	//   * a dotted name indexed by a string literal (e.g. x.y["z"]) that references an enum member with an enum literal type.
	//
	// More detail: https://github.com/microsoft/TypeScript/pull/50528. Note that
	// we don't implement certain items in this list. For example, we don't do all
	// number-to-string conversions since ours might differ from how JavaScript
	// would do it, which would be a correctness issue.
	shouldFoldTypeScriptConstantExpressions bool

	// Miscellaneous parser state flags
	allowIn bool
	hasTopLevelReturn bool
	latestReturnHadSemicolon bool
	messageAboutThisIsUndefined bool
	isControlFlowDead bool
	shouldAddKeyComment bool

	// If this is true, then all top-level statements are wrapped in a try/catch
	willWrapModuleInTryCatchForUsing bool
}
// Metadata about an import whose specifier contained a glob pattern. These
// are collected on the parser (see "globPatternImports").
type globPatternImport struct {
	assertOrWith *ast.ImportAssertOrWith
	parts []helpers.GlobPart
	name string
	approximateRange logger.Range
	ref ast.Ref
	kind ast.ImportKind
	phase ast.ImportPhase
}

// The named items imported through an import namespace, keyed by name, plus
// the index of the import record they came from
type namespaceImportItems struct {
	entries map[string]ast.LocRef
	importRecordIndex uint32
}

// A local variable holding a string value, recorded for the Yarn PnP
// implementation (see "stringLocalsForYarnPnP" on the parser). The value is
// presumably stored as UTF-16 code units, matching the lexer's string
// representation — verify against call sites.
type stringLocalForYarnPnP struct {
	value []uint16
	loc logger.Loc
}

// Where an injected symbol came from: the injected source file and the
// location within it
type injectedSymbolSource struct {
	source logger.Source
	loc logger.Loc
}

// A dotted name from an injected define, stored as its individual parts plus
// the index of the injected define it belongs to
type injectedDotName struct {
	parts []string
	injectedDefineIndex uint32
}

// How an import namespace object was used: as a call target, a "new" target,
// or a JSX tag
type importNamespaceCallKind uint8

const (
	exprKindCall importNamespaceCallKind = iota
	exprKindNew
	exprKindJSXTag
)

// A specific use of an import namespace symbol (see "importNamespaceCCMap")
type importNamespaceCall struct {
	ref ast.Ref
	kind importNamespaceCallKind
}

// State for recognizing "import().then().catch()" chains (see the
// "thenCatchChain" field on the parser)
type thenCatchChain struct {
	nextTarget js_ast.E
	catchLoc logger.Loc
	hasMultipleArgs bool
	hasCatch bool
}
// This is used as part of an incremental build cache key. Some of these values
// can potentially change between builds if they are derived from nearby
// "package.json" or "tsconfig.json" files that were changed since the last
// build.
type Options struct {
	// These fields need the custom comparison logic in "Equal" below
	injectedFiles []config.InjectedFile
	jsx config.JSXOptions
	tsAlwaysStrict *config.TSAlwaysStrict
	mangleProps *regexp.Regexp
	reserveProps *regexp.Regexp
	dropLabels []string

	// This pointer will always be different for each build but the contents
	// shouldn't ever behave different semantically. We ignore this field for the
	// equality comparison.
	defines *config.ProcessedDefines

	// This is an embedded struct. Always access these directly instead of off
	// the name "optionsThatSupportStructuralEquality". This is only grouped like
	// this to make the equality comparison easier and safer (and hopefully faster).
	optionsThatSupportStructuralEquality
}

// The subset of the parser options that can be compared with "==" (see the
// direct struct comparison in "Equal")
type optionsThatSupportStructuralEquality struct {
	originalTargetEnv string
	moduleTypeData js_ast.ModuleTypeData
	unsupportedJSFeatures compat.JSFeature
	unsupportedJSFeatureOverrides compat.JSFeature
	unsupportedJSFeatureOverridesMask compat.JSFeature

	// Byte-sized values go here (gathered together here to keep this object compact)
	ts config.TSOptions
	mode config.Mode
	platform config.Platform
	outputFormat config.Format
	logPathStyle logger.PathStyle
	codePathStyle logger.PathStyle
	asciiOnly bool
	keepNames bool
	minifySyntax bool
	minifyIdentifiers bool
	minifyWhitespace bool
	omitRuntimeForTests bool
	omitJSXRuntimeForTests bool
	ignoreDCEAnnotations bool
	treeShaking bool
	dropDebugger bool
	mangleQuoted bool

	// This is an internal-only option used for the implementation of Yarn PnP
	decodeHydrateRuntimeStateYarnPnP bool
}
// OptionsForYarnPnP returns the parser options used internally when decoding
// the hydrated runtime state for Yarn PnP.
func OptionsForYarnPnP() Options {
	var opts Options
	opts.decodeHydrateRuntimeStateYarnPnP = true
	return opts
}
// OptionsFromConfig copies the parser-relevant subset of the given config
// options into a parser Options value. Fields that support direct struct
// comparison go into the embedded "optionsThatSupportStructuralEquality".
func OptionsFromConfig(options *config.Options) Options {
	return Options{
		injectedFiles: options.InjectedFiles,
		jsx: options.JSX,
		defines: options.Defines,
		tsAlwaysStrict: options.TSAlwaysStrict,
		mangleProps: options.MangleProps,
		reserveProps: options.ReserveProps,
		dropLabels: options.DropLabels,

		optionsThatSupportStructuralEquality: optionsThatSupportStructuralEquality{
			unsupportedJSFeatures: options.UnsupportedJSFeatures,
			unsupportedJSFeatureOverrides: options.UnsupportedJSFeatureOverrides,
			unsupportedJSFeatureOverridesMask: options.UnsupportedJSFeatureOverridesMask,
			originalTargetEnv: options.OriginalTargetEnv,
			ts: options.TS,
			mode: options.Mode,
			platform: options.Platform,
			outputFormat: options.OutputFormat,
			moduleTypeData: options.ModuleTypeData,
			asciiOnly: options.ASCIIOnly,
			keepNames: options.KeepNames,
			minifySyntax: options.MinifySyntax,
			minifyIdentifiers: options.MinifyIdentifiers,
			minifyWhitespace: options.MinifyWhitespace,
			omitRuntimeForTests: options.OmitRuntimeForTests,
			omitJSXRuntimeForTests: options.OmitJSXRuntimeForTests,
			ignoreDCEAnnotations: options.IgnoreDCEAnnotations,
			treeShaking: options.TreeShaking,
			dropDebugger: options.DropDebugger,
			mangleQuoted: options.MangleQuoted,
			logPathStyle: options.LogPathStyle,
			codePathStyle: options.CodePathStyle,
		},
	}
}
// Equal reports whether two parser option sets are equivalent for the purpose
// of reusing a cached parse. The "defines" pointer is deliberately excluded
// from the comparison (see the comment on the field), although a cheap sanity
// check panics if its shape has changed.
func (a *Options) Equal(b *Options) bool {
	// The embedded struct supports direct comparison
	if a.optionsThatSupportStructuralEquality != b.optionsThatSupportStructuralEquality {
		return false
	}

	// "tsAlwaysStrict" is a pointer, so compare what it points at
	switch {
	case a.tsAlwaysStrict == nil:
		if b.tsAlwaysStrict != nil {
			return false
		}
	case b.tsAlwaysStrict == nil:
		return false
	case *a.tsAlwaysStrict != *b.tsAlwaysStrict:
		return false
	}

	// Regular expressions are compared by their source text
	if !isSameRegexp(a.mangleProps, b.mangleProps) || !isSameRegexp(a.reserveProps, b.reserveProps) {
		return false
	}

	// "dropLabels" must match element-wise
	if !helpers.StringArraysEqual(a.dropLabels, b.dropLabels) {
		return false
	}

	// "injectedFiles" must match element-wise, including each export list
	if len(a.injectedFiles) != len(b.injectedFiles) {
		return false
	}
	for i := range a.injectedFiles {
		fileA := &a.injectedFiles[i]
		fileB := &b.injectedFiles[i]
		if fileA.Source != fileB.Source || fileA.DefineName != fileB.DefineName || len(fileA.Exports) != len(fileB.Exports) {
			return false
		}
		for j := range fileA.Exports {
			if fileA.Exports[j] != fileB.Exports[j] {
				return false
			}
		}
	}

	// The JSX configuration must match
	if a.jsx.Parse != b.jsx.Parse || !jsxExprsEqual(a.jsx.Factory, b.jsx.Factory) || !jsxExprsEqual(a.jsx.Fragment, b.jsx.Fragment) {
		return false
	}

	// Do a cheap assert that the defines object hasn't changed
	if (a.defines != nil || b.defines != nil) && (a.defines == nil || b.defines == nil ||
		len(a.defines.IdentifierDefines) != len(b.defines.IdentifierDefines) ||
		len(a.defines.DotDefines) != len(b.defines.DotDefines)) {
		panic("Internal error")
	}

	return true
}
func isSameRegexp(a *regexp.Regexp, b *regexp.Regexp) bool {
if a == nil {
return b == nil
} else {
return b != nil && a.String() == b.String()
}
}
// jsxExprsEqual reports whether two JSX define expressions are equivalent:
// their dotted-name parts must match element-wise and their constant values
// (if any) must look the same.
func jsxExprsEqual(a config.DefineExpr, b config.DefineExpr) bool {
	if !helpers.StringArraysEqual(a.Parts, b.Parts) {
		return false
	}
	if a.Constant == nil || b.Constant == nil {
		// Equal only when both constants are absent
		return a.Constant == nil && b.Constant == nil
	}
	return js_ast.ValuesLookTheSame(a.Constant, b.Constant)
}
// A temporary variable generated during lowering, along with an optional
// initial value
type tempRef struct {
	valueOrNil js_ast.Expr
	ref ast.Ref
}

const (
	// Sentinel location value representing the module scope
	locModuleScope = -1
)

// Pairs a scope with the source location it starts at (see "scopesInOrder"
// on the parser)
type scopeOrder struct {
	scope *js_ast.Scope
	loc logger.Loc
}

// How the "await" or "yield" keyword may be used in the current context
type awaitOrYield uint8

const (
	// The keyword is used as an identifier, not a special expression
	allowIdent awaitOrYield = iota

	// Declaring the identifier is forbidden, and the keyword is used as a special expression
	allowExpr

	// Declaring the identifier is forbidden, and using the identifier is also forbidden
	forbidAll
)
// This is function-specific information used during parsing. It is saved and
// restored on the call stack around code that parses nested functions and
// arrow expressions.
type fnOrArrowDataParse struct {
	arrowArgErrors *deferredArrowArgErrors
	decoratorScope *js_ast.Scope
	asyncRange logger.Range
	needsAsyncLoc logger.Loc

	// How "await" and "yield" may be used inside this function or arrow
	await awaitOrYield
	yield awaitOrYield

	allowSuperCall bool
	allowSuperProperty bool
	isTopLevel bool
	isConstructor bool
	isTypeScriptDeclare bool
	isThisDisallowed bool
	isReturnDisallowed bool

	// In TypeScript, forward declarations of functions have no bodies
	allowMissingBodyForTypeScript bool
}

// This is function-specific information used during visiting. It is saved and
// restored on the call stack around code that parses nested functions and
// arrow expressions.
type fnOrArrowDataVisit struct {
	// This is used to silence unresolvable imports due to "require" calls inside
	// a try/catch statement. The assumption is that the try/catch statement is
	// there to handle the case where the reference to "require" crashes.
	tryBodyCount int32
	tryCatchLoc logger.Loc

	// Flags describing the kind and context of the function being visited
	isArrow bool
	isAsync bool
	isGenerator bool
	isInsideLoop bool
	isInsideSwitch bool
	isDerivedClassCtor bool
	isOutsideFnOrArrow bool
	shouldLowerSuperPropertyAccess bool
}

// This is function-specific information used during visiting. It is saved and
// restored on the call stack around code that parses nested functions (but not
// nested arrow functions).
type fnOnlyDataVisit struct {
	// This is a reference to the magic "arguments" variable that exists inside
	// functions in JavaScript. It will be non-nil inside functions and nil
	// otherwise.
	argumentsRef *ast.Ref

	// Arrow functions don't capture the value of "this" and "arguments". Instead,
	// the values are inherited from the surrounding context. If arrow functions
	// are turned into regular functions due to lowering, we will need to generate
	// local variables to capture these values so they are preserved correctly.
	thisCaptureRef *ast.Ref
	argumentsCaptureRef *ast.Ref

	// If true, we're inside a static class context where "this" expressions
	// should be replaced with the class name.
	shouldReplaceThisWithInnerClassNameRef bool

	// This is true if "this" is equal to the class name. It's true if we're in a
	// static class field initializer, a static class method, or a static class
	// block.
	isInStaticClassContext bool

	// This is a reference to the enclosing class name if there is one. It's used
	// to implement "this" and "super" references. A name is automatically generated
	// if one is missing so this will always be present inside a class body.
	innerClassNameRef *ast.Ref

	// If we're inside an async arrow function and async functions are not
	// supported, then we will have to convert that arrow function to a generator
	// function. That means references to "arguments" inside the arrow function
	// will have to reference a captured variable instead of the real variable.
	isInsideAsyncArrowFn bool

	// If false, disallow "new.target" expressions. We disallow all "new.target"
	// expressions at the top-level of the file (i.e. not inside a function or
	// a class field). Technically since CommonJS files are wrapped in a function
	// you can use "new.target" in node as an alias for "undefined" but we don't
	// support that.
	isNewTargetAllowed bool

	// If false, the value for "this" is the top-level module scope "this" value.
	// That means it's "undefined" for ECMAScript modules and "exports" for
	// CommonJS modules. We track this information so that we can substitute the
	// correct value for these top-level "this" references at compile time instead
	// of passing the "this" expression through to the output and leaving the
	// interpretation up to the run-time behavior of the generated code.
	//
	// If true, the value for "this" is nested inside something (either a function
	// or a class declaration). That means the top-level module scope "this" value
	// has been shadowed and is now inaccessible.
	isThisNested bool

	// If true, "this" is used in current function scope.
	hasThisUsage bool

	// Do not warn about "this" being undefined for code that the TypeScript
	// compiler generates that looks like this:
	//
	//   var __rest = (this && this.__rest) || function (s, e) {
	//     ...
	//   };
	//
	silenceMessageAboutThisBeingUndefined bool
}
// Compile-time liveness classification for a switch case. The numeric values
// matter: negating a status swaps "alwaysDead" and "alwaysLive", which is
// used when computing the status of the "default" case in
// "analyzeSwitchCasesForLiveness" below.
type livenessStatus int8

const (
	alwaysDead livenessStatus = -1
	livenessUnknown livenessStatus = 0
	alwaysLive livenessStatus = 1
)

// The result of analyzing one switch case: its liveness plus whether control
// could fall through from its body into the next case
type switchCaseLiveness struct {
	status livenessStatus
	canFallThrough bool
}
// analyzeSwitchCasesForLiveness classifies each case of a switch statement as
// always-live, always-dead, or unknown at compile time. Case values are
// compared against the switch test using side-effect-free strict equality,
// the "default" case is derived from the other cases, and fall-through is
// then propagated conservatively.
func analyzeSwitchCasesForLiveness(s *js_ast.SSwitch) []switchCaseLiveness {
	cases := make([]switchCaseLiveness, 0, len(s.Cases))
	defaultIndex := -1

	// Determine the status of the individual cases independently
	maxStatus := alwaysDead
	for i, c := range s.Cases {
		if c.ValueOrNil.Data == nil {
			defaultIndex = i
		}

		// Check the value for strict equality
		var status livenessStatus
		if maxStatus == alwaysLive {
			status = alwaysDead // Everything after an always-live case is always dead
		} else if c.ValueOrNil.Data == nil {
			status = alwaysDead // This is the default case, and will be filled in later
		} else if isEqualToTest, ok := js_ast.CheckEqualityIfNoSideEffects(s.Test.Data, c.ValueOrNil.Data, js_ast.StrictEquality); ok {
			if isEqualToTest {
				status = alwaysLive // This branch will always be matched, and will be taken unless an earlier branch was taken
			} else {
				status = alwaysDead // This branch will never be matched, and will not be taken unless there was fall-through
			}
		} else {
			status = livenessUnknown // This branch depends on run-time values and may or may not be matched
		}
		if maxStatus < status {
			maxStatus = status
		}
		cases = append(cases, switchCaseLiveness{
			status: status,
			canFallThrough: caseBodyCouldHaveFallThrough(c.Body),
		})
	}

	// Set the liveness for the default case last based on the other cases
	if defaultIndex != -1 {
		// The negation here transposes "always live" with "always dead"
		// (see the comment on the livenessStatus type)
		status := -maxStatus
		if maxStatus < status {
			maxStatus = status
		}
		cases[defaultIndex].status = status
	}

	// Then propagate fall-through information in linear fall-through order
	for i, c := range cases {
		// Propagate state forward if this isn't dead. Note that the "can fall
		// through" flag does not imply "must fall through". The body may have
		// an embedded "break" inside an if statement, for example.
		if c.status != alwaysDead {
			for j := i + 1; j < len(cases) && cases[j-1].canFallThrough; j++ {
				cases[j].status = livenessUnknown
			}
		} else if maxStatus > alwaysDead && stmtsCareAboutScope(s.Cases[i].Body) {
			// Since adjacent cases share a scope, dead cases can potentially still
			// affect other cases that are live. Consider the following:
			//
			//   globalThis.foo = true
			//   switch (1) {
			//     case 0:
			//       let foo
			//     case 1:
			//       return foo
			//   }
			//
			// This code is supposed to throw a ReferenceError. But if we treat the
			// first case as dead code, then "let foo" will end up being removed and
			// the code will incorrectly return true instead.
			cases[i].status = livenessUnknown
		}
	}
	return cases
}
// Check for potential fall-through by checking for a jump at the end of the body
func caseBodyCouldHaveFallThrough(stmts []js_ast.Stmt) bool {
for len(stmts) > 0 {
switch s := stmts[len(stmts)-1].Data.(type) {
case *js_ast.SBlock:
stmts = s.Stmts // If this ends with a block, check the block's body next
continue
case *js_ast.SBreak, *js_ast.SContinue, *js_ast.SReturn, *js_ast.SThrow:
return false
}
break
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | true |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/cli_helpers/cli_helpers.go | internal/cli_helpers/cli_helpers.go | // This package contains internal CLI-related code that must be shared with
// other internal code outside of the CLI package.
package cli_helpers
import (
"fmt"
"github.com/evanw/esbuild/pkg/api"
)
// ErrorWithNote pairs a primary error message with an additional note that
// gives the user more context.
type ErrorWithNote struct {
	Text string
	Note string
}

// MakeErrorWithNote constructs an ErrorWithNote from a message and a note.
func MakeErrorWithNote(text string, note string) *ErrorWithNote {
	result := ErrorWithNote{Text: text, Note: note}
	return &result
}
// ParseLoader maps a loader name from the command line (e.g. the value in
// "--loader:.ts=ts") to the corresponding API constant. On failure it returns
// "api.LoaderNone" along with an error listing all valid loader names.
func ParseLoader(text string) (api.Loader, *ErrorWithNote) {
	switch text {
	case "base64":
		return api.LoaderBase64, nil
	case "binary":
		return api.LoaderBinary, nil
	case "copy":
		return api.LoaderCopy, nil
	case "css":
		return api.LoaderCSS, nil
	case "dataurl":
		return api.LoaderDataURL, nil
	case "default":
		return api.LoaderDefault, nil
	case "empty":
		return api.LoaderEmpty, nil
	case "file":
		return api.LoaderFile, nil
	case "global-css":
		return api.LoaderGlobalCSS, nil
	case "js":
		return api.LoaderJS, nil
	case "json":
		return api.LoaderJSON, nil
	case "jsx":
		return api.LoaderJSX, nil
	case "local-css":
		return api.LoaderLocalCSS, nil
	case "text":
		return api.LoaderText, nil
	case "ts":
		return api.LoaderTS, nil
	case "tsx":
		return api.LoaderTSX, nil
	default:
		// Keep this list in sync with the cases above. Note that "default"
		// is itself a valid loader name accepted by the switch, so it must
		// be included here too.
		return api.LoaderNone, MakeErrorWithNote(
			fmt.Sprintf("Invalid loader value: %q", text),
			"Valid values are \"base64\", \"binary\", \"copy\", \"css\", \"dataurl\", \"default\", \"empty\", \"file\", \"global-css\", \"js\", \"json\", \"jsx\", \"local-css\", \"text\", \"ts\", or \"tsx\".",
		)
	}
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/test/util.go | internal/test/util.go | package test
import (
"fmt"
"testing"
"github.com/evanw/esbuild/internal/fs"
"github.com/evanw/esbuild/internal/logger"
)
// AssertEqual fails the test immediately if "observed" is not equal to
// "expected" (compared with Go's "!=" operator, so both values must be
// comparable).
func AssertEqual(t *testing.T, observed interface{}, expected interface{}) {
	t.Helper()
	if observed != expected {
		// Use "%v" so non-string values format correctly: "%s" would print
		// "%!s(int=1)" for an int. This also matches the "%v" formatting
		// used by AssertEqualWithDiff.
		t.Fatalf("%v != %v", observed, expected)
	}
}
// AssertEqualWithDiff fails the test if "observed" is not equal to "expected",
// printing a line-by-line diff of the two values' "%v" representations. The
// diff is colorized except on Windows, where ANSI escapes may not work.
func AssertEqualWithDiff(t *testing.T, observed interface{}, expected interface{}) {
	t.Helper()
	if observed == expected {
		return
	}
	observedText := fmt.Sprintf("%v", observed)
	expectedText := fmt.Sprintf("%v", expected)
	useColor := !fs.CheckIfWindows()
	t.Fatal("\n" + diff(expectedText, observedText, useColor))
}
// SourceForTest wraps "contents" in a logger.Source suitable for tests, using
// source index 0 and "<stdin>" for all path-related fields.
func SourceForTest(contents string) logger.Source {
	var source logger.Source
	source.Index = 0
	source.KeyPath = logger.Path{Text: "<stdin>"}
	source.PrettyPaths = logger.PrettyPaths{Abs: "<stdin>", Rel: "<stdin>"}
	source.Contents = contents
	source.IdentifierName = "stdin"
	return source
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/test/diff.go | internal/test/diff.go | package test
import (
"fmt"
"strings"
"github.com/evanw/esbuild/internal/logger"
)
// diff returns a line-oriented diff of "old" vs "new" as a single
// newline-joined string, optionally colorized with ANSI escapes.
func diff(old string, new string, color bool) string {
	oldLines := strings.Split(old, "\n")
	newLines := strings.Split(new, "\n")
	return strings.Join(diffRec(nil, oldLines, newLines, color), "\n")
}
// This is a simple recursive line-by-line diff implementation
func diffRec(result []string, old []string, new []string, color bool) []string {
o, n, common := lcSubstr(old, new)
if common == 0 {
// Everything changed
for _, line := range old {
if color {
result = append(result, fmt.Sprintf("%s-%s%s", logger.TerminalColors.Red, line, logger.TerminalColors.Reset))
} else {
result = append(result, "-"+line)
}
}
for _, line := range new {
if color {
result = append(result, fmt.Sprintf("%s+%s%s", logger.TerminalColors.Green, line, logger.TerminalColors.Reset))
} else {
result = append(result, "+"+line)
}
}
} else {
// Something in the middle stayed the same
result = diffRec(result, old[:o], new[:n], color)
for _, line := range old[o : o+common] {
if color {
result = append(result, fmt.Sprintf("%s %s%s", logger.TerminalColors.Dim, line, logger.TerminalColors.Reset))
} else {
result = append(result, " "+line)
}
}
result = diffRec(result, old[o+common:], new[n+common:], color)
}
return result
}
// lcSubstr finds the longest contiguous run of lines shared by S and T using
// the classic dynamic-programming algorithm (see
// https://en.wikipedia.org/wiki/Longest_common_substring_problem), keeping
// only two rows of the DP table at a time. It returns the start index of the
// run in S, the start index in T, and the run's length (all zero when there
// is no common run).
func lcSubstr(S []string, T []string) (int, int, int) {
	rows := len(S)
	cols := len(T)
	prev := make([]int, cols)
	next := make([]int, cols)
	best := 0 // Length of the best run found so far
	endS := 0 // One past the end of the best run in S
	endT := 0 // One past the end of the best run in T
	for i := 0; i < rows; i++ {
		for j := 0; j < cols; j++ {
			if S[i] != T[j] {
				next[j] = 0
				continue
			}
			run := 1
			if j > 0 {
				run = prev[j-1] + 1
			}
			next[j] = run
			if run > best {
				best = run
				endS = i + 1
				endT = j + 1
			}
		}
		prev, next = next, prev
	}
	return endS - best, endT - best, best
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/runtime/runtime_test.go | internal/runtime/runtime_test.go | package runtime_test
import (
"testing"
"github.com/evanw/esbuild/internal/compat"
"github.com/evanw/esbuild/internal/config"
"github.com/evanw/esbuild/internal/js_parser"
"github.com/evanw/esbuild/internal/logger"
"github.com/evanw/esbuild/internal/runtime"
)
// TestUnsupportedFeatures parses the runtime source once for every known JS
// feature marked as individually unsupported. The runtime must parse cleanly
// in every configuration because esbuild specializes its runtime helpers for
// the target environment.
func TestUnsupportedFeatures(t *testing.T) {
	for key, feature := range compat.StringToJSFeature {
		feature := feature
		t.Run(key, func(t *testing.T) {
			log := logger.NewDeferLog(logger.DeferLogAll, nil)
			options := config.Options{
				UnsupportedJSFeatures: feature,
				TreeShaking:           true,
			}
			js_parser.Parse(log, runtime.Source(feature), js_parser.OptionsFromConfig(&options))
			if !log.HasErrors() {
				return
			}
			text := "Internal error: failed to parse runtime:\n"
			for _, msg := range log.Done() {
				text += msg.String(logger.OutputOptions{IncludeSource: true}, logger.TerminalInfo{})
			}
			// Drop the trailing newline before reporting
			t.Fatal(text[:len(text)-1])
		})
	}
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/runtime/runtime.go | internal/runtime/runtime.go | package runtime
// This is esbuild's runtime code. It contains helper functions that are
// automatically injected into output files to implement certain features. For
// example, the "**" operator is replaced with a call to "__pow" when targeting
// ES2015. Tree shaking automatically removes unused code from the runtime.
import (
"github.com/evanw/esbuild/internal/compat"
"github.com/evanw/esbuild/internal/logger"
)
// SourceIndex is the source index reserved for the runtime. The index is
// always zero, but this constant should always be used instead of a literal
// zero to improve readability and ensure all code that references this index
// can be discovered easily.
const SourceIndex = uint32(0)
// Source returns the source of esbuild's JavaScript runtime shim, specialized
// for the given set of unsupported JS features so that the helper functions
// themselves only use syntax the target environment can parse (for example,
// avoiding "for...of", "let", and object accessors when they are unsupported).
// Tree shaking later removes any helpers the output doesn't actually use.
func Source(unsupportedJSFeatures compat.JSFeature) logger.Source {
	// Note: These helper functions used to be named similar things to the helper
	// functions from the TypeScript compiler. However, people sometimes use these
	// two projects in combination and TypeScript's implementation of these helpers
	// causes name collisions. Some examples:
	//
	// * The "tslib" library will overwrite esbuild's helper functions if the bundled
	//   code is run in the global scope: https://github.com/evanw/esbuild/issues/1102
	//
	// * Running the TypeScript compiler on esbuild's output to convert ES6 to ES5
	//   will also overwrite esbuild's helper functions because TypeScript doesn't
	//   change the names of its helper functions to avoid name collisions:
	//   https://github.com/microsoft/TypeScript/issues/43296
	//
	// These can both be considered bugs in TypeScript. However, they are unlikely
	// to be fixed and it's simplest to just avoid using the same names to avoid
	// these bugs. Forbidden names (from "tslib"):
	//
	//   __assign
	//   __asyncDelegator
	//   __asyncGenerator
	//   __asyncValues
	//   __await
	//   __awaiter
	//   __classPrivateFieldGet
	//   __classPrivateFieldSet
	//   __createBinding
	//   __decorate
	//   __exportStar
	//   __extends
	//   __generator
	//   __importDefault
	//   __importStar
	//   __makeTemplateObject
	//   __metadata
	//   __param
	//   __read
	//   __rest
	//   __spread
	//   __spreadArray
	//   __spreadArrays
	//   __values
	//
	// Note: The "__objRest" function has a for-of loop which requires ES6, but
	// transforming destructuring to ES5 isn't even supported so it's ok.
	text := `
	var __create = Object.create
	var __freeze = Object.freeze
	var __defProp = Object.defineProperty
	var __defProps = Object.defineProperties
	var __getOwnPropDesc = Object.getOwnPropertyDescriptor // Note: can return "undefined" due to a Safari bug
	var __getOwnPropDescs = Object.getOwnPropertyDescriptors
	var __getOwnPropNames = Object.getOwnPropertyNames
	var __getOwnPropSymbols = Object.getOwnPropertySymbols
	var __getProtoOf = Object.getPrototypeOf
	var __hasOwnProp = Object.prototype.hasOwnProperty
	var __propIsEnum = Object.prototype.propertyIsEnumerable
	var __reflectGet = Reflect.get
	var __reflectSet = Reflect.set
	var __knownSymbol = (name, symbol) => (symbol = Symbol[name]) ? symbol : Symbol.for('Symbol.' + name)
	var __typeError = msg => { throw TypeError(msg) }
	export var __pow = Math.pow
	var __defNormalProp = (obj, key, value) => key in obj
		? __defProp(obj, key, {enumerable: true, configurable: true, writable: true, value})
		: obj[key] = value
	export var __spreadValues = (a, b) => {
		for (var prop in b ||= {})
			if (__hasOwnProp.call(b, prop))
				__defNormalProp(a, prop, b[prop])
		if (__getOwnPropSymbols)
`

	// Avoid "of" when not using ES6
	if !unsupportedJSFeatures.Has(compat.ForOf) {
		text += `
			for (var prop of __getOwnPropSymbols(b)) {
`
	} else {
		text += `
			for (var props = __getOwnPropSymbols(b), i = 0, n = props.length, prop; i < n; i++) {
				prop = props[i]
`
	}

	text += `
				if (__propIsEnum.call(b, prop))
					__defNormalProp(a, prop, b[prop])
			}
		return a
	}
	export var __spreadProps = (a, b) => __defProps(a, __getOwnPropDescs(b))

	// Update the "name" property on the function or class for "--keep-names"
	export var __name = (target, value) => __defProp(target, 'name', { value, configurable: true })

	// This fallback "require" function exists so that "typeof require" can
	// naturally be "function" even in non-CommonJS environments since esbuild
	// emulates a CommonJS environment (issue #1202). However, people want this
	// shim to fall back to "globalThis.require" even if it's defined later
	// (including property accesses such as "require.resolve") so we need to
	// use a proxy (issue #1614).
	export var __require =
		/* @__PURE__ */ (x =>
			typeof require !== 'undefined' ? require :
			typeof Proxy !== 'undefined' ? new Proxy(x, {
				get: (a, b) => (typeof require !== 'undefined' ? require : a)[b]
			}) : x
		)(function(x) {
			if (typeof require !== 'undefined') return require.apply(this, arguments)
			throw Error('Dynamic require of "' + x + '" is not supported')
		})

	// This is used for glob imports
	export var __glob = map => path => {
		var fn = map[path]
		if (fn) return fn()
		throw new Error('Module not found in bundle: ' + path)
	}

	// For object rest patterns
	export var __restKey = key => typeof key === 'symbol' ? key : key + ''
	export var __objRest = (source, exclude) => {
		var target = {}
		for (var prop in source)
			if (__hasOwnProp.call(source, prop) && exclude.indexOf(prop) < 0)
				target[prop] = source[prop]
		if (source != null && __getOwnPropSymbols)
`

	// Avoid "of" when not using ES6
	if !unsupportedJSFeatures.Has(compat.ForOf) {
		text += `
			for (var prop of __getOwnPropSymbols(source)) {
`
	} else {
		text += `
			for (var props = __getOwnPropSymbols(source), i = 0, n = props.length, prop; i < n; i++) {
				prop = props[i]
`
	}

	text += `
				if (exclude.indexOf(prop) < 0 && __propIsEnum.call(source, prop))
					target[prop] = source[prop]
			}
		return target
	}

	// This is for lazily-initialized ESM code. This has two implementations, a
	// compact one for minified code and a verbose one that generates friendly
	// names in V8's profiler and in stack traces.
	export var __esm = (fn, res) => function __init() {
		return fn && (res = (0, fn[__getOwnPropNames(fn)[0]])(fn = 0)), res
	}
	export var __esmMin = (fn, res) => () => (fn && (res = fn(fn = 0)), res)

	// Wraps a CommonJS closure and returns a require() function. This has two
	// implementations, a compact one for minified code and a verbose one that
	// generates friendly names in V8's profiler and in stack traces.
	export var __commonJS = (cb, mod) => function __require() {
		return mod || (0, cb[__getOwnPropNames(cb)[0]])((mod = {exports: {}}).exports, mod), mod.exports
	}
	export var __commonJSMin = (cb, mod) => () => (mod || cb((mod = {exports: {}}).exports, mod), mod.exports)

	// Used to implement ESM exports both for "require()" and "import * as"
	export var __export = (target, all) => {
		for (var name in all)
			__defProp(target, name, { get: all[name], enumerable: true })
	}
	var __copyProps = (to, from, except, desc) => {
		if (from && typeof from === 'object' || typeof from === 'function')
`

	// Avoid "let" when not using ES6
	if !unsupportedJSFeatures.Has(compat.ForOf) && !unsupportedJSFeatures.Has(compat.ConstAndLet) {
		text += `
			for (let key of __getOwnPropNames(from))
				if (!__hasOwnProp.call(to, key) && key !== except)
					__defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable })
`
	} else {
		text += `
			for (var keys = __getOwnPropNames(from), i = 0, n = keys.length, key; i < n; i++) {
				key = keys[i]
				if (!__hasOwnProp.call(to, key) && key !== except)
					__defProp(to, key, { get: (k => from[k]).bind(null, key), enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable })
			}
`
	}

	text += `
		return to
	}

	// This is used to implement "export * from" statements. It copies properties
	// from the imported module to the current module's ESM export object. If the
	// current module is an entry point and the target format is CommonJS, we
	// also copy the properties to "module.exports" in addition to our module's
	// internal ESM export object.
	export var __reExport = (target, mod, secondTarget) => (
		__copyProps(target, mod, 'default'),
		secondTarget && __copyProps(secondTarget, mod, 'default')
	)

	// Converts the module from CommonJS to ESM. When in node mode (i.e. in an
	// ".mjs" file, package.json has "type: module", or the "__esModule" export
	// in the CommonJS file is falsy or missing), the "default" property is
	// overridden to point to the original CommonJS exports object instead.
	export var __toESM = (mod, isNodeMode, target) => (
		target = mod != null ? __create(__getProtoOf(mod)) : {},
		__copyProps(
			// If the importer is in node compatibility mode or this is not an ESM
			// file that has been converted to a CommonJS file using a Babel-
			// compatible transform (i.e. "__esModule" has not been set), then set
			// "default" to the CommonJS "module.exports" for node compatibility.
			isNodeMode || !mod || !mod.__esModule
				? __defProp(target, 'default', { value: mod, enumerable: true })
				: target,
			mod)
	)

	// Converts the module from ESM to CommonJS. This clones the input module
	// object with the addition of a non-enumerable "__esModule" property set
	// to "true", which overwrites any existing export named "__esModule".
	export var __toCommonJS = mod => __copyProps(__defProp({}, '__esModule', { value: true }), mod)

	// For TypeScript experimental decorators
	// - kind === undefined: class
	// - kind === 1: method, parameter
	// - kind === 2: field
	export var __decorateClass = (decorators, target, key, kind) => {
		var result = kind > 1 ? void 0 : kind ? __getOwnPropDesc(target, key) : target
		for (var i = decorators.length - 1, decorator; i >= 0; i--)
			if (decorator = decorators[i])
				result = (kind ? decorator(target, key, result) : decorator(result)) || result
		if (kind && result) __defProp(target, key, result)
		return result
	}
	export var __decorateParam = (index, decorator) => (target, key) => decorator(target, key, index)

	// For JavaScript decorators
	export var __decoratorStart = base => [, , , __create(base?.[__knownSymbol('metadata')] ?? null)]
	var __decoratorStrings = ['class', 'method', 'getter', 'setter', 'accessor', 'field', 'value', 'get', 'set']
	var __expectFn = fn => fn !== void 0 && typeof fn !== 'function' ? __typeError('Function expected') : fn
	var __decoratorContext = (kind, name, done, metadata, fns) => ({ kind: __decoratorStrings[kind], name, metadata, addInitializer: fn =>
		done._ ? __typeError('Already initialized') : fns.push(__expectFn(fn || null)) })
	export var __decoratorMetadata = (array, target) => __defNormalProp(target, __knownSymbol('metadata'), array[3])
	export var __runInitializers = (array, flags, self, value) => {
		for (var i = 0, fns = array[flags >> 1], n = fns && fns.length; i < n; i++) flags & 1 ? fns[i].call(self) : value = fns[i].call(self, value)
		return value
	}
	export var __decorateElement = (array, flags, name, decorators, target, extra) => {
		var fn, it, done, ctx, access, k = flags & 7, s = !!(flags & 8), p = !!(flags & 16)
		var j = k > 3 ? array.length + 1 : k ? s ? 1 : 2 : 0, key = __decoratorStrings[k + 5]
		var initializers = k > 3 && (array[j - 1] = []), extraInitializers = array[j] || (array[j] = [])
		var desc = k && (
			!p && !s && (target = target.prototype),
			k < 5 && (k > 3 || !p) &&
`

	// Avoid object extensions when not using ES6
	if !unsupportedJSFeatures.Has(compat.ObjectExtensions) && !unsupportedJSFeatures.Has(compat.ObjectAccessors) {
		text += `__getOwnPropDesc(k < 4 ? target : { get [name]() { return __privateGet(this, extra) }, set [name](x) { return __privateSet(this, extra, x) } }, name)`
	} else {
		text += `(k < 4 ? __getOwnPropDesc(target, name) : { get: () => __privateGet(this, extra), set: x => __privateSet(this, extra, x) })`
	}

	text += `
		)
		k ? p && k < 4 && __name(extra, (k > 2 ? 'set ' : k > 1 ? 'get ' : '') + name) : __name(target, name)
		for (var i = decorators.length - 1; i >= 0; i--) {
			ctx = __decoratorContext(k, name, done = {}, array[3], extraInitializers)
			if (k) {
				ctx.static = s, ctx.private = p, access = ctx.access = { has: p ? x => __privateIn(target, x) : x => name in x }
				if (k ^ 3) access.get = p ? x => (k ^ 1 ? __privateGet : __privateMethod)(x, target, k ^ 4 ? extra : desc.get) : x => x[name]
				if (k > 2) access.set = p ? (x, y) => __privateSet(x, target, y, k ^ 4 ? extra : desc.set) : (x, y) => x[name] = y
			}
			it = (0, decorators[i])(k ? k < 4 ? p ? extra : desc[key] : k > 4 ? void 0 : { get: desc.get, set: desc.set } : target, ctx), done._ = 1
			if (k ^ 4 || it === void 0) __expectFn(it) && (k > 4 ? initializers.unshift(it) : k ? p ? extra = it : desc[key] = it : target = it)
			else if (typeof it !== 'object' || it === null) __typeError('Object expected')
			else __expectFn(fn = it.get) && (desc.get = fn), __expectFn(fn = it.set) && (desc.set = fn), __expectFn(fn = it.init) && initializers.unshift(fn)
		}
		return k || __decoratorMetadata(array, target),
			desc && __defProp(target, name, desc),
			p ? k ^ 4 ? extra : desc : target
	}

	// For class members
	export var __publicField = (obj, key, value) => (
		__defNormalProp(obj, typeof key !== 'symbol' ? key + '' : key, value)
	)
	var __accessCheck = (obj, member, msg) => (
		member.has(obj) || __typeError('Cannot ' + msg)
	)
	export var __privateIn = (member, obj) => (
		Object(obj) !== obj ? __typeError('Cannot use the "in" operator on this value') :
		member.has(obj)
	)
	export var __privateGet = (obj, member, getter) => (
		__accessCheck(obj, member, 'read from private field'),
		getter ? getter.call(obj) : member.get(obj)
	)
	export var __privateAdd = (obj, member, value) => (
		member.has(obj) ? __typeError('Cannot add the same private member more than once') :
		member instanceof WeakSet ? member.add(obj) : member.set(obj, value)
	)
	export var __privateSet = (obj, member, value, setter) => (
		__accessCheck(obj, member, 'write to private field'),
		setter ? setter.call(obj, value) : member.set(obj, value),
		value
	)
	export var __privateMethod = (obj, member, method) => (
		__accessCheck(obj, member, 'access private method'),
		method
	)
	export var __earlyAccess = (name) => {
		throw ReferenceError('Cannot access "' + name + '" before initialization')
	}
`

	// Avoid object accessors when not supported by the target
	if !unsupportedJSFeatures.Has(compat.ObjectAccessors) {
		text += `
	export var __privateWrapper = (obj, member, setter, getter) => ({
		set _(value) { __privateSet(obj, member, value, setter) },
		get _() { return __privateGet(obj, member, getter) },
	})
`
	} else {
		text += `
	export var __privateWrapper = (obj, member, setter, getter) => __defProp({}, '_', {
		set: value => __privateSet(obj, member, value, setter),
		get: () => __privateGet(obj, member, getter),
	})
`
	}

	text += `
	// For "super" property accesses
	export var __superGet = (cls, obj, key) => __reflectGet(__getProtoOf(cls), key, obj)
	export var __superSet = (cls, obj, key, val) => (__reflectSet(__getProtoOf(cls), key, val, obj), val)
`

	// Avoid object accessors when not supported by the target
	if !unsupportedJSFeatures.Has(compat.ObjectAccessors) {
		text += `
	export var __superWrapper = (cls, obj, key) => ({
		get _() { return __superGet(cls, obj, key) },
		set _(val) { __superSet(cls, obj, key, val) },
	})
`
	} else {
		text += `
	export var __superWrapper = (cls, obj, key) => __defProp({}, '_', {
		get: () => __superGet(cls, obj, key),
		set: val => __superSet(cls, obj, key, val),
	})
`
	}

	text += `
	// For lowering tagged template literals
	export var __template = (cooked, raw) => __freeze(__defProp(cooked, 'raw', { value: __freeze(raw || cooked.slice()) }))

	// This helps for lowering async functions
	export var __async = (__this, __arguments, generator) => {
		return new Promise((resolve, reject) => {
			var fulfilled = value => {
				try {
					step(generator.next(value))
				} catch (e) {
					reject(e)
				}
			}
			var rejected = value => {
				try {
					step(generator.throw(value))
				} catch (e) {
					reject(e)
				}
			}
			var step = x => x.done ? resolve(x.value) : Promise.resolve(x.value).then(fulfilled, rejected)
			step((generator = generator.apply(__this, __arguments)).next())
		})
	}

	// These help for lowering async generator functions
	export var __await = function (promise, isYieldStar) {
		this[0] = promise
		this[1] = isYieldStar
	}
	export var __asyncGenerator = (__this, __arguments, generator) => {
		var resume = (k, v, yes, no) => {
			try {
				var x = generator[k](v), isAwait = (v = x.value) instanceof __await, done = x.done
				Promise.resolve(isAwait ? v[0] : v)
					.then(y => isAwait
						? resume(k === 'return' ? k : 'next', v[1] ? { done: y.done, value: y.value } : y, yes, no)
						: yes({ value: y, done }))
					.catch(e => resume('throw', e, yes, no))
			} catch (e) {
				no(e)
			}
		}, method = k => it[k] = x => new Promise((yes, no) => resume(k, x, yes, no)), it = {}
		return generator = generator.apply(__this, __arguments),
			it[__knownSymbol('asyncIterator')] = () => it,
			method('next'),
			method('throw'),
			method('return'),
			it
	}
	export var __yieldStar = value => {
		var obj = value[__knownSymbol('asyncIterator')], isAwait = false, method, it = {}
		if (obj == null) {
			obj = value[__knownSymbol('iterator')]()
			method = k => it[k] = x => obj[k](x)
		} else {
			obj = obj.call(value)
			method = k => it[k] = v => {
				if (isAwait) {
					isAwait = false
					if (k === 'throw') throw v
					return v
				}
				isAwait = true
				return {
					done: false,
					value: new __await(new Promise(resolve => {
						var x = obj[k](v)
						if (!(x instanceof Object)) __typeError('Object expected')
						resolve(x)
					}), 1),
				}
			}
		}
		return it[__knownSymbol('iterator')] = () => it,
			method('next'),
			'throw' in obj ? method('throw') : it.throw = x => { throw x },
			'return' in obj && method('return'),
			it
	}

	// This helps for lowering for-await loops
	export var __forAwait = (obj, it, method) =>
		(it = obj[__knownSymbol('asyncIterator')])
			? it.call(obj)
			: (obj = obj[__knownSymbol('iterator')](),
				it = {},
				method = (key, fn) =>
					(fn = obj[key]) && (it[key] = arg =>
						new Promise((yes, no, done) => (
							arg = fn.call(obj, arg),
							done = arg.done,
							Promise.resolve(arg.value)
								.then(value => yes({ value, done }), no)
						))),
				method('next'),
				method('return'),
				it)

	// This is for the "binary" loader (custom code is ~2x faster than "atob")
	export var __toBinaryNode = Uint8Array.fromBase64 || (base64 => new Uint8Array(Buffer.from(base64, 'base64')))
	export var __toBinary = Uint8Array.fromBase64 || /* @__PURE__ */ (() => {
		var table = new Uint8Array(128)
		for (var i = 0; i < 64; i++) table[i < 26 ? i + 65 : i < 52 ? i + 71 : i < 62 ? i - 4 : i * 4 - 205] = i
		return base64 => {
			var n = base64.length, bytes = new Uint8Array((n - (base64[n - 1] == '=') - (base64[n - 2] == '=')) * 3 / 4 | 0)
			for (var i = 0, j = 0; i < n;) {
				var c0 = table[base64.charCodeAt(i++)], c1 = table[base64.charCodeAt(i++)]
				var c2 = table[base64.charCodeAt(i++)], c3 = table[base64.charCodeAt(i++)]
				bytes[j++] = (c0 << 2) | (c1 >> 4)
				bytes[j++] = (c1 << 4) | (c2 >> 2)
				bytes[j++] = (c2 << 6) | c3
			}
			return bytes
		}
	})()

	// These are for the "using" statement in TypeScript 5.2+
	export var __using = (stack, value, async) => {
		if (value != null) {
			if (typeof value !== 'object' && typeof value !== 'function') __typeError('Object expected')
			var dispose, inner
			if (async) dispose = value[__knownSymbol('asyncDispose')]
			if (dispose === void 0) {
				dispose = value[__knownSymbol('dispose')]
				if (async) inner = dispose
			}
			if (typeof dispose !== 'function') __typeError('Object not disposable')
			if (inner) dispose = function() { try { inner.call(this) } catch (e) { return Promise.reject(e) } }
			stack.push([async, dispose, value])
		} else if (async) {
			stack.push([async])
		}
		return value
	}
	export var __callDispose = (stack, error, hasError) => {
		var E = typeof SuppressedError === 'function' ? SuppressedError :
			function (e, s, m, _) { return _ = Error(m), _.name = 'SuppressedError', _.error = e, _.suppressed = s, _ }
		var fail = e => error = hasError ? new E(e, error, 'An error was suppressed during disposal') : (hasError = true, e)
		var next = (it) => {
			while (it = stack.pop()) {
				try {
					var result = it[1] && it[1].call(it[2])
					if (it[0]) return Promise.resolve(result).then(next, (e) => (fail(e), next()))
				} catch (e) {
					fail(e)
				}
			}
			if (hasError) throw error
		}
		return next()
	}
`

	return logger.Source{
		Index:          SourceIndex,
		KeyPath:        logger.Path{Text: "<runtime>"},
		PrettyPaths:    logger.PrettyPaths{Abs: "<runtime>", Rel: "<runtime>"},
		IdentifierName: "runtime",
		Contents:       text,
	}
}
// The TypeScript decorator transform behaves similar to the official
// TypeScript compiler.
//
// One difference is that the "__decorateClass" function doesn't contain a reference
// to the non-existent "Reflect.decorate" function. This function was never
// standardized and checking for it is wasted code (as well as a potentially
// dangerous cause of unintentional behavior changes in the future).
//
// Another difference is that the "__decorateClass" function doesn't take in an
// optional property descriptor like it does in the official TypeScript
// compiler's support code. This appears to be a dead code path in the official
// support code that is only there for legacy reasons.
//
// Here are some examples of how esbuild's decorator transform works:
//
// ============================= Class decorator ==============================
//
// // TypeScript // JavaScript
// @dec let C = class {
// class C { };
// } C = __decorateClass([
// dec
// ], C);
//
// ============================ Method decorator ==============================
//
// // TypeScript // JavaScript
// class C { class C {
// @dec foo() {}
// foo() {} }
// } __decorateClass([
// dec
// ], C.prototype, 'foo', 1);
//
// =========================== Parameter decorator ============================
//
// // TypeScript // JavaScript
// class C { class C {
// foo(@dec bar) {} foo(bar) {}
// } }
// __decorateClass([
// __decorateParam(0, dec)
// ], C.prototype, 'foo', 1);
//
// ============================= Field decorator ==============================
//
// // TypeScript // JavaScript
// class C { class C {
// @dec constructor() {
// foo = 123 this.foo = 123
// } }
// }
// __decorateClass([
// dec
// ], C.prototype, 'foo', 2);
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/linker/debug.go | internal/linker/debug.go | package linker
import (
"fmt"
"strings"
"github.com/evanw/esbuild/internal/ast"
"github.com/evanw/esbuild/internal/graph"
"github.com/evanw/esbuild/internal/helpers"
"github.com/evanw/esbuild/internal/js_ast"
)
// debugVerboseMetafile, when true, makes the linker embed extra per-part debug
// data in the metafile. Set this to true and then load the resulting metafile
// in "graph-debugger.html" to debug graph information.
//
// This is deliberately not exposed in the final binary. It is *very* internal
// and only exists to help debug esbuild itself. Make sure this is always set
// back to false before committing.
const debugVerboseMetafile = false
// generateExtraDataForFileJS returns extra JSON fields (a string starting
// with a leading ",") describing each part of the given JS file: liveness,
// import records, declared symbols, symbol uses, dependencies, and the
// approximate source code for the part. This is only emitted into the
// metafile when "debugVerboseMetafile" is enabled; otherwise it returns "".
func (c *linkerContext) generateExtraDataForFileJS(sourceIndex uint32) string {
	if !debugVerboseMetafile {
		return ""
	}

	file := &c.graph.Files[sourceIndex]
	repr := file.InputFile.Repr.(*graph.JSRepr)
	sb := strings.Builder{}
	isFirstPartWithStmts := true

	// quoteSym renders a symbol reference as a JSON string like
	// "source:inner [originalName]" for display in the debugger.
	quoteSym := func(ref ast.Ref) string {
		name := fmt.Sprintf("%d:%d [%s]", ref.SourceIndex, ref.InnerIndex, c.graph.Symbols.Get(ref).OriginalName)
		return string(helpers.QuoteForJSON(name, c.options.ASCIIOnly))
	}

	sb.WriteString(`,"parts":[`)

	for partIndex, part := range repr.AST.Parts {
		if partIndex > 0 {
			sb.WriteByte(',')
		}
		var isFirst bool
		code := ""

		sb.WriteString(fmt.Sprintf(`{"isLive":%v`, part.IsLive))
		sb.WriteString(fmt.Sprintf(`,"canBeRemovedIfUnused":%v`, part.CanBeRemovedIfUnused))

		if partIndex == int(js_ast.NSExportPartIndex) {
			sb.WriteString(`,"nsExportPartIndex":true`)
		} else if ast.MakeIndex32(uint32(partIndex)) == repr.Meta.WrapperPartIndex {
			sb.WriteString(`,"wrapperPartIndex":true`)
		} else if len(part.Stmts) > 0 {
			// Approximate this part's source text as the span from its first
			// statement up to the next part's first statement (the first part
			// starts at offset 0, the last part runs to the end of the file)
			contents := file.InputFile.Source.Contents
			start := int(part.Stmts[0].Loc.Start)
			if isFirstPartWithStmts {
				start = 0
				isFirstPartWithStmts = false
			}
			end := len(contents)
			if partIndex+1 < len(repr.AST.Parts) {
				if nextStmts := repr.AST.Parts[partIndex+1].Stmts; len(nextStmts) > 0 {
					if nextStart := int(nextStmts[0].Loc.Start); nextStart >= start {
						end = int(nextStart)
					}
				}
			}
			// Don't split an "export" keyword off from its declaration
			start = moveBeforeExport(contents, start)
			end = moveBeforeExport(contents, end)
			code = contents[start:end]
		}

		// importRecords: only records that resolved to another file in the graph
		sb.WriteString(`,"importRecords":[`)
		isFirst = true
		for _, importRecordIndex := range part.ImportRecordIndices {
			record := repr.AST.ImportRecords[importRecordIndex]
			if !record.SourceIndex.IsValid() {
				continue
			}
			if isFirst {
				isFirst = false
			} else {
				sb.WriteByte(',')
			}
			path := c.graph.Files[record.SourceIndex.GetIndex()].InputFile.Source.PrettyPaths.Rel
			sb.WriteString(fmt.Sprintf(`{"source":%s}`, helpers.QuoteForJSON(path, c.options.ASCIIOnly)))
		}
		sb.WriteByte(']')

		// declaredSymbols: only top-level symbols are interesting here
		sb.WriteString(`,"declaredSymbols":[`)
		isFirst = true
		for _, declSym := range part.DeclaredSymbols {
			if !declSym.IsTopLevel {
				continue
			}
			if isFirst {
				isFirst = false
			} else {
				sb.WriteByte(',')
			}
			sb.WriteString(fmt.Sprintf(`{"name":%s}`, quoteSym(declSym.Ref)))
		}
		sb.WriteByte(']')

		// symbolUses
		sb.WriteString(`,"symbolUses":[`)
		isFirst = true
		for ref, uses := range part.SymbolUses {
			if isFirst {
				isFirst = false
			} else {
				sb.WriteByte(',')
			}
			sb.WriteString(fmt.Sprintf(`{"name":%s,"countEstimate":%d}`, quoteSym(ref), uses.CountEstimate))
		}
		sb.WriteByte(']')

		// dependencies
		sb.WriteString(`,"dependencies":[`)
		for i, dep := range part.Dependencies {
			if i > 0 {
				sb.WriteByte(',')
			}
			sb.WriteString(fmt.Sprintf(`{"source":%s,"partIndex":%d}`,
				helpers.QuoteForJSON(c.graph.Files[dep.SourceIndex].InputFile.Source.PrettyPaths.Rel, c.options.ASCIIOnly),
				dep.PartIndex,
			))
		}
		sb.WriteByte(']')

		// code
		sb.WriteString(`,"code":`)
		sb.Write(helpers.QuoteForJSON(code, c.options.ASCIIOnly))
		sb.WriteByte('}')
	}

	sb.WriteString(`]`)
	return sb.String()
}
// moveBeforeExport moves the offset "i" backward to just before a trailing
// "export" keyword (skipping any whitespace that follows it) so that a part
// boundary never separates an "export" keyword from the declaration after it.
// If the text before "i" doesn't end with "export", the offset is unchanged.
func moveBeforeExport(contents string, i int) int {
	trimmed := strings.TrimRight(contents[:i], " \t\r\n")
	if strings.HasSuffix(trimmed, "export") {
		return len(trimmed) - len("export")
	}
	return i
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/linker/linker.go | internal/linker/linker.go | package linker
// This package implements the second phase of the bundling operation that
// generates the output files when given a module graph. It has been split off
// into separate package to allow two linkers to cleanly exist in the same code
// base. This will be useful when rewriting the linker because the new one can
// be off by default to minimize disruption, but can still be enabled by anyone
// to assist in giving feedback on the rewrite.
import (
"bytes"
"encoding/base64"
"encoding/binary"
"fmt"
"hash"
"net/url"
"path"
"sort"
"strconv"
"strings"
"sync"
"github.com/evanw/esbuild/internal/ast"
"github.com/evanw/esbuild/internal/bundler"
"github.com/evanw/esbuild/internal/compat"
"github.com/evanw/esbuild/internal/config"
"github.com/evanw/esbuild/internal/css_ast"
"github.com/evanw/esbuild/internal/css_lexer"
"github.com/evanw/esbuild/internal/css_parser"
"github.com/evanw/esbuild/internal/css_printer"
"github.com/evanw/esbuild/internal/fs"
"github.com/evanw/esbuild/internal/graph"
"github.com/evanw/esbuild/internal/helpers"
"github.com/evanw/esbuild/internal/js_ast"
"github.com/evanw/esbuild/internal/js_lexer"
"github.com/evanw/esbuild/internal/js_printer"
"github.com/evanw/esbuild/internal/logger"
"github.com/evanw/esbuild/internal/renamer"
"github.com/evanw/esbuild/internal/resolver"
"github.com/evanw/esbuild/internal/runtime"
"github.com/evanw/esbuild/internal/sourcemap"
"github.com/evanw/esbuild/internal/xxhash"
)
// linkerContext holds all of the state for a single linking operation. One
// of these is created per call to "Link" and is discarded when linking ends.
type linkerContext struct {
	options *config.Options
	timer *helpers.Timer
	log logger.Log
	fs fs.FS
	res *resolver.Resolver
	graph graph.LinkerGraph
	// The output chunks (see "computeChunks")
	chunks []chunkInfo
	// This helps avoid an infinite loop when matching imports to exports
	cycleDetector []importTracker
	// This represents the parallel computation of source map related data.
	// Calling this will block until the computation is done. The resulting value
	// is shared between threads and must be treated as immutable.
	dataForSourceMaps func() []bundler.DataForSourceMap
	// This is passed to us from the bundling phase
	uniqueKeyPrefix string
	uniqueKeyPrefixBytes []byte // This is just "uniqueKeyPrefix" in byte form
	// Property mangling results go here
	mangledProps map[ast.Ref]string
	// We may need to refer to the CommonJS "module" symbol for exports
	unboundModuleRef ast.Ref
	// We may need to refer to the "__esm" and/or "__commonJS" runtime symbols
	cjsRuntimeRef ast.Ref
	esmRuntimeRef ast.Ref
}
// partRange identifies a contiguous range of parts within a single file,
// all of which have been assigned to the same chunk.
type partRange struct {
	sourceIndex uint32
	partIndexBegin uint32
	partIndexEnd uint32
}
// chunkInfo holds all of the state for a single output chunk while it is
// built up over the course of the linking process.
type chunkInfo struct {
	// This is a random string and is used to represent the output path of this
	// chunk before the final output path has been computed.
	uniqueKey string
	// The keys are source indices of files with parts included in this chunk
	filesWithPartsInChunk map[uint32]bool
	entryBits helpers.BitSet
	// For code splitting
	crossChunkImports []chunkImport
	// This is the representation-specific information
	chunkRepr chunkRepr
	// This is the final path of this chunk relative to the output directory, but
	// without the substitution of the final hash (since it hasn't been computed).
	finalTemplate []config.PathTemplate
	// This is the final path of this chunk relative to the output directory. It
	// is the substitution of the final hash into "finalTemplate".
	finalRelPath string
	// If non-empty, this chunk needs to generate an external legal comments file.
	externalLegalComments []byte
	// This contains the hash for just this chunk without including information
	// from the hashes of other chunks. Later on in the linking process, the
	// final hash for this chunk will be constructed by merging the isolated
	// hashes of all transitive dependencies of this chunk. This is separated
	// into two phases like this to handle cycles in the chunk import graph.
	waitForIsolatedHash func() []byte
	// Other fields relating to the output file for this chunk
	jsonMetadataChunkCallback func(finalOutputSize int) helpers.Joiner
	outputSourceMap sourcemap.SourceMapPieces
	// When this chunk is initially generated in isolation, the output pieces
	// will contain slices of the output with the unique keys of other chunks
	// omitted.
	intermediateOutput intermediateOutput
	// This information is only useful if "isEntryPoint" is true
	entryPointBit uint // An index into "c.graph.EntryPoints"
	sourceIndex uint32 // An index into "c.sources"
	isEntryPoint bool
	isExecutable bool
}
// chunkImport records that one chunk imports another chunk, along with the
// kind of import used (e.g. a static vs. a dynamic import).
type chunkImport struct {
	chunkIndex uint32
	importKind ast.ImportKind
}
// outputPieceIndexKind says what the "index" field of an "outputPiece"
// refers to, if anything.
type outputPieceIndexKind uint8
const (
	outputPieceNone outputPieceIndexKind = iota
	outputPieceAssetIndex // "index" is a source index for an additional asset file
	outputPieceChunkIndex // "index" is an index into "c.chunks"
)
// This is a chunk of source code followed by a reference to another chunk. For
// example, the file "@import 'CHUNK0001'; body { color: black; }" would be
// represented by two pieces, one with the data "@import '" and another with the
// data "'; body { color: black; }". The first would have the chunk index 1 and
// the second would have an invalid chunk index.
type outputPiece struct {
	data []byte
	// Note: The "kind" may be "outputPieceNone" in which case there is one piece
	// with data and no chunk index. For example, the chunk may not contain any
	// imports.
	index uint32
	kind outputPieceIndexKind
}
// intermediateOutput is the output of generating a chunk in isolation, before
// the final paths of other chunks and assets have been substituted in.
type intermediateOutput struct {
	// If the chunk has references to other chunks, then "pieces" contains the
	// contents of the chunk and "joiner" should not be used. Another joiner
	// will have to be constructed later when merging the pieces together.
	pieces []outputPiece
	// If the chunk doesn't have any references to other chunks, then "pieces" is
	// nil and "joiner" contains the contents of the chunk. This is more efficient
	// because it avoids doing a join operation twice.
	joiner helpers.Joiner
}
// chunkRepr is the representation-specific part of a chunk. It is either a
// "*chunkReprJS" or a "*chunkReprCSS".
type chunkRepr interface{ isChunk() }
func (*chunkReprJS) isChunk() {}
func (*chunkReprCSS) isChunk() {}
// chunkReprJS is the JavaScript-specific representation of a chunk.
type chunkReprJS struct {
	filesInChunkInOrder []uint32
	partsInChunkInOrder []partRange
	// For code splitting
	exportsToOtherChunks map[ast.Ref]string
	importsFromOtherChunks map[uint32]crossChunkImportItemArray
	crossChunkPrefixStmts []js_ast.Stmt
	crossChunkSuffixStmts []js_ast.Stmt
	// "cssChunkIndex" is only meaningful when "hasCSSChunk" is true
	cssChunkIndex uint32
	hasCSSChunk bool
}
// chunkReprCSS is the CSS-specific representation of a chunk.
type chunkReprCSS struct {
	importsInChunkInOrder []cssImportOrder
}
// externalImportCSS tracks a CSS "@import" of an external (not bundled) path
// along with any import conditions attached to it.
type externalImportCSS struct {
	path logger.Path
	conditions []css_ast.ImportConditions
	conditionImportRecords []ast.ImportRecord
}
// Returns a log where "log.HasErrors()" only returns true if any errors have
// been logged since this call. This is useful when there have already been
// errors logged by other linkers that share the same log.
func wrappedLog(log logger.Log) logger.Log {
	var mu sync.Mutex
	var sawError bool
	oldAddMsg := log.AddMsg
	// Intercept every message so newly-logged errors can be recorded
	log.AddMsg = func(msg logger.Msg) {
		if msg.Kind == logger.Error {
			mu.Lock()
			defer mu.Unlock()
			sawError = true
		}
		oldAddMsg(msg)
	}
	// Only report errors that were observed after this wrapper was installed
	log.HasErrors = func() bool {
		mu.Lock()
		defer mu.Unlock()
		return sawError
	}
	return log
}
// Link is the main entry point for the linker. It takes the module graph from
// the bundling phase and returns the final output files, including generated
// chunks plus any files copied for entry points that use the "copy" loader.
func Link(
	options *config.Options,
	timer *helpers.Timer,
	log logger.Log,
	fs fs.FS,
	res *resolver.Resolver,
	inputFiles []graph.InputFile,
	entryPoints []graph.EntryPoint,
	uniqueKeyPrefix string,
	reachableFiles []uint32,
	dataForSourceMaps func() []bundler.DataForSourceMap,
) []graph.OutputFile {
	timer.Begin("Link")
	defer timer.End("Link")
	// Isolate error tracking so "HasErrors" only reflects this link operation
	log = wrappedLog(log)
	timer.Begin("Clone linker graph")
	c := linkerContext{
		options: options,
		timer: timer,
		log: log,
		fs: fs,
		res: res,
		dataForSourceMaps: dataForSourceMaps,
		uniqueKeyPrefix: uniqueKeyPrefix,
		uniqueKeyPrefixBytes: []byte(uniqueKeyPrefix),
		graph: graph.CloneLinkerGraph(
			inputFiles,
			reachableFiles,
			entryPoints,
			options.CodeSplitting,
		),
	}
	timer.End("Clone linker graph")
	// Use a smaller version of these functions if we don't need profiler names
	runtimeRepr := c.graph.Files[runtime.SourceIndex].InputFile.Repr.(*graph.JSRepr)
	if c.options.ProfilerNames {
		c.cjsRuntimeRef = runtimeRepr.AST.NamedExports["__commonJS"].Ref
		c.esmRuntimeRef = runtimeRepr.AST.NamedExports["__esm"].Ref
	} else {
		c.cjsRuntimeRef = runtimeRepr.AST.NamedExports["__commonJSMin"].Ref
		c.esmRuntimeRef = runtimeRepr.AST.NamedExports["__esmMin"].Ref
	}
	var additionalFiles []graph.OutputFile
	for _, entryPoint := range entryPoints {
		file := &c.graph.Files[entryPoint.SourceIndex].InputFile
		switch repr := file.Repr.(type) {
		case *graph.JSRepr:
			// Loaders default to CommonJS when they are the entry point and the output
			// format is not ESM-compatible since that avoids generating the ESM-to-CJS
			// machinery.
			if repr.AST.HasLazyExport && (c.options.Mode == config.ModePassThrough ||
				(c.options.Mode == config.ModeConvertFormat && !c.options.OutputFormat.KeepESMImportExportSyntax())) {
				repr.AST.ExportsKind = js_ast.ExportsCommonJS
			}
			// Entry points with ES6 exports must generate an exports object when
			// targeting non-ES6 formats. Note that the IIFE format only needs this
			// when the global name is present, since that's the only way the exports
			// can actually be observed externally.
			if repr.AST.ExportKeyword.Len > 0 && (options.OutputFormat == config.FormatCommonJS ||
				(options.OutputFormat == config.FormatIIFE && len(options.GlobalName) > 0)) {
				repr.AST.UsesExportsRef = true
				repr.Meta.ForceIncludeExportsForEntryPoint = true
			}
		case *graph.CopyRepr:
			// If an entry point uses the copy loader, then copy the file manually
			// here. Other uses of the copy loader will automatically be included
			// along with the corresponding bundled chunk but that doesn't happen
			// for entry points.
			additionalFiles = append(additionalFiles, file.AdditionalFiles...)
		}
	}
	// Allocate a new unbound symbol called "module" in case we need it later
	if c.options.OutputFormat == config.FormatCommonJS {
		c.unboundModuleRef = c.graph.GenerateNewSymbol(runtime.SourceIndex, ast.SymbolUnbound, "module")
	} else {
		c.unboundModuleRef = ast.InvalidRef
	}
	c.scanImportsAndExports()
	// Stop now if there were errors
	if c.log.HasErrors() {
		c.options.ExclusiveMangleCacheUpdate(func(map[string]interface{}, map[string]bool) {
			// Always do this so that we don't cause other entry points when there are errors
		})
		return []graph.OutputFile{}
	}
	c.treeShakingAndCodeSplitting()
	if c.options.Mode == config.ModePassThrough {
		for _, entryPoint := range c.graph.EntryPoints() {
			c.preventExportsFromBeingRenamed(entryPoint.SourceIndex)
		}
	}
	c.computeChunks()
	c.computeCrossChunkDependencies()
	// Merge mangled properties before chunks are generated since the names must
	// be consistent across all chunks, or the generated code will break
	c.timer.Begin("Waiting for mangle cache")
	c.options.ExclusiveMangleCacheUpdate(func(
		mangleCache map[string]interface{},
		cssUsedLocalNames map[string]bool,
	) {
		c.timer.End("Waiting for mangle cache")
		c.mangleProps(mangleCache)
		c.mangleLocalCSS(cssUsedLocalNames)
	})
	// Make sure calls to "ast.FollowSymbols()" in parallel goroutines after this
	// won't hit concurrent map mutation hazards
	ast.FollowAllSymbols(c.graph.Symbols)
	return c.generateChunksInParallel(additionalFiles)
}
// mangleProps assigns short mangled names to all properties marked for
// mangling across the whole module graph. Renaming is done once globally
// (not per-chunk) so the same property gets the same mangled name in every
// chunk. Results are stored in "c.mangledProps" and, when a cache was
// provided, recorded in "mangleCache" for reuse across builds.
func (c *linkerContext) mangleProps(mangleCache map[string]interface{}) {
	c.timer.Begin("Mangle props")
	defer c.timer.End("Mangle props")
	mangledProps := make(map[ast.Ref]string)
	c.mangledProps = mangledProps
	// Reserve all JS keywords
	reservedProps := make(map[string]bool)
	for keyword := range js_lexer.Keywords {
		reservedProps[keyword] = true
	}
	// Reserve all target properties in the cache
	for original, remapped := range mangleCache {
		if remapped == false {
			// A "false" cache entry means "do not mangle this property"
			reservedProps[original] = true
		} else {
			reservedProps[remapped.(string)] = true
		}
	}
	// Merge all mangled property symbols together
	freq := ast.CharFreq{}
	mergedProps := make(map[string]ast.Ref)
	for _, sourceIndex := range c.graph.ReachableFiles {
		// Don't mangle anything in the runtime code
		if sourceIndex == runtime.SourceIndex {
			continue
		}
		// For each file
		if repr, ok := c.graph.Files[sourceIndex].InputFile.Repr.(*graph.JSRepr); ok {
			// Reserve all non-mangled properties
			for prop := range repr.AST.ReservedProps {
				reservedProps[prop] = true
			}
			// Merge each mangled property with other ones of the same name
			for name, ref := range repr.AST.MangledProps {
				if existing, ok := mergedProps[name]; ok {
					ast.MergeSymbols(c.graph.Symbols, ref, existing)
				} else {
					mergedProps[name] = ref
				}
			}
			// Include this file's frequency histogram, which affects the mangled names
			if repr.AST.CharFreq != nil {
				freq.Include(repr.AST.CharFreq)
			}
		}
	}
	// Sort by use count (note: does not currently account for live vs. dead code)
	sorted := make(renamer.StableSymbolCountArray, 0, len(mergedProps))
	stableSourceIndices := c.graph.StableSourceIndices
	for _, ref := range mergedProps {
		sorted = append(sorted, renamer.StableSymbolCount{
			StableSourceIndex: stableSourceIndices[ref.SourceIndex],
			Ref: ref,
			Count: c.graph.Symbols.Get(ref).UseCountEstimate,
		})
	}
	sort.Sort(sorted)
	// Assign names in order of use count
	minifier := ast.DefaultNameMinifierJS.ShuffleByCharFreq(freq)
	nextName := 0
	for _, symbolCount := range sorted {
		symbol := c.graph.Symbols.Get(symbolCount.Ref)
		// Don't change existing mappings
		if existing, ok := mangleCache[symbol.OriginalName]; ok {
			if existing != false {
				mangledProps[symbolCount.Ref] = existing.(string)
			}
			continue
		}
		// Generate a new name
		name := minifier.NumberToMinifiedName(nextName)
		nextName++
		// Avoid reserved properties
		for reservedProps[name] {
			name = minifier.NumberToMinifiedName(nextName)
			nextName++
		}
		// Track the new mapping
		if mangleCache != nil {
			mangleCache[symbol.OriginalName] = name
		}
		mangledProps[symbolCount.Ref] = name
	}
}
// mangleLocalCSS renames all local CSS names so they don't collide with each
// other or with global CSS names. Like property mangling, this must be done
// once globally so the renamed names are consistent across all chunks. The
// results are stored in "c.mangledProps" alongside the mangled JS properties.
func (c *linkerContext) mangleLocalCSS(usedLocalNames map[string]bool) {
	c.timer.Begin("Mangle local CSS")
	defer c.timer.End("Mangle local CSS")
	mangledProps := c.mangledProps
	globalNames := make(map[string]bool)
	localNames := make(map[ast.Ref]struct{})
	// Collect all local and global CSS names
	freq := ast.CharFreq{}
	for _, sourceIndex := range c.graph.ReachableFiles {
		if repr, ok := c.graph.Files[sourceIndex].InputFile.Repr.(*graph.CSSRepr); ok {
			for innerIndex, symbol := range c.graph.Symbols.SymbolsForSource[sourceIndex] {
				if symbol.Kind == ast.SymbolGlobalCSS {
					globalNames[symbol.OriginalName] = true
				} else {
					// Follow symbol links so merged symbols are only counted once
					ref := ast.Ref{SourceIndex: sourceIndex, InnerIndex: uint32(innerIndex)}
					ref = ast.FollowSymbols(c.graph.Symbols, ref)
					localNames[ref] = struct{}{}
				}
			}
			// Include this file's frequency histogram, which affects the mangled names
			if repr.AST.CharFreq != nil {
				freq.Include(repr.AST.CharFreq)
			}
		}
	}
	// Sort by use count (note: does not currently account for live vs. dead code)
	sorted := make(renamer.StableSymbolCountArray, 0, len(localNames))
	stableSourceIndices := c.graph.StableSourceIndices
	for ref := range localNames {
		sorted = append(sorted, renamer.StableSymbolCount{
			StableSourceIndex: stableSourceIndices[ref.SourceIndex],
			Ref: ref,
			Count: c.graph.Symbols.Get(ref).UseCountEstimate,
		})
	}
	sort.Sort(sorted)
	// Rename all local names to avoid collisions
	if c.options.MinifyIdentifiers {
		// Use short names generated from the character frequency histogram
		minifier := ast.DefaultNameMinifierCSS.ShuffleByCharFreq(freq)
		nextName := 0
		for _, symbolCount := range sorted {
			name := minifier.NumberToMinifiedName(nextName)
			for globalNames[name] || usedLocalNames[name] {
				nextName++
				name = minifier.NumberToMinifiedName(nextName)
			}
			// Turn this local name into a global one
			mangledProps[symbolCount.Ref] = name
			usedLocalNames[name] = true
		}
	} else {
		// Use readable names of the form "<file identifier>_<original name>"
		nameCounts := make(map[string]uint32)
		for _, symbolCount := range sorted {
			symbol := c.graph.Symbols.Get(symbolCount.Ref)
			name := fmt.Sprintf("%s_%s", c.graph.Files[symbolCount.Ref.SourceIndex].InputFile.Source.IdentifierName, symbol.OriginalName)
			// If the name is already in use, generate a new name by appending a number
			if globalNames[name] || usedLocalNames[name] {
				// To avoid O(n^2) behavior, the number must start off being the number
				// that we used last time there was a collision with this name. Otherwise
				// if there are many collisions with the same name, each name collision
				// would have to increment the counter past all previous name collisions
				// which is a O(n^2) time algorithm.
				tries, ok := nameCounts[name]
				if !ok {
					tries = 1
				}
				prefix := name
				// Keep incrementing the number until the name is unused
				for {
					tries++
					name = prefix + strconv.Itoa(int(tries))
					// Make sure this new name is unused
					if !globalNames[name] && !usedLocalNames[name] {
						// Store the count so we can start here next time instead of starting
						// from 1. This means we avoid O(n^2) behavior.
						nameCounts[prefix] = tries
						break
					}
				}
			}
			// Turn this local name into a global one
			mangledProps[symbolCount.Ref] = name
			usedLocalNames[name] = true
		}
	}
}
// Currently the automatic chunk generation algorithm should by construction
// never generate chunks that import each other since files are allocated to
// chunks based on which entry points they are reachable from.
//
// This will change in the future when we allow manual chunk labels. But before
// we allow manual chunk labels, we'll need to rework module initialization to
// allow code splitting chunks to be lazily-initialized.
//
// Since that work hasn't been finished yet, cycles in the chunk import graph
// can cause initialization bugs. So let's forbid these cycles for now to guard
// against code splitting bugs that could cause us to generate buggy chunks.
func (c *linkerContext) enforceNoCyclicChunkImports() {
	// DFS memoization with 3-colors, more space efficient
	// 0: white (unvisited), 1: gray (on the current DFS path), 2: black (done)
	//
	// Note: "colors" is captured by the closure below, so it no longer needs
	// to be redundantly threaded through as a parameter on every call.
	colors := make(map[int]int)
	var validate func(int) bool
	validate = func(chunkIndex int) bool {
		switch colors[chunkIndex] {
		case 1:
			// Reaching a gray chunk means the current DFS path contains a cycle
			c.log.AddError(nil, logger.Range{}, "Internal error: generated chunks contain a circular import")
			return true
		case 2:
			// Already fully validated
			return false
		}
		colors[chunkIndex] = 1
		for _, chunkImport := range c.chunks[chunkIndex].crossChunkImports {
			// Ignore cycles caused by dynamic "import()" expressions. These are fine
			// because they don't necessarily cause initialization order issues and
			// they don't indicate a bug in our chunk generation algorithm. They arise
			// normally in real code (e.g. two files that import each other).
			if chunkImport.importKind != ast.ImportDynamic {
				// Recursively validate otherChunkIndex
				if validate(int(chunkImport.chunkIndex)) {
					return true
				}
			}
		}
		colors[chunkIndex] = 2
		return false
	}
	for i := range c.chunks {
		if validate(i) {
			break
		}
	}
}
// generateChunksInParallel turns each chunk into its final output file (plus
// optional source map and legal comments files). Chunks are generated on one
// goroutine each, then final paths are computed from the isolated chunk
// hashes and substituted into the output in place of the temporary unique
// keys, also in parallel.
func (c *linkerContext) generateChunksInParallel(additionalFiles []graph.OutputFile) []graph.OutputFile {
	c.timer.Begin("Generate chunks")
	defer c.timer.End("Generate chunks")
	// Generate each chunk on a separate goroutine. When a chunk needs to
	// reference the path of another chunk, it will use a temporary path called
	// the "uniqueKey" since the final path hasn't been computed yet (and is
	// in general uncomputable at this point because paths have hashes that
	// include information about chunk dependencies, and chunk dependencies
	// can be cyclic due to dynamic imports).
	generateWaitGroup := sync.WaitGroup{}
	generateWaitGroup.Add(len(c.chunks))
	for chunkIndex := range c.chunks {
		switch c.chunks[chunkIndex].chunkRepr.(type) {
		case *chunkReprJS:
			go c.generateChunkJS(chunkIndex, &generateWaitGroup)
		case *chunkReprCSS:
			go c.generateChunkCSS(chunkIndex, &generateWaitGroup)
		}
	}
	// Validate the chunk import graph while chunk generation runs in parallel
	c.enforceNoCyclicChunkImports()
	generateWaitGroup.Wait()
	// Compute the final hashes of each chunk, then use those to create the final
	// paths of each chunk. This can technically be done in parallel but it
	// probably doesn't matter so much because we're not hashing that much data.
	visited := make([]uint32, len(c.chunks))
	var finalBytes []byte
	for chunkIndex := range c.chunks {
		chunk := &c.chunks[chunkIndex]
		var hashSubstitution *string
		// Only wait for the hash if necessary
		if config.HasPlaceholder(chunk.finalTemplate, config.HashPlaceholder) {
			// Compute the final hash using the isolated hashes of the dependencies
			hash := xxhash.New()
			c.appendIsolatedHashesForImportedChunks(hash, uint32(chunkIndex), visited, ^uint32(chunkIndex))
			finalBytes = hash.Sum(finalBytes[:0])
			finalString := bundler.HashForFileName(finalBytes)
			hashSubstitution = &finalString
		}
		// Render the last remaining placeholder in the template
		chunk.finalRelPath = config.TemplateToString(config.SubstituteTemplate(chunk.finalTemplate, config.PathPlaceholders{
			Hash: hashSubstitution,
		}))
	}
	// Generate the final output files by joining file pieces together and
	// substituting the temporary paths for the final paths. This substitution
	// can be done in parallel for each chunk.
	c.timer.Begin("Generate final output files")
	var resultsWaitGroup sync.WaitGroup
	results := make([][]graph.OutputFile, len(c.chunks))
	resultsWaitGroup.Add(len(c.chunks))
	for chunkIndex, chunk := range c.chunks {
		go func(chunkIndex int, chunk chunkInfo) {
			var outputFiles []graph.OutputFile
			// Each file may optionally contain additional files to be copied to the
			// output directory. This is used by the "file" and "copy" loaders.
			var commentPrefix string
			var commentSuffix string
			switch chunkRepr := chunk.chunkRepr.(type) {
			case *chunkReprJS:
				for _, sourceIndex := range chunkRepr.filesInChunkInOrder {
					outputFiles = append(outputFiles, c.graph.Files[sourceIndex].InputFile.AdditionalFiles...)
				}
				commentPrefix = "//"
			case *chunkReprCSS:
				for _, entry := range chunkRepr.importsInChunkInOrder {
					if entry.kind == cssImportSourceIndex {
						outputFiles = append(outputFiles, c.graph.Files[entry.sourceIndex].InputFile.AdditionalFiles...)
					}
				}
				commentPrefix = "/*"
				commentSuffix = " */"
			}
			// Path substitution for the chunk itself
			finalRelDir := c.fs.Dir(chunk.finalRelPath)
			outputContentsJoiner, outputSourceMapShifts := c.substituteFinalPaths(chunk.intermediateOutput,
				func(finalRelPathForImport string) string {
					return c.pathBetweenChunks(finalRelDir, finalRelPathForImport)
				})
			// Generate the optional legal comments file for this chunk
			if len(chunk.externalLegalComments) > 0 {
				finalRelPathForLegalComments := chunk.finalRelPath + ".LEGAL.txt"
				// Link the file to the legal comments
				if c.options.LegalComments == config.LegalCommentsLinkedWithComment {
					importPath := c.pathBetweenChunks(finalRelDir, finalRelPathForLegalComments)
					importPath = strings.TrimPrefix(importPath, "./")
					outputContentsJoiner.EnsureNewlineAtEnd()
					outputContentsJoiner.AddString("/*! For license information please see ")
					outputContentsJoiner.AddString(importPath)
					outputContentsJoiner.AddString(" */\n")
				}
				// Write the external legal comments file
				outputFiles = append(outputFiles, graph.OutputFile{
					AbsPath: c.fs.Join(c.options.AbsOutputDir, finalRelPathForLegalComments),
					Contents: chunk.externalLegalComments,
					JSONMetadataChunk: fmt.Sprintf(
						"{\n \"imports\": [],\n \"exports\": [],\n \"inputs\": {},\n \"bytes\": %d\n }", len(chunk.externalLegalComments)),
				})
			}
			// Generate the optional source map for this chunk
			if c.options.SourceMap != config.SourceMapNone && chunk.outputSourceMap.HasContent() {
				outputSourceMap := chunk.outputSourceMap.Finalize(outputSourceMapShifts)
				finalRelPathForSourceMap := chunk.finalRelPath + ".map"
				// Potentially write a trailing source map comment
				switch c.options.SourceMap {
				case config.SourceMapLinkedWithComment:
					importPath := c.pathBetweenChunks(finalRelDir, finalRelPathForSourceMap)
					importPath = strings.TrimPrefix(importPath, "./")
					importURL := url.URL{Path: importPath}
					outputContentsJoiner.EnsureNewlineAtEnd()
					outputContentsJoiner.AddString(commentPrefix)
					outputContentsJoiner.AddString("# sourceMappingURL=")
					outputContentsJoiner.AddString(importURL.EscapedPath())
					outputContentsJoiner.AddString(commentSuffix)
					outputContentsJoiner.AddString("\n")
				case config.SourceMapInline, config.SourceMapInlineAndExternal:
					outputContentsJoiner.EnsureNewlineAtEnd()
					outputContentsJoiner.AddString(commentPrefix)
					outputContentsJoiner.AddString("# sourceMappingURL=data:application/json;base64,")
					outputContentsJoiner.AddString(base64.StdEncoding.EncodeToString(outputSourceMap))
					outputContentsJoiner.AddString(commentSuffix)
					outputContentsJoiner.AddString("\n")
				}
				// Potentially write the external source map file
				switch c.options.SourceMap {
				case config.SourceMapLinkedWithComment, config.SourceMapInlineAndExternal, config.SourceMapExternalWithoutComment:
					outputFiles = append(outputFiles, graph.OutputFile{
						AbsPath: c.fs.Join(c.options.AbsOutputDir, finalRelPathForSourceMap),
						Contents: outputSourceMap,
						JSONMetadataChunk: fmt.Sprintf(
							"{\n \"imports\": [],\n \"exports\": [],\n \"inputs\": {},\n \"bytes\": %d\n }", len(outputSourceMap)),
					})
				}
			}
			// Finalize the output contents
			outputContents := outputContentsJoiner.Done()
			// Path substitution for the JSON metadata
			var jsonMetadataChunk string
			if c.options.NeedsMetafile {
				jsonMetadataChunkPieces := c.breakJoinerIntoPieces(chunk.jsonMetadataChunkCallback(len(outputContents)))
				jsonMetadataChunkBytes, _ := c.substituteFinalPaths(jsonMetadataChunkPieces, func(finalRelPathForImport string) string {
					prettyPaths := resolver.MakePrettyPaths(c.fs, logger.Path{Text: c.fs.Join(c.options.AbsOutputDir, finalRelPathForImport), Namespace: "file"})
					return prettyPaths.Select(c.options.MetafilePathStyle)
				})
				jsonMetadataChunk = string(jsonMetadataChunkBytes.Done())
			}
			// Generate the output file for this chunk
			outputFiles = append(outputFiles, graph.OutputFile{
				AbsPath: c.fs.Join(c.options.AbsOutputDir, chunk.finalRelPath),
				Contents: outputContents,
				JSONMetadataChunk: jsonMetadataChunk,
				IsExecutable: chunk.isExecutable,
			})
			results[chunkIndex] = outputFiles
			resultsWaitGroup.Done()
		}(chunkIndex, chunk)
	}
	resultsWaitGroup.Wait()
	c.timer.End("Generate final output files")
	// Merge the output files from the different goroutines together in order
	outputFilesLen := len(additionalFiles)
	for _, result := range results {
		outputFilesLen += len(result)
	}
	outputFiles := make([]graph.OutputFile, 0, outputFilesLen)
	outputFiles = append(outputFiles, additionalFiles...)
	for _, result := range results {
		outputFiles = append(outputFiles, result...)
	}
	return outputFiles
}
// Given a set of output pieces (i.e. a buffer already divided into the spans
// between import paths), substitute the final import paths in and then join
// everything into a single byte buffer. Also returns the source map shifts
// produced by replacing each unique key with its final import path.
func (c *linkerContext) substituteFinalPaths(
	intermediateOutput intermediateOutput,
	modifyPath func(string) string,
) (j helpers.Joiner, shifts []sourcemap.SourceMapShift) {
	// Optimization: If there can be no substitutions, just reuse the initial
	// joiner that was used when generating the intermediate chunk output
	// instead of creating another one and copying the whole file into it.
	if intermediateOutput.pieces == nil {
		return intermediateOutput.joiner, []sourcemap.SourceMapShift{{}}
	}
	var shift sourcemap.SourceMapShift
	shifts = make([]sourcemap.SourceMapShift, 0, len(intermediateOutput.pieces))
	shifts = append(shifts, shift)
	for _, piece := range intermediateOutput.pieces {
		// The verbatim data before a substitution advances the "Before" and
		// "After" positions by the same amount
		var dataOffset sourcemap.LineColumnOffset
		j.AddBytes(piece.data)
		dataOffset.AdvanceBytes(piece.data)
		shift.Before.Add(dataOffset)
		shift.After.Add(dataOffset)
		switch piece.kind {
		case outputPieceAssetIndex:
			// Substitute the final path of an additional (asset) output file
			file := c.graph.Files[piece.index]
			if len(file.InputFile.AdditionalFiles) != 1 {
				panic("Internal error")
			}
			relPath, _ := c.fs.Rel(c.options.AbsOutputDir, file.InputFile.AdditionalFiles[0].AbsPath)
			// Make sure to always use forward slashes, even on Windows
			relPath = strings.ReplaceAll(relPath, "\\", "/")
			importPath := modifyPath(relPath)
			j.AddString(importPath)
			shift.Before.AdvanceString(file.InputFile.UniqueKeyForAdditionalFile)
			shift.After.AdvanceString(importPath)
			shifts = append(shifts, shift)
		case outputPieceChunkIndex:
			// Substitute the final path of another chunk
			chunk := c.chunks[piece.index]
			importPath := modifyPath(chunk.finalRelPath)
			j.AddString(importPath)
			shift.Before.AdvanceString(chunk.uniqueKey)
			shift.After.AdvanceString(importPath)
			shifts = append(shifts, shift)
		}
	}
	return
}
// accurateFinalByteCount computes the byte length that "substituteFinalPaths"
// would produce for this output, without actually building the joined buffer.
func (c *linkerContext) accurateFinalByteCount(output intermediateOutput, chunkFinalRelDir string) int {
	total := 0
	// Note: The paths generated here must match "substituteFinalPaths" above
	for _, piece := range output.pieces {
		total += len(piece.data)
		switch piece.kind {
		case outputPieceAssetIndex:
			additionalFiles := c.graph.Files[piece.index].InputFile.AdditionalFiles
			if len(additionalFiles) != 1 {
				panic("Internal error")
			}
			relPath, _ := c.fs.Rel(c.options.AbsOutputDir, additionalFiles[0].AbsPath)
			// Make sure to always use forward slashes, even on Windows
			relPath = strings.ReplaceAll(relPath, "\\", "/")
			total += len(c.pathBetweenChunks(chunkFinalRelDir, relPath))
		case outputPieceChunkIndex:
			total += len(c.pathBetweenChunks(chunkFinalRelDir, c.chunks[piece.index].finalRelPath))
		}
	}
	return total
}
// pathBetweenChunks returns the import path to use inside a chunk located in
// "fromRelDir" to reference the output file at "toRelPath". A configured
// public path takes precedence; otherwise a "./"-style relative path is used.
// Returns "" (after logging an error) if no relative path can be computed.
func (c *linkerContext) pathBetweenChunks(fromRelDir string, toRelPath string) string {
	// Join with the public path if it has been configured
	if publicPath := c.options.PublicPath; publicPath != "" {
		return joinWithPublicPath(publicPath, toRelPath)
	}
	// Otherwise, return a relative path
	relPath, ok := c.fs.Rel(fromRelDir, toRelPath)
	if !ok {
		c.log.AddError(nil, logger.Range{},
			fmt.Sprintf("Cannot traverse from directory %q to chunk %q", fromRelDir, toRelPath))
		return ""
	}
	// Always use forward slashes, even on Windows
	relPath = strings.ReplaceAll(relPath, "\\", "/")
	// Prefix with "./" when needed so the path can't be interpreted as a
	// package path instead of a relative path
	if strings.HasPrefix(relPath, "./") || strings.HasPrefix(relPath, "../") {
		return relPath
	}
	return "./" + relPath
}
func (c *linkerContext) computeCrossChunkDependencies() {
c.timer.Begin("Compute cross-chunk dependencies")
defer c.timer.End("Compute cross-chunk dependencies")
if !c.options.CodeSplitting {
// No need to compute cross-chunk dependencies if there can't be any
return
}
type chunkMeta struct {
imports map[ast.Ref]bool
exports map[ast.Ref]bool
dynamicImports map[int]bool
}
chunkMetas := make([]chunkMeta, len(c.chunks))
// For each chunk, see what symbols it uses from other chunks. Do this in
// parallel because it's the most expensive part of this function.
waitGroup := sync.WaitGroup{}
waitGroup.Add(len(c.chunks))
for chunkIndex, chunk := range c.chunks {
go func(chunkIndex int, chunk chunkInfo) {
chunkMeta := &chunkMetas[chunkIndex]
imports := make(map[ast.Ref]bool)
chunkMeta.imports = imports
chunkMeta.exports = make(map[ast.Ref]bool)
// Go over each file in this chunk
for sourceIndex := range chunk.filesWithPartsInChunk {
// Go over each part in this file that's marked for inclusion in this chunk
switch repr := c.graph.Files[sourceIndex].InputFile.Repr.(type) {
case *graph.JSRepr:
for partIndex, partMeta := range repr.AST.Parts {
if !partMeta.IsLive {
continue
}
part := &repr.AST.Parts[partIndex]
// Rewrite external dynamic imports to point to the chunk for that entry point
for _, importRecordIndex := range part.ImportRecordIndices {
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | true |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/sourcemap/sourcemap.go | internal/sourcemap/sourcemap.go | package sourcemap
import (
"bytes"
"strings"
"unicode/utf8"
"github.com/evanw/esbuild/internal/ast"
"github.com/evanw/esbuild/internal/helpers"
"github.com/evanw/esbuild/internal/logger"
)
// Mapping is a single decoded entry from a source map. It associates a
// position in the generated output with a position in an original source.
type Mapping struct {
	GeneratedLine int32 // 0-based
	GeneratedColumn int32 // 0-based count of UTF-16 code units
	SourceIndex int32 // 0-based
	OriginalLine int32 // 0-based
	OriginalColumn int32 // 0-based count of UTF-16 code units
	OriginalName ast.Index32 // 0-based, optional
}
// SourceMap is the in-memory form of a source map. The fields mirror the
// "sources", "sourcesContent", "mappings", and "names" data of the source
// map format.
type SourceMap struct {
	Sources []string
	SourcesContent []SourceContent
	Mappings []Mapping
	Names []string
}
// SourceContent holds the contents of a single original source file that is
// referenced by a source map.
type SourceContent struct {
	// This stores both the unquoted and the quoted values. We try to use the
	// already-quoted value if possible so we don't need to re-quote it
	// unnecessarily for maximum performance.
	Quoted string
	// But sometimes we need to re-quote the value, such as when it contains
	// non-ASCII characters and we are in ASCII-only mode. In that case we quote
	// this parsed UTF-16 value.
	Value []uint16
}
// Find returns the mapping with the greatest generated position that is at
// or before the given generated line and column, or nil when no mapping on
// that generated line qualifies.
func (sm *SourceMap) Find(line int32, column int32) *Mapping {
	mappings := sm.Mappings
	// Binary search for the first mapping past the requested position
	lo := 0
	remaining := len(mappings)
	for remaining > 0 {
		half := remaining / 2
		mid := lo + half
		candidate := mappings[mid]
		if candidate.GeneratedLine < line || (candidate.GeneratedLine == line && candidate.GeneratedColumn <= column) {
			lo = mid + 1
			remaining -= half + 1
		} else {
			remaining = half
		}
	}
	// The match, if any, is the mapping just before the search boundary
	if lo > 0 {
		mapping := &mappings[lo-1]
		// Match the behavior of the popular "source-map" library from Mozilla:
		// only report a match when it's on the same generated line
		if mapping.GeneratedLine == line {
			return mapping
		}
	}
	return nil
}
// The 64 digit characters used by base64 VLQ encoding, in value order
var base64 = []byte("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/")

// A single base 64 digit can contain 6 bits of data. For the base 64 variable
// length quantities we use in the source map spec, the first bit is the sign,
// the next four bits are the actual value, and the 6th bit is the continuation
// bit. The continuation bit tells us whether there are more digits in this
// value following this digit.
//
//	Continuation
//	|    Sign
//	|    |
//	V    V
//	101011
//
// encodeVLQ appends the base64 VLQ encoding of "value" to "encoded" and
// returns the extended slice.
func encodeVLQ(encoded []byte, value int) []byte {
	// Fold the sign into the lowest bit
	vlq := value << 1
	if value < 0 {
		vlq = ((-value) << 1) | 1
	}
	// Fast path: values that fit in a single digit are very common
	if vlq>>5 == 0 {
		return append(encoded, base64[vlq&31])
	}
	// Emit 5 bits at a time, setting the continuation bit on every digit
	// except the last one
	for vlq != 0 {
		digit := vlq & 31
		if vlq >>= 5; vlq != 0 {
			digit |= 32
		}
		encoded = append(encoded, base64[digit])
	}
	return encoded
}
// DecodeVLQ decodes one VLQ-encoded value from "encoded" beginning at byte
// offset "start" (which is assumed to be in bounds). It returns the decoded
// value and the offset of the first byte past it. Decoding stops early at
// any byte that isn't a valid base64 digit.
func DecodeVLQ(encoded []byte, start int) (int, int) {
	vlq := 0
	for shift := 0; ; shift += 5 {
		digit := bytes.IndexByte(base64, encoded[start])
		if digit < 0 {
			break
		}

		// Fold this digit's 5 data bits into the accumulator
		vlq |= (digit & 31) << shift
		start++

		// The 6th bit is the continuation bit
		if digit&32 == 0 {
			break
		}
	}

	// The lowest accumulated bit holds the sign
	value := vlq >> 1
	if vlq&1 != 0 {
		value = -value
	}
	return value, start
}
// DecodeVLQUTF16 decodes one VLQ-encoded value from the start of a UTF-16
// buffer. It returns the decoded value, the number of code units consumed,
// and whether decoding succeeded. It fails on empty or truncated input and
// on any code unit that isn't a valid base64 digit.
func DecodeVLQUTF16(encoded []uint16) (int32, int, bool) {
	if len(encoded) == 0 {
		return 0, 0, false
	}

	var vlq int32
	current := 0
	for shift := 0; ; shift += 5 {
		if current >= len(encoded) {
			// The last digit promised a continuation that never arrived
			return 0, 0, false
		}
		digit := int32(bytes.IndexByte(base64, byte(encoded[current])))
		if digit < 0 {
			return 0, 0, false
		}

		// Fold this digit's 5 data bits into the accumulator
		vlq |= (digit & 31) << shift
		current++

		// The 6th bit is the continuation bit
		if digit&32 == 0 {
			break
		}
	}

	// The lowest accumulated bit holds the sign
	value := vlq >> 1
	if vlq&1 != 0 {
		value = -value
	}
	return value, current, true
}
// LineColumnOffset is a position (or a relative movement) within generated
// output, measured in lines and in UTF-16 code unit columns.
type LineColumnOffset struct {
	Lines   int
	Columns int
}

// ComesBefore returns true if "a" is strictly before "b".
func (a LineColumnOffset) ComesBefore(b LineColumnOffset) bool {
	if a.Lines != b.Lines {
		return a.Lines < b.Lines
	}
	return a.Columns < b.Columns
}

// Add advances "a" by the relative offset "b". A movement that spans at
// least one line replaces the column with that of the movement itself.
func (a *LineColumnOffset) Add(b LineColumnOffset) {
	if b.Lines != 0 {
		a.Lines += b.Lines
		a.Columns = b.Columns
	} else {
		a.Columns += b.Columns
	}
}

// AdvanceBytes moves the offset past the given UTF-8 encoded text. Columns
// are counted in UTF-16 code units for compatibility with Mozilla's
// "source-map" library, and "\r\n" counts as a single line break.
func (offset *LineColumnOffset) AdvanceBytes(bytes []byte) {
	columns := offset.Columns
	for i := 0; i < len(bytes); {
		c, width := utf8.DecodeRune(bytes[i:])
		i += width

		if c == '\r' || c == '\n' || c == '\u2028' || c == '\u2029' {
			// A '\r' immediately followed by '\n' is not a line break by
			// itself; the break is counted when the '\n' comes around
			if c == '\r' && i < len(bytes) && bytes[i] == '\n' {
				columns++
				continue
			}
			offset.Lines++
			columns = 0
			continue
		}

		// Astral-plane characters take two UTF-16 code units
		if c <= 0xFFFF {
			columns++
		} else {
			columns += 2
		}
	}
	offset.Columns = columns
}

// AdvanceString is the string equivalent of AdvanceBytes.
func (offset *LineColumnOffset) AdvanceString(text string) {
	columns := offset.Columns
	for i, c := range text {
		if c == '\r' || c == '\n' || c == '\u2028' || c == '\u2029' {
			// A '\r' immediately followed by '\n' is not a line break by
			// itself; the break is counted when the '\n' comes around
			if c == '\r' && i+1 < len(text) && text[i+1] == '\n' {
				columns++
				continue
			}
			offset.Lines++
			columns = 0
			continue
		}

		// Astral-plane characters take two UTF-16 code units
		if c <= 0xFFFF {
			columns++
		} else {
			columns += 2
		}
	}
	offset.Columns = columns
}
// SourceMapPieces is a source map split into the JSON before the encoded
// "mappings" string, the mappings data itself, and the JSON after it. The
// split allows the mappings to be adjusted later (see Finalize).
type SourceMapPieces struct {
	Prefix   []byte
	Mappings []byte
	Suffix   []byte
}

// HasContent returns true if any of the three pieces is non-empty.
func (pieces SourceMapPieces) HasContent() bool {
	return len(pieces.Prefix) > 0 || len(pieces.Mappings) > 0 || len(pieces.Suffix) > 0
}
// SourceMapShift records how a generated position moved after the mappings
// were produced (e.g. when the output text is edited in place). "Before" is
// the position in the original output and "After" is the corresponding
// position after the edit. Only column shifts are supported (see Finalize).
type SourceMapShift struct {
	Before LineColumnOffset
	After  LineColumnOffset
}
// Finalize joins the prefix, mappings, and suffix into one complete source
// map while applying the given column "shifts" to the mappings. A "shifts"
// slice with a single element means nothing moved. Shifts must only change
// columns, never lines (this panics otherwise).
func (pieces SourceMapPieces) Finalize(shifts []SourceMapShift) []byte {
	// An optimized path for when there are no shifts
	if len(shifts) == 1 {
		bytes := pieces.Prefix
		minCap := len(bytes) + len(pieces.Mappings) + len(pieces.Suffix)
		if cap(bytes) < minCap {
			bytes = append(make([]byte, 0, minCap), bytes...)
		}
		bytes = append(bytes, pieces.Mappings...)
		bytes = append(bytes, pieces.Suffix...)
		return bytes
	}

	startOfRun := 0
	current := 0
	generated := LineColumnOffset{}
	prevShiftColumnDelta := 0
	j := helpers.Joiner{}

	// Start the source map
	j.AddBytes(pieces.Prefix)

	// This assumes that a) all mappings are valid and b) all mappings are ordered
	// by increasing generated position. This should be the case for all mappings
	// generated by esbuild, which should be the only mappings we process here.
	for current < len(pieces.Mappings) {
		// Handle a line break
		if pieces.Mappings[current] == ';' {
			generated.Lines++
			generated.Columns = 0
			prevShiftColumnDelta = 0
			current++
			continue
		}

		// Mappings before this offset can be copied to the output verbatim
		potentialEndOfRun := current

		// Read the generated column
		generatedColumnDelta, next := DecodeVLQ(pieces.Mappings, current)
		generated.Columns += generatedColumnDelta
		current = next
		potentialStartOfRun := current

		// Skip over the original position information if present
		if current < len(pieces.Mappings) {
			_, current = DecodeVLQ(pieces.Mappings, current) // The original source
			_, current = DecodeVLQ(pieces.Mappings, current) // The original line
			_, current = DecodeVLQ(pieces.Mappings, current) // The original column

			// Skip over the original name if present
			if current < len(pieces.Mappings) {
				_, current = DecodeVLQ(pieces.Mappings, current)
			}
		}

		// Skip a trailing comma
		if current < len(pieces.Mappings) && pieces.Mappings[current] == ',' {
			current++
		}

		// Detect crossing shift boundaries
		didCrossBoundary := false
		for len(shifts) > 1 && shifts[1].Before.ComesBefore(generated) {
			shifts = shifts[1:]
			didCrossBoundary = true
		}
		if !didCrossBoundary {
			continue
		}

		// This shift isn't relevant if the next mapping after this shift is on a
		// following line. In that case, don't split and keep scanning instead.
		shift := shifts[0]
		if shift.After.Lines != generated.Lines {
			continue
		}

		// Add all previous mappings in a single run for efficiency. Since source
		// mappings are relative, no data needs to be modified inside this run.
		j.AddBytes(pieces.Mappings[startOfRun:potentialEndOfRun])

		// Then modify the first mapping across the shift boundary with the updated
		// generated column value. It's simplest to only support column shifts. This
		// is reasonable because import paths should not contain newlines.
		if shift.Before.Lines != shift.After.Lines {
			panic("Unexpected line change when shifting source maps")
		}
		shiftColumnDelta := shift.After.Columns - shift.Before.Columns
		j.AddBytes(encodeVLQ(nil, generatedColumnDelta+shiftColumnDelta-prevShiftColumnDelta))
		prevShiftColumnDelta = shiftColumnDelta

		// Finally, start the next run after the end of this generated column offset
		startOfRun = potentialStartOfRun
	}

	// Finish the source map
	j.AddBytes(pieces.Mappings[startOfRun:])
	j.AddBytes(pieces.Suffix)
	return j.Done()
}
// Coordinates in source maps are stored using relative offsets for size
// reasons. When joining together chunks of a source map that were emitted
// in parallel for different parts of a file, we need to fix up the first
// segment of each chunk to be relative to the end of the previous chunk.
type SourceMapState struct {
	// This isn't stored in the source map. It's only used by the bundler to join
	// source map chunks together correctly.
	GeneratedLine int

	// These are stored in the source map in VLQ format. "OriginalName" is only
	// meaningful when "HasOriginalName" is true.
	GeneratedColumn int
	SourceIndex     int
	OriginalLine    int
	OriginalColumn  int
	OriginalName    int
	HasOriginalName bool
}
// Source map chunks are computed in parallel for speed. Each chunk is relative
// to the zero state instead of being relative to the end state of the previous
// chunk, since it's impossible to know the end state of the previous chunk in
// a parallel computation.
//
// After all chunks are computed, they are joined together in a second pass.
// This rewrites the first mapping in each chunk to be relative to the end
// state of the previous chunk.
//
// "prevEndState" is the absolute state at the end of everything already
// written to "j" and "startState" is the absolute state where this chunk's
// output begins.
func AppendSourceMapChunk(j *helpers.Joiner, prevEndState SourceMapState, startState SourceMapState, buffer MappingsBuffer) {
	// Handle line breaks in between this mapping and the previous one
	if startState.GeneratedLine != 0 {
		j.AddBytes(bytes.Repeat([]byte{';'}, startState.GeneratedLine))
		prevEndState.GeneratedColumn = 0
	}

	// Skip past any leading semicolons, which indicate line breaks
	//
	// NOTE(review): this loop assumes the chunk contains at least one byte
	// that isn't ';' — presumably callers skip chunks flagged "ShouldIgnore",
	// which would guarantee that; verify against the call sites.
	semicolons := 0
	for buffer.Data[semicolons] == ';' {
		semicolons++
	}
	if semicolons > 0 {
		j.AddBytes(buffer.Data[:semicolons])
		prevEndState.GeneratedColumn = 0
		startState.GeneratedColumn = 0
	}

	// Strip off the first mapping from the buffer. The first mapping should be
	// for the start of the original file (the printer always generates one for
	// the start of the file).
	//
	// Note that we do not want to strip off the original name, even though it
	// could be a part of the first mapping. This will be handled using a special
	// case below instead. Original names are optional and are often omitted, so
	// we handle it uniformly by saving an index to the first original name,
	// which may or may not be a part of the first mapping.
	var sourceIndex int
	var originalLine int
	var originalColumn int
	omitSource := false
	generatedColumn, i := DecodeVLQ(buffer.Data, semicolons)
	// A segment that ends right after the generated column (end of data, ','
	// or ';') carries no original position information
	if i == len(buffer.Data) || strings.IndexByte(",;", buffer.Data[i]) != -1 {
		omitSource = true
	} else {
		sourceIndex, i = DecodeVLQ(buffer.Data, i)
		originalLine, i = DecodeVLQ(buffer.Data, i)
		originalColumn, i = DecodeVLQ(buffer.Data, i)
	}

	// Rewrite the first mapping to be relative to the end state of the previous
	// chunk. We now know what the end state is because we're in the second pass
	// where all chunks have already been generated.
	startState.GeneratedColumn += generatedColumn
	startState.SourceIndex += sourceIndex
	startState.OriginalLine += originalLine
	startState.OriginalColumn += originalColumn
	prevEndState.HasOriginalName = false // This is handled separately below
	rewritten, _ := appendMappingToBuffer(nil, j.LastByte(), prevEndState, startState, omitSource)
	j.AddBytes(rewritten)

	// Next, if there's an original name, we need to rewrite that as well to be
	// relative to that of the previous chunk.
	if buffer.FirstNameOffset.IsValid() {
		before := int(buffer.FirstNameOffset.GetIndex())
		originalName, after := DecodeVLQ(buffer.Data, before)
		originalName += startState.OriginalName - prevEndState.OriginalName
		j.AddBytes(buffer.Data[i:before])
		j.AddBytes(encodeVLQ(nil, originalName))
		j.AddBytes(buffer.Data[after:])
		return
	}

	// Otherwise, just append everything after that without modification
	j.AddBytes(buffer.Data[i:])
}
// appendMappingToBuffer writes one delta-encoded mapping segment to "buffer".
// "lastByte" is the most recently written byte (or 0 when the buffer is
// empty) and determines whether a ',' separator is needed. When "omitSource"
// is true only the generated column is written. The returned index is the
// byte offset of the encoded name delta and is only valid when
// "currentState.HasOriginalName" is set.
func appendMappingToBuffer(
	buffer []byte, lastByte byte, prevState SourceMapState, currentState SourceMapState, omitSource bool,
) ([]byte, ast.Index32) {
	// Mappings within a line are comma-separated. No separator is needed at
	// the start of the buffer, right after a ';' line break, or right after
	// the opening '"' of the JSON string.
	switch lastByte {
	case 0, ';', '"':
	default:
		buffer = append(buffer, ',')
	}

	// Record the mapping (note that the generated line is recorded using ';' elsewhere)
	buffer = encodeVLQ(buffer, currentState.GeneratedColumn-prevState.GeneratedColumn)
	if !omitSource {
		buffer = encodeVLQ(buffer, currentState.SourceIndex-prevState.SourceIndex)
		buffer = encodeVLQ(buffer, currentState.OriginalLine-prevState.OriginalLine)
		buffer = encodeVLQ(buffer, currentState.OriginalColumn-prevState.OriginalColumn)
	}

	// Record the optional original name
	var nameOffset ast.Index32
	if currentState.HasOriginalName {
		nameOffset = ast.MakeIndex32(uint32(len(buffer)))
		buffer = encodeVLQ(buffer, currentState.OriginalName-prevState.OriginalName)
	}
	return buffer, nameOffset
}
// LineOffsetTable accelerates converting a byte offset within one line of a
// source file into a column number.
//
// The source map specification is very loose and does not specify what
// column numbers actually mean. The popular "source-map" library from Mozilla
// appears to interpret them as counts of UTF-16 code units, so we generate
// those too for compatibility.
//
// We keep mapping tables around to accelerate conversion from byte offsets
// to UTF-16 code unit counts. However, this mapping takes up a lot of memory
// and generates a lot of garbage. Since most JavaScript is ASCII and the
// mapping for ASCII is 1:1, we avoid creating a table for ASCII-only lines
// as an optimization.
type LineOffsetTable struct {
	// Maps a byte offset within the line (relative to
	// "byteOffsetToFirstNonASCII") to the UTF-16 column of that byte. This is
	// nil for lines that are entirely ASCII.
	columnsForNonASCII []int32

	// The byte offset within the line of the first non-ASCII character. Only
	// meaningful when "columnsForNonASCII" is non-nil.
	byteOffsetToFirstNonASCII int32

	// The byte offset of the start of this line within the whole file
	byteOffsetToStartOfLine int32
}

// GenerateLineOffsetTables returns one LineOffsetTable per line of
// "contents", including the final line (which may be empty).
// "approximateLineCount" is just a capacity hint from the lexer used to
// preallocate the result.
func GenerateLineOffsetTables(contents string, approximateLineCount int32) []LineOffsetTable {
	// Note: these locals were renamed to lowerCamelCase to follow Go naming
	// conventions for unexported/local identifiers
	var columnsForNonASCII []int32
	byteOffsetToFirstNonASCII := int32(0)
	lineByteOffset := 0
	columnByteOffset := 0
	column := int32(0)

	// Preallocate the top-level table using the approximate line count from the lexer
	lineOffsetTables := make([]LineOffsetTable, 0, approximateLineCount)

	for i, c := range contents {
		// Mark the start of the next line
		if column == 0 {
			lineByteOffset = i
		}

		// Start the mapping if this character is non-ASCII
		if c > 0x7F && columnsForNonASCII == nil {
			columnByteOffset = i - lineByteOffset
			byteOffsetToFirstNonASCII = int32(columnByteOffset)
			columnsForNonASCII = []int32{}
		}

		// Update the per-byte column offsets
		if columnsForNonASCII != nil {
			for lineBytesSoFar := i - lineByteOffset; columnByteOffset <= lineBytesSoFar; columnByteOffset++ {
				columnsForNonASCII = append(columnsForNonASCII, column)
			}
		}

		switch c {
		case '\r', '\n', '\u2028', '\u2029':
			// Handle Windows-specific "\r\n" newlines: the '\n' half of the
			// pair terminates the line on the next iteration
			if c == '\r' && i+1 < len(contents) && contents[i+1] == '\n' {
				column++
				continue
			}

			lineOffsetTables = append(lineOffsetTables, LineOffsetTable{
				byteOffsetToStartOfLine:   int32(lineByteOffset),
				byteOffsetToFirstNonASCII: byteOffsetToFirstNonASCII,
				columnsForNonASCII:        columnsForNonASCII,
			})
			columnByteOffset = 0
			byteOffsetToFirstNonASCII = 0
			columnsForNonASCII = nil
			column = 0

		default:
			// Mozilla's "source-map" library counts columns using UTF-16 code units
			if c <= 0xFFFF {
				column++
			} else {
				column += 2
			}
		}
	}

	// Mark the start of the next line
	if column == 0 {
		lineByteOffset = len(contents)
	}

	// Do one last update for the column at the end of the file
	if columnsForNonASCII != nil {
		for lineBytesSoFar := len(contents) - lineByteOffset; columnByteOffset <= lineBytesSoFar; columnByteOffset++ {
			columnsForNonASCII = append(columnsForNonASCII, column)
		}
	}

	lineOffsetTables = append(lineOffsetTables, LineOffsetTable{
		byteOffsetToStartOfLine:   int32(lineByteOffset),
		byteOffsetToFirstNonASCII: byteOffsetToFirstNonASCII,
		columnsForNonASCII:        columnsForNonASCII,
	})
	return lineOffsetTables
}
// MappingsBuffer holds the VLQ-encoded "mappings" data for one chunk of the
// output file plus the byte offset of the first encoded name delta (invalid
// if the chunk references no names), so it can be patched when chunks are
// joined (see AppendSourceMapChunk).
type MappingsBuffer struct {
	Data            []byte
	FirstNameOffset ast.Index32
}
// Chunk is the finished source map fragment for one piece of printed output,
// ready to be joined with sibling chunks.
type Chunk struct {
	Buffer MappingsBuffer

	// The JSON-quoted original names referenced by this chunk's mappings, in
	// the order they were first encountered
	QuotedNames [][]byte

	// This end state will be used to rewrite the start of the following source
	// map chunk so that the delta-encoded VLQ numbers are preserved.
	EndState SourceMapState

	// There probably isn't a source mapping at the end of the file (nor should
	// there be) but if we're appending another source map chunk after this one,
	// we'll need to know how many characters were in the last line we generated.
	FinalGeneratedColumn int

	// True when the mappings data contains nothing but ';' line separators
	// (i.e. no actual mappings), in which case the chunk can be skipped
	ShouldIgnore bool
}
// ChunkBuilder incrementally builds the source map mappings for one chunk of
// printed output. Create one with MakeChunkBuilder, call AddSourceMapping
// while printing, and call GenerateChunk when the chunk is complete.
type ChunkBuilder struct {
	// If non-nil, mappings are composed through this nested input source map
	inputSourceMap *SourceMap

	// The VLQ-encoded "mappings" data generated so far
	sourceMap []byte

	// JSON-quoted original names in first-use order, plus a map from each
	// name to its index in "quotedNames"
	quotedNames [][]byte
	namesMap    map[string]uint32

	lineOffsetTables []LineOffsetTable

	// "prevOriginalName", "prevGeneratedLen", and "prevOriginalLoc" remember
	// the arguments of the last AddSourceMapping call to skip duplicates
	prevOriginalName string

	// The most recently emitted mapping, used as the base for delta encoding
	prevState SourceMapState

	// Number of bytes of the printed output already scanned by
	// updateGeneratedLineAndColumn
	lastGeneratedUpdate int

	// The current generated column in UTF-16 code units
	generatedColumn  int
	prevGeneratedLen int
	prevOriginalLoc  logger.Loc

	// Byte offset of the first encoded name delta in "sourceMap" (invalid if
	// no name has been emitted yet)
	firstNameOffset ast.Index32

	// True once at least one mapping has been emitted
	hasPrevState bool

	// When true, original names are quoted as ASCII-only JSON
	asciiOnly bool

	// This is a workaround for a bug in the popular "source-map" library:
	// https://github.com/mozilla/source-map/issues/261. The library will
	// sometimes return null when querying a source map unless every line
	// starts with a mapping at column zero.
	//
	// The workaround is to replicate the previous mapping if a line ends
	// up not starting with a mapping. This is done lazily because we want
	// to avoid replicating the previous mapping if we don't need to.
	lineStartsWithMapping     bool
	coverLinesWithoutMappings bool
}
// MakeChunkBuilder creates a ChunkBuilder. "inputSourceMap" may be nil; when
// present, generated mappings are composed through it. "lineOffsetTables"
// must describe the original source file, and "asciiOnly" controls how
// original names are quoted for JSON.
func MakeChunkBuilder(inputSourceMap *SourceMap, lineOffsetTables []LineOffsetTable, asciiOnly bool) ChunkBuilder {
	b := ChunkBuilder{
		inputSourceMap:   inputSourceMap,
		lineOffsetTables: lineOffsetTables,
		asciiOnly:        asciiOnly,
		namesMap:         make(map[string]uint32),

		// A start of -1 means "no previous original location yet"
		prevOriginalLoc: logger.Loc{Start: -1},
	}

	// We automatically repeat the previous source mapping if we ever generate
	// a line that doesn't start with a mapping. This helps give files more
	// complete mapping coverage without gaps.
	//
	// However, we probably shouldn't do this if the input file has a nested
	// source map that we will be remapping through. We have no idea what state
	// that source map is in and it could be pretty scrambled.
	//
	// I've seen cases where blindly repeating the last mapping for subsequent
	// lines gives very strange and unhelpful results with source maps from
	// other tools.
	b.coverLinesWithoutMappings = inputSourceMap == nil

	return b
}
// AddSourceMapping records a mapping from "originalLoc" in the original file
// to the current end of "output" (everything printed for this chunk so far).
// "originalName" is optional and, when non-empty, is registered in the
// chunk's name table. Back-to-back duplicate mappings are skipped.
func (b *ChunkBuilder) AddSourceMapping(originalLoc logger.Loc, originalName string, output []byte) {
	// Avoid generating duplicate mappings
	if originalLoc == b.prevOriginalLoc && (b.prevGeneratedLen == len(output) || b.prevOriginalName == originalName) {
		return
	}
	b.prevOriginalLoc = originalLoc
	b.prevGeneratedLen = len(output)
	b.prevOriginalName = originalName

	// Binary search to find the line: after the loop, "originalLine" is one
	// past the last line whose start offset is <= originalLoc.Start
	lineOffsetTables := b.lineOffsetTables
	count := len(lineOffsetTables)
	originalLine := 0
	for count > 0 {
		step := count / 2
		i := originalLine + step
		if lineOffsetTables[i].byteOffsetToStartOfLine <= originalLoc.Start {
			originalLine = i + 1
			count = count - step - 1
		} else {
			count = step
		}
	}
	originalLine--

	// Use the line to compute the column
	line := &lineOffsetTables[originalLine]
	originalColumn := int(originalLoc.Start - line.byteOffsetToStartOfLine)
	if line.columnsForNonASCII != nil && originalColumn >= int(line.byteOffsetToFirstNonASCII) {
		// Convert the byte offset into a UTF-16 column using the line's table
		originalColumn = int(line.columnsForNonASCII[originalColumn-int(line.byteOffsetToFirstNonASCII)])
	}

	b.updateGeneratedLineAndColumn(output)

	// If this line doesn't start with a mapping and we're about to add a mapping
	// that's not at the start, insert a mapping first so the line starts with one.
	if b.coverLinesWithoutMappings && !b.lineStartsWithMapping && b.generatedColumn > 0 && b.hasPrevState {
		b.appendMappingWithoutRemapping(SourceMapState{
			GeneratedLine:   b.prevState.GeneratedLine,
			GeneratedColumn: 0,
			SourceIndex:     b.prevState.SourceIndex,
			OriginalLine:    b.prevState.OriginalLine,
			OriginalColumn:  b.prevState.OriginalColumn,
		})
	}

	b.appendMapping(originalName, SourceMapState{
		GeneratedLine:   b.prevState.GeneratedLine,
		GeneratedColumn: b.generatedColumn,
		OriginalLine:    originalLine,
		OriginalColumn:  originalColumn,
	})

	// This line now has a mapping on it, so don't insert another one
	b.lineStartsWithMapping = true
}
// GenerateChunk finishes the chunk for the given printed output and returns
// the data needed to join it with sibling chunks.
func (b *ChunkBuilder) GenerateChunk(output []byte) Chunk {
	b.updateGeneratedLineAndColumn(output)

	// A chunk made of nothing but ';' line separators contains no mappings at
	// all and can be ignored entirely
	ignore := true
	for i := 0; i < len(b.sourceMap); i++ {
		if b.sourceMap[i] != ';' {
			ignore = false
			break
		}
	}

	return Chunk{
		Buffer: MappingsBuffer{
			Data:            b.sourceMap,
			FirstNameOffset: b.firstNameOffset,
		},
		QuotedNames:          b.quotedNames,
		EndState:             b.prevState,
		FinalGeneratedColumn: b.generatedColumn,
		ShouldIgnore:         ignore,
	}
}
// Scan over the printed text since the last source mapping and update the
// generated line and column numbers. Columns are counted in UTF-16 code
// units to match Mozilla's "source-map" library.
func (b *ChunkBuilder) updateGeneratedLineAndColumn(output []byte) {
	for i, c := range string(output[b.lastGeneratedUpdate:]) {
		switch c {
		case '\r', '\n', '\u2028', '\u2029':
			// Handle Windows-specific "\r\n" newlines: skip the '\r' here and
			// let the following '\n' trigger the line break instead.
			//
			// NOTE(review): a "\r\n" pair split across two calls to this
			// function would count as two line breaks; presumably the printer
			// never flushes between the two bytes — verify against callers.
			if c == '\r' {
				newlineCheck := b.lastGeneratedUpdate + i + 1
				if newlineCheck < len(output) && output[newlineCheck] == '\n' {
					continue
				}
			}

			// If we're about to move to the next line and the previous line didn't have
			// any mappings, add a mapping at the start of the previous line.
			if b.coverLinesWithoutMappings && !b.lineStartsWithMapping && b.hasPrevState {
				b.appendMappingWithoutRemapping(SourceMapState{
					GeneratedLine:   b.prevState.GeneratedLine,
					GeneratedColumn: 0,
					SourceIndex:     b.prevState.SourceIndex,
					OriginalLine:    b.prevState.OriginalLine,
					OriginalColumn:  b.prevState.OriginalColumn,
				})
			}

			b.prevState.GeneratedLine++
			b.prevState.GeneratedColumn = 0
			b.generatedColumn = 0
			b.sourceMap = append(b.sourceMap, ';')

			// This new line doesn't have a mapping yet
			b.lineStartsWithMapping = false

		default:
			// Mozilla's "source-map" library counts columns using UTF-16 code units
			if c <= 0xFFFF {
				b.generatedColumn++
			} else {
				b.generatedColumn += 2
			}
		}
	}

	b.lastGeneratedUpdate = len(output)
}
// appendMapping emits one mapping, first remapping it through the input
// source map when there is one (mappings with no counterpart in the input
// map are dropped), and registering "originalName" in the chunk's name table
// when it's non-empty.
func (b *ChunkBuilder) appendMapping(originalName string, currentState SourceMapState) {
	// If the input file had a source map, map all the way back to the original
	if b.inputSourceMap != nil {
		mapping := b.inputSourceMap.Find(
			int32(currentState.OriginalLine),
			int32(currentState.OriginalColumn))

		// Some locations won't have a mapping
		if mapping == nil {
			return
		}

		currentState.SourceIndex = int(mapping.SourceIndex)
		currentState.OriginalLine = int(mapping.OriginalLine)
		currentState.OriginalColumn = int(mapping.OriginalColumn)

		// Map all the way back to the original name if present. Otherwise, keep
		// the original name from esbuild, which corresponds to the name in the
		// intermediate source code. This is important for tools that only emit
		// a name mapping when the name is different than the original name.
		if mapping.OriginalName.IsValid() {
			originalName = b.inputSourceMap.Names[mapping.OriginalName.GetIndex()]
		}
	}

	// Optionally reference the original name, deduplicating via "namesMap"
	if originalName != "" {
		i, ok := b.namesMap[originalName]
		if !ok {
			i = uint32(len(b.quotedNames))
			b.quotedNames = append(b.quotedNames, helpers.QuoteForJSON(originalName, b.asciiOnly))
			b.namesMap[originalName] = i
		}
		currentState.OriginalName = int(i)
		currentState.HasOriginalName = true
	}

	b.appendMappingWithoutRemapping(currentState)
}
// appendMappingWithoutRemapping writes "currentState" to the mappings buffer
// as a delta from the previous state and makes it the new previous state.
func (b *ChunkBuilder) appendMappingWithoutRemapping(currentState SourceMapState) {
	lastByte := byte(0)
	if n := len(b.sourceMap); n != 0 {
		lastByte = b.sourceMap[n-1]
	}

	var nameOffset ast.Index32
	b.sourceMap, nameOffset = appendMappingToBuffer(b.sourceMap, lastByte, b.prevState, currentState, false)

	savedName := b.prevState.OriginalName
	b.prevState = currentState
	switch {
	case !currentState.HasOriginalName:
		// No name delta was written, so keep the old name index so that the
		// next name delta is still computed from the right base
		b.prevState.OriginalName = savedName
	case !b.firstNameOffset.IsValid():
		// Keep track of the first name offset so we can jump right to it later
		b.firstNameOffset = nameOffset
	}

	b.hasPrevState = true
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/resolver/yarnpnp_test.go | internal/resolver/yarnpnp_test.go | package resolver
import (
"encoding/json"
"fmt"
"io/ioutil"
"strings"
"testing"
"github.com/evanw/esbuild/internal/config"
"github.com/evanw/esbuild/internal/fs"
"github.com/evanw/esbuild/internal/js_parser"
"github.com/evanw/esbuild/internal/logger"
"github.com/evanw/esbuild/internal/test"
)
// pnpTestExpectation mirrors one entry of the "testExpectations.json"
// fixture: a Yarn PnP manifest plus the resolution test cases to run with it.
type pnpTestExpectation struct {
	Manifest interface{}
	Tests    []pnpTest
}
// pnpTest is a single resolution test case: resolving "Imported" from
// "Importer" is expected to produce "Expected". "It" is the human-readable
// test description.
type pnpTest struct {
	It       string
	Imported string
	Importer string
	Expected string
}
// TestYarnPnP runs the Yarn PnP resolution fixtures from
// "testExpectations.json" against esbuild's PnP implementation. Each fixture
// provides a PnP manifest plus (importer, imported, expected) cases.
//
// Note: a stray "t.Helper()" call was removed here. Marking a top-level Test
// function as a helper only misattributes failure locations.
func TestYarnPnP(t *testing.T) {
	contents, err := ioutil.ReadFile("testExpectations.json")
	if err != nil {
		t.Fatalf("Failed to read testExpectations.json: %s", err.Error())
	}

	var expectations []pnpTestExpectation
	err = json.Unmarshal(contents, &expectations)
	if err != nil {
		t.Fatalf("Failed to parse testExpectations.json: %s", err.Error())
	}

	for i, expectation := range expectations {
		path := fmt.Sprintf("testExpectations[%d].manifest", i)

		// Re-serialize the manifest and parse it with esbuild's JSON parser,
		// since that's the form "compileYarnPnPData" consumes
		contents, err := json.Marshal(expectation.Manifest)
		if err != nil {
			t.Fatalf("Failed to generate JSON: %s", err.Error())
		}

		source := logger.Source{
			KeyPath:     logger.Path{Text: path},
			PrettyPaths: logger.PrettyPaths{Abs: path, Rel: path},
			Contents:    string(contents),
		}
		tempLog := logger.NewDeferLog(logger.DeferLogAll, nil)
		expr, ok := js_parser.ParseJSON(tempLog, source, js_parser.JSONOptions{})
		if !ok {
			t.Fatalf("Failed to re-parse JSON: %s", path)
		}
		msgs := tempLog.Done()
		if len(msgs) != 0 {
			t.Fatalf("Log not empty after re-parsing JSON: %s", path)
		}

		manifest := compileYarnPnPData(path, "/path/to/project/", expr, source)

		for _, current := range expectation.Tests {
			func(current pnpTest) {
				t.Run(current.It, func(t *testing.T) {
					// Named "mockFS" instead of "fs" to avoid shadowing the
					// imported "fs" package
					mockFS := fs.MockFS(nil, fs.MockUnix, "/")
					r := resolverQuery{Resolver: NewResolver(config.BuildCall, mockFS, logger.NewDeferLog(logger.DeferLogNoVerboseOrDebug, nil), nil, &config.Options{})}
					result := r.resolveToUnqualified(current.Imported, current.Importer, manifest)
					var observed string
					switch result.status {
					case pnpSuccess:
						observed = mockFS.Join(result.pkgDirPath, result.pkgSubpath)
					case pnpSkipped:
						observed = current.Imported
					default:
						observed = "error!"
					}

					// If we aren't going through PnP, then we should just run the
					// normal node module resolution rules instead of throwing an error.
					// However, this test requires us to throw an error, which seems
					// incorrect. So we change the expected value of the test instead.
					expected := current.Expected
					if current.It == `shouldn't go through PnP when trying to resolve dependencies from packages covered by ignorePatternData` {
						expected = current.Imported
					} else if observed != "error!" && !strings.HasSuffix(observed, "/") {
						// This is important for matching Yarn PnP's expectations in tests,
						// but it's important for esbuild that the slash isn't present.
						// Otherwise esbuild's implementation of node module resolution
						// (which runs after Yarn PnP resolution) will fail. Specifically
						// "foo/" will look for "foo/foo.js" instead of "foo/index.js".
						observed += "/"
					}
					test.AssertEqualWithDiff(t, observed, expected)
				})
			}(current)
		}
	}
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/resolver/tsconfig_json.go | internal/resolver/tsconfig_json.go | package resolver
import (
"fmt"
"strings"
"github.com/evanw/esbuild/internal/cache"
"github.com/evanw/esbuild/internal/config"
"github.com/evanw/esbuild/internal/fs"
"github.com/evanw/esbuild/internal/helpers"
"github.com/evanw/esbuild/internal/js_ast"
"github.com/evanw/esbuild/internal/js_lexer"
"github.com/evanw/esbuild/internal/js_parser"
"github.com/evanw/esbuild/internal/logger"
)
// TSConfigJSON is the parsed form of a "tsconfig.json" file, holding just
// the settings that esbuild makes use of.
type TSConfigJSON struct {
	// The path of this "tsconfig.json" file itself
	AbsPath string

	// The absolute path of "compilerOptions.baseUrl"
	BaseURL *string

	// This is used if "Paths" is non-nil. It's equal to "BaseURL" except if
	// "BaseURL" is missing, in which case it is as if "BaseURL" was ".". This
	// is to implement the "paths without baseUrl" feature from TypeScript 4.1.
	// More info: https://github.com/microsoft/TypeScript/issues/31869
	BaseURLForPaths string

	// The verbatim values of "compilerOptions.paths". The keys are patterns to
	// match and the values are arrays of fallback paths to search. Each key and
	// each fallback path can optionally have a single "*" wildcard character.
	// If both the key and the value have a wildcard, the substring matched by
	// the wildcard is substituted into the fallback path. The keys represent
	// module-style path names and the fallback paths are relative to the
	// "baseUrl" value in the "tsconfig.json" file.
	Paths *TSConfigPaths

	// Remembers where the "target" setting came from, for error messages
	tsTargetKey tsTargetKey

	// The "strict" and "alwaysStrict" settings; see TSAlwaysStrictOrStrict
	TSStrict       *config.TSAlwaysStrict
	TSAlwaysStrict *config.TSAlwaysStrict

	JSXSettings config.TSConfigJSX
	Settings    config.TSConfig
}
// applyExtendedConfig copies settings from a base "tsconfig.json" file (the
// target of an "extends" clause) into this derived config. Only settings
// actually present in the base config are inherited.
func (derived *TSConfigJSON) applyExtendedConfig(base TSConfigJSON) {
	// The nested settings objects know how to merge themselves
	derived.JSXSettings.ApplyExtendedConfig(base.JSXSettings)
	derived.Settings.ApplyExtendedConfig(base.Settings)

	// Individual settings are only inherited when the base config has them
	if base.BaseURL != nil {
		derived.BaseURL = base.BaseURL
	}
	if base.Paths != nil {
		// "paths" entries are resolved relative to the file that declared
		// them, so the matching base URL comes along
		derived.Paths = base.Paths
		derived.BaseURLForPaths = base.BaseURLForPaths
	}
	if base.TSStrict != nil {
		derived.TSStrict = base.TSStrict
	}
	if base.TSAlwaysStrict != nil {
		derived.TSAlwaysStrict = base.TSAlwaysStrict
	}
	if base.tsTargetKey.Range.Len > 0 {
		derived.tsTargetKey = base.tsTargetKey
	}
}
// TSAlwaysStrictOrStrict returns the effective "alwaysStrict" setting.
//
// The receiver was previously named "config", which shadowed the imported
// "config" package inside the method body; it has been renamed.
func (tsConfig *TSConfigJSON) TSAlwaysStrictOrStrict() *config.TSAlwaysStrict {
	if tsConfig.TSAlwaysStrict != nil {
		return tsConfig.TSAlwaysStrict
	}

	// If "alwaysStrict" is absent, it defaults to "strict" instead
	return tsConfig.TSStrict
}
// This information is only used for error messages
type tsTargetKey struct {
	// The lowercased value of the "target" setting
	LowerValue string
	Source     logger.Source
	Range      logger.Range
}

// TSConfigPath is one fallback path from a "compilerOptions.paths" entry,
// along with its location for error reporting.
type TSConfigPath struct {
	Text string
	Loc  logger.Loc
}

// TSConfigPaths is the parsed "compilerOptions.paths" map.
type TSConfigPaths struct {
	Map map[string][]TSConfigPath

	// This may be different from the original "tsconfig.json" source if the
	// "paths" value is from another file via an "extends" clause.
	Source logger.Source
}
func ParseTSConfigJSON(
log logger.Log,
source logger.Source,
jsonCache *cache.JSONCache,
fs fs.FS,
fileDir string,
configDir string,
extends func(string, logger.Range) *TSConfigJSON,
) *TSConfigJSON {
// Unfortunately "tsconfig.json" isn't actually JSON. It's some other
// format that appears to be defined by the implementation details of the
// TypeScript compiler.
//
// Attempt to parse it anyway by modifying the JSON parser, but just for
// these particular files. This is likely not a completely accurate
// emulation of what the TypeScript compiler does (e.g. string escape
// behavior may also be different).
json, ok := jsonCache.Parse(log, source, js_parser.JSONOptions{Flavor: js_lexer.TSConfigJSON})
if !ok {
return nil
}
var result TSConfigJSON
result.AbsPath = source.KeyPath.Text
tracker := logger.MakeLineColumnTracker(&source)
// Parse "extends"
if extends != nil {
if valueJSON, _, ok := getProperty(json, "extends"); ok {
if value, ok := getString(valueJSON); ok {
if base := extends(value, source.RangeOfString(valueJSON.Loc)); base != nil {
result.applyExtendedConfig(*base)
}
} else if array, ok := valueJSON.Data.(*js_ast.EArray); ok {
for _, item := range array.Items {
if str, ok := getString(item); ok {
if base := extends(str, source.RangeOfString(item.Loc)); base != nil {
result.applyExtendedConfig(*base)
}
}
}
}
}
}
// Parse "compilerOptions"
if compilerOptionsJSON, _, ok := getProperty(json, "compilerOptions"); ok {
// Parse "baseUrl"
if valueJSON, _, ok := getProperty(compilerOptionsJSON, "baseUrl"); ok {
if value, ok := getString(valueJSON); ok {
value = getSubstitutedPathWithConfigDirTemplate(fs, value, configDir)
if !fs.IsAbs(value) {
value = fs.Join(fileDir, value)
}
result.BaseURL = &value
}
}
// Parse "jsx"
if valueJSON, _, ok := getProperty(compilerOptionsJSON, "jsx"); ok {
if value, ok := getString(valueJSON); ok {
switch strings.ToLower(value) {
case "preserve":
result.JSXSettings.JSX = config.TSJSXPreserve
case "react-native":
result.JSXSettings.JSX = config.TSJSXReactNative
case "react":
result.JSXSettings.JSX = config.TSJSXReact
case "react-jsx":
result.JSXSettings.JSX = config.TSJSXReactJSX
case "react-jsxdev":
result.JSXSettings.JSX = config.TSJSXReactJSXDev
}
}
}
// Parse "jsxFactory"
if valueJSON, _, ok := getProperty(compilerOptionsJSON, "jsxFactory"); ok {
if value, ok := getString(valueJSON); ok {
result.JSXSettings.JSXFactory = parseMemberExpressionForJSX(log, &source, &tracker, valueJSON.Loc, value)
}
}
// Parse "jsxFragmentFactory"
if valueJSON, _, ok := getProperty(compilerOptionsJSON, "jsxFragmentFactory"); ok {
if value, ok := getString(valueJSON); ok {
result.JSXSettings.JSXFragmentFactory = parseMemberExpressionForJSX(log, &source, &tracker, valueJSON.Loc, value)
}
}
// Parse "jsxImportSource"
if valueJSON, _, ok := getProperty(compilerOptionsJSON, "jsxImportSource"); ok {
if value, ok := getString(valueJSON); ok {
result.JSXSettings.JSXImportSource = &value
}
}
// Parse "experimentalDecorators"
if valueJSON, _, ok := getProperty(compilerOptionsJSON, "experimentalDecorators"); ok {
if value, ok := getBool(valueJSON); ok {
if value {
result.Settings.ExperimentalDecorators = config.True
} else {
result.Settings.ExperimentalDecorators = config.False
}
}
}
// Parse "useDefineForClassFields"
if valueJSON, _, ok := getProperty(compilerOptionsJSON, "useDefineForClassFields"); ok {
if value, ok := getBool(valueJSON); ok {
if value {
result.Settings.UseDefineForClassFields = config.True
} else {
result.Settings.UseDefineForClassFields = config.False
}
}
}
// Parse "target"
if valueJSON, keyLoc, ok := getProperty(compilerOptionsJSON, "target"); ok {
if value, ok := getString(valueJSON); ok {
lowerValue := strings.ToLower(value)
ok := true
// See https://www.typescriptlang.org/tsconfig#target
switch lowerValue {
case "es3", "es5", "es6", "es2015", "es2016", "es2017", "es2018", "es2019", "es2020", "es2021":
result.Settings.Target = config.TSTargetBelowES2022
case "es2022", "es2023", "es2024", "esnext":
result.Settings.Target = config.TSTargetAtOrAboveES2022
default:
ok = false
if !helpers.IsInsideNodeModules(source.KeyPath.Text) {
log.AddID(logger.MsgID_TSConfigJSON_InvalidTarget, logger.Warning, &tracker, source.RangeOfString(valueJSON.Loc),
fmt.Sprintf("Unrecognized target environment %q", value))
}
}
if ok {
result.tsTargetKey = tsTargetKey{
Source: source,
Range: source.RangeOfString(keyLoc),
LowerValue: lowerValue,
}
}
}
}
// Parse "strict"
if valueJSON, keyLoc, ok := getProperty(compilerOptionsJSON, "strict"); ok {
if value, ok := getBool(valueJSON); ok {
valueRange := js_lexer.RangeOfIdentifier(source, valueJSON.Loc)
result.TSStrict = &config.TSAlwaysStrict{
Name: "strict",
Value: value,
Source: source,
Range: logger.Range{Loc: keyLoc, Len: valueRange.End() - keyLoc.Start},
}
}
}
// Parse "alwaysStrict"
if valueJSON, keyLoc, ok := getProperty(compilerOptionsJSON, "alwaysStrict"); ok {
if value, ok := getBool(valueJSON); ok {
valueRange := js_lexer.RangeOfIdentifier(source, valueJSON.Loc)
result.TSAlwaysStrict = &config.TSAlwaysStrict{
Name: "alwaysStrict",
Value: value,
Source: source,
Range: logger.Range{Loc: keyLoc, Len: valueRange.End() - keyLoc.Start},
}
}
}
// Parse "importsNotUsedAsValues"
if valueJSON, _, ok := getProperty(compilerOptionsJSON, "importsNotUsedAsValues"); ok {
if value, ok := getString(valueJSON); ok {
switch value {
case "remove":
result.Settings.ImportsNotUsedAsValues = config.TSImportsNotUsedAsValues_Remove
case "preserve":
result.Settings.ImportsNotUsedAsValues = config.TSImportsNotUsedAsValues_Preserve
case "error":
result.Settings.ImportsNotUsedAsValues = config.TSImportsNotUsedAsValues_Error
default:
log.AddID(logger.MsgID_TSConfigJSON_InvalidImportsNotUsedAsValues, logger.Warning, &tracker, source.RangeOfString(valueJSON.Loc),
fmt.Sprintf("Invalid value %q for \"importsNotUsedAsValues\"", value))
}
}
}
// Parse "preserveValueImports"
if valueJSON, _, ok := getProperty(compilerOptionsJSON, "preserveValueImports"); ok {
if value, ok := getBool(valueJSON); ok {
if value {
result.Settings.PreserveValueImports = config.True
} else {
result.Settings.PreserveValueImports = config.False
}
}
}
// Parse "verbatimModuleSyntax"
if valueJSON, _, ok := getProperty(compilerOptionsJSON, "verbatimModuleSyntax"); ok {
if value, ok := getBool(valueJSON); ok {
if value {
result.Settings.VerbatimModuleSyntax = config.True
} else {
result.Settings.VerbatimModuleSyntax = config.False
}
}
}
// Parse "paths"
if valueJSON, _, ok := getProperty(compilerOptionsJSON, "paths"); ok {
if paths, ok := valueJSON.Data.(*js_ast.EObject); ok {
result.BaseURLForPaths = fileDir
result.Paths = &TSConfigPaths{Source: source, Map: make(map[string][]TSConfigPath)}
for _, prop := range paths.Properties {
if key, ok := getString(prop.Key); ok {
if !isValidTSConfigPathPattern(key, log, &source, &tracker, prop.Key.Loc) {
continue
}
// The "paths" field is an object which maps a pattern to an
// array of remapping patterns to try, in priority order. See
// the documentation for examples of how this is used:
// https://www.typescriptlang.org/docs/handbook/module-resolution.html#path-mapping.
//
// One particular example:
//
// {
// "compilerOptions": {
// "baseUrl": "projectRoot",
// "paths": {
// "*": [
// "*",
// "generated/*"
// ]
// }
// }
// }
//
// Matching "folder1/file2" should first check "projectRoot/folder1/file2"
// and then, if that didn't work, also check "projectRoot/generated/folder1/file2".
if array, ok := prop.ValueOrNil.Data.(*js_ast.EArray); ok {
for _, item := range array.Items {
if str, ok := getString(item); ok {
if isValidTSConfigPathPattern(str, log, &source, &tracker, item.Loc) {
str = getSubstitutedPathWithConfigDirTemplate(fs, str, configDir)
result.Paths.Map[key] = append(result.Paths.Map[key], TSConfigPath{Text: str, Loc: item.Loc})
}
}
}
} else {
log.AddID(logger.MsgID_TSConfigJSON_InvalidPaths, logger.Warning, &tracker, source.RangeOfString(prop.ValueOrNil.Loc), fmt.Sprintf(
"Substitutions for pattern %q should be an array", key))
}
}
}
}
}
}
// Warn about compiler options not wrapped in "compilerOptions".
// For example: https://github.com/evanw/esbuild/issues/3301
if obj, ok := json.Data.(*js_ast.EObject); ok {
loop:
for _, prop := range obj.Properties {
if key, ok := prop.Key.Data.(*js_ast.EString); ok && key.Value != nil {
key := helpers.UTF16ToString(key.Value)
switch key {
case "alwaysStrict",
"baseUrl",
"experimentalDecorators",
"importsNotUsedAsValues",
"jsx",
"jsxFactory",
"jsxFragmentFactory",
"jsxImportSource",
"paths",
"preserveValueImports",
"strict",
"target",
"useDefineForClassFields",
"verbatimModuleSyntax":
log.AddIDWithNotes(logger.MsgID_TSConfigJSON_InvalidTopLevelOption, logger.Warning, &tracker, source.RangeOfString(prop.Key.Loc),
fmt.Sprintf("Expected the %q option to be nested inside a \"compilerOptions\" object", key),
[]logger.MsgData{})
break loop
}
}
}
}
return &result
}
// See: https://github.com/microsoft/TypeScript/pull/58042
// getSubstitutedPathWithConfigDirTemplate implements TypeScript's "${configDir}"
// template variable for "tsconfig.json" path values. If "value" begins with the
// template, the template is replaced by joining "basePath" (the directory
// containing the root tsconfig file) with the remainder of the path. Otherwise
// the value is returned unchanged. Note that, like TypeScript, the template is
// only recognized at the very start of the string.
func getSubstitutedPathWithConfigDirTemplate(fs fs.FS, value string, basePath string) string {
	const configDirTemplate = "${configDir}"
	if strings.HasPrefix(value, configDirTemplate) {
		// The "./" prefix keeps the remainder relative before it's joined
		return fs.Join(basePath, "./"+value[len(configDirTemplate):])
	}
	return value
}
// parseMemberExpressionForJSX splits a dotted member expression such as
// "React.createElement" into its identifier parts. It returns nil for empty
// input, and also returns nil (after logging a warning) if any dot-separated
// part is not a valid JavaScript identifier.
func parseMemberExpressionForJSX(log logger.Log, source *logger.Source, tracker *logger.LineColumnTracker, loc logger.Loc, text string) []string {
	if len(text) == 0 {
		return nil
	}
	members := strings.Split(text, ".")
	for _, member := range members {
		if js_ast.IsIdentifier(member) {
			continue
		}
		log.AddID(logger.MsgID_TSConfigJSON_InvalidJSX, logger.Warning, tracker, source.RangeOfString(loc),
			fmt.Sprintf("Invalid JSX member expression: %q", text))
		return nil
	}
	return members
}
// isValidTSConfigPathPattern checks that a "paths" pattern from "tsconfig.json"
// contains at most one "*" wildcard character. Patterns with two or more
// wildcards cause a warning to be logged and are rejected.
func isValidTSConfigPathPattern(text string, log logger.Log, source *logger.Source, tracker *logger.LineColumnTracker, loc logger.Loc) bool {
	if strings.Count(text, "*") > 1 {
		log.AddID(logger.MsgID_TSConfigJSON_InvalidPaths, logger.Warning, tracker, source.RangeOfString(loc), fmt.Sprintf(
			"Invalid pattern %q, must have at most one \"*\" character", text))
		return false
	}
	return true
}
// isSlash reports whether c is a path separator (either the POSIX forward
// slash or the Windows backslash).
func isSlash(c byte) bool {
	switch c {
	case '/', '\\':
		return true
	}
	return false
}
// isValidTSConfigPathNoBaseURLPattern checks that a "paths" pattern is usable
// when "baseUrl" is not set. In that mode TypeScript requires each pattern to
// be either relative (".", "..", "./x", "../x", including backslash variants)
// or absolute (POSIX "/", UNC "\\", or a DOS drive like "c:/"). Anything else
// gets a warning with a "./" hint and is rejected. The tracker is created
// lazily so callers only pay for it when a message is actually emitted.
func isValidTSConfigPathNoBaseURLPattern(text string, log logger.Log, source *logger.Source, tracker **logger.LineColumnTracker, loc logger.Loc) bool {
	// Safe byte access: out-of-range indices read as 0
	byteAt := func(i int) byte {
		if i < len(text) {
			return text[i]
		}
		return 0
	}
	n := len(text)
	c0, c1, c2 := byteAt(0), byteAt(1), byteAt(2)

	switch {
	case c0 == '.' && (n == 1 || (n == 2 && c1 == '.')):
		// Relative "." or ".."
		return true
	case c0 == '.' && (isSlash(c1) || (c1 == '.' && isSlash(c2))):
		// Relative "./" or "../" or ".\\" or "..\\"
		return true
	case isSlash(c0):
		// Absolute POSIX "/" or UNC "\\"
		return true
	case ((c0 >= 'a' && c0 <= 'z') || (c0 >= 'A' && c0 <= 'Z')) && c1 == ':' && isSlash(c2):
		// Absolute DOS "c:/" or "c:\\"
		return true
	}

	// Rejected: build the tracker on demand and warn about the bad pattern
	if *tracker == nil {
		t := logger.MakeLineColumnTracker(source)
		*tracker = &t
	}
	log.AddID(logger.MsgID_TSConfigJSON_InvalidPaths, logger.Warning, *tracker, source.RangeOfString(loc), fmt.Sprintf(
		"Non-relative path %q is not allowed when \"baseUrl\" is not set (did you forget a leading \"./\"?)", text))
	return false
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/resolver/resolver.go | internal/resolver/resolver.go | package resolver
import (
"errors"
"fmt"
"path"
"regexp"
"sort"
"strings"
"sync"
"syscall"
"github.com/evanw/esbuild/internal/ast"
"github.com/evanw/esbuild/internal/cache"
"github.com/evanw/esbuild/internal/compat"
"github.com/evanw/esbuild/internal/config"
"github.com/evanw/esbuild/internal/fs"
"github.com/evanw/esbuild/internal/helpers"
"github.com/evanw/esbuild/internal/js_ast"
"github.com/evanw/esbuild/internal/logger"
)
var defaultMainFields = map[config.Platform][]string{
// Note that this means if a package specifies "main", "module", and
// "browser" then "browser" will win out over "module". This is the
// same behavior as webpack: https://github.com/webpack/webpack/issues/4674.
//
// This is deliberate because the presence of the "browser" field is a
// good signal that the "module" field may have non-browser stuff in it,
// which will crash or fail to be bundled when targeting the browser.
config.PlatformBrowser: {"browser", "module", "main"},
// Note that this means if a package specifies "module" and "main", the ES6
// module will not be selected. This means tree shaking will not work when
// targeting node environments.
//
// This is unfortunately necessary for compatibility. Some packages
// incorrectly treat the "module" field as "code for the browser". It
// actually means "code for ES6 environments" which includes both node
// and the browser.
//
// For example, the package "@firebase/app" prints a warning on startup about
// the bundler incorrectly using code meant for the browser if the bundler
// selects the "module" field instead of the "main" field.
//
// If you want to enable tree shaking when targeting node, you will have to
// configure the main fields to be "module" and then "main". Keep in mind
// that some packages may break if you do this.
config.PlatformNode: {"main", "module"},
// The neutral platform is for people that don't want esbuild to try to
// pick good defaults for their platform. In that case, the list of main
// fields is empty by default. You must explicitly configure it yourself.
config.PlatformNeutral: {},
}
// These are the main fields to use when the "main fields" setting is configured
// to something unusual, such as something without the "main" field.
var mainFieldsForFailure = []string{"main", "module"}
// Path resolution is a mess. One tricky issue is the "module" override for the
// "main" field in "package.json" files. Bundlers generally prefer "module" over
// "main" but that breaks packages that export a function in "main" for use with
// "require()", since resolving to "module" means an object will be returned. We
// attempt to handle this automatically by having import statements resolve to
// "module" but switch that out later for "main" if "require()" is used too.
type PathPair struct {
// Either secondary will be empty, or primary will be "module" and secondary
// will be "main"
Primary logger.Path
Secondary logger.Path
IsExternal bool
}
func (pp *PathPair) iter() []*logger.Path {
result := []*logger.Path{&pp.Primary, &pp.Secondary}
if !pp.HasSecondary() {
result = result[:1]
}
return result
}
func (pp *PathPair) HasSecondary() bool {
return pp.Secondary.Text != ""
}
type SideEffectsData struct {
Source *logger.Source
// If non-empty, this false value came from a plugin
PluginName string
Range logger.Range
// If true, "sideEffects" was an array. If false, "sideEffects" was false.
IsSideEffectsArrayInJSON bool
}
type ResolveResult struct {
PathPair PathPair
// If this was resolved by a plugin, the plugin gets to store its data here
PluginData interface{}
DifferentCase *fs.DifferentCase
// If present, any ES6 imports to this file can be considered to have no side
// effects. This means they should be removed if unused.
PrimarySideEffectsData *SideEffectsData
// These are from "tsconfig.json"
TSConfigJSX config.TSConfigJSX
TSConfig *config.TSConfig
TSAlwaysStrict *config.TSAlwaysStrict
// This is the "type" field from "package.json"
ModuleTypeData js_ast.ModuleTypeData
}
type suggestionRange uint8
const (
suggestionRangeFull suggestionRange = iota
suggestionRangeEnd
)
type DebugMeta struct {
notes []logger.MsgData
suggestionText string
suggestionMessage string
suggestionRange suggestionRange
ModifiedImportPath string
}
func (dm DebugMeta) LogErrorMsg(log logger.Log, source *logger.Source, r logger.Range, text string, suggestion string, notes []logger.MsgData) {
tracker := logger.MakeLineColumnTracker(source)
if source != nil && dm.suggestionMessage != "" {
suggestionRange := r
if dm.suggestionRange == suggestionRangeEnd {
suggestionRange = logger.Range{Loc: logger.Loc{Start: r.End() - 1}}
}
data := tracker.MsgData(suggestionRange, dm.suggestionMessage)
data.Location.Suggestion = dm.suggestionText
dm.notes = append(dm.notes, data)
}
msg := logger.Msg{
Kind: logger.Error,
Data: tracker.MsgData(r, text),
Notes: append(dm.notes, notes...),
}
if msg.Data.Location != nil && suggestion != "" {
msg.Data.Location.Suggestion = suggestion
}
log.AddMsg(msg)
}
type Resolver struct {
fs fs.FS
log logger.Log
caches *cache.CacheSet
tsConfigOverride *TSConfigJSON
// These are sets that represent various conditions for the "exports" field
// in package.json.
esmConditionsDefault map[string]bool
esmConditionsImport map[string]bool
esmConditionsRequire map[string]bool
// A special filtered import order for CSS "@import" imports.
//
// The "resolve extensions" setting determines the order of implicit
// extensions to try when resolving imports with the extension omitted.
// Sometimes people create a JavaScript/TypeScript file and a CSS file with
// the same name when they create a component. At a high level, users expect
// implicit extensions to resolve to the JS file when being imported from JS
// and to resolve to the CSS file when being imported from CSS.
//
// Different bundlers handle this in different ways. Parcel handles this by
// having the resolver prefer the same extension as the importing file in
// front of the configured "resolve extensions" order. Webpack's "css-loader"
// plugin just explicitly configures a special "resolve extensions" order
// consisting of only ".css" for CSS files.
//
// It's unclear what behavior is best here. What we currently do is to create
// a special filtered version of the configured "resolve extensions" order
// for CSS files that filters out any extension that has been explicitly
// configured with a non-CSS loader. This still gives users control over the
// order but avoids the scenario where we match an import in a CSS file to a
// JavaScript-related file. It's probably not perfect with plugins in the
// picture but it's better than some alternatives and probably pretty good.
cssExtensionOrder []string
// A special sorted import order for imports inside packages.
//
// The "resolve extensions" setting determines the order of implicit
// extensions to try when resolving imports with the extension omitted.
// Sometimes people author a package using TypeScript and publish both the
// compiled JavaScript and the original TypeScript. The compiled JavaScript
// depends on the "tsconfig.json" settings that were passed to "tsc" when
// it was compiled, and we don't know what they are (they may even be
// unknowable if the "tsconfig.json" file wasn't published).
//
// To work around this, we sort TypeScript file extensions after JavaScript
// file extensions (but only within packages) so that esbuild doesn't load
// the original source code in these scenarios. Instead we should load the
// compiled code, which is what will be loaded by node at run-time.
nodeModulesExtensionOrder []string
// This cache maps a directory path to information about that directory and
// all parent directories
dirCache map[string]*dirInfo
pnpManifestWasChecked bool
pnpManifest *pnpData
options config.Options
// This mutex serves two purposes. First of all, it guards access to "dirCache"
// which is potentially mutated during path resolution. But this mutex is also
// necessary for performance. The "React admin" benchmark mysteriously runs
// twice as fast when this mutex is locked around the whole resolve operation
// instead of around individual accesses to "dirCache". For some reason,
// reducing parallelism in the resolver helps the rest of the bundler go
// faster. I'm not sure why this is but please don't change this unless you
// do a lot of testing with various benchmarks and there aren't any regressions.
mutex sync.Mutex
}
type resolverQuery struct {
*Resolver
debugMeta *DebugMeta
debugLogs *debugLogs
kind ast.ImportKind
}
func NewResolver(call config.APICall, fs fs.FS, log logger.Log, caches *cache.CacheSet, options *config.Options) *Resolver {
// Filter out non-CSS extensions for CSS "@import" imports
cssExtensionOrder := make([]string, 0, len(options.ExtensionOrder))
for _, ext := range options.ExtensionOrder {
if loader := config.LoaderFromFileExtension(options.ExtensionToLoader, ext); loader == config.LoaderNone || loader.IsCSS() {
cssExtensionOrder = append(cssExtensionOrder, ext)
}
}
// Sort all TypeScript file extensions after all JavaScript file extensions
// for imports of files inside of "node_modules" directories. But insert
// the TypeScript file extensions right after the last JavaScript file
// extension instead of at the end so that they might come before the
// first CSS file extension, which is important to people that publish
// TypeScript and CSS code to npm with the same file names for both.
nodeModulesExtensionOrder := make([]string, 0, len(options.ExtensionOrder))
split := 0
for i, ext := range options.ExtensionOrder {
if loader := config.LoaderFromFileExtension(options.ExtensionToLoader, ext); loader == config.LoaderJS || loader == config.LoaderJSX {
split = i + 1 // Split after the last JavaScript extension
}
}
if split != 0 { // Only do this if there are any JavaScript extensions
for _, ext := range options.ExtensionOrder[:split] { // Non-TypeScript extensions before the split
if loader := config.LoaderFromFileExtension(options.ExtensionToLoader, ext); !loader.IsTypeScript() {
nodeModulesExtensionOrder = append(nodeModulesExtensionOrder, ext)
}
}
for _, ext := range options.ExtensionOrder { // All TypeScript extensions
if loader := config.LoaderFromFileExtension(options.ExtensionToLoader, ext); loader.IsTypeScript() {
nodeModulesExtensionOrder = append(nodeModulesExtensionOrder, ext)
}
}
for _, ext := range options.ExtensionOrder[split:] { // Non-TypeScript extensions after the split
if loader := config.LoaderFromFileExtension(options.ExtensionToLoader, ext); !loader.IsTypeScript() {
nodeModulesExtensionOrder = append(nodeModulesExtensionOrder, ext)
}
}
}
// Generate the condition sets for interpreting the "exports" field
esmConditionsDefault := map[string]bool{"default": true}
esmConditionsImport := map[string]bool{"import": true}
esmConditionsRequire := map[string]bool{"require": true}
for _, condition := range options.Conditions {
esmConditionsDefault[condition] = true
}
switch options.Platform {
case config.PlatformBrowser:
esmConditionsDefault["browser"] = true
case config.PlatformNode:
esmConditionsDefault["node"] = true
}
for key := range esmConditionsDefault {
esmConditionsImport[key] = true
esmConditionsRequire[key] = true
}
fs.Cwd()
res := &Resolver{
fs: fs,
log: log,
options: *options,
caches: caches,
dirCache: make(map[string]*dirInfo),
cssExtensionOrder: cssExtensionOrder,
nodeModulesExtensionOrder: nodeModulesExtensionOrder,
esmConditionsDefault: esmConditionsDefault,
esmConditionsImport: esmConditionsImport,
esmConditionsRequire: esmConditionsRequire,
}
// Handle the "tsconfig.json" override when the resolver is created. This
// isn't done when we validate the build options both because the code for
// "tsconfig.json" handling is already in the resolver, and because we want
// watch mode to pick up changes to "tsconfig.json" and rebuild.
var debugMeta DebugMeta
if options.TSConfigPath != "" || options.TSConfigRaw != "" {
r := resolverQuery{
Resolver: res,
debugMeta: &debugMeta,
}
var visited map[string]bool
var err error
if call == config.BuildCall {
visited = make(map[string]bool)
}
if options.TSConfigPath != "" {
if r.log.Level <= logger.LevelDebug {
r.debugLogs = &debugLogs{what: fmt.Sprintf("Resolving tsconfig file %q", options.TSConfigPath)}
}
res.tsConfigOverride, err = r.parseTSConfig(options.TSConfigPath, visited, fs.Dir(options.TSConfigPath))
} else {
source := logger.Source{
KeyPath: logger.Path{Text: fs.Join(fs.Cwd(), "<tsconfig.json>"), Namespace: "file"},
PrettyPaths: logger.PrettyPaths{Abs: "<tsconfig.json>", Rel: "<tsconfig.json>"},
Contents: options.TSConfigRaw,
}
res.tsConfigOverride, err = r.parseTSConfigFromSource(source, visited, fs.Cwd())
}
if err != nil {
if err == syscall.ENOENT {
prettyPaths := MakePrettyPaths(r.fs, logger.Path{Text: options.TSConfigPath, Namespace: "file"})
r.log.AddError(nil, logger.Range{}, fmt.Sprintf("Cannot find tsconfig file %q",
prettyPaths.Select(options.LogPathStyle)))
} else if err != errParseErrorAlreadyLogged {
prettyPaths := MakePrettyPaths(r.fs, logger.Path{Text: options.TSConfigPath, Namespace: "file"})
r.log.AddError(nil, logger.Range{}, fmt.Sprintf("Cannot read file %q: %s",
prettyPaths.Select(options.LogPathStyle), err.Error()))
}
} else {
r.flushDebugLogs(flushDueToSuccess)
}
}
// Mutate the provided options by settings from "tsconfig.json" if present
if res.tsConfigOverride != nil {
options.TS.Config = res.tsConfigOverride.Settings
res.tsConfigOverride.JSXSettings.ApplyTo(&options.JSX)
options.TSAlwaysStrict = res.tsConfigOverride.TSAlwaysStrictOrStrict()
}
return res
}
func (res *Resolver) Resolve(sourceDir string, importPath string, kind ast.ImportKind) (*ResolveResult, DebugMeta) {
var debugMeta DebugMeta
r := resolverQuery{
Resolver: res,
debugMeta: &debugMeta,
kind: kind,
}
if r.log.Level <= logger.LevelDebug {
r.debugLogs = &debugLogs{what: fmt.Sprintf(
"Resolving import %q in directory %q of type %q",
importPath, sourceDir, kind.StringForMetafile())}
}
// Apply package alias substitutions first
if r.options.PackageAliases != nil && IsPackagePath(importPath) {
if r.debugLogs != nil {
r.debugLogs.addNote("Checking for package alias matches")
}
longestKey := ""
longestValue := ""
for key, value := range r.options.PackageAliases {
if len(key) > len(longestKey) && strings.HasPrefix(importPath, key) && (len(importPath) == len(key) || importPath[len(key)] == '/') {
longestKey = key
longestValue = value
}
}
if longestKey != "" {
debugMeta.ModifiedImportPath = longestValue
if tail := importPath[len(longestKey):]; tail != "/" {
// Don't include the trailing characters if they are equal to a
// single slash. This comes up because you can abuse this quirk of
// node's path resolution to force node to load the package from the
// file system instead of as a built-in module. For example, "util"
// is node's built-in module while "util/" is one on the file system.
// Leaving the trailing slash in place causes problems for people:
// https://github.com/evanw/esbuild/issues/2730. It should be ok to
// always strip the trailing slash even when using the alias feature
// to swap one package for another (except when you swap a reference
// to one built-in node module with another but really why would you
// do that).
debugMeta.ModifiedImportPath += tail
}
if r.debugLogs != nil {
r.debugLogs.addNote(fmt.Sprintf(" Matched with alias from %q to %q", longestKey, longestValue))
r.debugLogs.addNote(fmt.Sprintf(" Modified import path from %q to %q", importPath, debugMeta.ModifiedImportPath))
}
importPath = debugMeta.ModifiedImportPath
// Resolve the package using the current path instead of the original
// path. This is trying to resolve the substitute in the top-level
// package instead of the nested package, which lets the top-level
// package control the version of the substitution. It's also critical
// when using Yarn PnP because Yarn PnP doesn't allow nested packages
// to "reach outside" of their normal dependency lists.
sourceDir = r.fs.Cwd()
if r.debugLogs != nil {
r.debugLogs.addNote(fmt.Sprintf(" Changed resolve directory to %q", sourceDir))
}
} else if r.debugLogs != nil {
r.debugLogs.addNote(" Failed to find any package alias matches")
}
}
// Certain types of URLs default to being external for convenience
if isExplicitlyExternal := r.isExternal(r.options.ExternalSettings.PreResolve, importPath, kind); isExplicitlyExternal ||
// "fill: url(#filter);"
(kind == ast.ImportURL && strings.HasPrefix(importPath, "#")) ||
// "background: url(http://example.com/images/image.png);"
strings.HasPrefix(importPath, "http://") ||
// "background: url(https://example.com/images/image.png);"
strings.HasPrefix(importPath, "https://") ||
// "background: url(//example.com/images/image.png);"
strings.HasPrefix(importPath, "//") {
if r.debugLogs != nil {
if isExplicitlyExternal {
r.debugLogs.addNote(fmt.Sprintf("The path %q was marked as external by the user", importPath))
} else {
r.debugLogs.addNote("Marking this path as implicitly external")
}
}
r.flushDebugLogs(flushDueToSuccess)
return &ResolveResult{
PathPair: PathPair{Primary: logger.Path{Text: importPath}, IsExternal: true},
}, debugMeta
}
if pathPair, ok, sideEffects := r.checkForBuiltInNodeModules(importPath); ok {
r.flushDebugLogs(flushDueToSuccess)
return &ResolveResult{
PathPair: pathPair,
PrimarySideEffectsData: sideEffects,
}, debugMeta
}
if parsed, ok := ParseDataURL(importPath); ok {
// "import 'data:text/javascript,console.log(123)';"
// "@import 'data:text/css,body{background:white}';"
if parsed.DecodeMIMEType() != MIMETypeUnsupported {
if r.debugLogs != nil {
r.debugLogs.addNote("Putting this path in the \"dataurl\" namespace")
}
r.flushDebugLogs(flushDueToSuccess)
return &ResolveResult{
PathPair: PathPair{Primary: logger.Path{Text: importPath, Namespace: "dataurl"}},
}, debugMeta
}
// "background: url(data:image/png;base64,iVBORw0KGgo=);"
if r.debugLogs != nil {
r.debugLogs.addNote("Marking this data URL as external")
}
r.flushDebugLogs(flushDueToSuccess)
return &ResolveResult{
PathPair: PathPair{Primary: logger.Path{Text: importPath}, IsExternal: true},
}, debugMeta
}
// Fail now if there is no directory to resolve in. This can happen for
// virtual modules (e.g. stdin) if a resolve directory is not specified.
if sourceDir == "" {
if r.debugLogs != nil {
r.debugLogs.addNote("Cannot resolve this path without a directory")
}
r.flushDebugLogs(flushDueToFailure)
return nil, debugMeta
}
// Glob imports only work in a multi-path context
if strings.ContainsRune(importPath, '*') {
if r.debugLogs != nil {
r.debugLogs.addNote("Cannot resolve a path containing a wildcard character in a single-path context")
}
r.flushDebugLogs(flushDueToFailure)
return nil, debugMeta
}
r.mutex.Lock()
defer r.mutex.Unlock()
// Check for the Yarn PnP manifest if it hasn't already been checked for
if !r.pnpManifestWasChecked {
r.pnpManifestWasChecked = true
// Use the current working directory to find the Yarn PnP manifest. We
// can't necessarily use the entry point locations because the entry
// point locations aren't necessarily file paths. For example, they could
// be HTTP URLs that will be handled by a plugin.
for dirInfo := r.dirInfoCached(r.fs.Cwd()); dirInfo != nil; dirInfo = dirInfo.parent {
if absPath := dirInfo.pnpManifestAbsPath; absPath != "" {
if strings.HasSuffix(absPath, ".json") {
if json, source := r.extractYarnPnPDataFromJSON(absPath, pnpReportErrorsAboutMissingFiles); json.Data != nil {
r.pnpManifest = compileYarnPnPData(absPath, r.fs.Dir(absPath), json, source)
}
} else {
if json, source := r.tryToExtractYarnPnPDataFromJS(absPath, pnpReportErrorsAboutMissingFiles); json.Data != nil {
r.pnpManifest = compileYarnPnPData(absPath, r.fs.Dir(absPath), json, source)
}
}
if r.debugLogs != nil && r.pnpManifest != nil && r.pnpManifest.invalidIgnorePatternData != "" {
r.debugLogs.addNote(" Invalid Go regular expression for \"ignorePatternData\": " + r.pnpManifest.invalidIgnorePatternData)
}
break
}
}
}
sourceDirInfo := r.dirInfoCached(sourceDir)
if sourceDirInfo == nil {
// Bail if the directory is missing for some reason
return nil, debugMeta
}
result := r.resolveWithoutSymlinks(sourceDir, sourceDirInfo, importPath)
if result == nil {
// If resolution failed, try again with the URL query and/or hash removed
suffix := strings.IndexAny(importPath, "?#")
if suffix < 1 {
r.flushDebugLogs(flushDueToFailure)
return nil, debugMeta
}
if r.debugLogs != nil {
r.debugLogs.addNote(fmt.Sprintf("Retrying resolution after removing the suffix %q", importPath[suffix:]))
}
if result2 := r.resolveWithoutSymlinks(sourceDir, sourceDirInfo, importPath[:suffix]); result2 == nil {
r.flushDebugLogs(flushDueToFailure)
return nil, debugMeta
} else {
result = result2
result.PathPair.Primary.IgnoredSuffix = importPath[suffix:]
if result.PathPair.HasSecondary() {
result.PathPair.Secondary.IgnoredSuffix = importPath[suffix:]
}
}
}
// If successful, resolve symlinks using the directory info cache
r.finalizeResolve(result)
r.flushDebugLogs(flushDueToSuccess)
return result, debugMeta
}
// This returns nil on failure and non-nil on success. Note that this may
// return an empty array to indicate a successful search that returned zero
// results.
func (res *Resolver) ResolveGlob(sourceDir string, importPathPattern []helpers.GlobPart, kind ast.ImportKind, prettyPattern string) (map[string]ResolveResult, *logger.Msg) {
var debugMeta DebugMeta
r := resolverQuery{
Resolver: res,
debugMeta: &debugMeta,
kind: kind,
}
if r.log.Level <= logger.LevelDebug {
r.debugLogs = &debugLogs{what: fmt.Sprintf(
"Resolving glob import %s in directory %q of type %q",
prettyPattern, sourceDir, kind.StringForMetafile())}
}
if len(importPathPattern) == 0 {
if r.debugLogs != nil {
r.debugLogs.addNote("Ignoring empty glob pattern")
}
r.flushDebugLogs(flushDueToFailure)
return nil, nil
}
firstPrefix := importPathPattern[0].Prefix
// Glob patterns only work for relative URLs
if !strings.HasPrefix(firstPrefix, "./") && !strings.HasPrefix(firstPrefix, "../") &&
!strings.HasPrefix(firstPrefix, ".\\") && !strings.HasPrefix(firstPrefix, "..\\") {
if kind == ast.ImportEntryPoint {
// Be permissive about forgetting "./" for entry points since it's common
// to omit "./" on the command line. But don't accidentally treat absolute
// paths as relative (even on Windows).
if !r.fs.IsAbs(firstPrefix) {
firstPrefix = "./" + firstPrefix
}
} else {
// Don't allow omitting "./" for other imports since node doesn't let you do this either
if r.debugLogs != nil {
r.debugLogs.addNote("Ignoring glob import that doesn't start with \"./\" or \"../\"")
}
r.flushDebugLogs(flushDueToFailure)
return nil, nil
}
}
// Handle leading directories in the pattern (including "../")
dirPrefix := 0
for {
slash := strings.IndexAny(firstPrefix[dirPrefix:], "/\\")
if slash == -1 {
break
}
if star := strings.IndexByte(firstPrefix[dirPrefix:], '*'); star != -1 && slash > star {
break
}
dirPrefix += slash + 1
}
// If the pattern is an absolute path, then just replace source directory.
// Otherwise join the source directory with the prefix from the pattern.
if suffix := firstPrefix[:dirPrefix]; r.fs.IsAbs(suffix) {
sourceDir = suffix
} else {
sourceDir = r.fs.Join(sourceDir, suffix)
}
r.mutex.Lock()
defer r.mutex.Unlock()
// Look up the directory to start from
sourceDirInfo := r.dirInfoCached(sourceDir)
if sourceDirInfo == nil {
if r.debugLogs != nil {
r.debugLogs.addNote(fmt.Sprintf("Failed to find the directory %q", sourceDir))
}
r.flushDebugLogs(flushDueToFailure)
return nil, nil
}
// Turn the glob pattern into a regular expression
canMatchOnSlash := false
wasGlobStar := false
sb := strings.Builder{}
sb.WriteByte('^')
for i, part := range importPathPattern {
prefix := part.Prefix
if i == 0 {
prefix = firstPrefix
}
if wasGlobStar && len(prefix) > 0 && (prefix[0] == '/' || prefix[0] == '\\') {
prefix = prefix[1:] // Move over the "/" after a globstar
}
sb.WriteString(regexp.QuoteMeta(prefix))
switch part.Wildcard {
case helpers.GlobAllIncludingSlash:
// It's a globstar, so match zero or more path segments
sb.WriteString("(?:[^/]*(?:/|$))*")
canMatchOnSlash = true
wasGlobStar = true
case helpers.GlobAllExceptSlash:
// It's not a globstar, so only match one path segment
sb.WriteString("[^/]*")
wasGlobStar = false
}
}
sb.WriteByte('$')
re := regexp.MustCompile(sb.String())
// Initialize "results" to a non-nil value to indicate that the glob is valid
results := make(map[string]ResolveResult)
var visit func(dirInfo *dirInfo, dir string)
visit = func(dirInfo *dirInfo, dir string) {
for _, key := range dirInfo.entries.SortedKeys() {
entry, _ := dirInfo.entries.Get(key)
if r.debugLogs != nil {
r.debugLogs.addNote(fmt.Sprintf("Considering entry %q", r.fs.Join(dirInfo.absPath, key)))
r.debugLogs.increaseIndent()
}
switch entry.Kind(r.fs) {
case fs.DirEntry:
// To avoid infinite loops, don't follow any symlinks
if canMatchOnSlash && entry.Symlink(r.fs) == "" {
if childDirInfo := r.dirInfoCached(r.fs.Join(dirInfo.absPath, key)); childDirInfo != nil {
visit(childDirInfo, fmt.Sprintf("%s%s/", dir, key))
}
}
case fs.FileEntry:
if relPath := dir + key; re.MatchString(relPath) {
var result ResolveResult
if r.isExternal(r.options.ExternalSettings.PreResolve, relPath, kind) {
result.PathPair = PathPair{Primary: logger.Path{Text: relPath}, IsExternal: true}
if r.debugLogs != nil {
r.debugLogs.addNote(fmt.Sprintf("The path %q was marked as external by the user", result.PathPair.Primary.Text))
}
} else {
absPath := r.fs.Join(dirInfo.absPath, key)
result.PathPair = PathPair{Primary: logger.Path{Text: absPath, Namespace: "file"}}
}
r.finalizeResolve(&result)
results[relPath] = result
}
}
if r.debugLogs != nil {
r.debugLogs.decreaseIndent()
}
}
}
visit(sourceDirInfo, firstPrefix[:dirPrefix])
var warning *logger.Msg
if len(results) == 0 {
warning = &logger.Msg{
ID: logger.MsgID_Bundler_EmptyGlob,
Kind: logger.Warning,
Data: logger.MsgData{Text: fmt.Sprintf("The glob pattern %s did not match any files", prettyPattern)},
}
}
r.flushDebugLogs(flushDueToSuccess)
return results, warning
}
// isExternal reports whether "path" matches the user-provided external
// settings in "matchers", either as an exact string or via one of the
// "prefix*suffix" wildcard patterns. Entry points are never external.
func (r resolverQuery) isExternal(matchers config.ExternalMatchers, path string, kind ast.ImportKind) bool {
	// Never mark an entry point as external. This is not useful.
	if kind == ast.ImportEntryPoint {
		return false
	}

	// Fast path: an exact match in the map
	if _, found := matchers.Exact[path]; found {
		return true
	}

	// Slow path: check each wildcard pattern in turn
	for _, pattern := range matchers.Patterns {
		if r.debugLogs != nil {
			r.debugLogs.addNote(fmt.Sprintf("Checking %q against the external pattern %q", path, pattern.Prefix+"*"+pattern.Suffix))
		}

		// The prefix and the suffix must not overlap within "path"
		if len(path) < len(pattern.Prefix)+len(pattern.Suffix) {
			continue
		}
		if strings.HasPrefix(path, pattern.Prefix) && strings.HasSuffix(path, pattern.Suffix) {
			return true
		}
	}
	return false
}
// This tries to run "Resolve" on a package path as a relative path. If
// successful, the user just forgot a leading "./" in front of the path.
func (res *Resolver) ProbeResolvePackageAsRelative(sourceDir string, importPath string, kind ast.ImportKind) (*ResolveResult, DebugMeta) {
	var debugMeta DebugMeta
	r := resolverQuery{
		Resolver:  res,
		debugMeta: &debugMeta,
		kind:      kind,
	}
	absPath := r.fs.Join(sourceDir, importPath)

	// The resolver's caches are not thread-safe, so hold the mutex for the
	// duration of the file system probe
	r.mutex.Lock()
	defer r.mutex.Unlock()

	pair, ok, diffCase := r.loadAsFileOrDirectory(absPath)
	if !ok {
		return nil, debugMeta
	}

	result := &ResolveResult{PathPair: pair, DifferentCase: diffCase}
	r.finalizeResolve(result)
	r.flushDebugLogs(flushDueToSuccess)
	return result, debugMeta
}
// debugLogs accumulates indented notes about a single resolution attempt.
// The notes are reported later by "flushDebugLogs", either as debug output
// on failure or as verbose output on success.
type debugLogs struct {
	what   string           // the top-level message describing what is being resolved
	indent string           // current indentation prefix, two spaces per level
	notes  []logger.MsgData // the accumulated notes, in insertion order
}
// addNote appends a single note, prefixed with the current indentation.
func (d *debugLogs) addNote(text string) {
	if len(d.indent) > 0 {
		text = d.indent + text
	}
	d.notes = append(d.notes, logger.MsgData{Text: text, DisableMaximumWidth: true})
}
// increaseIndent adds one level (two spaces) of indentation to future notes.
func (d *debugLogs) increaseIndent() {
	d.indent = d.indent + "  "
}
// decreaseIndent removes one level (two spaces) of indentation. The indent
// string is entirely spaces, so trimming from either end is equivalent.
func (d *debugLogs) decreaseIndent() {
	d.indent = d.indent[:len(d.indent)-2]
}
// flushMode tells "flushDebugLogs" how a resolution attempt ended, which
// determines the log level the accumulated notes are reported at.
type flushMode uint8

const (
	flushDueToFailure flushMode = iota // report the notes at debug level
	flushDueToSuccess                  // report the notes only at verbose level
)
// flushDebugLogs reports any accumulated debug notes for this query.
// Failures are always reported at debug level, while successes are only
// reported when the log is at verbose level.
func (r resolverQuery) flushDebugLogs(mode flushMode) {
	if r.debugLogs == nil {
		return
	}
	level := logger.Verbose
	if mode == flushDueToFailure {
		level = logger.Debug
	} else if r.log.Level > logger.LevelVerbose {
		return
	}
	r.log.AddIDWithNotes(logger.MsgID_None, level, nil, logger.Range{}, r.debugLogs.what, r.debugLogs.notes)
}
func (r resolverQuery) finalizeResolve(result *ResolveResult) {
if !result.PathPair.IsExternal && r.isExternal(r.options.ExternalSettings.PostResolve, result.PathPair.Primary.Text, r.kind) {
if r.debugLogs != nil {
r.debugLogs.addNote(fmt.Sprintf("The path %q was marked as external by the user", result.PathPair.Primary.Text))
}
result.PathPair.IsExternal = true
} else {
for i, path := range result.PathPair.iter() {
if path.Namespace != "file" {
continue
}
dirInfo := r.dirInfoCached(r.fs.Dir(path.Text))
if dirInfo == nil {
continue
}
base := r.fs.Base(path.Text)
// If the path contains symlinks, rewrite the path to the real path
if !r.options.PreserveSymlinks {
if entry, _ := dirInfo.entries.Get(base); entry != nil {
symlink := entry.Symlink(r.fs)
if symlink != "" {
// This means the entry itself is a symlink
} else if dirInfo.absRealPath != "" {
// There is at least one parent directory with a symlink
symlink = r.fs.Join(dirInfo.absRealPath, base)
}
if symlink != "" {
if r.debugLogs != nil {
r.debugLogs.addNote(fmt.Sprintf("Resolved symlink %q to %q", path.Text, symlink))
}
path.Text = symlink
// Look up the directory over again if it was changed
dirInfo = r.dirInfoCached(r.fs.Dir(path.Text))
if dirInfo == nil {
continue
}
base = r.fs.Base(path.Text)
}
}
}
// Path attributes are only taken from the primary path
if i > 0 {
continue
}
// Path attributes are not taken from disabled files
if path.IsDisabled() {
continue
}
// Look up this file in the "sideEffects" map in the nearest enclosing
// directory with a "package.json" file.
//
// Only do this for the primary path. Some packages have the primary
// path marked as having side effects and the secondary path marked
// as not having side effects. This is likely a bug in the package
// definition but we don't want to consider the primary path as not
// having side effects just because the secondary path is marked as
// not having side effects.
if pkgJSON := dirInfo.enclosingPackageJSON; pkgJSON != nil {
if pkgJSON.sideEffectsMap != nil {
hasSideEffects := false
pathLookup := strings.ReplaceAll(path.Text, "\\", "/") // Avoid problems with Windows-style slashes
if pkgJSON.sideEffectsMap[pathLookup] {
// Fast path: map lookup
hasSideEffects = true
} else {
// Slow path: glob tests
for _, re := range pkgJSON.sideEffectsRegexps {
if re.MatchString(pathLookup) {
hasSideEffects = true
break
}
}
}
if !hasSideEffects {
if r.debugLogs != nil {
r.debugLogs.addNote(fmt.Sprintf("Marking this file as having no side effects due to %q",
pkgJSON.source.KeyPath.Text))
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | true |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/resolver/package_json.go | internal/resolver/package_json.go | package resolver
import (
"fmt"
"net/url"
"path"
"regexp"
"sort"
"strings"
"github.com/evanw/esbuild/internal/config"
"github.com/evanw/esbuild/internal/helpers"
"github.com/evanw/esbuild/internal/js_ast"
"github.com/evanw/esbuild/internal/js_lexer"
"github.com/evanw/esbuild/internal/js_parser"
"github.com/evanw/esbuild/internal/logger"
)
// packageJSON holds the parsed contents of a single "package.json" file,
// covering only the fields that the resolver cares about.
type packageJSON struct {
	// The value of the "name" field, if present and a string
	name string

	// The "main"-style fields (e.g. "main", "module") keyed by field name
	mainFields map[string]mainField

	// The module format implied by the "type" field ("commonjs" or "module")
	moduleTypeData js_ast.ModuleTypeData

	// "TypeScript will first check whether package.json contains a "tsconfig"
	// field, and if it does, TypeScript will try to load a configuration file
	// from that field. If neither exists, TypeScript will try to read from a
	// tsconfig.json at the root."
	//
	// See: https://www.typescriptlang.org/docs/handbook/release-notes/typescript-3-2.html#tsconfigjson-inheritance-via-nodejs-packages
	tsconfig string

	// Present if the "browser" field is present. This field is intended to be
	// used by bundlers and lets you redirect the paths of certain 3rd-party
	// modules that don't work in the browser to other modules that shim that
	// functionality. That way you don't have to rewrite the code for those 3rd-
	// party modules. For example, you might remap the native "util" node module
	// to something like https://www.npmjs.com/package/util so it works in the
	// browser.
	//
	// This field contains the original mapping object in "package.json". Mapping
	// to a nil path indicates that the module is disabled. As far as I can
	// tell, the official spec is an abandoned GitHub repo hosted by a user account:
	// https://github.com/defunctzombie/package-browser-field-spec. The npm docs
	// say almost nothing: https://docs.npmjs.com/files/package.json.
	//
	// Note that the non-package "browser" map has to be checked twice to match
	// Webpack's behavior: once before resolution and once after resolution. It
	// leads to some unintuitive failure cases that we must emulate around missing
	// file extensions:
	//
	// * Given the mapping "./no-ext": "./no-ext-browser.js" the query "./no-ext"
	//   should match but the query "./no-ext.js" should NOT match.
	//
	// * Given the mapping "./ext.js": "./ext-browser.js" the query "./ext.js"
	//   should match and the query "./ext" should ALSO match.
	//
	browserMap map[string]*string

	// If this is non-nil, each entry in this map is the absolute path of a file
	// with side effects. Any entry not in this map should be considered to have
	// no side effects, which means import statements for these files can be
	// removed if none of the imports are used. This is a convention from Webpack:
	// https://webpack.js.org/guides/tree-shaking/.
	//
	// Note that if a file is included, all statements that can't be proven to be
	// free of side effects must be included. This convention does not say
	// anything about whether any statements within the file have side effects or
	// not.
	sideEffectsMap     map[string]bool
	sideEffectsRegexps []*regexp.Regexp // compiled from "sideEffects" entries containing wildcards
	sideEffectsData    *SideEffectsData

	// This represents the "imports" field in this package.json file.
	importsMap *pjMap

	// This represents the "exports" field in this package.json file.
	exportsMap *pjMap

	// The source of the "package.json" file itself, for error messages
	source logger.Source
}
// mainField records one entry from a "main"-style field in "package.json"
// (e.g. "main" or "module").
type mainField struct {
	relPath string     // the field's value, a path relative to the package
	keyLoc  logger.Loc // the location of the field's key, for error messages
}
// browserPathKind distinguishes the two kinds of input paths that can be
// looked up in a "browser" map.
type browserPathKind uint8

const (
	absolutePathKind browserPathKind = iota // an absolute file system path
	packagePathKind                         // a package path such as "pkg" or "pkg/file"
)
// checkBrowserMap looks up "inputPath" in the "browser" map of the nearest
// enclosing "package.json" file. On a match, "remapped" is the replacement
// path (nil means the module is explicitly disabled) and "ok" is true.
// This does nothing unless the current platform is "browser".
func (r resolverQuery) checkBrowserMap(resolveDirInfo *dirInfo, inputPath string, kind browserPathKind) (remapped *string, ok bool) {
	// This only applies if the current platform is "browser"
	if r.options.Platform != config.PlatformBrowser {
		return nil, false
	}

	// There must be an enclosing directory with a "package.json" file with a "browser" map
	if resolveDirInfo.enclosingBrowserScope == nil {
		if r.debugLogs != nil {
			r.debugLogs.addNote(fmt.Sprintf("No \"browser\" map found in directory %q", resolveDirInfo.absPath))
		}
		return nil, false
	}

	packageJSON := resolveDirInfo.enclosingBrowserScope.packageJSON
	browserMap := packageJSON.browserMap

	type implicitExtensions uint8

	const (
		// Also check the path with each implicit extension appended
		includeImplicitExtensions implicitExtensions = iota

		// Only check the exact paths, without any implicit extensions
		skipImplicitExtensions
	)

	// checkPath looks up "pathToCheck" (and variants of it with implicit
	// extensions and/or an "index" suffix) in the browser map. Note that it
	// writes any match into the named return values "remapped" and "ok" of
	// the enclosing function, and rewrites "inputPath" to the matched form.
	checkPath := func(pathToCheck string, implicitExtensions implicitExtensions) bool {
		if r.debugLogs != nil {
			r.debugLogs.addNote(fmt.Sprintf("Checking for %q in the \"browser\" map in %q",
				pathToCheck, packageJSON.source.KeyPath.Text))
		}

		// Check for equality
		if r.debugLogs != nil {
			r.debugLogs.addNote(fmt.Sprintf("  Checking for %q", pathToCheck))
		}
		remapped, ok = browserMap[pathToCheck]
		if ok {
			inputPath = pathToCheck
			return true
		}

		// If that failed, try adding implicit extensions
		if implicitExtensions == includeImplicitExtensions {
			for _, ext := range r.options.ExtensionOrder {
				extPath := pathToCheck + ext
				if r.debugLogs != nil {
					r.debugLogs.addNote(fmt.Sprintf("  Checking for %q", extPath))
				}
				remapped, ok = browserMap[extPath]
				if ok {
					inputPath = extPath
					return true
				}
			}
		}

		// If that failed, try assuming this is a directory and looking for an "index" file
		indexPath := path.Join(pathToCheck, "index")
		if IsPackagePath(indexPath) && !IsPackagePath(pathToCheck) {
			indexPath = "./" + indexPath
		}

		// Check for equality
		if r.debugLogs != nil {
			r.debugLogs.addNote(fmt.Sprintf("  Checking for %q", indexPath))
		}
		remapped, ok = browserMap[indexPath]
		if ok {
			inputPath = indexPath
			return true
		}

		// If that failed, try adding implicit extensions
		if implicitExtensions == includeImplicitExtensions {
			for _, ext := range r.options.ExtensionOrder {
				extPath := indexPath + ext
				if r.debugLogs != nil {
					r.debugLogs.addNote(fmt.Sprintf("  Checking for %q", extPath))
				}
				remapped, ok = browserMap[extPath]
				if ok {
					inputPath = extPath
					return true
				}
			}
		}

		return false
	}

	// Turn absolute paths into paths relative to the "browser" map location
	if kind == absolutePathKind {
		relPath, ok := r.fs.Rel(resolveDirInfo.enclosingBrowserScope.absPath, inputPath)
		if !ok {
			return nil, false
		}
		inputPath = strings.ReplaceAll(relPath, "\\", "/")
	}

	if inputPath == "." {
		// No bundler supports remapping ".", so we don't either
		return nil, false
	}

	// First try the import path as a package path
	if !checkPath(inputPath, includeImplicitExtensions) && IsPackagePath(inputPath) {
		// If a package path didn't work, try the import path as a relative path
		switch kind {
		case absolutePathKind:
			checkPath("./"+inputPath, includeImplicitExtensions)

		case packagePathKind:
			// Browserify allows a browser map entry of "./pkg" to override a package
			// path of "require('pkg')". This is weird, and arguably a bug. But we
			// replicate this bug for compatibility. However, Browserify only allows
			// this within the same package. It does not allow such an entry in a
			// parent package to override this in a child package. So this behavior
			// is disallowed if there is a "node_modules" folder in between the child
			// package and the parent package.
			isInSamePackage := true
			for info := resolveDirInfo; info != nil && info != resolveDirInfo.enclosingBrowserScope; info = info.parent {
				if info.isNodeModules {
					isInSamePackage = false
					break
				}
			}
			if isInSamePackage {
				relativePathPrefix := "./"

				// Use the relative path from the file containing the import path to the
				// enclosing package.json file. This includes any subdirectories within the
				// package if there are any.
				if relPath, ok := r.fs.Rel(resolveDirInfo.enclosingBrowserScope.absPath, resolveDirInfo.absPath); ok && relPath != "." {
					relativePathPrefix += strings.ReplaceAll(relPath, "\\", "/") + "/"
				}

				// Browserify lets "require('pkg')" match "./pkg" but not "./pkg.js".
				// So don't add implicit extensions specifically in this place so we
				// match Browserify's behavior.
				checkPath(relativePathPrefix+inputPath, skipImplicitExtensions)
			}
		}
	}

	if r.debugLogs != nil {
		if ok {
			if remapped == nil {
				r.debugLogs.addNote(fmt.Sprintf("Found %q marked as disabled", inputPath))
			} else {
				r.debugLogs.addNote(fmt.Sprintf("Found %q mapping to %q", inputPath, *remapped))
			}
		} else {
			r.debugLogs.addNote(fmt.Sprintf("Failed to find %q", inputPath))
		}
	}
	return
}
// parsePackageJSON reads and parses the "package.json" file in the directory
// "inputPath", returning nil if the file cannot be read or contains invalid
// JSON. The fields the resolver cares about ("name", "type", "tsconfig", the
// main fields, "browser", "sideEffects", "imports", and "exports") are
// validated and collected into the result; warnings are logged for invalid
// values.
func (r resolverQuery) parsePackageJSON(inputPath string) *packageJSON {
	packageJSONPath := r.fs.Join(inputPath, "package.json")
	contents, err, originalError := r.caches.FSCache.ReadFile(r.fs, packageJSONPath)
	if r.debugLogs != nil && originalError != nil {
		r.debugLogs.addNote(fmt.Sprintf("Failed to read file %q: %s", packageJSONPath, originalError.Error()))
	}
	if err != nil {
		prettyPaths := MakePrettyPaths(r.fs, logger.Path{Text: packageJSONPath, Namespace: "file"})
		r.log.AddError(nil, logger.Range{}, fmt.Sprintf("Cannot read file %q: %s",
			prettyPaths.Select(r.options.LogPathStyle), err.Error()))
		return nil
	}
	if r.debugLogs != nil {
		r.debugLogs.addNote(fmt.Sprintf("The file %q exists", packageJSONPath))
	}

	keyPath := logger.Path{Text: packageJSONPath, Namespace: "file"}
	jsonSource := logger.Source{
		KeyPath:     keyPath,
		PrettyPaths: MakePrettyPaths(r.fs, keyPath),
		Contents:    contents,
	}
	tracker := logger.MakeLineColumnTracker(&jsonSource)

	json, ok := r.caches.JSONCache.Parse(r.log, jsonSource, js_parser.JSONOptions{})
	if !ok {
		// Parse errors have already been logged by the JSON parser
		return nil
	}

	packageJSON := &packageJSON{
		source:     jsonSource,
		mainFields: make(map[string]mainField),
	}

	// Read the "name" field
	if nameJSON, _, ok := getProperty(json, "name"); ok {
		if nameValue, ok := getString(nameJSON); ok {
			packageJSON.name = nameValue
		}
	}

	// Read the "type" field
	if typeJSON, typeKeyLoc, ok := getProperty(json, "type"); ok {
		if typeValue, ok := getString(typeJSON); ok {
			switch typeValue {
			case "commonjs":
				packageJSON.moduleTypeData = js_ast.ModuleTypeData{
					Type:   js_ast.ModuleCommonJS_PackageJSON,
					Source: &packageJSON.source,
					Range:  jsonSource.RangeOfString(typeJSON.Loc),
				}
			case "module":
				packageJSON.moduleTypeData = js_ast.ModuleTypeData{
					Type:   js_ast.ModuleESM_PackageJSON,
					Source: &packageJSON.source,
					Range:  jsonSource.RangeOfString(typeJSON.Loc),
				}
			default:
				notes := []logger.MsgData{{Text: "The \"type\" field must be set to either \"commonjs\" or \"module\"."}}
				kind := logger.Warning

				// If someone does something like "type": "./index.d.ts" then they
				// likely meant "types" instead of "type". Customize the message
				// for this and hide it if it's inside a published npm package.
				if strings.HasSuffix(typeValue, ".d.ts") {
					notes[0] = tracker.MsgData(jsonSource.RangeOfString(typeKeyLoc),
						"TypeScript type declarations use the \"types\" field, not the \"type\" field:")
					notes[0].Location.Suggestion = "\"types\""
					if helpers.IsInsideNodeModules(jsonSource.KeyPath.Text) {
						kind = logger.Debug
					}
				}

				r.log.AddIDWithNotes(logger.MsgID_PackageJSON_InvalidType, kind, &tracker, jsonSource.RangeOfString(typeJSON.Loc),
					fmt.Sprintf("%q is not a valid value for the \"type\" field", typeValue),
					notes)
			}
		} else {
			r.log.AddID(logger.MsgID_PackageJSON_InvalidType, logger.Warning, &tracker, logger.Range{Loc: typeJSON.Loc},
				"The value for \"type\" must be a string")
		}
	}

	// Read the "tsconfig" field
	if tsconfigJSON, _, ok := getProperty(json, "tsconfig"); ok {
		if tsconfigValue, ok := getString(tsconfigJSON); ok {
			packageJSON.tsconfig = tsconfigValue
		}
	}

	// Read the "main" fields
	mainFields := r.options.MainFields
	if mainFields == nil {
		mainFields = defaultMainFields[r.options.Platform]
	}
	for _, field := range mainFields {
		if mainJSON, mainLoc, ok := getProperty(json, field); ok {
			if main, ok := getString(mainJSON); ok && main != "" {
				packageJSON.mainFields[field] = mainField{keyLoc: mainLoc, relPath: main}
			}
		}
	}
	// Also record fields not in the active list so failures can mention them
	for _, field := range mainFieldsForFailure {
		if _, ok := packageJSON.mainFields[field]; !ok {
			if mainJSON, mainLoc, ok := getProperty(json, field); ok {
				if main, ok := getString(mainJSON); ok && main != "" {
					packageJSON.mainFields[field] = mainField{keyLoc: mainLoc, relPath: main}
				}
			}
		}
	}

	// Read the "browser" property, but only when targeting the browser
	if browserJSON, _, ok := getProperty(json, "browser"); ok && r.options.Platform == config.PlatformBrowser {
		// We both want the ability to have the option of CJS vs. ESM and the
		// option of having node vs. browser. The way to do this is to use the
		// object literal form of the "browser" field like this:
		//
		//   "main": "dist/index.node.cjs.js",
		//   "module": "dist/index.node.esm.js",
		//   "browser": {
		//     "./dist/index.node.cjs.js": "./dist/index.browser.cjs.js",
		//     "./dist/index.node.esm.js": "./dist/index.browser.esm.js"
		//   },
		//
		if browser, ok := browserJSON.Data.(*js_ast.EObject); ok {
			// The value is an object
			browserMap := make(map[string]*string)

			// Remap all files in the browser field
			for _, prop := range browser.Properties {
				if key, ok := getString(prop.Key); ok && prop.ValueOrNil.Data != nil {
					if value, ok := getString(prop.ValueOrNil); ok {
						// If this is a string, it's a replacement package
						browserMap[key] = &value
					} else if value, ok := getBool(prop.ValueOrNil); ok {
						// If this is false, it means the package is disabled
						if !value {
							browserMap[key] = nil
						}
					} else {
						r.log.AddID(logger.MsgID_PackageJSON_InvalidBrowser, logger.Warning, &tracker, logger.Range{Loc: prop.ValueOrNil.Loc},
							"Each \"browser\" mapping must be a string or a boolean")
					}
				}
			}

			packageJSON.browserMap = browserMap
		}
	}

	// Read the "sideEffects" property
	if sideEffectsJSON, sideEffectsLoc, ok := getProperty(json, "sideEffects"); ok {
		switch data := sideEffectsJSON.Data.(type) {
		case *js_ast.EBoolean:
			if !data.Value {
				// Make an empty map for "sideEffects: false", which indicates all
				// files in this module can be considered to not have side effects.
				packageJSON.sideEffectsMap = make(map[string]bool)
				packageJSON.sideEffectsData = &SideEffectsData{
					IsSideEffectsArrayInJSON: false,
					Source:                   &jsonSource,
					Range:                    jsonSource.RangeOfString(sideEffectsLoc),
				}
			}

		case *js_ast.EArray:
			// The "sideEffects: []" format means all files in this module but not in
			// the array can be considered to not have side effects.
			packageJSON.sideEffectsMap = make(map[string]bool)
			packageJSON.sideEffectsData = &SideEffectsData{
				IsSideEffectsArrayInJSON: true,
				Source:                   &jsonSource,
				Range:                    jsonSource.RangeOfString(sideEffectsLoc),
			}
			for _, itemJSON := range data.Items {
				item, ok := itemJSON.Data.(*js_ast.EString)
				if !ok || item.Value == nil {
					r.log.AddID(logger.MsgID_PackageJSON_InvalidSideEffects, logger.Warning, &tracker, logger.Range{Loc: itemJSON.Loc},
						"Expected string in array for \"sideEffects\"")
					continue
				}

				// Reference: https://github.com/webpack/webpack/blob/ed175cd22f89eb9fecd0a70572a3fd0be028e77c/lib/optimize/SideEffectsFlagPlugin.js
				pattern := helpers.UTF16ToString(item.Value)
				if !strings.ContainsRune(pattern, '/') {
					pattern = "**/" + pattern
				}
				absPattern := r.fs.Join(inputPath, pattern)
				absPattern = strings.ReplaceAll(absPattern, "\\", "/") // Avoid problems with Windows-style slashes
				re, hadWildcard := globstarToEscapedRegexp(absPattern)

				// Wildcard patterns require more expensive matching
				if hadWildcard {
					packageJSON.sideEffectsRegexps = append(packageJSON.sideEffectsRegexps, regexp.MustCompile(re))
					continue
				}

				// Normal strings can be matched with a map lookup
				packageJSON.sideEffectsMap[absPattern] = true
			}

		default:
			r.log.AddID(logger.MsgID_PackageJSON_InvalidSideEffects, logger.Warning, &tracker, logger.Range{Loc: sideEffectsJSON.Loc},
				"The value for \"sideEffects\" must be a boolean or an array")
		}
	}

	// Read the "imports" map
	if importsJSON, importsLoc, ok := getProperty(json, "imports"); ok {
		if importsMap := parseImportsExportsMap(jsonSource, r.log, importsJSON, "imports", importsLoc); importsMap != nil {
			if importsMap.root.kind != pjObject {
				r.log.AddID(logger.MsgID_PackageJSON_InvalidImportsOrExports, logger.Warning, &tracker, importsMap.root.firstToken,
					"The value for \"imports\" must be an object")
			}
			packageJSON.importsMap = importsMap
		}
	}

	// Read the "exports" map
	if exportsJSON, exportsLoc, ok := getProperty(json, "exports"); ok {
		if exportsMap := parseImportsExportsMap(jsonSource, r.log, exportsJSON, "exports", exportsLoc); exportsMap != nil {
			packageJSON.exportsMap = exportsMap
		}
	}

	return packageJSON
}
// globstarToEscapedRegexp translates a glob pattern into an anchored regular
// expression string. The returned flag is true if the pattern contained any
// "*" or "?" wildcards; patterns without wildcards can be matched with a
// cheaper string comparison instead of a regular expression.
//
// Reference: https://github.com/fitzgen/glob-to-regexp/blob/2abf65a834259c6504ed3b80e85f893f8cd99127/index.js
func globstarToEscapedRegexp(glob string) (string, bool) {
	var out strings.Builder
	out.WriteByte('^')
	hadWildcard := false
	length := len(glob)

	for i := 0; i < length; i++ {
		switch ch := glob[i]; {
		case strings.IndexByte(`\^$.+|()[]{}`, ch) >= 0:
			// Escape characters that are special in a regular expression
			out.WriteByte('\\')
			out.WriteByte(ch)

		case ch == '?':
			// "?" matches exactly one character (including "/")
			out.WriteByte('.')
			hadWildcard = true

		case ch == '*':
			// Remember what comes immediately before this run of "*"'s
			before := -1
			if i > 0 {
				before = int(glob[i-1])
			}

			// Consume the entire run of consecutive "*"'s
			stars := 1
			for i+1 < length && glob[i+1] == '*' {
				stars++
				i++
			}

			// Remember what comes immediately after the run
			after := -1
			if i+1 < length {
				after = int(glob[i+1])
			}

			// A "globstar" is a run of two or more "*"'s spanning a whole
			// path segment (bounded by "/" or by the edges of the pattern)
			if stars > 1 && (before == '/' || before == -1) && (after == '/' || after == -1) {
				// It's a globstar, so match zero or more path segments
				out.WriteString("(?:[^/]*(?:/|$))*")
				i++ // Move over the "/"
			} else {
				// It's not a globstar, so only match one path segment
				out.WriteString("[^/]*")
			}
			hadWildcard = true

		default:
			out.WriteByte(ch)
		}
	}

	out.WriteByte('$')
	return out.String(), hadWildcard
}
// pjMap is the parsed form of an "imports" or "exports" field from a
// "package.json" file.
//
// Reference: https://nodejs.org/api/esm.html#esm_resolver_algorithm_specification
type pjMap struct {
	root           pjEntry    // the parsed value of the field
	propertyKey    string     // either "imports" or "exports"
	propertyKeyLoc logger.Loc // the location of the field's key, for error messages
}
// pjKind describes the JSON type of a "pjEntry" value.
type pjKind uint8

const (
	pjNull    pjKind = iota // a JSON "null"
	pjString                // a JSON string
	pjArray                 // a JSON array
	pjObject                // a JSON object
	pjInvalid               // any other JSON value (e.g. a boolean or number)
)
// pjEntry is one parsed JSON value from an "imports" or "exports" map. Only
// the data field corresponding to "kind" is populated.
type pjEntry struct {
	strData       string             // set when "kind" is "pjString"
	arrData       []pjEntry          // set when "kind" is "pjArray"
	mapData       []pjMapEntry       // set when "kind" is "pjObject"; can't be a "map" because order matters
	expansionKeys expansionKeysArray // the subset of "mapData" with keys ending in "/" or containing "*", sorted by specificity
	firstToken    logger.Range       // the range of the value's first token, for error messages
	kind          pjKind
}
// pjMapEntry is one key/value pair from an "imports" or "exports" object.
type pjMapEntry struct {
	key      string
	value    pjEntry
	keyRange logger.Range // the range of the key token, for error messages
}
// This type is just so we can use Go's native sort function
type expansionKeysArray []pjMapEntry

func (a expansionKeysArray) Len() int          { return len(a) }
func (a expansionKeysArray) Swap(i int, j int) { a[i], a[j] = a[j], a[i] }

// Less implements the "PATTERN_KEY_COMPARE" comparison from the node.js ESM
// resolution algorithm, which orders keys in descending order of specificity.
func (a expansionKeysArray) Less(i int, j int) bool {
	// Assert: keyA ends with "/" or contains only a single "*".
	// Assert: keyB ends with "/" or contains only a single "*".
	keyA := a[i].key
	keyB := a[j].key

	// The "base length" of a key is the index of its "*" if it has one, or
	// the whole key's length otherwise. Longer base lengths sort first.
	starA := strings.IndexByte(keyA, '*')
	starB := strings.IndexByte(keyB, '*')
	baseLengthA := len(keyA)
	if starA >= 0 {
		baseLengthA = starA
	}
	baseLengthB := len(keyB)
	if starB >= 0 {
		baseLengthB = starB
	}
	if baseLengthA != baseLengthB {
		return baseLengthA > baseLengthB
	}

	// With equal base lengths, a key without a "*" sorts after one with a "*"
	if starA < 0 {
		return false
	}
	if starB < 0 {
		return true
	}

	// Otherwise the longer key sorts first; equal lengths preserve order
	return len(keyA) > len(keyB)
}
// valueForKey returns the value of the first map entry with the given key,
// along with whether such an entry exists.
func (entry pjEntry) valueForKey(key string) (pjEntry, bool) {
	for index := range entry.mapData {
		if item := &entry.mapData[index]; item.key == key {
			return item.value, true
		}
	}
	return pjEntry{}, false
}
// parseImportsExportsMap parses the value of an "imports" or "exports" field
// from a "package.json" file into a "pjMap". A top-level "null" yields nil.
// Invalid values are recorded with kind "pjInvalid" and reported as warnings,
// as are conditional branches that can never be reached.
func parseImportsExportsMap(source logger.Source, log logger.Log, json js_ast.Expr, propertyKey string, propertyKeyLoc logger.Loc) *pjMap {
	var visit func(expr js_ast.Expr) pjEntry
	tracker := logger.MakeLineColumnTracker(&source)

	visit = func(expr js_ast.Expr) pjEntry {
		var firstToken logger.Range

		switch e := expr.Data.(type) {
		case *js_ast.ENull:
			return pjEntry{
				kind:       pjNull,
				firstToken: js_lexer.RangeOfIdentifier(source, expr.Loc),
			}

		case *js_ast.EString:
			return pjEntry{
				kind:       pjString,
				firstToken: source.RangeOfString(expr.Loc),
				strData:    helpers.UTF16ToString(e.Value),
			}

		case *js_ast.EArray:
			arrData := make([]pjEntry, len(e.Items))
			for i, item := range e.Items {
				arrData[i] = visit(item)
			}
			return pjEntry{
				kind:       pjArray,
				firstToken: logger.Range{Loc: expr.Loc, Len: 1},
				arrData:    arrData,
			}

		case *js_ast.EObject:
			mapData := make([]pjMapEntry, len(e.Properties))
			expansionKeys := make(expansionKeysArray, 0, len(e.Properties))
			firstToken := logger.Range{Loc: expr.Loc, Len: 1}
			isConditionalSugar := false
			// DeadCondition tracks conditional keys that can never be chosen
			// because an earlier key already covers every possibility
			type DeadCondition struct {
				reason string
				ranges []logger.Range
				notes  []logger.MsgData
			}
			var foundDefault logger.Range
			var foundImport logger.Range
			var foundRequire logger.Range
			var deadCondition DeadCondition

			for i, property := range e.Properties {
				// NOTE(review): keys from the JSON parser are assumed to
				// always be strings here ("keyStr" is not checked for nil)
				keyStr, _ := property.Key.Data.(*js_ast.EString)
				key := helpers.UTF16ToString(keyStr.Value)
				keyRange := source.RangeOfString(property.Key.Loc)

				// If exports is an Object with both a key starting with "." and a key
				// not starting with ".", throw an Invalid Package Configuration error.
				curIsConditionalSugar := !strings.HasPrefix(key, ".")
				if i == 0 {
					isConditionalSugar = curIsConditionalSugar
				} else if isConditionalSugar != curIsConditionalSugar {
					prevEntry := mapData[i-1]
					log.AddIDWithNotes(logger.MsgID_PackageJSON_InvalidImportsOrExports, logger.Warning, &tracker, keyRange,
						"This object cannot contain keys that both start with \".\" and don't start with \".\"",
						[]logger.MsgData{tracker.MsgData(prevEntry.keyRange,
							fmt.Sprintf("The key %q is incompatible with the previous key %q:", key, prevEntry.key))})
					return pjEntry{
						kind:       pjInvalid,
						firstToken: firstToken,
					}
				}

				// Track "dead" conditional branches that can never be reached
				if foundDefault.Len != 0 || (foundImport.Len != 0 && foundRequire.Len != 0) {
					deadCondition.ranges = append(deadCondition.ranges, keyRange)

					// Note: Don't warn about the "default" condition as it's supposed to be a catch-all condition
					if deadCondition.reason == "" && key != "default" {
						if foundDefault.Len != 0 {
							deadCondition.reason = "\"default\""
							deadCondition.notes = []logger.MsgData{
								tracker.MsgData(foundDefault, "The \"default\" condition comes earlier and will always be chosen:"),
							}
						} else {
							deadCondition.reason = "both \"import\" and \"require\""
							deadCondition.notes = []logger.MsgData{
								tracker.MsgData(foundImport, "The \"import\" condition comes earlier and will be used for all \"import\" statements:"),
								tracker.MsgData(foundRequire, "The \"require\" condition comes earlier and will be used for all \"require\" calls:"),
							}
						}
					}
				} else {
					switch key {
					case "default":
						foundDefault = keyRange
					case "import":
						foundImport = keyRange
					case "require":
						foundRequire = keyRange
					}
				}

				entry := pjMapEntry{
					key:      key,
					keyRange: keyRange,
					value:    visit(property.ValueOrNil),
				}

				if strings.HasSuffix(key, "/") || strings.IndexByte(key, '*') >= 0 {
					expansionKeys = append(expansionKeys, entry)
				}

				mapData[i] = entry
			}

			// Let expansionKeys be the list of keys of matchObj either ending in "/"
			// or containing only a single "*", sorted by the sorting function
			// PATTERN_KEY_COMPARE which orders in descending order of specificity.
			sort.Stable(expansionKeys)

			// Warn about "dead" conditional branches that can never be reached
			if deadCondition.reason != "" {
				kind := logger.Warning
				if helpers.IsInsideNodeModules(source.KeyPath.Text) {
					// Hide the warning for packages the user cannot fix
					kind = logger.Debug
				}
				var conditions strings.Builder
				conditionWord := "condition"
				itComesWord := "it comes"
				if len(deadCondition.ranges) > 1 {
					conditionWord = "conditions"
					itComesWord = "they come"
				}
				for i, r := range deadCondition.ranges {
					if i > 0 {
						conditions.WriteString(" and ")
					}
					conditions.WriteString(source.TextForRange(r))
				}
				log.AddIDWithNotes(logger.MsgID_PackageJSON_DeadCondition, kind, &tracker, deadCondition.ranges[0],
					fmt.Sprintf("The %s %s here will never be used as %s after %s", conditionWord, conditions.String(), itComesWord, deadCondition.reason),
					deadCondition.notes)
			}

			return pjEntry{
				kind:          pjObject,
				firstToken:    firstToken,
				mapData:       mapData,
				expansionKeys: expansionKeys,
			}

		case *js_ast.EBoolean:
			firstToken = js_lexer.RangeOfIdentifier(source, expr.Loc)

		case *js_ast.ENumber:
			firstToken = source.RangeOfNumber(expr.Loc)

		default:
			firstToken.Loc = expr.Loc
		}

		log.AddID(logger.MsgID_PackageJSON_InvalidImportsOrExports, logger.Warning, &tracker, firstToken,
			"This value must be a string, an object, an array, or null")
		return pjEntry{
			kind:       pjInvalid,
			firstToken: firstToken,
		}
	}

	root := visit(json)
	if root.kind == pjNull {
		return nil
	}

	return &pjMap{
		root:           root,
		propertyKey:    propertyKey,
		propertyKeyLoc: propertyKeyLoc,
	}
}
// keysStartWithDot reports whether the first key in this object starts with
// ".". Objects mixing "." and non-"." keys are rejected at parse time, so
// inspecting the first key is enough to classify the whole object.
func (entry pjEntry) keysStartWithDot() bool {
	if len(entry.mapData) == 0 {
		return false
	}
	return strings.HasPrefix(entry.mapData[0].key, ".")
}
// pjStatus is the outcome of running the "imports"/"exports" map resolution
// algorithm. The named failure cases mirror the errors from the node.js ESM
// resolver specification.
type pjStatus uint8

const (
	pjStatusUndefined                  pjStatus = iota
	pjStatusUndefinedNoConditionsMatch // A more friendly error message for when no conditions are matched
	pjStatusNull
	pjStatusExact
	pjStatusExactEndsWithStar
	pjStatusInexact        // This means we may need to try CommonJS-style extension suffixes
	pjStatusPackageResolve // Need to re-run package resolution on the result

	// Module specifier is an invalid URL, package name or package subpath specifier.
	pjStatusInvalidModuleSpecifier

	// package.json configuration is invalid or contains an invalid configuration.
	pjStatusInvalidPackageConfiguration

	// Package exports or imports define a target module for the package that is an invalid type or string target.
	pjStatusInvalidPackageTarget

	// Package exports do not define or permit a target subpath in the package for the given module.
	pjStatusPackagePathNotExported

	// Package imports do not define the specifier.
	pjStatusPackageImportNotDefined

	// The package or module requested does not exist.
	pjStatusModuleNotFound
	pjStatusModuleNotFoundMissingExtension // The user just needs to add the missing extension

	// The resolved path corresponds to a directory, which is not a supported target for module imports.
	pjStatusUnsupportedDirectoryImport
	pjStatusUnsupportedDirectoryImportMissingIndex // The user just needs to add the missing "/index.js" suffix
)
// isUndefined reports whether the lookup produced no result at all (as
// opposed to an explicit "null" or a specific error status)
func (status pjStatus) isUndefined() bool {
	switch status {
	case pjStatusUndefined, pjStatusUndefinedNoConditionsMatch:
		return true
	}
	return false
}
// pjDebug carries extra diagnostic context alongside a pjStatus so that
// failures can be reported against a useful source location in package.json.
type pjDebug struct {
	// If the status is "pjStatusInvalidPackageTarget" or "pjStatusInvalidModuleSpecifier",
	// then this is the reason. It always starts with " because".
	invalidBecause string

	// If the status is "pjStatusUndefinedNoConditionsMatch", this is the set of
	// conditions that didn't match, in the order that they were found in the file.
	// This information is used for error messages.
	unmatchedConditions []logger.Span

	// This is the range of the token to use for error messages
	token logger.Range

	// If true, the token is a "null" literal
	isBecauseOfNullLiteral bool
}
// esmHandlePostConditions applies the final validation steps of the ESM
// resolution algorithm to a successfully-resolved path: it rejects paths
// containing percent-encoded "/" or "\" separators and paths that end with a
// slash. On success the returned path has been percent-unescaped; on failure
// the original (still-escaped) path is returned with an error status.
func (r resolverQuery) esmHandlePostConditions(
	resolved string,
	status pjStatus,
	debug pjDebug,
) (string, pjStatus, pjDebug) {
	// Only successful resolutions need these checks; errors pass through unchanged
	if status != pjStatusExact && status != pjStatusExactEndsWithStar && status != pjStatusInexact {
		return resolved, status, debug
	}

	// If resolved contains any percent encodings of "/" or "\" ("%2f" and "%5C"
	// respectively), then throw an Invalid Module Specifier error.
	resolvedPath, err := url.PathUnescape(resolved)
	if err != nil {
		if r.debugLogs != nil {
			r.debugLogs.addNote(fmt.Sprintf("The path %q contains invalid URL escapes: %s", resolved, err.Error()))
		}
		return resolved, pjStatusInvalidModuleSpecifier, debug
	}

	// Check both hex cases separately so the error message can show the exact
	// sequence that was found in the still-escaped path
	var found string
	if strings.Contains(resolved, "%2f") {
		found = "%2f"
	} else if strings.Contains(resolved, "%2F") {
		found = "%2F"
	} else if strings.Contains(resolved, "%5c") {
		found = "%5c"
	} else if strings.Contains(resolved, "%5C") {
		found = "%5C"
	}
	if found != "" {
		if r.debugLogs != nil {
			r.debugLogs.addNote(fmt.Sprintf("The path %q is not allowed to contain %q", resolved, found))
		}
		return resolved, pjStatusInvalidModuleSpecifier, debug
	}

	// If the file at resolved is a directory, then throw an Unsupported Directory
	// Import error.
	if strings.HasSuffix(resolvedPath, "/") || strings.HasSuffix(resolvedPath, "\\") {
		if r.debugLogs != nil {
			r.debugLogs.addNote(fmt.Sprintf("The path %q is not allowed to end with a slash", resolved))
		}
		return resolved, pjStatusUnsupportedDirectoryImport, debug
	}

	// Set resolved to the real path of resolved.
	return resolvedPath, status, debug
}
// esmPackageImportsResolve looks up a specifier in the "imports" map of a
// package.json. A lookup that yields nothing (or an explicit null) is
// reported as pjStatusPackageImportNotDefined so callers can generate a
// friendly error.
func (r resolverQuery) esmPackageImportsResolve(
	specifier string,
	imports pjEntry,
	conditions map[string]bool,
) (string, pjStatus, pjDebug) {
	// ALGORITHM DEVIATION: Provide a friendly error message if "imports" is not an object
	if imports.kind != pjObject {
		return "", pjStatusInvalidPackageConfiguration, pjDebug{token: imports.firstToken}
	}

	resolved, status, debug := r.esmPackageImportsExportsResolve(specifier, imports, "/", true, conditions)
	if status != pjStatusNull && status != pjStatusUndefined {
		return resolved, status, debug
	}

	if r.debugLogs != nil {
		r.debugLogs.addNote(fmt.Sprintf("The package import %q is not defined", specifier))
	}
	return specifier, pjStatusPackageImportNotDefined, pjDebug{token: imports.firstToken}
}
func (r resolverQuery) esmPackageExportsResolve(
packageURL string,
subpath string,
exports pjEntry,
conditions map[string]bool,
) (string, pjStatus, pjDebug) {
if exports.kind == pjInvalid {
if r.debugLogs != nil {
r.debugLogs.addNote("Invalid package configuration")
}
return "", pjStatusInvalidPackageConfiguration, pjDebug{token: exports.firstToken}
}
debugToReturn := pjDebug{token: exports.firstToken}
if subpath == "." {
mainExport := pjEntry{kind: pjNull}
if exports.kind == pjString || exports.kind == pjArray || (exports.kind == pjObject && !exports.keysStartWithDot()) {
mainExport = exports
} else if exports.kind == pjObject {
if dot, ok := exports.valueForKey("."); ok {
if r.debugLogs != nil {
r.debugLogs.addNote("Using the entry for \".\"")
}
mainExport = dot
}
}
if mainExport.kind != pjNull {
resolved, status, debug := r.esmPackageTargetResolve(packageURL, mainExport, "", false, false, conditions)
if status != pjStatusNull && status != pjStatusUndefined {
return resolved, status, debug
} else {
debugToReturn = debug
}
}
} else if exports.kind == pjObject && exports.keysStartWithDot() {
resolved, status, debug := r.esmPackageImportsExportsResolve(subpath, exports, packageURL, false, conditions)
if status != pjStatusNull && status != pjStatusUndefined {
return resolved, status, debug
} else {
debugToReturn = debug
}
}
if r.debugLogs != nil {
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | true |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/resolver/dataurl.go | internal/resolver/dataurl.go | package resolver
import (
"encoding/base64"
"fmt"
"net/url"
"strings"
)
// DataURL holds the pieces of a parsed "data:" URL.
type DataURL struct {
	mimeType string // MIME type text before the comma (any ";base64" suffix removed)
	data     string // the still-encoded payload after the comma
	isBase64 bool   // true if the payload is base64-encoded rather than percent-escaped
}

// ParseDataURL splits a "data:<mimeType>[;base64],<data>" URL into its parts.
// The boolean result is false when the input is not a data URL.
func ParseDataURL(url string) (DataURL, bool) {
	const scheme = "data:"
	if !strings.HasPrefix(url, scheme) {
		return DataURL{}, false
	}

	// Everything before the first comma is the MIME type, everything after is data
	head, payload, hasComma := strings.Cut(url[len(scheme):], ",")
	if !hasComma {
		return DataURL{}, false
	}

	parsed := DataURL{mimeType: head, data: payload}
	if trimmed := strings.TrimSuffix(head, ";base64"); trimmed != head {
		parsed.mimeType = trimmed
		parsed.isBase64 = true
	}
	return parsed, true
}
// MIMEType identifies the few data URL MIME types that get special handling
type MIMEType uint8

const (
	MIMETypeUnsupported MIMEType = iota
	MIMETypeTextCSS
	MIMETypeTextJavaScript
	MIMETypeApplicationJSON
)
// DecodeMIMEType classifies the MIME type of this data URL, ignoring any
// parameters (e.g. ";charset=utf-8"). Unknown types map to MIMETypeUnsupported.
func (parsed DataURL) DecodeMIMEType() MIMEType {
	// Strip everything from the first ";" onward (e.g. ";charset=utf-8")
	base, _, _ := strings.Cut(parsed.mimeType, ";")

	// Hard-code a few supported types
	switch base {
	case "text/css":
		return MIMETypeTextCSS
	case "text/javascript":
		return MIMETypeTextJavaScript
	case "application/json":
		return MIMETypeApplicationJSON
	}
	return MIMETypeUnsupported
}
// DecodeData decodes the payload of this data URL, using base64 or
// percent-unescaping depending on how the URL declared its encoding.
func (parsed DataURL) DecodeData() (string, error) {
	if !parsed.isBase64 {
		// Try to read percent-escaped data
		content, err := url.PathUnescape(parsed.data)
		if err != nil {
			return "", fmt.Errorf("could not decode percent-escaped data: %s", err.Error())
		}
		return content, nil
	}

	// Try to read base64 data
	raw, err := base64.StdEncoding.DecodeString(parsed.data)
	if err != nil {
		return "", fmt.Errorf("could not decode base64 data: %s", err.Error())
	}
	return string(raw), nil
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/resolver/yarnpnp.go | internal/resolver/yarnpnp.go | package resolver
// This file implements the Yarn PnP specification: https://yarnpkg.com/advanced/pnp-spec/
import (
"fmt"
"path"
"regexp"
"strings"
"syscall"
"github.com/evanw/esbuild/internal/helpers"
"github.com/evanw/esbuild/internal/js_ast"
"github.com/evanw/esbuild/internal/js_parser"
"github.com/evanw/esbuild/internal/logger"
)
// pnpData is the parsed contents of a Yarn PnP manifest file.
type pnpData struct {
	// Keys are the package idents, values are sets of references. Combining the
	// ident with each individual reference yields the set of affected locators.
	fallbackExclusionList map[string]map[string]bool

	// A map of locators that all packages are allowed to access, regardless
	// whether they list them in their dependencies or not.
	fallbackPool map[string]pnpIdentAndReference

	// A nullable regexp. If set, all project-relative importer paths should be
	// matched against it. If the match succeeds, the resolution should follow
	// the classic Node.js resolution algorithm rather than the Plug'n'Play one.
	// Note that unlike other paths in the manifest, the one checked against this
	// regexp won't begin by `./`.
	ignorePatternData *regexp.Regexp

	// The original pattern text, kept when "ignorePatternData" could not be
	// compiled as a Go regular expression
	invalidIgnorePatternData string

	// This is the main part of the PnP data file. This table contains the list
	// of all packages, first keyed by package ident then by package reference.
	// One entry will have `null` in both fields and represents the absolute
	// top-level package.
	packageRegistryData map[string]map[string]pnpPackage

	// Maps a package location (a relative path normalized to end in "/") to
	// the locator of the package at that location; used by "findLocator"
	packageLocatorsByLocations map[string]pnpPackageLocatorByLocation

	// If true, should a dependency resolution fail for an importer that isn't
	// explicitly listed in `fallbackExclusionList`, the runtime must first check
	// whether the resolution would succeed for any of the packages in
	// `fallbackPool`; if it would, transparently return this resolution. Note
	// that all dependencies from the top-level package are implicitly part of
	// the fallback pool, even if not listed here.
	enableTopLevelFallback bool

	// For mapping byte offsets in the manifest to line/column positions
	// (presumably for diagnostics reported elsewhere — not used in this file)
	tracker logger.LineColumnTracker

	// The absolute path of the manifest file itself
	absPath string

	// The absolute path of the directory containing the manifest; package
	// locations in the manifest are resolved relative to this
	absDirPath string
}
// This is called both a "locator" and a "dependency target" in the specification.
// When it's used as a dependency target, it can only be in one of three states:
//
//  1. A reference, to link with the dependency name
//     In this case ident is "".
//
//  2. An aliased package
//     In this case neither ident nor reference are "".
//
//  3. A missing peer dependency
//     In this case ident and reference are "".
type pnpIdentAndReference struct {
	ident     string // Empty if null
	reference string // Empty if null

	// The source range of the value in the manifest, for error messages
	span logger.Range
}

// pnpPackage is one entry from "packageRegistryData": a single installed
// package and the dependencies it is allowed to resolve.
type pnpPackage struct {
	// Dependency name -> dependency target for this package
	packageDependencies map[string]pnpIdentAndReference

	// The location of this package relative to the manifest directory
	packageLocation string

	// The source range of the whole "packageDependencies" array, used when
	// reporting errors about missing dependencies
	packageDependenciesRange logger.Range

	// If true, this entry should be skipped by location-based lookups
	discardFromLookup bool
}

// pnpPackageLocatorByLocation is the value type of "packageLocatorsByLocations"
type pnpPackageLocatorByLocation struct {
	locator           pnpIdentAndReference
	discardFromLookup bool
}
// parseBareIdentifier implements PARSE_BARE_IDENTIFIER from the Yarn PnP
// specification: it splits a bare import specifier into the package ident and
// the remaining module path. Scoped packages ("@scope/name") keep two path
// segments in the ident; all other packages keep one. A scoped specifier with
// no "/" separator is an error (ok == false).
func parseBareIdentifier(specifier string) (ident string, modulePath string, ok bool) {
	head, rest, hasSlash := strings.Cut(specifier, "/")

	if strings.HasPrefix(specifier, "@") {
		// A scoped specifier must contain a "/" separator
		if !hasSlash {
			return
		}

		// The ident extends to the second "/" separator or the end of the string
		name, _, _ := strings.Cut(rest, "/")
		ident = head + "/" + name
	} else if hasSlash {
		// The ident extends to the first "/" separator
		ident = head
	} else {
		// No separator at all: the whole specifier is the ident
		ident = specifier
	}

	// Everything after the ident (including the leading "/") is the module path
	modulePath = specifier[len(ident):]
	ok = true
	return
}
// pnpStatus classifies the outcome of a Yarn PnP resolution attempt. Error
// values are ordered before pnpSuccess so that isError is a single comparison.
type pnpStatus uint8

const (
	pnpErrorGeneric pnpStatus = iota
	pnpErrorDependencyNotFound
	pnpErrorUnfulfilledPeerDependency
	pnpSuccess
	pnpSkipped
)

// isError reports whether this status represents a resolution failure
func (status pnpStatus) isError() bool {
	return status < pnpSuccess
}

// pnpResult is the outcome of "resolveToUnqualified"
type pnpResult struct {
	status     pnpStatus
	pkgDirPath string // Absolute directory of the resolved package (on success)
	pkgIdent   string // The package ident parsed out of the specifier
	pkgSubpath string // The module path remaining after the ident

	// This is for error messages
	errorIdent string
	errorRange logger.Range
}
// Note: If this returns successfully then the node module resolution algorithm
// (i.e. NM_RESOLVE in the Yarn PnP specification) is always run afterward
//
// This implements RESOLVE_TO_UNQUALIFIED from the Yarn PnP specification: it
// maps a bare import specifier to the directory of the package that should
// provide it (plus the remaining subpath), using the dependency tables from
// the PnP manifest instead of "node_modules" directories.
func (r resolverQuery) resolveToUnqualified(specifier string, parentURL string, manifest *pnpData) pnpResult {
	// Let resolved be undefined
	// Let manifest be FIND_PNP_MANIFEST(parentURL)
	// (this is already done by the time we get here)
	if r.debugLogs != nil {
		r.debugLogs.addNote(fmt.Sprintf("Using Yarn PnP manifest from %q", manifest.absPath))
		r.debugLogs.addNote(fmt.Sprintf(" Resolving %q in %q", specifier, parentURL))
	}

	// Let ident and modulePath be the result of PARSE_BARE_IDENTIFIER(specifier)
	ident, modulePath, ok := parseBareIdentifier(specifier)
	if !ok {
		if r.debugLogs != nil {
			r.debugLogs.addNote(fmt.Sprintf(" Failed to parse specifier %q into a bare identifier", specifier))
		}
		return pnpResult{status: pnpErrorGeneric}
	}
	if r.debugLogs != nil {
		r.debugLogs.addNote(fmt.Sprintf(" Parsed bare identifier %q and module path %q", ident, modulePath))
	}

	// Let parentLocator be FIND_LOCATOR(manifest, parentURL)
	parentLocator, ok := r.findLocator(manifest, parentURL)

	// If parentLocator is null, then
	// Set resolved to NM_RESOLVE(specifier, parentURL) and return it
	if !ok {
		return pnpResult{status: pnpSkipped}
	}
	if r.debugLogs != nil {
		r.debugLogs.addNote(fmt.Sprintf(" Found parent locator: [%s, %s]", quoteOrNullIfEmpty(parentLocator.ident), quoteOrNullIfEmpty(parentLocator.reference)))
	}

	// Let parentPkg be GET_PACKAGE(manifest, parentLocator)
	parentPkg, ok := r.getPackage(manifest, parentLocator.ident, parentLocator.reference)
	if !ok {
		// We aren't supposed to get here according to the Yarn PnP specification
		return pnpResult{status: pnpErrorGeneric}
	}
	if r.debugLogs != nil {
		r.debugLogs.addNote(fmt.Sprintf(" Found parent package at %q", parentPkg.packageLocation))
	}

	// Let referenceOrAlias be the entry from parentPkg.packageDependencies referenced by ident
	referenceOrAlias, ok := parentPkg.packageDependencies[ident]

	// If referenceOrAlias is null or undefined, then
	if !ok || referenceOrAlias.reference == "" {
		if r.debugLogs != nil {
			r.debugLogs.addNote(fmt.Sprintf(" Failed to find %q in \"packageDependencies\" of parent package", ident))
		}

		// If manifest.enableTopLevelFallback is true, then
		if manifest.enableTopLevelFallback {
			if r.debugLogs != nil {
				r.debugLogs.addNote(" Searching for a fallback because \"enableTopLevelFallback\" is true")
			}

			// If parentLocator isn't in manifest.fallbackExclusionList, then
			if set := manifest.fallbackExclusionList[parentLocator.ident]; !set[parentLocator.reference] {
				// Let fallback be RESOLVE_VIA_FALLBACK(manifest, ident)
				fallback, _ := r.resolveViaFallback(manifest, ident)

				// If fallback is neither null nor undefined
				if fallback.reference != "" {
					// Set referenceOrAlias to fallback
					referenceOrAlias = fallback
					ok = true
				}
			} else if r.debugLogs != nil {
				r.debugLogs.addNote(fmt.Sprintf(" Stopping because [%s, %s] is in \"fallbackExclusionList\"",
					quoteOrNullIfEmpty(parentLocator.ident), quoteOrNullIfEmpty(parentLocator.reference)))
			}
		}
	}

	// If referenceOrAlias is still undefined, then
	if !ok {
		// Throw a resolution error
		return pnpResult{
			status:     pnpErrorDependencyNotFound,
			errorIdent: ident,
			errorRange: parentPkg.packageDependenciesRange,
		}
	}

	// If referenceOrAlias is still null, then
	if referenceOrAlias.reference == "" {
		// Note: It means that parentPkg has an unfulfilled peer dependency on ident
		// Throw a resolution error
		return pnpResult{
			status:     pnpErrorUnfulfilledPeerDependency,
			errorIdent: ident,
			errorRange: referenceOrAlias.span,
		}
	}

	if r.debugLogs != nil {
		var referenceOrAliasStr string
		if referenceOrAlias.ident != "" {
			referenceOrAliasStr = fmt.Sprintf("[%q, %q]", referenceOrAlias.ident, referenceOrAlias.reference)
		} else {
			referenceOrAliasStr = quoteOrNullIfEmpty(referenceOrAlias.reference)
		}
		r.debugLogs.addNote(fmt.Sprintf(" Found dependency locator: [%s, %s]", quoteOrNullIfEmpty(ident), referenceOrAliasStr))
	}

	// Otherwise, if referenceOrAlias is an array, then
	var dependencyPkg pnpPackage
	if referenceOrAlias.ident != "" {
		// Let alias be referenceOrAlias
		alias := referenceOrAlias

		// Let dependencyPkg be GET_PACKAGE(manifest, alias)
		dependencyPkg, ok = r.getPackage(manifest, alias.ident, alias.reference)
		if !ok {
			// We aren't supposed to get here according to the Yarn PnP specification
			return pnpResult{status: pnpErrorGeneric}
		}
	} else {
		// Otherwise,
		// Let dependencyPkg be GET_PACKAGE(manifest, {ident, reference})
		dependencyPkg, ok = r.getPackage(manifest, ident, referenceOrAlias.reference)
		if !ok {
			// We aren't supposed to get here according to the Yarn PnP specification
			return pnpResult{status: pnpErrorGeneric}
		}
	}
	if r.debugLogs != nil {
		r.debugLogs.addNote(fmt.Sprintf(" Found package %q at %q", ident, dependencyPkg.packageLocation))
	}

	// Return path.resolve(manifest.dirPath, dependencyPkg.packageLocation, modulePath)
	absDirPath := manifest.absDirPath
	isWindows := !strings.HasPrefix(absDirPath, "/")
	if isWindows {
		// Yarn converts Windows-style paths with volume labels into Unix-style
		// paths with a "/" prefix for the purpose of joining them together here.
		// So "C:\foo\bar.txt" becomes "/C:/foo/bar.txt". This is very important
		// because Yarn also stores a single global cache on the "C:" drive, many
		// developers do their work on the "D:" drive, and Yarn uses "../C:" to
		// traverse between the "D:" drive and the "C:" drive. Windows doesn't
		// allow you to do that ("D:\.." is just "D:\") so without temporarily
		// swapping to Unix-style paths here, esbuild would otherwise fail in this
		// case while Yarn itself would succeed.
		absDirPath = "/" + strings.ReplaceAll(absDirPath, "\\", "/")
	}
	pkgDirPath := path.Join(absDirPath, dependencyPkg.packageLocation)
	if isWindows && strings.HasPrefix(pkgDirPath, "/") {
		// Convert the Unix-style path back into a Windows-style path afterwards.
		// This must be the exact inverse of the transform above: drop the
		// leading "/" and turn each "/" back into "\". (The previous code
		// replaced "\" with "//" here, which left the path in Unix style.)
		pkgDirPath = strings.ReplaceAll(pkgDirPath[1:], "/", "\\")
	}
	if r.debugLogs != nil {
		r.debugLogs.addNote(fmt.Sprintf(" Resolved %q via Yarn PnP to %q with subpath %q", specifier, pkgDirPath, modulePath))
	}
	return pnpResult{
		status:     pnpSuccess,
		pkgDirPath: pkgDirPath,
		pkgIdent:   ident,
		pkgSubpath: modulePath,
	}
}
// findLocator implements FIND_LOCATOR from the Yarn PnP specification: it
// maps an absolute module path to the locator of the package containing it by
// repeatedly trimming the last path component and consulting the
// "packageLocatorsByLocations" table.
func (r resolverQuery) findLocator(manifest *pnpData, moduleUrl string) (pnpIdentAndReference, bool) {
	// Let relativeUrl be the relative path between manifest and moduleUrl
	relativeUrl, ok := r.fs.Rel(manifest.absDirPath, moduleUrl)
	if !ok {
		return pnpIdentAndReference{}, false
	} else {
		// Relative URLs on Windows will use \ instead of /, which will break
		// everything we do below. Use normal slashes to keep things working.
		relativeUrl = strings.ReplaceAll(relativeUrl, "\\", "/")
	}

	// The relative path must not start with ./; trim it if needed
	relativeUrl = strings.TrimPrefix(relativeUrl, "./")

	// If relativeUrl matches manifest.ignorePatternData, then
	if manifest.ignorePatternData != nil && manifest.ignorePatternData.MatchString(relativeUrl) {
		if r.debugLogs != nil {
			r.debugLogs.addNote(fmt.Sprintf(" Ignoring %q because it matches \"ignorePatternData\"", relativeUrl))
		}

		// Return null
		return pnpIdentAndReference{}, false
	}

	// Note: Make sure relativeUrl always starts with a ./ or ../ and ends
	// with a /, since that's the key format used by the locations table
	if !strings.HasSuffix(relativeUrl, "/") {
		relativeUrl += "/"
	}
	if !strings.HasPrefix(relativeUrl, "./") && !strings.HasPrefix(relativeUrl, "../") {
		relativeUrl = "./" + relativeUrl
	}

	// This is the inner loop from Yarn's PnP resolver implementation. This is
	// different from the specification, which contains a hypothetical slow
	// algorithm instead. The algorithm from the specification can sometimes
	// produce different results from the one used by the implementation, so
	// we follow the implementation.
	for {
		entry, ok := manifest.packageLocatorsByLocations[relativeUrl]
		if !ok || entry.discardFromLookup {
			// Remove the last path component and try again
			relativeUrl = relativeUrl[:strings.LastIndexByte(relativeUrl[:len(relativeUrl)-1], '/')+1]
			if relativeUrl == "" {
				break
			}
			continue
		}
		return entry.locator, true
	}
	return pnpIdentAndReference{}, false
}
// resolveViaFallback implements RESOLVE_VIA_FALLBACK from the Yarn PnP
// specification: it tries to satisfy a dependency from the top-level
// package's dependencies first, and then from the manifest's fallback pool.
func (r resolverQuery) resolveViaFallback(manifest *pnpData, ident string) (pnpIdentAndReference, bool) {
	// Let topLevelPkg be GET_PACKAGE(manifest, {null, null})
	topLevelPkg, ok := r.getPackage(manifest, "", "")
	if !ok {
		// We aren't supposed to get here according to the Yarn PnP specification
		return pnpIdentAndReference{}, false
	}

	// Let referenceOrAlias be the entry from topLevelPkg.packageDependencies referenced by ident
	referenceOrAlias, ok := topLevelPkg.packageDependencies[ident]

	// If referenceOrAlias is defined, then
	if ok {
		// Return it immediately
		if r.debugLogs != nil {
			r.debugLogs.addNote(fmt.Sprintf(" Found fallback for %q in \"packageDependencies\" of top-level package: [%s, %s]", ident,
				quoteOrNullIfEmpty(referenceOrAlias.ident), quoteOrNullIfEmpty(referenceOrAlias.reference)))
		}
		return referenceOrAlias, true
	}

	// Otherwise,
	// Let referenceOrAlias be the entry from manifest.fallbackPool referenced by ident
	referenceOrAlias, ok = manifest.fallbackPool[ident]

	// Return it immediately, whether it's defined or not
	if r.debugLogs != nil {
		if ok {
			r.debugLogs.addNote(fmt.Sprintf(" Found fallback for %q in \"fallbackPool\": [%s, %s]", ident,
				quoteOrNullIfEmpty(referenceOrAlias.ident), quoteOrNullIfEmpty(referenceOrAlias.reference)))
		} else {
			r.debugLogs.addNote(fmt.Sprintf(" Failed to find fallback for %q in \"fallbackPool\"", ident))
		}
	}
	return referenceOrAlias, ok
}
// getPackage implements GET_PACKAGE from the Yarn PnP specification: it looks
// up a package by ident and reference in "packageRegistryData". A miss is an
// invariant violation per the specification, so it is logged when debugging.
func (r resolverQuery) getPackage(manifest *pnpData, ident string, reference string) (pnpPackage, bool) {
	// Indexing a missing ident yields a nil inner map, which safely misses too
	if pkg, ok := manifest.packageRegistryData[ident][reference]; ok {
		return pkg, true
	}

	if r.debugLogs != nil {
		// We aren't supposed to get here according to the Yarn PnP specification:
		// "Note: pkg cannot be undefined here; all packages referenced in any of the
		// Plug'n'Play data tables MUST have a corresponding entry inside packageRegistryData."
		r.debugLogs.addNote(fmt.Sprintf(" Yarn PnP invariant violation: GET_PACKAGE failed to find a package: [%s, %s]",
			quoteOrNullIfEmpty(ident), quoteOrNullIfEmpty(reference)))
	}
	return pnpPackage{}, false
}
// quoteOrNullIfEmpty formats a string for debug logs, rendering the empty
// string (PnP's encoding of null) as the literal text "null".
func quoteOrNullIfEmpty(str string) string {
	if str == "" {
		return "null"
	}
	return fmt.Sprintf("%q", str)
}
// compileYarnPnPData converts the parsed JSON of a Yarn PnP manifest into the
// pnpData lookup tables used during resolution. Malformed or unexpected
// entries are silently skipped rather than reported as errors.
func compileYarnPnPData(absPath string, absDirPath string, json js_ast.Expr, source logger.Source) *pnpData {
	data := pnpData{
		absPath:    absPath,
		absDirPath: absDirPath,
		tracker:    logger.MakeLineColumnTracker(&source),
	}

	if value, _, ok := getProperty(json, "enableTopLevelFallback"); ok {
		if enableTopLevelFallback, ok := getBool(value); ok {
			data.enableTopLevelFallback = enableTopLevelFallback
		}
	}

	// "fallbackExclusionList" is an array of [ident, [references...]] tuples
	if value, _, ok := getProperty(json, "fallbackExclusionList"); ok {
		if array, ok := value.Data.(*js_ast.EArray); ok {
			data.fallbackExclusionList = make(map[string]map[string]bool, len(array.Items))
			for _, item := range array.Items {
				if tuple, ok := item.Data.(*js_ast.EArray); ok && len(tuple.Items) == 2 {
					if ident, ok := getStringOrNull(tuple.Items[0]); ok {
						if array2, ok := tuple.Items[1].Data.(*js_ast.EArray); ok {
							references := make(map[string]bool, len(array2.Items))
							for _, item2 := range array2.Items {
								if reference, ok := getString(item2); ok {
									references[reference] = true
								}
							}
							data.fallbackExclusionList[ident] = references
						}
					}
				}
			}
		}
	}

	// "fallbackPool" is an array of [ident, dependencyTarget] tuples
	if value, _, ok := getProperty(json, "fallbackPool"); ok {
		if array, ok := value.Data.(*js_ast.EArray); ok {
			data.fallbackPool = make(map[string]pnpIdentAndReference, len(array.Items))
			for _, item := range array.Items {
				if array2, ok := item.Data.(*js_ast.EArray); ok && len(array2.Items) == 2 {
					if ident, ok := getString(array2.Items[0]); ok {
						if dependencyTarget, ok := getDependencyTarget(array2.Items[1]); ok {
							data.fallbackPool[ident] = dependencyTarget
						}
					}
				}
			}
		}
	}

	if value, _, ok := getProperty(json, "ignorePatternData"); ok {
		if ignorePatternData, ok := getString(value); ok {
			// The Go regular expression engine doesn't support some of the features
			// that JavaScript regular expressions support, including "(?!" negative
			// lookaheads which Yarn uses. This is deliberate on Go's part. See this:
			// https://github.com/golang/go/issues/18868.
			//
			// Yarn uses this feature to exclude the "." and ".." path segments in
			// the middle of a relative path. However, we shouldn't ever generate
			// such path segments in the first place. So as a hack, we just remove
			// the specific character sequences used by Yarn for this so that the
			// regular expression is more likely to be able to be compiled.
			ignorePatternData = strings.ReplaceAll(ignorePatternData, `(?!\.)`, "")
			ignorePatternData = strings.ReplaceAll(ignorePatternData, `(?!(?:^|\/)\.)`, "")
			ignorePatternData = strings.ReplaceAll(ignorePatternData, `(?!\.{1,2}(?:\/|$))`, "")
			ignorePatternData = strings.ReplaceAll(ignorePatternData, `(?!(?:^|\/)\.{1,2}(?:\/|$))`, "")
			if reg, err := regexp.Compile(ignorePatternData); err == nil {
				data.ignorePatternData = reg
			} else {
				// Keep the original text around so errors can mention it
				data.invalidIgnorePatternData = ignorePatternData
			}
		}
	}

	// "packageRegistryData" is an array of [ident, [[reference, pkg]...]] tuples
	if value, _, ok := getProperty(json, "packageRegistryData"); ok {
		if array, ok := value.Data.(*js_ast.EArray); ok {
			data.packageRegistryData = make(map[string]map[string]pnpPackage, len(array.Items))
			data.packageLocatorsByLocations = make(map[string]pnpPackageLocatorByLocation)
			for _, item := range array.Items {
				if tuple, ok := item.Data.(*js_ast.EArray); ok && len(tuple.Items) == 2 {
					if packageIdent, ok := getStringOrNull(tuple.Items[0]); ok {
						if array2, ok := tuple.Items[1].Data.(*js_ast.EArray); ok {
							references := make(map[string]pnpPackage, len(array2.Items))
							data.packageRegistryData[packageIdent] = references
							for _, item2 := range array2.Items {
								if tuple2, ok := item2.Data.(*js_ast.EArray); ok && len(tuple2.Items) == 2 {
									if packageReference, ok := getStringOrNull(tuple2.Items[0]); ok {
										pkg := tuple2.Items[1]
										if packageLocation, _, ok := getProperty(pkg, "packageLocation"); ok {
											if packageDependencies, _, ok := getProperty(pkg, "packageDependencies"); ok {
												if packageLocation, ok := getString(packageLocation); ok {
													if array3, ok := packageDependencies.Data.(*js_ast.EArray); ok {
														deps := make(map[string]pnpIdentAndReference, len(array3.Items))
														discardFromLookup := false
														for _, dep := range array3.Items {
															if array4, ok := dep.Data.(*js_ast.EArray); ok && len(array4.Items) == 2 {
																if ident, ok := getString(array4.Items[0]); ok {
																	if dependencyTarget, ok := getDependencyTarget(array4.Items[1]); ok {
																		deps[ident] = dependencyTarget
																	}
																}
															}
														}
														if value, _, ok := getProperty(pkg, "discardFromLookup"); ok {
															if value, ok := getBool(value); ok {
																discardFromLookup = value
															}
														}
														references[packageReference] = pnpPackage{
															packageLocation:     packageLocation,
															packageDependencies: deps,
															packageDependenciesRange: logger.Range{
																Loc: packageDependencies.Loc,
																Len: array3.CloseBracketLoc.Start + 1 - packageDependencies.Loc.Start,
															},
															discardFromLookup: discardFromLookup,
														}

														// This is what Yarn's PnP implementation does (specifically in
														// "hydrateRuntimeState"), so we replicate that behavior here:
														if entry, ok := data.packageLocatorsByLocations[packageLocation]; !ok {
															data.packageLocatorsByLocations[packageLocation] = pnpPackageLocatorByLocation{
																locator:           pnpIdentAndReference{ident: packageIdent, reference: packageReference},
																discardFromLookup: discardFromLookup,
															}
														} else {
															entry.discardFromLookup = entry.discardFromLookup && discardFromLookup
															if !discardFromLookup {
																entry.locator = pnpIdentAndReference{ident: packageIdent, reference: packageReference}
															}
															data.packageLocatorsByLocations[packageLocation] = entry
														}
													}
												}
											}
										}
									}
								}
							}
						}
					}
				}
			}
		}
	}

	return &data
}
// getStringOrNull reads a JSON value that may be either a string or null.
// Null is represented as the empty string (ok == true); any other value
// reports ok == false.
func getStringOrNull(json js_ast.Expr) (string, bool) {
	if str, ok := json.Data.(*js_ast.EString); ok {
		return helpers.UTF16ToString(str.Value), true
	}
	if _, ok := json.Data.(*js_ast.ENull); ok {
		return "", true
	}
	return "", false
}
// getDependencyTarget decodes one dependency target value from the manifest
// JSON: either null (a missing peer dependency), a reference string, or a
// two-element [ident, reference] alias tuple. The returned span records the
// token's source range for error messages.
func getDependencyTarget(json js_ast.Expr) (pnpIdentAndReference, bool) {
	switch d := json.Data.(type) {
	case *js_ast.ENull:
		// The span length of 4 covers the literal text "null"
		return pnpIdentAndReference{span: logger.Range{Loc: json.Loc, Len: 4}}, true

	case *js_ast.EString:
		return pnpIdentAndReference{reference: helpers.UTF16ToString(d.Value), span: logger.Range{Loc: json.Loc}}, true

	case *js_ast.EArray:
		if len(d.Items) == 2 {
			if name, ok := getString(d.Items[0]); ok {
				if reference, ok := getString(d.Items[1]); ok {
					return pnpIdentAndReference{
						ident:     name,
						reference: reference,
						span:      logger.Range{Loc: json.Loc, Len: d.CloseBracketLoc.Start + 1 - json.Loc.Start},
					}, true
				}
			}
		}
	}
	return pnpIdentAndReference{}, false
}
// pnpDataMode controls whether a missing PnP data file is an error or is
// silently ignored (e.g. when probing for an optional file)
type pnpDataMode uint8

const (
	pnpIgnoreErrorsAboutMissingFiles pnpDataMode = iota
	pnpReportErrorsAboutMissingFiles
)
// extractYarnPnPDataFromJSON reads and parses a PnP data file that is plain
// JSON. A missing file is only reported as an error when mode is
// pnpReportErrorsAboutMissingFiles; other read errors are always reported.
func (r resolverQuery) extractYarnPnPDataFromJSON(pnpDataPath string, mode pnpDataMode) (result js_ast.Expr, source logger.Source) {
	contents, err, originalError := r.caches.FSCache.ReadFile(r.fs, pnpDataPath)
	if r.debugLogs != nil && originalError != nil {
		r.debugLogs.addNote(fmt.Sprintf("Failed to read file %q: %s", pnpDataPath, originalError.Error()))
	}
	if err != nil {
		// ENOENT is tolerated when the caller asked to ignore missing files
		if mode == pnpReportErrorsAboutMissingFiles || err != syscall.ENOENT {
			prettyPaths := MakePrettyPaths(r.fs, logger.Path{Text: pnpDataPath, Namespace: "file"})
			r.log.AddError(nil, logger.Range{}, fmt.Sprintf("Cannot read file %q: %s",
				prettyPaths.Select(r.options.LogPathStyle), err.Error()))
		}
		return
	}
	if r.debugLogs != nil {
		r.debugLogs.addNote(fmt.Sprintf("The file %q exists", pnpDataPath))
	}
	keyPath := logger.Path{Text: pnpDataPath, Namespace: "file"}
	source = logger.Source{
		KeyPath:     keyPath,
		PrettyPaths: MakePrettyPaths(r.fs, keyPath),
		Contents:    contents,
	}
	result, _ = r.caches.JSONCache.Parse(r.log, source, js_parser.JSONOptions{})
	return
}
// tryToExtractYarnPnPDataFromJS reads a JavaScript PnP file (".pnp.cjs"-style)
// and extracts the embedded JSON manifest from it using a special parser mode.
// Missing-file handling follows the same rules as extractYarnPnPDataFromJSON.
func (r resolverQuery) tryToExtractYarnPnPDataFromJS(pnpDataPath string, mode pnpDataMode) (result js_ast.Expr, source logger.Source) {
	contents, err, originalError := r.caches.FSCache.ReadFile(r.fs, pnpDataPath)
	if r.debugLogs != nil && originalError != nil {
		r.debugLogs.addNote(fmt.Sprintf("Failed to read file %q: %s", pnpDataPath, originalError.Error()))
	}
	if err != nil {
		// ENOENT is tolerated when the caller asked to ignore missing files
		if mode == pnpReportErrorsAboutMissingFiles || err != syscall.ENOENT {
			prettyPaths := MakePrettyPaths(r.fs, logger.Path{Text: pnpDataPath, Namespace: "file"})
			r.log.AddError(nil, logger.Range{}, fmt.Sprintf("Cannot read file %q: %s",
				prettyPaths.Select(r.options.LogPathStyle), err.Error()))
		}
		return
	}
	if r.debugLogs != nil {
		r.debugLogs.addNote(fmt.Sprintf("The file %q exists", pnpDataPath))
	}
	keyPath := logger.Path{Text: pnpDataPath, Namespace: "file"}
	source = logger.Source{
		KeyPath:     keyPath,
		PrettyPaths: MakePrettyPaths(r.fs, keyPath),
		Contents:    contents,
	}
	ast, _ := r.caches.JSCache.Parse(r.log, source, js_parser.OptionsForYarnPnP())
	if r.debugLogs != nil && ast.ManifestForYarnPnP.Data != nil {
		r.debugLogs.addNote(fmt.Sprintf(" Extracted JSON data from %q", pnpDataPath))
	}
	return ast.ManifestForYarnPnP, source
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/css_parser/css_color_spaces.go | internal/css_parser/css_color_spaces.go | package css_parser
import (
"math"
"github.com/evanw/esbuild/internal/helpers"
)
// Wrap float64 math to avoid compiler optimizations that break determinism
type F64 = helpers.F64

// Reference: https://drafts.csswg.org/css-color/#color-conversion-code

// colorSpace identifies one of the color spaces supported by the conversion
// functions in this file
type colorSpace uint8

const (
	colorSpace_a98_rgb colorSpace = iota
	colorSpace_display_p3
	colorSpace_hsl
	colorSpace_hwb
	colorSpace_lab
	colorSpace_lch
	colorSpace_oklab
	colorSpace_oklch
	colorSpace_prophoto_rgb
	colorSpace_rec2020
	colorSpace_srgb
	colorSpace_srgb_linear
	colorSpace_xyz
	colorSpace_xyz_d50
	colorSpace_xyz_d65
)
// isPolar reports whether this color space uses a hue angle coordinate
// (HSL, HWB, LCH, and OKLCH) rather than purely rectangular coordinates
func (colorSpace colorSpace) isPolar() bool {
	return colorSpace == colorSpace_hsl ||
		colorSpace == colorSpace_hwb ||
		colorSpace == colorSpace_lch ||
		colorSpace == colorSpace_oklch
}
// hueMethod selects how to interpolate between two hue angles (the CSS
// "hue-interpolation-method" keywords)
type hueMethod uint8

const (
	shorterHue hueMethod = iota
	longerHue
	increasingHue
	decreasingHue
)
// lin_srgb undoes the sRGB transfer function, converting gamma-encoded
// channel values to linear-light values. The curve is mirrored around zero
// (via WithSignFrom) so negative out-of-gamut values round-trip.
func lin_srgb(r F64, g F64, b F64) (F64, F64, F64) {
	decode := func(val F64) F64 {
		abs := val.Abs()
		if abs.Value() < 0.04045 {
			// The transfer function is linear near zero
			return val.DivConst(12.92)
		}
		return abs.AddConst(0.055).DivConst(1.055).PowConst(2.4).WithSignFrom(val)
	}
	return decode(r), decode(g), decode(b)
}
// gam_srgb applies the sRGB transfer function, converting linear-light
// channel values back to gamma-encoded values. It is the inverse of lin_srgb
// and likewise mirrors the curve around zero for negative values.
func gam_srgb(r F64, g F64, b F64) (F64, F64, F64) {
	encode := func(val F64) F64 {
		abs := val.Abs()
		if abs.Value() > 0.0031308 {
			return abs.PowConst(1 / 2.4).MulConst(1.055).SubConst(0.055).WithSignFrom(val)
		}
		// The transfer function is linear near zero
		return val.MulConst(12.92)
	}
	return encode(r), encode(g), encode(b)
}
// lin_srgb_to_xyz converts linear-light sRGB to CIE XYZ. The exact rational
// matrix entries come from the CSS color conversion code referenced at the
// top of this file.
func lin_srgb_to_xyz(r F64, g F64, b F64) (F64, F64, F64) {
	M := [9]float64{
		506752.0 / 1228815, 87881.0 / 245763, 12673.0 / 70218,
		87098.0 / 409605, 175762.0 / 245763, 12673.0 / 175545,
		7918.0 / 409605, 87881.0 / 737289, 1001167.0 / 1053270,
	}
	return multiplyMatrices(M, r, g, b)
}

// xyz_to_lin_srgb converts CIE XYZ to linear-light sRGB (the inverse matrix
// of lin_srgb_to_xyz)
func xyz_to_lin_srgb(x F64, y F64, z F64) (F64, F64, F64) {
	M := [9]float64{
		12831.0 / 3959, -329.0 / 214, -1974.0 / 3959,
		-851781.0 / 878810, 1648619.0 / 878810, 36519.0 / 878810,
		705.0 / 12673, -2585.0 / 12673, 705.0 / 667,
	}
	return multiplyMatrices(M, x, y, z)
}
// lin_p3 undoes the display-p3 transfer function, which is identical to the
// sRGB transfer function
func lin_p3(r F64, g F64, b F64) (F64, F64, F64) {
	return lin_srgb(r, g, b)
}

// gam_p3 applies the display-p3 transfer function, which is identical to the
// sRGB transfer function
func gam_p3(r F64, g F64, b F64) (F64, F64, F64) {
	return gam_srgb(r, g, b)
}

// lin_p3_to_xyz converts linear-light display-p3 to CIE XYZ
func lin_p3_to_xyz(r F64, g F64, b F64) (F64, F64, F64) {
	M := [9]float64{
		608311.0 / 1250200, 189793.0 / 714400, 198249.0 / 1000160,
		35783.0 / 156275, 247089.0 / 357200, 198249.0 / 2500400,
		0.0 / 1, 32229.0 / 714400, 5220557.0 / 5000800,
	}
	return multiplyMatrices(M, r, g, b)
}

// xyz_to_lin_p3 converts CIE XYZ to linear-light display-p3 (the inverse
// matrix of lin_p3_to_xyz)
func xyz_to_lin_p3(x F64, y F64, z F64) (F64, F64, F64) {
	M := [9]float64{
		446124.0 / 178915, -333277.0 / 357830, -72051.0 / 178915,
		-14852.0 / 17905, 63121.0 / 35810, 423.0 / 17905,
		11844.0 / 330415, -50337.0 / 660830, 316169.0 / 330415,
	}
	return multiplyMatrices(M, x, y, z)
}
// lin_prophoto linearizes prophoto-rgb values using its piecewise transfer
// function: a linear segment below the Et2 cutoff and a 1.8 power curve
// above it. The sign is mirrored for negative values.
func lin_prophoto(r F64, g F64, b F64) (F64, F64, F64) {
	f := func(val F64) F64 {
		const Et2 = 16.0 / 512
		if abs := val.Abs(); abs.Value() <= Et2 {
			return val.DivConst(16)
		} else {
			return abs.PowConst(1.8).WithSignFrom(val)
		}
	}
	return f(r), f(g), f(b)
}
// gam_prophoto is the inverse of "lin_prophoto": it gamma-encodes
// linear-light prophoto-rgb values.
func gam_prophoto(r F64, g F64, b F64) (F64, F64, F64) {
	f := func(val F64) F64 {
		const Et = 1.0 / 512
		if abs := val.Abs(); abs.Value() >= Et {
			return abs.PowConst(1 / 1.8).WithSignFrom(val)
		} else {
			return val.MulConst(16)
		}
	}
	return f(r), f(g), f(b)
}
// lin_prophoto_to_xyz converts linear prophoto-rgb to XYZ. Note that the
// callers in this file pair this with "d65_to_d50"/"d50_to_d65" (see
// "xyz_to_colorSpace"), so this matrix is relative to the D50 white point.
func lin_prophoto_to_xyz(r F64, g F64, b F64) (F64, F64, F64) {
	M := [9]float64{
		0.7977604896723027, 0.13518583717574031, 0.0313493495815248,
		0.2880711282292934, 0.7118432178101014, 0.00008565396060525902,
		0.0, 0.0, 0.8251046025104601,
	}
	return multiplyMatrices(M, r, g, b)
}
// xyz_to_lin_prophoto applies the inverse matrix of "lin_prophoto_to_xyz".
func xyz_to_lin_prophoto(x F64, y F64, z F64) (F64, F64, F64) {
	M := [9]float64{
		1.3457989731028281, -0.25558010007997534, -0.05110628506753401,
		-0.5446224939028347, 1.5082327413132781, 0.02053603239147973,
		0.0, 0.0, 1.2119675456389454,
	}
	return multiplyMatrices(M, x, y, z)
}
// lin_a98rgb linearizes a98-rgb values with a pure 563/256 power curve
// (there is no linear segment), mirroring the sign for negative values.
func lin_a98rgb(r F64, g F64, b F64) (F64, F64, F64) {
	f := func(val F64) F64 {
		return val.Abs().PowConst(563.0 / 256).WithSignFrom(val)
	}
	return f(r), f(g), f(b)
}
// gam_a98rgb is the inverse of "lin_a98rgb" (a 256/563 power curve).
func gam_a98rgb(r F64, g F64, b F64) (F64, F64, F64) {
	f := func(val F64) F64 {
		return val.Abs().PowConst(256.0 / 563).WithSignFrom(val)
	}
	return f(r), f(g), f(b)
}
// lin_a98rgb_to_xyz converts linear-light a98-rgb to CIE XYZ.
func lin_a98rgb_to_xyz(r F64, g F64, b F64) (F64, F64, F64) {
	M := [9]float64{
		573536.0 / 994567, 263643.0 / 1420810, 187206.0 / 994567,
		591459.0 / 1989134, 6239551.0 / 9945670, 374412.0 / 4972835,
		53769.0 / 1989134, 351524.0 / 4972835, 4929758.0 / 4972835,
	}
	return multiplyMatrices(M, r, g, b)
}
// xyz_to_lin_a98rgb applies the inverse matrix of "lin_a98rgb_to_xyz".
func xyz_to_lin_a98rgb(x F64, y F64, z F64) (F64, F64, F64) {
	M := [9]float64{
		1829569.0 / 896150, -506331.0 / 896150, -308931.0 / 896150,
		-851781.0 / 878810, 1648619.0 / 878810, 36519.0 / 878810,
		16779.0 / 1248040, -147721.0 / 1248040, 1266979.0 / 1248040,
	}
	return multiplyMatrices(M, x, y, z)
}
// lin_2020 linearizes rec2020 values using its piecewise transfer function:
// a linear segment with slope 4.5 below the β*4.5 cutoff and a 1/0.45 power
// curve above it. The sign is mirrored for negative values.
func lin_2020(r F64, g F64, b F64) (F64, F64, F64) {
	f := func(val F64) F64 {
		const α = 1.09929682680944
		const β = 0.018053968510807
		if abs := val.Abs(); abs.Value() < β*4.5 {
			return val.DivConst(4.5)
		} else {
			return abs.AddConst(α - 1).DivConst(α).PowConst(1 / 0.45).WithSignFrom(val)
		}
	}
	return f(r), f(g), f(b)
}
// gam_2020 is the inverse of "lin_2020": it gamma-encodes linear-light
// rec2020 values.
func gam_2020(r F64, g F64, b F64) (F64, F64, F64) {
	f := func(val F64) F64 {
		const α = 1.09929682680944
		const β = 0.018053968510807
		if abs := val.Abs(); abs.Value() > β {
			return abs.PowConst(0.45).MulConst(α).SubConst(α - 1).WithSignFrom(val)
		} else {
			return val.MulConst(4.5)
		}
	}
	return f(r), f(g), f(b)
}
// lin_2020_to_xyz converts linear-light rec2020 to CIE XYZ.
func lin_2020_to_xyz(r F64, g F64, b F64) (F64, F64, F64) {
	var M = [9]float64{
		63426534.0 / 99577255, 20160776.0 / 139408157, 47086771.0 / 278816314,
		26158966.0 / 99577255, 472592308.0 / 697040785, 8267143.0 / 139408157,
		0.0 / 1, 19567812.0 / 697040785, 295819943.0 / 278816314,
	}
	return multiplyMatrices(M, r, g, b)
}
// xyz_to_lin_2020 applies the inverse matrix of "lin_2020_to_xyz".
func xyz_to_lin_2020(x F64, y F64, z F64) (F64, F64, F64) {
	M := [9]float64{
		30757411.0 / 17917100, -6372589.0 / 17917100, -4539589.0 / 17917100,
		-19765991.0 / 29648200, 47925759.0 / 29648200, 467509.0 / 29648200,
		792561.0 / 44930125, -1921689.0 / 44930125, 42328811.0 / 44930125,
	}
	return multiplyMatrices(M, x, y, z)
}
// d65_to_d50 chromatically adapts an XYZ color from the D65 white point to
// D50 (presumably a Bradford-adapted matrix — TODO confirm source).
func d65_to_d50(x F64, y F64, z F64) (F64, F64, F64) {
	M := [9]float64{
		1.0479297925449969, 0.022946870601609652, -0.05019226628920524,
		0.02962780877005599, 0.9904344267538799, -0.017073799063418826,
		-0.009243040646204504, 0.015055191490298152, 0.7518742814281371,
	}
	return multiplyMatrices(M, x, y, z)
}
// d50_to_d65 is the inverse adaptation of "d65_to_d50" (D50 back to D65).
func d50_to_d65(x F64, y F64, z F64) (F64, F64, F64) {
	M := [9]float64{
		0.955473421488075, -0.02309845494876471, 0.06325924320057072,
		-0.0283697093338637, 1.0099953980813041, 0.021041441191917323,
		0.012314014864481998, -0.020507649298898964, 1.330365926242124,
	}
	return multiplyMatrices(M, x, y, z)
}
// The D50 reference white point as X and Z tristimulus values (with Y
// normalized to 1), derived from the (0.3457, 0.3585) xy chromaticity.
const d50_x = 0.3457 / 0.3585
const d50_z = (1.0 - 0.3457 - 0.3585) / 0.3585
// xyz_to_lab converts XYZ (relative to the D50 white point) to CIE Lab.
// The X and Z axes are first scaled by the reference white, then each axis
// passes through the piecewise forward transform "f" (a cube root above
// the ε breakpoint, a linear segment below it) before forming L, a, and b.
func xyz_to_lab(x F64, y F64, z F64) (F64, F64, F64) {
	const ε = 216.0 / 24389
	const κ = 24389.0 / 27
	// Piecewise forward transform shared by all three axes
	f := func(t F64) F64 {
		if t.Value() > ε {
			return t.Cbrt()
		}
		return t.MulConst(κ).AddConst(16).DivConst(116)
	}
	f0 := f(x.DivConst(d50_x))
	f1 := f(y)
	f2 := f(z.DivConst(d50_z))
	return f1.MulConst(116).SubConst(16),
		f0.Sub(f1).MulConst(500),
		f1.Sub(f2).MulConst(200)
}
// lab_to_xyz converts CIE Lab back to XYZ relative to the D50 white point,
// inverting the piecewise cube/linear transform used by "xyz_to_lab".
func lab_to_xyz(l F64, a F64, b F64) (x F64, y F64, z F64) {
	const κ = 24389.0 / 27
	const ε = 216.0 / 24389
	// Recover the intermediate f0/f1/f2 values from L, a, and b
	f1 := l.AddConst(16).DivConst(116)
	f0 := a.DivConst(500).Add(f1)
	f2 := f1.Sub(b.DivConst(200))
	f0_3 := f0.Cubed()
	f2_3 := f2.Cubed()
	// Invert the piecewise transform for each axis independently
	if f0_3.Value() > ε {
		x = f0_3
	} else {
		x = f0.MulConst(116).SubConst(16).DivConst(κ)
	}
	if l.Value() > κ*ε {
		y = l.AddConst(16).DivConst(116)
		y = y.Cubed()
	} else {
		y = l.DivConst(κ)
	}
	if f2_3.Value() > ε {
		z = f2_3
	} else {
		z = f2.MulConst(116).SubConst(16).DivConst(κ)
	}
	// Scale X and Z back up by the reference white point
	return x.MulConst(d50_x), y, z.MulConst(d50_z)
}
// lab_to_lch converts rectangular Lab coordinates to cylindrical LCH.
// The hue is returned in degrees, normalized into [0, 360), and the
// chroma is the length of the (a, b) vector.
func lab_to_lch(l F64, a F64, b F64) (F64, F64, F64) {
	hue := b.Atan2(a).MulConst(180 / math.Pi)
	if hue.Value() < 0 {
		hue = hue.AddConst(360)
	}
	return l,
		a.Squared().Add(b.Squared()).Sqrt(),
		hue
}
// lch_to_lab converts cylindrical LCH (hue in degrees) back to
// rectangular Lab coordinates.
func lch_to_lab(l F64, c F64, h F64) (F64, F64, F64) {
	return l,
		h.MulConst(math.Pi / 180).Cos().Mul(c),
		h.MulConst(math.Pi / 180).Sin().Mul(c)
}
// xyz_to_oklab converts CIE XYZ (D65) to OKLab: XYZ is mapped into the LMS
// cone space, each component is cube-rooted, and the result is mapped into
// OKLab coordinates.
func xyz_to_oklab(x F64, y F64, z F64) (F64, F64, F64) {
	XYZtoLMS := [9]float64{
		0.8190224432164319, 0.3619062562801221, -0.12887378261216414,
		0.0329836671980271, 0.9292868468965546, 0.03614466816999844,
		0.048177199566046255, 0.26423952494422764, 0.6335478258136937,
	}
	LMStoOKLab := [9]float64{
		0.2104542553, 0.7936177850, -0.0040720468,
		1.9779984951, -2.4285922050, 0.4505937099,
		0.0259040371, 0.7827717662, -0.8086757660,
	}
	l, m, s := multiplyMatrices(XYZtoLMS, x, y, z)
	return multiplyMatrices(LMStoOKLab, l.Cbrt(), m.Cbrt(), s.Cbrt())
}
// oklab_to_xyz inverts "xyz_to_oklab": OKLab is mapped back to LMS, each
// component is cubed, and the result is mapped back to XYZ.
func oklab_to_xyz(l F64, a F64, b F64) (F64, F64, F64) {
	LMStoXYZ := [9]float64{
		1.2268798733741557, -0.5578149965554813, 0.28139105017721583,
		-0.04057576262431372, 1.1122868293970594, -0.07171106666151701,
		-0.07637294974672142, -0.4214933239627914, 1.5869240244272418,
	}
	OKLabtoLMS := [9]float64{
		0.99999999845051981432, 0.39633779217376785678, 0.21580375806075880339,
		1.0000000088817607767, -0.1055613423236563494, -0.063854174771705903402,
		1.0000000546724109177, -0.089484182094965759684, -1.2914855378640917399,
	}
	l, m, s := multiplyMatrices(OKLabtoLMS, l, a, b)
	return multiplyMatrices(LMStoXYZ, l.Cubed(), m.Cubed(), s.Cubed())
}
// oklab_to_oklch converts OKLab to OKLCH. The rectangular-to-cylindrical
// math is identical to the Lab/LCH case.
func oklab_to_oklch(l F64, a F64, b F64) (F64, F64, F64) {
	return lab_to_lch(l, a, b)
}
// oklch_to_oklab converts OKLCH back to OKLab.
func oklch_to_oklab(l F64, c F64, h F64) (F64, F64, F64) {
	return lch_to_lab(l, c, h)
}
// multiplyMatrices multiplies the 3x3 row-major matrix "A" by the column
// vector "(b0, b1, b2)" and returns the resulting vector.
func multiplyMatrices(A [9]float64, b0 F64, b1 F64, b2 F64) (F64, F64, F64) {
	return b0.MulConst(A[0]).Add(b1.MulConst(A[1])).Add(b2.MulConst(A[2])),
		b0.MulConst(A[3]).Add(b1.MulConst(A[4])).Add(b2.MulConst(A[5])),
		b0.MulConst(A[6]).Add(b1.MulConst(A[7])).Add(b2.MulConst(A[8]))
}
// delta_eok computes the ΔEOK color difference: the Euclidean distance
// between two colors expressed in OKLab coordinates.
func delta_eok(L1 F64, a1 F64, b1 F64, L2 F64, a2 F64, b2 F64) F64 {
	ΔL_sq := L1.Sub(L2).Squared()
	Δa_sq := a1.Sub(a2).Squared()
	Δb_sq := b1.Sub(b2).Squared()
	return ΔL_sq.Add(Δa_sq).Add(Δb_sq).Sqrt()
}
// gamut_mapping_xyz_to_srgb maps an XYZ (D65) color into the sRGB gamut
// following the CSS Color 4 gamut-mapping approach: reduce OKLCH chroma by
// bisection until the color fits, accepting a naively-clipped color early
// whenever it is within a "just noticeable difference" (ΔEOK) of the
// current candidate.
func gamut_mapping_xyz_to_srgb(x F64, y F64, z F64) (F64, F64, F64) {
	origin_l, origin_c, origin_h := oklab_to_oklch(xyz_to_oklab(x, y, z))

	// Colors at or past the lightness extremes collapse to an achromatic
	// value (all three channels set to the lightness)
	if origin_l.Value() >= 1 || origin_l.Value() <= 0 {
		return origin_l, origin_l, origin_l
	}

	oklch_to_srgb := func(l F64, c F64, h F64) (F64, F64, F64) {
		l, a, b := oklch_to_oklab(l, c, h)
		x, y, z := oklab_to_xyz(l, a, b)
		r, g, b := xyz_to_lin_srgb(x, y, z)
		return gam_srgb(r, g, b)
	}
	srgb_to_oklab := func(r F64, g F64, b F64) (F64, F64, F64) {
		r, g, b = lin_srgb(r, g, b)
		x, y, z := lin_srgb_to_xyz(r, g, b)
		return xyz_to_oklab(x, y, z)
	}
	inGamut := func(r F64, g F64, b F64) bool {
		return r.Value() >= 0 && r.Value() <= 1 &&
			g.Value() >= 0 && g.Value() <= 1 &&
			b.Value() >= 0 && b.Value() <= 1
	}

	// Fast path: the color may already be inside the sRGB gamut
	r, g, b := oklch_to_srgb(origin_l, origin_c, origin_h)
	if inGamut(r, g, b) {
		return r, g, b
	}

	// JND is the "just noticeable difference" threshold in ΔEOK units and
	// epsilon is the chroma resolution at which the bisection stops
	const JND = 0.02
	const epsilon = 0.0001
	min := helpers.NewF64(0.0)
	max := origin_c
	clip := func(x F64) F64 {
		if x.Value() < 0 {
			return helpers.NewF64(0)
		}
		if x.Value() > 1 {
			return helpers.NewF64(1)
		}
		return x
	}

	// Bisect on chroma to find the gamut boundary
	for max.Sub(min).Value() > epsilon {
		chroma := min.Add(max).DivConst(2)
		origin_c = chroma
		r, g, b = oklch_to_srgb(origin_l, origin_c, origin_h)
		if inGamut(r, g, b) {
			min = chroma
			continue
		}

		// Out of gamut: accept simple channel clipping if the clipped color
		// is perceptually close enough to the current candidate. Note that
		// the arguments must be in (r, g, b) order for both conversions
		// below; this call previously passed blue and green swapped, which
		// corrupted the ΔEOK comparison.
		clipped_r, clipped_g, clipped_b := clip(r), clip(g), clip(b)
		L1, a1, b1 := srgb_to_oklab(clipped_r, clipped_g, clipped_b)
		L2, a2, b2 := srgb_to_oklab(r, g, b)
		E := delta_eok(L1, a1, b1, L2, a2, b2)
		if E.Value() < JND {
			return clipped_r, clipped_g, clipped_b
		}
		max = chroma
	}
	return r, g, b
}
// hsl_to_rgb converts HSL (hue in degrees, saturation and lightness in
// percent) to RGB channel values in [0, 1] using the spec's "f(n)"
// formulation with n = 0, 8, 4 for red, green, and blue respectively.
func hsl_to_rgb(hue F64, sat F64, light F64) (F64, F64, F64) {
	// Wrap the hue into [0, 360)
	hue = hue.DivConst(360)
	hue = hue.Sub(hue.Floor())
	hue = hue.MulConst(360)
	// Normalize percentages into [0, 1]
	sat = sat.DivConst(100)
	light = light.DivConst(100)
	f := func(n float64) F64 {
		// k = (n + hue/30) mod 12
		k := hue.DivConst(30).AddConst(n)
		k = k.DivConst(12)
		k = k.Sub(k.Floor())
		k = k.MulConst(12)
		// light - a * max(-1, min(k - 3, 9 - k, 1))
		a := helpers.Min2(light, light.Neg().AddConst(1)).Mul(sat)
		return light.Sub(helpers.Max2(helpers.NewF64(-1), helpers.Min3(k.SubConst(3), k.Neg().AddConst(9), helpers.NewF64(1))).Mul(a))
	}
	return f(0), f(8), f(4)
}
// rgb_to_hsl converts RGB channel values in [0, 1] to HSL with the hue in
// degrees and saturation/lightness in percent. Achromatic colors (where
// max == min) keep the initial NaN hue and zero saturation.
func rgb_to_hsl(red F64, green F64, blue F64) (F64, F64, F64) {
	max := helpers.Max3(red, green, blue)
	min := helpers.Min3(red, green, blue)
	hue, sat, light := helpers.NewF64(math.NaN()), helpers.NewF64(0.0), min.Add(max).DivConst(2)
	d := max.Sub(min)
	if d.Value() != 0 {
		// Guard against division by zero at the lightness extremes
		if div := helpers.Min2(light, light.Neg().AddConst(1)); div.Value() != 0 {
			sat = max.Sub(light).Div(div)
		}
		// Pick the hue sector based on which channel is the maximum (note
		// that this compares the F64 values directly with "==")
		switch max {
		case red:
			hue = green.Sub(blue).Div(d)
			if green.Value() < blue.Value() {
				hue = hue.AddConst(6)
			}
		case green:
			hue = blue.Sub(red).Div(d).AddConst(2)
		case blue:
			hue = red.Sub(green).Div(d).AddConst(4)
		}
		hue = hue.MulConst(60)
	}
	return hue, sat.MulConst(100), light.MulConst(100)
}
// hwb_to_rgb converts HWB (hue in degrees, whiteness and blackness in
// percent) to RGB channel values in [0, 1]. When whiteness plus blackness
// reaches 100% the color is a pure gray; otherwise the fully-saturated hue
// is mixed toward white/black.
func hwb_to_rgb(hue F64, white F64, black F64) (F64, F64, F64) {
	white = white.DivConst(100)
	black = black.DivConst(100)
	if white.Add(black).Value() >= 1 {
		gray := white.Div(white.Add(black))
		return gray, gray, gray
	}
	delta := white.Add(black).Neg().AddConst(1)
	// Start from the pure hue (100% saturation, 50% lightness)
	r, g, b := hsl_to_rgb(hue, helpers.NewF64(100), helpers.NewF64(50))
	r = delta.Mul(r).Add(white)
	g = delta.Mul(g).Add(white)
	b = delta.Mul(b).Add(white)
	return r, g, b
}
// rgb_to_hwb converts RGB channel values in [0, 1] to HWB: the hue comes
// from the HSL conversion, whiteness is the minimum channel, and blackness
// is one minus the maximum channel (both returned in percent).
func rgb_to_hwb(red F64, green F64, blue F64) (F64, F64, F64) {
	h, _, _ := rgb_to_hsl(red, green, blue)
	white := helpers.Min3(red, green, blue)
	black := helpers.Max3(red, green, blue).Neg().AddConst(1)
	return h, white.MulConst(100), black.MulConst(100)
}
// xyz_to_colorSpace converts a color from XYZ (D65) into the coordinates of
// the given color space, chaining the chromatic adaptation, matrix, gamma,
// and polar conversions each space requires. It panics on an unknown color
// space (which would be an internal consistency bug).
func xyz_to_colorSpace(x F64, y F64, z F64, colorSpace colorSpace) (F64, F64, F64) {
	switch colorSpace {
	case colorSpace_a98_rgb:
		return gam_a98rgb(xyz_to_lin_a98rgb(x, y, z))
	case colorSpace_display_p3:
		return gam_p3(xyz_to_lin_p3(x, y, z))
	case colorSpace_hsl:
		return rgb_to_hsl(gam_srgb(xyz_to_lin_srgb(x, y, z)))
	case colorSpace_hwb:
		return rgb_to_hwb(gam_srgb(xyz_to_lin_srgb(x, y, z)))
	case colorSpace_lab:
		// Lab and LCH are defined relative to D50, so adapt first
		return xyz_to_lab(d65_to_d50(x, y, z))
	case colorSpace_lch:
		return lab_to_lch(xyz_to_lab(d65_to_d50(x, y, z)))
	case colorSpace_oklab:
		return xyz_to_oklab(x, y, z)
	case colorSpace_oklch:
		return oklab_to_oklch(xyz_to_oklab(x, y, z))
	case colorSpace_prophoto_rgb:
		// ProPhoto is also defined relative to D50
		return gam_prophoto(xyz_to_lin_prophoto(d65_to_d50(x, y, z)))
	case colorSpace_rec2020:
		return gam_2020(xyz_to_lin_2020(x, y, z))
	case colorSpace_srgb:
		return gam_srgb(xyz_to_lin_srgb(x, y, z))
	case colorSpace_srgb_linear:
		return xyz_to_lin_srgb(x, y, z)
	case colorSpace_xyz, colorSpace_xyz_d65:
		return x, y, z
	case colorSpace_xyz_d50:
		return d65_to_d50(x, y, z)
	default:
		panic("Internal error")
	}
}
// colorSpace_to_xyz is the inverse of "xyz_to_colorSpace": it converts the
// coordinates "(v0, v1, v2)" of the given color space back to XYZ (D65).
// It panics on an unknown color space (an internal consistency bug).
func colorSpace_to_xyz(v0 F64, v1 F64, v2 F64, colorSpace colorSpace) (F64, F64, F64) {
	switch colorSpace {
	case colorSpace_a98_rgb:
		return lin_a98rgb_to_xyz(lin_a98rgb(v0, v1, v2))
	case colorSpace_display_p3:
		return lin_p3_to_xyz(lin_p3(v0, v1, v2))
	case colorSpace_hsl:
		return lin_srgb_to_xyz(lin_srgb(hsl_to_rgb(v0, v1, v2)))
	case colorSpace_hwb:
		return lin_srgb_to_xyz(lin_srgb(hwb_to_rgb(v0, v1, v2)))
	case colorSpace_lab:
		// Lab and LCH are defined relative to D50, so adapt afterward
		return d50_to_d65(lab_to_xyz(v0, v1, v2))
	case colorSpace_lch:
		return d50_to_d65(lab_to_xyz(lch_to_lab(v0, v1, v2)))
	case colorSpace_oklab:
		return oklab_to_xyz(v0, v1, v2)
	case colorSpace_oklch:
		return oklab_to_xyz(oklch_to_oklab(v0, v1, v2))
	case colorSpace_prophoto_rgb:
		// ProPhoto is also defined relative to D50
		return d50_to_d65(lin_prophoto_to_xyz(lin_prophoto(v0, v1, v2)))
	case colorSpace_rec2020:
		return lin_2020_to_xyz(lin_2020(v0, v1, v2))
	case colorSpace_srgb:
		return lin_srgb_to_xyz(lin_srgb(v0, v1, v2))
	case colorSpace_srgb_linear:
		return lin_srgb_to_xyz(v0, v1, v2)
	case colorSpace_xyz, colorSpace_xyz_d65:
		return v0, v1, v2
	case colorSpace_xyz_d50:
		return d50_to_d65(v0, v1, v2)
	default:
		panic("Internal error")
	}
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/css_parser/css_decls_gradient.go | internal/css_parser/css_decls_gradient.go | package css_parser
import (
"fmt"
"math"
"strconv"
"strings"
"github.com/evanw/esbuild/internal/compat"
"github.com/evanw/esbuild/internal/css_ast"
"github.com/evanw/esbuild/internal/css_lexer"
"github.com/evanw/esbuild/internal/helpers"
"github.com/evanw/esbuild/internal/logger"
)
// gradientKind identifies which CSS gradient function family a token uses.
type gradientKind uint8
const (
	linearGradient gradientKind = iota
	radialGradient
	conicGradient
)
// parsedGradient is the deconstructed form of a "...-gradient(...)" call.
type parsedGradient struct {
	// Tokens before the first color stop (e.g. direction, shape, or the
	// "in <color-space>" interpolation clause)
	leadingTokens []css_ast.Token
	colorStops    []colorStop
	kind          gradientKind
	// True for the "repeating-..." gradient variants
	repeating bool
}
// colorStop is one color stop: a color, up to two positions, and an
// optional midpoint (transition hint) that follows it.
type colorStop struct {
	positions []css_ast.Token
	color     css_ast.Token
	midpoint  css_ast.Token // Absent if "midpoint.Kind == css_lexer.T(0)"
}
// parseGradient attempts to deconstruct a "...-gradient(...)" function
// token into leading tokens plus a list of color stops. "success" is false
// when the token isn't a gradient function or uses syntax this code does
// not model (notably "var()" arguments, which may expand to commas and
// would break the comma-based stop splitting below).
func parseGradient(token css_ast.Token) (gradient parsedGradient, success bool) {
	if token.Kind != css_lexer.TFunction {
		return
	}
	// Identify the gradient kind and whether it's a "repeating-" variant
	switch strings.ToLower(token.Text) {
	case "linear-gradient":
		gradient.kind = linearGradient
	case "radial-gradient":
		gradient.kind = radialGradient
	case "conic-gradient":
		gradient.kind = conicGradient
	case "repeating-linear-gradient":
		gradient.kind = linearGradient
		gradient.repeating = true
	case "repeating-radial-gradient":
		gradient.kind = radialGradient
		gradient.repeating = true
	case "repeating-conic-gradient":
		gradient.kind = conicGradient
		gradient.repeating = true
	default:
		return
	}
	// Bail if any token is a "var()" since it may introduce commas
	tokens := *token.Children
	for _, t := range tokens {
		if t.Kind == css_lexer.TFunction && strings.EqualFold(t.Text, "var") {
			return
		}
	}
	// Try to strip the initial tokens (everything up to the first comma
	// when the argument list doesn't start with a color)
	if len(tokens) > 0 && !looksLikeColor(tokens[0]) {
		i := 0
		for i < len(tokens) && tokens[i].Kind != css_lexer.TComma {
			i++
		}
		gradient.leadingTokens = tokens[:i]
		if i < len(tokens) {
			tokens = tokens[i+1:]
		} else {
			tokens = nil
		}
	}
	// Try to parse the color stops
	for len(tokens) > 0 {
		// Parse the color
		color := tokens[0]
		if !looksLikeColor(color) {
			return
		}
		tokens = tokens[1:]
		// Parse up to two positions
		var positions []css_ast.Token
		for len(positions) < 2 && len(tokens) > 0 {
			position := tokens[0]
			if position.Kind.IsNumeric() || (position.Kind == css_lexer.TFunction && strings.EqualFold(position.Text, "calc")) {
				positions = append(positions, position)
			} else {
				break
			}
			tokens = tokens[1:]
		}
		// Parse the comma
		var midpoint css_ast.Token
		if len(tokens) > 0 {
			if tokens[0].Kind != css_lexer.TComma {
				return
			}
			tokens = tokens[1:]
			// A trailing comma with nothing after it is a syntax error
			if len(tokens) == 0 {
				return
			}
			// Parse the midpoint, if any
			if len(tokens) > 0 && tokens[0].Kind.IsNumeric() {
				midpoint = tokens[0]
				tokens = tokens[1:]
				// Followed by a mandatory comma
				if len(tokens) == 0 || tokens[0].Kind != css_lexer.TComma {
					return
				}
				tokens = tokens[1:]
			}
		}
		// Add the color stop
		gradient.colorStops = append(gradient.colorStops, colorStop{
			color:     color,
			positions: positions,
			midpoint:  midpoint,
		})
	}
	success = true
	return
}
// generateGradient re-serializes a parsed gradient back into the children
// of the original function token, re-inserting the commas between the
// leading tokens, each color stop, and each midpoint.
func (p *parser) generateGradient(token css_ast.Token, gradient parsedGradient) css_ast.Token {
	var children []css_ast.Token
	commaToken := p.commaToken(token.Loc)
	children = append(children, gradient.leadingTokens...)
	for _, stop := range gradient.colorStops {
		if len(children) > 0 {
			children = append(children, commaToken)
		}
		// A bare color with no position/midpoint shouldn't keep trailing
		// whitespace before the next comma
		if len(stop.positions) == 0 && stop.midpoint.Kind == css_lexer.T(0) {
			stop.color.Whitespace &= ^css_ast.WhitespaceAfter
		}
		children = append(children, stop.color)
		children = append(children, stop.positions...)
		if stop.midpoint.Kind != css_lexer.T(0) {
			children = append(children, commaToken, stop.midpoint)
		}
	}
	token.Children = &children
	return token
}
// lowerAndMinifyGradient rewrites a gradient function token for the target
// environment: it expands unsupported features (color spaces, interpolation
// methods, midpoints, double positions) into equivalent supported syntax,
// lowers each stop's color, and applies syntax minifications when enabled.
// Tokens that aren't recognizable gradients are returned unchanged.
func (p *parser) lowerAndMinifyGradient(token css_ast.Token, wouldClipColor *bool) css_ast.Token {
	gradient, ok := parseGradient(token)
	if !ok {
		return token
	}
	lowerMidpoints := p.options.unsupportedCSSFeatures.Has(compat.GradientMidpoints)
	lowerColorSpaces := p.options.unsupportedCSSFeatures.Has(compat.ColorFunctions)
	lowerInterpolation := p.options.unsupportedCSSFeatures.Has(compat.GradientInterpolation)
	// Assume that if the browser doesn't support color spaces in gradients, then
	// it doesn't correctly interpolate non-sRGB colors even when a color space
	// is not specified. This is the case for Firefox 120, for example, which has
	// support for the "color()" syntax but not for color spaces in gradients.
	// There is no entry in our feature support matrix for this edge case so we
	// make this assumption instead.
	//
	// Note that this edge case means we have to _replace_ the original gradient
	// with the expanded one instead of inserting a fallback before it. Otherwise
	// Firefox 120 would use the original gradient instead of the fallback because
	// it supports the syntax, but just renders it incorrectly.
	if lowerInterpolation {
		lowerColorSpaces = true
	}
	// Potentially expand the gradient to handle unsupported features
	didExpand := false
	if lowerMidpoints || lowerColorSpaces || lowerInterpolation {
		if colorStops, ok := tryToParseColorStops(gradient); ok {
			hasColorSpace := false
			hasMidpoint := false
			for _, stop := range colorStops {
				if stop.hasColorSpace {
					hasColorSpace = true
				}
				if stop.midpoint != nil {
					hasMidpoint = true
				}
			}
			remaining, colorSpace, hueMethod, hasInterpolation := removeColorInterpolation(gradient.leadingTokens)
			// Only expand when an unsupported feature is actually present
			if (hasInterpolation && lowerInterpolation) || (hasColorSpace && lowerColorSpaces) || (hasMidpoint && lowerMidpoints) {
				if hasInterpolation {
					tryToExpandGradient(token.Loc, &gradient, colorStops, remaining, colorSpace, hueMethod)
				} else {
					// Pick the default interpolation space: oklab when any
					// stop uses a color space, otherwise sRGB
					if hasColorSpace {
						colorSpace = colorSpace_oklab
					} else {
						colorSpace = colorSpace_srgb
					}
					tryToExpandGradient(token.Loc, &gradient, colorStops, gradient.leadingTokens, colorSpace, shorterHue)
				}
				didExpand = true
			}
		}
	}
	// Lower all colors in the gradient stop
	for i, stop := range gradient.colorStops {
		gradient.colorStops[i].color = p.lowerAndMinifyColor(stop.color, wouldClipColor)
	}
	if p.options.unsupportedCSSFeatures.Has(compat.GradientDoublePosition) {
		// Replace double positions with duplicated single positions
		for _, stop := range gradient.colorStops {
			if len(stop.positions) > 1 {
				gradient.colorStops = switchToSinglePositions(gradient.colorStops)
				break
			}
		}
	} else if p.options.minifySyntax {
		// Replace duplicated single positions with double positions
		for i, stop := range gradient.colorStops {
			if i > 0 && len(stop.positions) == 1 {
				if prev := gradient.colorStops[i-1]; len(prev.positions) == 1 && prev.midpoint.Kind == css_lexer.T(0) &&
					css_ast.TokensEqual([]css_ast.Token{prev.color}, []css_ast.Token{stop.color}, nil) {
					gradient.colorStops = switchToDoublePositions(gradient.colorStops)
					break
				}
			}
		}
	}
	if p.options.minifySyntax || didExpand {
		gradient.colorStops = removeImpliedPositions(gradient.kind, gradient.colorStops)
	}
	return p.generateGradient(token, gradient)
}
// removeImpliedPositions drops color stop positions that a browser would
// infer anyway: runs of (approximately) evenly-spaced positions between two
// explicit endpoints, a leading zero position, and a trailing "100%"
// position. This is a minification; the gradient renders the same.
func removeImpliedPositions(kind gradientKind, colorStops []colorStop) []colorStop {
	if len(colorStops) == 0 {
		return colorStops
	}
	// Numerically evaluate each single-position stop; NaN marks stops whose
	// position is missing or couldn't be evaluated
	positions := make([]valueWithUnit, len(colorStops))
	for i, stop := range colorStops {
		if len(stop.positions) == 1 {
			if pos, ok := tryToParseValue(stop.positions[0], kind); ok {
				positions[i] = pos
				continue
			}
		}
		positions[i].value = helpers.NewF64(math.NaN())
	}
	start := 0
	for start < len(colorStops) {
		if startPos := positions[start]; !startPos.value.IsNaN() {
			// Grow a run of stops with the same unit whose positions match
			// linear interpolation between the run's endpoints
			end := start + 1
		run:
			for colorStops[end-1].midpoint.Kind == css_lexer.T(0) && end < len(colorStops) {
				endPos := positions[end]
				if endPos.value.IsNaN() || endPos.unit != startPos.unit {
					break
				}
				// Check that all values in this run are implied. Interpolation is done
				// using the start and end positions instead of the first and second
				// positions because it's more accurate.
				for i := start + 1; i < end; i++ {
					t := helpers.NewF64(float64(i - start)).DivConst(float64(end - start))
					impliedValue := helpers.Lerp(startPos.value, endPos.value, t)
					if positions[i].value.Sub(impliedValue).Abs().Value() > 0.01 {
						break run
					}
				}
				end++
			}
			// Clear out all implied values
			if end-start > 1 {
				for i := start + 1; i+1 < end; i++ {
					colorStops[i].positions = nil
				}
				start = end - 1
				continue
			}
		}
		start++
	}
	// A leading "0%" or "0<unit>" position is implied
	if first := colorStops[0].positions; len(first) == 1 &&
		((first[0].Kind == css_lexer.TPercentage && first[0].PercentageValue() == "0") ||
			(first[0].Kind == css_lexer.TDimension && first[0].DimensionValue() == "0")) {
		colorStops[0].positions = nil
	}
	// A trailing "100%" position is implied
	if last := colorStops[len(colorStops)-1].positions; len(last) == 1 &&
		last[0].Kind == css_lexer.TPercentage && last[0].PercentageValue() == "100" {
		colorStops[len(colorStops)-1].positions = nil
	}
	return colorStops
}
// switchToSinglePositions expands double-position stops ("red 10% 20%")
// into two single-position stops with the same color, for targets without
// double-position support.
func switchToSinglePositions(double []colorStop) (single []colorStop) {
	for _, stop := range double {
		for i := range stop.positions {
			stop.positions[i].Whitespace = css_ast.WhitespaceBefore
		}
		// Emit one clone per extra position; the midpoint (if any) stays on
		// the final stop only
		for len(stop.positions) > 1 {
			clone := stop
			clone.positions = stop.positions[:1]
			clone.midpoint = css_ast.Token{}
			single = append(single, clone)
			stop.positions = stop.positions[1:]
		}
		single = append(single, stop)
	}
	return
}
// switchToDoublePositions merges adjacent single-position stops with equal
// colors into one double-position stop (a minification, inverse of
// "switchToSinglePositions").
func switchToDoublePositions(single []colorStop) (double []colorStop) {
	for i := 0; i < len(single); i++ {
		stop := single[i]
		if i+1 < len(single) && len(stop.positions) == 1 && stop.midpoint.Kind == css_lexer.T(0) {
			if next := single[i+1]; len(next.positions) == 1 &&
				css_ast.TokensEqual([]css_ast.Token{stop.color}, []css_ast.Token{next.color}, nil) {
				double = append(double, colorStop{
					color:     stop.color,
					positions: []css_ast.Token{stop.positions[0], next.positions[0]},
					midpoint:  next.midpoint,
				})
				i++
				continue
			}
		}
		double = append(double, stop)
	}
	return
}
// removeColorInterpolation scans a gradient's leading tokens for an
// "in <color-space> [<hue-method> hue]" interpolation clause. If one is
// found, it returns a copy of the tokens with the clause removed plus the
// parsed color space and hue method. The final return value is false when
// no clause is present or an identifier is unrecognized.
func removeColorInterpolation(tokens []css_ast.Token) ([]css_ast.Token, colorSpace, hueMethod, bool) {
	for i := 0; i+1 < len(tokens); i++ {
		if in := tokens[i]; in.Kind == css_lexer.TIdent && strings.EqualFold(in.Text, "in") {
			if space := tokens[i+1]; space.Kind == css_lexer.TIdent {
				var colorSpace colorSpace
				hueMethod := shorterHue
				start := i
				end := i + 2
				// Parse the color space
				switch strings.ToLower(space.Text) {
				case "a98-rgb":
					colorSpace = colorSpace_a98_rgb
				case "display-p3":
					colorSpace = colorSpace_display_p3
				case "hsl":
					colorSpace = colorSpace_hsl
				case "hwb":
					colorSpace = colorSpace_hwb
				case "lab":
					colorSpace = colorSpace_lab
				case "lch":
					colorSpace = colorSpace_lch
				case "oklab":
					colorSpace = colorSpace_oklab
				case "oklch":
					colorSpace = colorSpace_oklch
				case "prophoto-rgb":
					colorSpace = colorSpace_prophoto_rgb
				case "rec2020":
					colorSpace = colorSpace_rec2020
				case "srgb":
					colorSpace = colorSpace_srgb
				case "srgb-linear":
					colorSpace = colorSpace_srgb_linear
				case "xyz":
					colorSpace = colorSpace_xyz
				case "xyz-d50":
					colorSpace = colorSpace_xyz_d50
				case "xyz-d65":
					colorSpace = colorSpace_xyz_d65
				default:
					return nil, 0, 0, false
				}
				// Parse the optional hue mode for polar color spaces
				// (e.g. "in hsl longer hue")
				if colorSpace.isPolar() && i+3 < len(tokens) {
					if hue := tokens[i+3]; hue.Kind == css_lexer.TIdent && strings.EqualFold(hue.Text, "hue") {
						if method := tokens[i+2]; method.Kind == css_lexer.TIdent {
							switch strings.ToLower(method.Text) {
							case "shorter":
								hueMethod = shorterHue
							case "longer":
								hueMethod = longerHue
							case "increasing":
								hueMethod = increasingHue
							case "decreasing":
								hueMethod = decreasingHue
							default:
								return nil, 0, 0, false
							}
							end = i + 4
						}
					}
				}
				// Remove all parsed tokens
				remaining := append(append([]css_ast.Token{}, tokens[:start]...), tokens[end:]...)
				if n := len(remaining); n > 0 {
					remaining[0].Whitespace &= ^css_ast.WhitespaceBefore
					remaining[n-1].Whitespace &= ^css_ast.WhitespaceAfter
				}
				return remaining, colorSpace, hueMethod, true
			}
		}
	}
	return nil, 0, 0, false
}
// valueWithUnit is a numeric CSS value paired with its unit (e.g. "%").
type valueWithUnit struct {
	unit  string
	value F64
}
// parsedColorStop is the fully-evaluated numeric form of one color stop,
// used when expanding a gradient into explicit interpolated stops.
type parsedColorStop struct {
	// Position information (may be a sum of two different units)
	positionTerms []valueWithUnit
	// Color midpoint (a.k.a. transition hint) information
	midpoint *valueWithUnit
	// Non-premultiplied color information in XYZ space
	x, y, z, alpha F64
	// Non-premultiplied color information in sRGB space
	r, g, b F64
	// Premultiplied color information in the interpolation color space
	v0, v1, v2 F64
	// True if the original color has a color space
	hasColorSpace bool
}
// tryToParseColorStops numerically evaluates all of a gradient's color
// stops: each color becomes XYZ + sRGB coordinates, positions and midpoints
// become values with units, double positions are expanded, and any missing
// positions are filled in using the CSS color-stop fixup rules. It returns
// false when any color or position cannot be statically evaluated (in which
// case the gradient is left alone by the caller).
func tryToParseColorStops(gradient parsedGradient) ([]parsedColorStop, bool) {
	var colorStops []parsedColorStop
	for _, stop := range gradient.colorStops {
		color, ok := parseColor(stop.color)
		if !ok {
			return nil, false
		}
		var r, g, b F64
		// Derive whichever of the XYZ/sRGB representations is missing
		if !color.hasColorSpace {
			r = helpers.NewF64(float64(hexR(color.hex))).DivConst(255)
			g = helpers.NewF64(float64(hexG(color.hex))).DivConst(255)
			b = helpers.NewF64(float64(hexB(color.hex))).DivConst(255)
			color.x, color.y, color.z = lin_srgb_to_xyz(lin_srgb(r, g, b))
		} else {
			r, g, b = gam_srgb(xyz_to_lin_srgb(color.x, color.y, color.z))
		}
		parsedStop := parsedColorStop{
			x:             color.x,
			y:             color.y,
			z:             color.z,
			r:             r,
			g:             g,
			b:             b,
			alpha:         helpers.NewF64(float64(hexA(color.hex))).DivConst(255),
			hasColorSpace: color.hasColorSpace,
		}
		for i, position := range stop.positions {
			if position, ok := tryToParseValue(position, gradient.kind); ok {
				parsedStop.positionTerms = []valueWithUnit{position}
			} else {
				return nil, false
			}
			// Expand double positions
			if i+1 < len(stop.positions) {
				colorStops = append(colorStops, parsedStop)
			}
		}
		if stop.midpoint.Kind != css_lexer.T(0) {
			if midpoint, ok := tryToParseValue(stop.midpoint, gradient.kind); ok {
				parsedStop.midpoint = &midpoint
			} else {
				return nil, false
			}
		}
		colorStops = append(colorStops, parsedStop)
	}
	// Automatically fill in missing positions
	if len(colorStops) > 0 {
		type stopInfo struct {
			fromPos   valueWithUnit
			toPos     valueWithUnit
			fromCount int32
			toCount   int32
		}
		// Fill in missing positions for the endpoints first
		if first := &colorStops[0]; len(first.positionTerms) == 0 {
			first.positionTerms = []valueWithUnit{{value: helpers.NewF64(0), unit: "%"}}
		}
		if last := &colorStops[len(colorStops)-1]; len(last.positionTerms) == 0 {
			last.positionTerms = []valueWithUnit{{value: helpers.NewF64(100), unit: "%"}}
		}
		// Set all positions to be greater than the position before them
		// (the CSS fixup that clamps out-of-order stops)
		for i, stop := range colorStops {
			var prevPos valueWithUnit
			for j := i - 1; j >= 0; j-- {
				prev := colorStops[j]
				if prev.midpoint != nil {
					prevPos = *prev.midpoint
					break
				}
				if len(prev.positionTerms) == 1 {
					prevPos = prev.positionTerms[0]
					break
				}
			}
			if len(stop.positionTerms) == 1 {
				// Only comparable when the units agree
				if prevPos.unit == stop.positionTerms[0].unit {
					stop.positionTerms[0].value = helpers.Max2(prevPos.value, stop.positionTerms[0].value)
				}
				prevPos = stop.positionTerms[0]
			}
			if stop.midpoint != nil && prevPos.unit == stop.midpoint.unit {
				stop.midpoint.value = helpers.Max2(prevPos.value, stop.midpoint.value)
			}
		}
		// Scan over all other stops with missing positions, recording the
		// nearest explicit position (or midpoint) on each side and how many
		// steps away it is
		infos := make([]stopInfo, len(colorStops))
		for i, stop := range colorStops {
			if len(stop.positionTerms) == 1 {
				continue
			}
			info := &infos[i]
			// Scan backward
			for from := i - 1; from >= 0; from-- {
				fromStop := colorStops[from]
				info.fromCount++
				if fromStop.midpoint != nil {
					info.fromPos = *fromStop.midpoint
					break
				}
				if len(fromStop.positionTerms) == 1 {
					info.fromPos = fromStop.positionTerms[0]
					break
				}
			}
			// Scan forward
			for to := i; to < len(colorStops); to++ {
				info.toCount++
				if toStop := colorStops[to]; toStop.midpoint != nil {
					info.toPos = *toStop.midpoint
					break
				}
				if to+1 < len(colorStops) {
					if toStop := colorStops[to+1]; len(toStop.positionTerms) == 1 {
						info.toPos = toStop.positionTerms[0]
						break
					}
				}
			}
		}
		// Then fill in all other missing positions by interpolating between
		// the surrounding explicit positions
		for i, stop := range colorStops {
			if len(stop.positionTerms) != 1 {
				info := infos[i]
				t := helpers.NewF64(float64(info.fromCount)).DivConst(float64(info.fromCount + info.toCount))
				if info.fromPos.unit == info.toPos.unit {
					colorStops[i].positionTerms = []valueWithUnit{{
						value: helpers.Lerp(info.fromPos.value, info.toPos.value, t),
						unit:  info.fromPos.unit,
					}}
				} else {
					// Mixed units: keep the position as a weighted sum of
					// two terms with different units
					colorStops[i].positionTerms = []valueWithUnit{{
						value: t.Neg().AddConst(1).Mul(info.fromPos.value),
						unit:  info.fromPos.unit,
					}, {
						value: t.Mul(info.toPos.value),
						unit:  info.toPos.unit,
					}}
				}
			}
		}
		// Midpoints are only supported if they use the same units as their neighbors
		for i, stop := range colorStops {
			if stop.midpoint != nil {
				next := colorStops[i+1]
				if len(stop.positionTerms) != 1 || stop.midpoint.unit != stop.positionTerms[0].unit ||
					len(next.positionTerms) != 1 || stop.midpoint.unit != next.positionTerms[0].unit {
					return nil, false
				}
			}
		}
	}
	return colorStops, true
}
// tryToParseValue evaluates a color stop position token into a number plus
// a unit. For conic gradients, angles are normalized to percentages of a
// full turn; for the other gradient kinds, dimensions keep their unit and
// the unitless number "0" becomes "0%". Anything else (including nonzero
// unitless numbers) fails.
func tryToParseValue(token css_ast.Token, kind gradientKind) (result valueWithUnit, success bool) {
	if kind == conicGradient {
		// <angle-percentage>
		switch token.Kind {
		case css_lexer.TDimension:
			degrees, ok := degreesForAngle(token)
			if !ok {
				return
			}
			// Convert degrees to a percentage of a full 360° turn
			result.value = helpers.NewF64(degrees).MulConst(100.0 / 360)
			result.unit = "%"
		case css_lexer.TPercentage:
			percent, err := strconv.ParseFloat(token.PercentageValue(), 64)
			if err != nil {
				return
			}
			result.value = helpers.NewF64(percent)
			result.unit = "%"
		default:
			return
		}
	} else {
		// <length-percentage>
		switch token.Kind {
		case css_lexer.TNumber:
			// Only the unitless number "0" is a valid length
			zero, err := strconv.ParseFloat(token.Text, 64)
			if err != nil || zero != 0 {
				return
			}
			result.value = helpers.NewF64(0)
			result.unit = "%"
		case css_lexer.TDimension:
			dimensionValue, err := strconv.ParseFloat(token.DimensionValue(), 64)
			if err != nil {
				return
			}
			result.value = helpers.NewF64(dimensionValue)
			result.unit = token.DimensionUnit()
		case css_lexer.TPercentage:
			percentageValue, err := strconv.ParseFloat(token.PercentageValue(), 64)
			if err != nil {
				return
			}
			result.value = helpers.NewF64(percentageValue)
			result.unit = "%"
		default:
			return
		}
	}
	success = true
	return
}
// tryToExpandGradient rewrites a gradient that uses an explicit interpolation
// color space and/or hue interpolation method into an equivalent gradient made
// only of plain color stops. The parsed stops are converted into the requested
// interpolation color space, and additional sRGB stops are then inserted
// recursively between each adjacent pair until the piecewise-linear sRGB
// approximation is close enough to the true interpolation curve.
//
// On success the expanded stops and the remaining leading tokens are written
// back into "gradient". Note that as written this always returns true.
func tryToExpandGradient(
	loc logger.Loc,
	gradient *parsedGradient,
	colorStops []parsedColorStop,
	remaining []css_ast.Token,
	colorSpace colorSpace,
	hueMethod hueMethod,
) bool {
	// Convert color stops into the interpolation color space
	for i := range colorStops {
		stop := &colorStops[i]
		v0, v1, v2 := xyz_to_colorSpace(stop.x, stop.y, stop.z, colorSpace)
		stop.v0, stop.v1, stop.v2 = premultiply(v0, v1, v2, stop.alpha, colorSpace)
	}

	// Duplicate the endpoints if they should wrap around to themselves
	// (only relevant for "longer" hue interpolation in a polar color space)
	if hueMethod == longerHue && colorSpace.isPolar() && len(colorStops) > 0 {
		if first := colorStops[0]; len(first.positionTerms) == 1 {
			if first.positionTerms[0].value.Value() < 0 {
				colorStops[0].positionTerms[0].value = helpers.NewF64(0)
			} else if first.positionTerms[0].value.Value() > 0 {
				// The first stop starts after 0, so prepend a copy of it at 0
				first.midpoint = nil
				first.positionTerms = []valueWithUnit{{value: helpers.NewF64(0), unit: first.positionTerms[0].unit}}
				colorStops = append([]parsedColorStop{first}, colorStops...)
			}
		}
		if last := colorStops[len(colorStops)-1]; len(last.positionTerms) == 1 {
			if last.positionTerms[0].unit != "%" || last.positionTerms[0].value.Value() < 100 {
				// The last stop ends before 100%, so append a copy of it at 100%
				last.positionTerms = []valueWithUnit{{value: helpers.NewF64(100), unit: "%"}}
				colorStops = append(colorStops, last)
			}
		}
	}

	var newColorStops []colorStop
	var generateColorStops func(
		int, parsedColorStop, parsedColorStop,
		F64, F64, F64, F64, F64, F64, F64, F64,
		F64, F64, F64, F64, F64, F64, F64, F64,
	)

	// generateColorStops recursively subdivides the interval between "from"
	// and "to" at parameter t = (prevT + nextT) / 2, appending an interpolated
	// stop whenever the true color differs too much from the sRGB average of
	// the two surrounding samples. "depth" bounds the recursion (max 4 splits).
	generateColorStops = func(
		depth int,
		from parsedColorStop, to parsedColorStop,
		prevX, prevY, prevZ, prevR, prevG, prevB, prevA, prevT F64,
		nextX, nextY, nextZ, nextR, nextG, nextB, nextA, nextT F64,
	) {
		if depth > 4 {
			return
		}
		t := prevT.Add(nextT).DivConst(2)
		positionT := t

		// Handle midpoints (which we have already checked uses the same units)
		if from.midpoint != nil {
			fromPos := from.positionTerms[0].value
			toPos := to.positionTerms[0].value
			stopPos := helpers.Lerp(fromPos, toPos, t)
			H := from.midpoint.value.Sub(fromPos).Div(toPos.Sub(fromPos))
			P := stopPos.Sub(fromPos).Div(toPos.Sub(fromPos))
			if H.Value() <= 0 {
				positionT = helpers.NewF64(1)
			} else if H.Value() >= 1 {
				positionT = helpers.NewF64(0)
			} else {
				positionT = P.Pow(helpers.NewF64(-1).Div(H.Log2()))
			}
		}

		v0, v1, v2 := interpolateColors(from.v0, from.v1, from.v2, to.v0, to.v1, to.v2, colorSpace, hueMethod, positionT)
		a := helpers.Lerp(from.alpha, to.alpha, positionT)
		v0, v1, v2 = unpremultiply(v0, v1, v2, a, colorSpace)
		x, y, z := colorSpace_to_xyz(v0, v1, v2, colorSpace)

		// Stop when the color is similar enough to the sRGB midpoint
		// (distance is measured on alpha-weighted sRGB channels)
		const epsilon = 4.0 / 255
		r, g, b := gam_srgb(xyz_to_lin_srgb(x, y, z))
		dr := r.Mul(a).Sub(prevR.Mul(prevA).Add(nextR.Mul(nextA)).DivConst(2))
		dg := g.Mul(a).Sub(prevG.Mul(prevA).Add(nextG.Mul(nextA)).DivConst(2))
		db := b.Mul(a).Sub(prevB.Mul(prevA).Add(nextB.Mul(nextA)).DivConst(2))
		if d := dr.Squared().Add(dg.Squared()).Add(db.Squared()); d.Value() < epsilon*epsilon {
			return
		}

		// Recursive split before this stop
		generateColorStops(depth+1, from, to,
			prevX, prevY, prevZ, prevR, prevG, prevB, prevA, prevT,
			x, y, z, r, g, b, a, t)

		// Generate this stop
		color := makeColorToken(loc, x, y, z, a)
		positionTerms := interpolatePositions(from.positionTerms, to.positionTerms, t)
		position := makePositionToken(loc, positionTerms)
		position.Whitespace = css_ast.WhitespaceBefore
		newColorStops = append(newColorStops, colorStop{
			color:     color,
			positions: []css_ast.Token{position},
		})

		// Recursive split after this stop
		generateColorStops(depth+1, from, to,
			x, y, z, r, g, b, a, t,
			nextX, nextY, nextZ, nextR, nextG, nextB, nextA, nextT)
	}

	// Emit the original stops, interleaved with any generated in-between stops
	for i, stop := range colorStops {
		color := makeColorToken(loc, stop.x, stop.y, stop.z, stop.alpha)
		position := makePositionToken(loc, stop.positionTerms)
		position.Whitespace = css_ast.WhitespaceBefore
		newColorStops = append(newColorStops, colorStop{
			color:     color,
			positions: []css_ast.Token{position},
		})

		// Generate new color stops in between as needed
		if i+1 < len(colorStops) {
			next := colorStops[i+1]
			generateColorStops(0, stop, next,
				stop.x, stop.y, stop.z, stop.r, stop.g, stop.b, stop.alpha, helpers.NewF64(0),
				next.x, next.y, next.z, next.r, next.g, next.b, next.alpha, helpers.NewF64(1))
		}
	}

	gradient.leadingTokens = remaining
	gradient.colorStops = newColorStops
	return true
}
// formatFloat prints "value" with at most "decimals" fractional digits,
// dropping any trailing zeros and a dangling decimal point ("1.50" -> "1.5",
// "2.00" -> "2").
func formatFloat(value F64, decimals int) string {
	text := strconv.FormatFloat(value.Value(), 'f', decimals, 64)
	text = strings.TrimRight(text, "0")
	return strings.TrimSuffix(text, ".")
}
// makeDimensionOrPercentToken builds either a percentage token (unit "%") or
// a dimension token (any other unit) at the given location, formatting the
// value with up to two decimal places.
func makeDimensionOrPercentToken(loc logger.Loc, value F64, unit string) css_ast.Token {
	text := formatFloat(value, 2)
	token := css_ast.Token{Loc: loc, Text: text}
	if unit == "%" {
		token.Kind = css_lexer.TPercentage
	} else {
		token.Kind = css_lexer.TDimension
		// The unit starts right after the numeric text
		token.UnitOffset = uint16(len(text))
	}
	token.Text += unit
	return token
}
// makePositionToken turns a sum of position terms into a single token: a
// plain dimension/percentage for one term, or a "calc(a + b + ...)" function
// when multiple units must be combined.
func makePositionToken(loc logger.Loc, positionTerms []valueWithUnit) css_ast.Token {
	// A single term needs no "calc()" wrapper
	if len(positionTerms) == 1 {
		only := positionTerms[0]
		return makeDimensionOrPercentToken(loc, only.value, only.unit)
	}

	// Join the terms with "+" tokens inside "calc()"
	children := make([]css_ast.Token, 0, 2*len(positionTerms)-1)
	for i, term := range positionTerms {
		if i > 0 {
			children = append(children, css_ast.Token{
				Loc:        loc,
				Kind:       css_lexer.TDelimPlus,
				Text:       "+",
				Whitespace: css_ast.WhitespaceBefore | css_ast.WhitespaceAfter,
			})
		}
		children = append(children, makeDimensionOrPercentToken(loc, term.value, term.unit))
	}
	return css_ast.Token{
		Loc:      loc,
		Kind:     css_lexer.TFunction,
		Text:     "calc",
		Children: &children,
	}
}
// makeColorToken returns the most compact token for the given XYZ color: a
// hex hash token when the color fits into sRGB without clipping, and a
// "color(xyz ...)" function token otherwise.
func makeColorToken(loc logger.Loc, x F64, y F64, z F64, a F64) css_ast.Token {
	alpha := uint32(a.MulConst(255).Round().Value())

	// Prefer the shorter hex form when possible
	if hex, ok := tryToConvertToHexWithoutClipping(x, y, z, alpha); ok {
		text := fmt.Sprintf("%08x", hex)
		if alpha == 255 {
			// Fully-opaque colors drop the trailing alpha byte
			text = fmt.Sprintf("%06x", hex>>8)
		}
		return css_ast.Token{Loc: loc, Kind: css_lexer.THash, Text: text}
	}

	// Otherwise fall back to a "color()" function in the XYZ color space
	number := func(v F64, whitespace css_ast.WhitespaceFlags) css_ast.Token {
		return css_ast.Token{
			Loc:        loc,
			Kind:       css_lexer.TNumber,
			Text:       formatFloat(v, 3),
			Whitespace: whitespace,
		}
	}
	bothSides := css_ast.WhitespaceBefore | css_ast.WhitespaceAfter
	children := []css_ast.Token{
		{Loc: loc, Kind: css_lexer.TIdent, Text: "xyz", Whitespace: css_ast.WhitespaceAfter},
		number(x, bothSides),
		number(y, bothSides),
		number(z, css_ast.WhitespaceBefore),
	}
	if a.Value() < 1 {
		// Only non-opaque colors carry a "/ <alpha>" suffix
		children = append(children,
			css_ast.Token{Loc: loc, Kind: css_lexer.TDelimSlash, Text: "/", Whitespace: bothSides},
			number(a, css_ast.WhitespaceBefore),
		)
	}
	return css_ast.Token{Loc: loc, Kind: css_lexer.TFunction, Text: "color", Children: &children}
}
// interpolateHues interpolates between two hue angles given in degrees,
// using the requested CSS hue interpolation method, and returns the result
// in degrees.
func interpolateHues(a, b, t F64, hueMethod hueMethod) F64 {
	// Normalize both angles into the [0, 1) turn range
	a = a.DivConst(360)
	b = b.DivConst(360)
	a = a.Sub(a.Floor())
	b = b.Sub(b.Floor())

	// Shift one endpoint by a full turn so the lerp travels the right way
	delta := b.Sub(a)
	switch hueMethod {
	case shorterHue:
		if delta.Value() > 0.5 {
			a = a.AddConst(1)
		} else if delta.Value() < -0.5 {
			b = b.AddConst(1)
		}
	case longerHue:
		if delta.Value() > 0 && delta.Value() < 0.5 {
			a = a.AddConst(1)
		} else if delta.Value() > -0.5 && delta.Value() <= 0 {
			b = b.AddConst(1)
		}
	case increasingHue:
		if b.Value() < a.Value() {
			b = b.AddConst(1)
		}
	case decreasingHue:
		if a.Value() < b.Value() {
			a = a.AddConst(1)
		}
	}
	return helpers.Lerp(a, b, t).MulConst(360)
}
// interpolateColors linearly interpolates two colors channel-by-channel in
// the given color space. In polar color spaces the hue channel is handled by
// "interpolateHues" instead of a plain lerp.
func interpolateColors(
	a0, a1, a2 F64, b0, b1, b2 F64,
	colorSpace colorSpace, hueMethod hueMethod, t F64,
) (F64, F64, F64) {
	middle := helpers.Lerp(a1, b1, t)
	switch colorSpace {
	case colorSpace_hsl, colorSpace_hwb:
		// The first channel is the hue angle
		return interpolateHues(a0, b0, t, hueMethod), middle, helpers.Lerp(a2, b2, t)
	case colorSpace_lch, colorSpace_oklch:
		// The third channel is the hue angle
		return helpers.Lerp(a0, b0, t), middle, interpolateHues(a2, b2, t, hueMethod)
	}
	return helpers.Lerp(a0, b0, t), middle, helpers.Lerp(a2, b2, t)
}
// interpolatePositions computes "a*(1-t) + b*t" where each operand is a sum
// of terms with potentially different units. Terms sharing a unit are merged
// into one accumulator term.
func interpolatePositions(a []valueWithUnit, b []valueWithUnit, t F64) []valueWithUnit {
	var result []valueWithUnit

	// Return the accumulator term for this unit, creating it on first use
	termForUnit := func(unit string) *valueWithUnit {
		for i := range result {
			if result[i].unit == unit {
				return &result[i]
			}
		}
		result = append(result, valueWithUnit{unit: unit})
		return &result[len(result)-1]
	}

	// "result += a * (1 - t)"
	oneMinusT := t.Neg().AddConst(1)
	for _, term := range a {
		ptr := termForUnit(term.unit)
		ptr.value = oneMinusT.Mul(term.value).Add(ptr.value)
	}

	// "result += b * t"
	for _, term := range b {
		ptr := termForUnit(term.unit)
		ptr.value = t.Mul(term.value).Add(ptr.value)
	}

	// Drop a single zero-valued term for neatness. Not every zero is removed
	// because it may be important to retain at least one term.
	if len(result) > 1 {
		for i := range result {
			if result[i].value.Value() == 0 {
				result = append(result[:i], result[i+1:]...)
				break
			}
		}
	}
	return result
}
// premultiply scales the non-angular channels of a color by its alpha.
// Which channels are scaled depends on the color space: the hue channel
// (channel 0 for hsl/hwb, channel 2 for lch/oklch) is left untouched.
// Fully-opaque colors are returned unchanged.
func premultiply(v0, v1, v2, alpha F64, colorSpace colorSpace) (F64, F64, F64) {
	if alpha.Value() < 1 {
		v1 = v1.Mul(alpha)
		switch colorSpace {
		case colorSpace_hsl, colorSpace_hwb:
			v2 = v2.Mul(alpha) // channel 0 holds the hue angle
		case colorSpace_lch, colorSpace_oklch:
			v0 = v0.Mul(alpha) // channel 2 holds the hue angle
		default:
			v0 = v0.Mul(alpha)
			v2 = v2.Mul(alpha)
		}
	}
	return v0, v1, v2
}
// unpremultiply undoes "premultiply" by dividing the same channels by alpha.
// Colors whose alpha is not strictly between 0 and 1 are returned unchanged,
// which also avoids dividing by a zero alpha.
func unpremultiply(v0, v1, v2, alpha F64, colorSpace colorSpace) (F64, F64, F64) {
	if a := alpha.Value(); !(a > 0 && a < 1) {
		return v0, v1, v2
	}
	switch colorSpace {
	case colorSpace_hsl, colorSpace_hwb:
		v2 = v2.Div(alpha) // channel 0 holds the hue angle
	case colorSpace_lch, colorSpace_oklch:
		v0 = v0.Div(alpha) // channel 2 holds the hue angle
	default:
		v0 = v0.Div(alpha)
		v2 = v2.Div(alpha)
	}
	return v0, v1.Div(alpha), v2
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/css_parser/css_decls_font_family.go | internal/css_parser/css_decls_font_family.go | package css_parser
import (
"strings"
"github.com/evanw/esbuild/internal/css_ast"
"github.com/evanw/esbuild/internal/css_lexer"
)
// These keywords usually require special handling when parsing.
// Declaring a property to have these values explicitly specifies a particular
// defaulting behavior instead of setting the property to that identifier value.
// As specified in CSS Values and Units Level 3, all CSS properties can accept
// these values.
//
// For example, "font-family: 'inherit'" sets the font family to the font named
// "inherit" while "font-family: inherit" sets the font family to the inherited
// value.
//
// Note that other CSS specifications can define additional CSS-wide keywords,
// which we should copy here whenever new ones are created so we can quote those
// identifiers to avoid collisions with any newly-created CSS-wide keywords.
//
// All keys are lowercase; callers look up strings.ToLower(...) results here.
var cssWideAndReservedKeywords = map[string]bool{
	// CSS Values and Units Level 3: https://drafts.csswg.org/css-values-3/#common-keywords
	"initial": true, // CSS-wide keyword
	"inherit": true, // CSS-wide keyword
	"unset":   true, // CSS-wide keyword
	"default": true, // CSS reserved keyword

	// CSS Cascading and Inheritance Level 5: https://drafts.csswg.org/css-cascade-5/#defaulting-keywords
	"revert":       true, // Cascade-dependent keyword
	"revert-layer": true, // Cascade-dependent keyword
}
// Font family names that happen to be the same as a keyword value must be
// quoted to prevent confusion with the keywords with the same names. UAs must
// not consider these keywords as matching the <family-name> type.
// Specification: https://drafts.csswg.org/css-fonts/#generic-font-families
//
// These names are also accepted unquoted as <generic-family> values (see
// "mangleFamilyNameOrGenericName").
var genericFamilyNames = map[string]bool{
	"serif":         true,
	"sans-serif":    true,
	"cursive":       true,
	"fantasy":       true,
	"monospace":     true,
	"system-ui":     true,
	"emoji":         true,
	"math":          true,
	"fangsong":      true,
	"ui-serif":      true,
	"ui-sans-serif": true,
	"ui-monospace":  true,
	"ui-rounded":    true,
}
// mangleFontFamily processes a comma-separated "font-family" value, letting
// each family name be rewritten into a shorter form where possible. It
// returns false when the value doesn't match the expected grammar, in which
// case the original tokens should be kept.
// Specification: https://drafts.csswg.org/css-fonts/#font-family-prop
func (p *parser) mangleFontFamily(tokens []css_ast.Token) ([]css_ast.Token, bool) {
	mangled, rest, ok := p.mangleFamilyNameOrGenericName(nil, tokens)

	// Consume ", <family>" pairs until the list ends or something fails
	for ok && len(rest) > 0 && rest[0].Kind == css_lexer.TComma {
		mangled, rest, ok = p.mangleFamilyNameOrGenericName(append(mangled, rest[0]), rest[1:])
	}

	// Any failure or leftover tokens invalidates the whole value
	if !ok || len(rest) > 0 {
		return nil, false
	}
	return mangled, true
}
// mangleFamilyNameOrGenericName consumes one font family (a <generic-family>
// keyword, a quoted <family-name> string, or a run of unquoted identifiers)
// from the front of "tokens" and appends its possibly-rewritten tokens to
// "result". Quoted names are unquoted into identifier sequences when each
// space-separated part is a valid unquoted <custom-ident>. It returns the
// updated result, the unconsumed tokens, and whether parsing succeeded.
func (p *parser) mangleFamilyNameOrGenericName(result []css_ast.Token, tokens []css_ast.Token) ([]css_ast.Token, []css_ast.Token, bool) {
	if len(tokens) > 0 {
		t := tokens[0]

		// Handle <generic-family>
		if t.Kind == css_lexer.TIdent && genericFamilyNames[t.Text] {
			return append(result, t), tokens[1:], true
		}

		// Handle <family-name>
		if t.Kind == css_lexer.TString {
			// "If a sequence of identifiers is given as a <family-name>, the computed
			// value is the name converted to a string by joining all the identifiers
			// in the sequence by single spaces."
			//
			// More information: https://mathiasbynens.be/notes/unquoted-font-family
			names := strings.Split(t.Text, " ")

			// If any part can't be written as an unquoted identifier, keep the
			// original quoted string unchanged
			for _, name := range names {
				if !isValidCustomIdent(name, genericFamilyNames) {
					return append(result, t), tokens[1:], true
				}
			}

			// Otherwise replace the string with a sequence of identifier tokens
			for i, name := range names {
				var whitespace css_ast.WhitespaceFlags
				if i != 0 || !p.options.minifyWhitespace {
					whitespace = css_ast.WhitespaceBefore
				}
				result = append(result, css_ast.Token{
					Loc:        t.Loc,
					Kind:       css_lexer.TIdent,
					Text:       name,
					Whitespace: whitespace,
				})
			}
			return result, tokens[1:], true
		}

		// "Font family names other than generic families must either be given
		// quoted as <string>s, or unquoted as a sequence of one or more
		// <custom-ident>."
		if t.Kind == css_lexer.TIdent {
			for {
				if !isValidCustomIdent(t.Text, genericFamilyNames) {
					return nil, nil, false
				}
				result = append(result, t)
				tokens = tokens[1:]
				if len(tokens) == 0 || tokens[0].Kind != css_lexer.TIdent {
					break
				}
				t = tokens[0]
			}
			return result, tokens, true
		}
	}

	// Anything other than the cases listed above causes us to bail
	return nil, nil, false
}
// isValidCustomIdent reports whether "text" can be written as an unquoted
// CSS <custom-ident> without escaping, and without colliding (case-
// insensitively) with the given predefined keywords or with the CSS-wide
// and reserved keywords.
// Specification: https://drafts.csswg.org/css-values-4/#custom-idents
func isValidCustomIdent(text string, predefinedKeywords map[string]bool) bool {
	lowered := strings.ToLower(text)
	switch {
	case lowered == "", predefinedKeywords[lowered], cssWideAndReservedKeywords[lowered]:
		return false
	}

	// Reject anything that would need escape sequences when printed unquoted
	if !css_lexer.WouldStartIdentifierWithoutEscapes(text) {
		return false
	}
	for _, c := range text {
		if !css_lexer.IsNameContinue(c) {
			return false
		}
	}
	return true
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/css_parser/css_decls_container.go | internal/css_parser/css_decls_container.go | package css_parser
import (
"strings"
"github.com/evanw/esbuild/internal/css_ast"
"github.com/evanw/esbuild/internal/css_lexer"
)
// processContainerShorthand scans the value of the "container" shorthand
// property (leading identifiers optionally followed by "/ <ident>") and
// converts each leading container-name identifier into a symbol token.
// Values with any other shape are left untouched.
func (p *parser) processContainerShorthand(tokens []css_ast.Token) {
	// Validate the syntax before rewriting anything
	for i, t := range tokens {
		if t.Kind == css_lexer.TIdent {
			continue
		}
		isFinalSlashPair := t.Kind == css_lexer.TDelimSlash && i+2 == len(tokens) && tokens[i+1].Kind == css_lexer.TIdent
		if !isFinalSlashPair {
			return
		}
		break
	}

	// Rewrite the leading identifiers (the container names)
	for i := range tokens {
		if tokens[i].Kind != css_lexer.TIdent {
			break
		}
		p.handleSingleContainerName(&tokens[i])
	}
}
// processContainerName handles the "container-name" property, whose value
// must be a list of identifiers. If any token isn't an identifier the whole
// value is left untouched.
func (p *parser) processContainerName(tokens []css_ast.Token) {
	// Every token must be an identifier for the value to be rewritten
	for i := range tokens {
		if tokens[i].Kind != css_lexer.TIdent {
			return
		}
	}

	// Convert each identifier into a symbol token where allowed
	for i := range tokens {
		p.handleSingleContainerName(&tokens[i])
	}
}
// handleSingleContainerName converts one container-name identifier token
// into a symbol token, unless the name is "none" or a CSS-wide/reserved
// keyword (those names have special meaning and are never container names).
func (p *parser) handleSingleContainerName(token *css_ast.Token) {
	lower := strings.ToLower(token.Text)
	if lower == "none" || cssWideAndReservedKeywords[lower] {
		return
	}
	ref := p.symbolForName(token.Loc, token.Text).Ref
	token.Kind = css_lexer.TSymbol
	token.PayloadIndex = ref.InnerIndex
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/css_parser/css_decls_animation.go | internal/css_parser/css_decls_animation.go | package css_parser
import (
"strings"
"github.com/evanw/esbuild/internal/css_ast"
"github.com/evanw/esbuild/internal/css_lexer"
)
// Scan for animation names in the "animation" shorthand property.
//
// Each comma-separated animation can fill each longhand "slot" at most once.
// Keywords claim their slot in priority order (timing function, iteration
// count, direction, fill mode, play state), and the first identifier or
// string that doesn't claim any keyword slot becomes the animation name.
func (p *parser) processAnimationShorthand(tokens []css_ast.Token) {
	type foundFlags struct {
		timingFunction bool
		iterationCount bool
		direction      bool
		fillMode       bool
		playState      bool
		name           bool
	}

	found := foundFlags{}

	for i, t := range tokens {
		switch t.Kind {
		case css_lexer.TComma:
			// Reset the flags when we encounter a comma
			found = foundFlags{}

		case css_lexer.TNumber:
			// A number can only be the iteration count
			if !found.iterationCount {
				found.iterationCount = true
				continue
			}

		case css_lexer.TIdent:
			if !found.timingFunction {
				switch strings.ToLower(t.Text) {
				case "linear", "ease", "ease-in", "ease-out", "ease-in-out", "step-start", "step-end":
					found.timingFunction = true
					continue
				}
			}

			if !found.iterationCount && strings.ToLower(t.Text) == "infinite" {
				found.iterationCount = true
				continue
			}

			if !found.direction {
				switch strings.ToLower(t.Text) {
				case "normal", "reverse", "alternate", "alternate-reverse":
					found.direction = true
					continue
				}
			}

			if !found.fillMode {
				switch strings.ToLower(t.Text) {
				case "none", "forwards", "backwards", "both":
					found.fillMode = true
					continue
				}
			}

			if !found.playState {
				switch strings.ToLower(t.Text) {
				case "running", "paused":
					found.playState = true
					continue
				}
			}

			// Not a keyword for any remaining slot: treat it as the name
			if !found.name {
				p.handleSingleAnimationName(&tokens[i])
				found.name = true
				continue
			}

		case css_lexer.TString:
			// A string can only ever be the animation name
			if !found.name {
				p.handleSingleAnimationName(&tokens[i])
				found.name = true
				continue
			}
		}
	}
}
// processAnimationName handles the "animation-name" property by converting
// each identifier or string token into a symbol token where allowed.
func (p *parser) processAnimationName(tokens []css_ast.Token) {
	for i := range tokens {
		switch tokens[i].Kind {
		case css_lexer.TIdent, css_lexer.TString:
			p.handleSingleAnimationName(&tokens[i])
		}
	}
}
// handleSingleAnimationName converts one animation-name token into a symbol
// token when it's safe to do so.
//
// CSS keywords are never transformed into symbols because they have special
// meaning in declarations: "animation-name: none" clears the animation name
// rather than selecting an animation named "none" (which would require
// "animation-name: 'none'").
//
// Strings containing CSS keywords are also left alone when symbols are
// global, because global symbols are printed without renaming and would then
// read as keywords. Local symbols are always renamed, so strings can be
// transformed unconditionally in that mode.
func (p *parser) handleSingleAnimationName(token *css_ast.Token) {
	keywordSensitive := token.Kind == css_lexer.TIdent ||
		(token.Kind == css_lexer.TString && !p.makeLocalSymbols)
	if keywordSensitive && isInvalidAnimationName(token.Text) {
		return
	}
	token.Kind = css_lexer.TSymbol
	token.PayloadIndex = p.symbolForName(token.Loc, token.Text).Ref.InnerIndex
}
// isInvalidAnimationName reports whether "text" cannot serve as an animation
// name because it matches "none" or a CSS-wide/reserved keyword
// (case-insensitively).
func isInvalidAnimationName(text string) bool {
	switch lower := strings.ToLower(text); {
	case lower == "none", cssWideAndReservedKeywords[lower]:
		return true
	}
	return false
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/css_parser/css_parser.go | internal/css_parser/css_parser.go | package css_parser
import (
"fmt"
"strings"
"github.com/evanw/esbuild/internal/ast"
"github.com/evanw/esbuild/internal/compat"
"github.com/evanw/esbuild/internal/config"
"github.com/evanw/esbuild/internal/css_ast"
"github.com/evanw/esbuild/internal/css_lexer"
"github.com/evanw/esbuild/internal/logger"
)
// This is mostly a normal CSS parser with one exception: the addition of
// support for parsing https://drafts.csswg.org/css-nesting-1/.
type parser struct {
	log    logger.Log
	source logger.Source

	// Lexer output for "source"
	tokens        []css_lexer.Token
	allComments   []logger.Range
	legalComments []css_lexer.Comment

	stack         []css_lexer.T
	importRecords []ast.ImportRecord

	// Symbol table and name scopes (see "symbolForName")
	symbols      []ast.Symbol
	composes     map[ast.Ref]*css_ast.Composes
	localSymbols []ast.LocRef
	localScope   map[string]ast.LocRef
	globalScope  map[string]ast.LocRef

	nestingWarnings map[logger.Loc]struct{}
	tracker         logger.LineColumnTracker

	enclosingAtMedia [][]css_ast.MediaQuery

	// "@layer" name bookkeeping (see "recordAtLayerRule")
	layersPreImport  [][]string
	layersPostImport [][]string
	enclosingLayer   []string
	anonLayerCount   int

	// Position of the current token in "tokens"
	index             int
	legalCommentIndex int
	inSelectorSubtree int

	// Location of the most recently reported diagnostic, used to avoid
	// reporting multiple warnings for the same location
	prevError logger.Loc

	options          Options
	nestingIsPresent bool

	// True while newly-seen CSS names should become local symbols
	makeLocalSymbols bool
	hasSeenAtImport  bool
}
// Options controls the behavior of a single CSS parse.
type Options struct {
	// Per-declaration CSS prefix data (see the "compat" package)
	cssPrefixData map[css_ast.D]compat.CSSPrefix

	// This is an embedded struct. Always access these directly instead of off
	// the name "optionsThatSupportStructuralEquality". This is only grouped like
	// this to make the equality comparison easier and safer (and hopefully faster).
	optionsThatSupportStructuralEquality
}
// symbolMode controls whether CSS names are tracked as symbols, and if so
// whether newly-seen names default to local or global scope.
type symbolMode uint8

const (
	symbolModeDisabled symbolMode = iota // CSS name tracking is off
	symbolModeGlobal                     // used for "config.LoaderGlobalCSS"
	symbolModeLocal                      // used for "config.LoaderLocalCSS"
)
// optionsThatSupportStructuralEquality holds the subset of parser options
// that can be compared with "!=" in a single step (no maps, slices, or
// pointers), which is what "Options.Equal" relies on.
type optionsThatSupportStructuralEquality struct {
	originalTargetEnv      string
	unsupportedCSSFeatures compat.CSSFeature
	minifySyntax           bool
	minifyWhitespace       bool
	minifyIdentifiers      bool
	symbolMode             symbolMode
}
// OptionsFromConfig derives CSS parser options from the overall build
// configuration. The loader determines the symbol handling mode: global CSS,
// local (CSS modules-style) CSS, or disabled for any other loader.
func OptionsFromConfig(loader config.Loader, options *config.Options) Options {
	mode := symbolModeDisabled
	switch loader {
	case config.LoaderGlobalCSS:
		mode = symbolModeGlobal
	case config.LoaderLocalCSS:
		mode = symbolModeLocal
	}

	return Options{
		cssPrefixData: options.CSSPrefixData,
		optionsThatSupportStructuralEquality: optionsThatSupportStructuralEquality{
			originalTargetEnv:      options.OriginalTargetEnv,
			unsupportedCSSFeatures: options.UnsupportedCSSFeatures,
			minifySyntax:           options.MinifySyntax,
			minifyWhitespace:       options.MinifyWhitespace,
			minifyIdentifiers:      options.MinifyIdentifiers,
			symbolMode:             mode,
		},
	}
}
// Equal reports whether two option sets are equivalent.
func (a *Options) Equal(b *Options) bool {
	// Compare "optionsThatSupportStructuralEquality"
	if a.optionsThatSupportStructuralEquality != b.optionsThatSupportStructuralEquality {
		return false
	}

	// Compare "cssPrefixData": the maps are equal when they have the same
	// size and every key in "a" maps to an equal value in "b"
	if len(a.cssPrefixData) != len(b.cssPrefixData) {
		return false
	}
	for k, va := range a.cssPrefixData {
		vb, ok := b.cssPrefixData[k]
		if !ok || va != vb {
			return false
		}
	}

	// Note: a reverse scan over "b" is unnecessary. The previous version of
	// this code looked up keys of "b" in "b" itself (always true, so a no-op
	// bug); the intended check is already implied by the length comparison
	// plus the forward loop above, so the loop has been removed entirely.
	return true
}
// Parse tokenizes and parses an entire CSS file into an AST. The returned
// AST also carries the symbol table, import records, "@layer" metadata, and
// CSS modules information gathered during the parse.
func Parse(log logger.Log, source logger.Source, options Options) css_ast.AST {
	// Tokenize the whole file up front
	result := css_lexer.Tokenize(log, source, css_lexer.Options{
		RecordAllComments: options.minifyIdentifiers,
	})
	p := parser{
		log:              log,
		source:           source,
		tracker:          logger.MakeLineColumnTracker(&source),
		options:          options,
		tokens:           result.Tokens,
		allComments:      result.AllComments,
		legalComments:    result.LegalComments,
		prevError:        logger.Loc{Start: -1},
		composes:         make(map[ast.Ref]*css_ast.Composes),
		localScope:       make(map[string]ast.LocRef),
		globalScope:      make(map[string]ast.LocRef),
		makeLocalSymbols: options.symbolMode == symbolModeLocal,
	}

	// Parse the top-level rule list; anything left over is reported
	rules := p.parseListOfRules(ruleContext{
		isTopLevel:     true,
		parseSelectors: true,
	})
	p.expect(css_lexer.TEndOfFile)

	return css_ast.AST{
		Rules:                rules,
		CharFreq:             p.computeCharacterFrequency(),
		Symbols:              p.symbols,
		ImportRecords:        p.importRecords,
		ApproximateLineCount: result.ApproximateLineCount,
		SourceMapComment:     result.SourceMapComment,
		LocalSymbols:         p.localSymbols,
		LocalScope:           p.localScope,
		GlobalScope:          p.globalScope,
		Composes:             p.composes,
		LayersPreImport:      p.layersPreImport,
		LayersPostImport:     p.layersPostImport,
	}
}
// computeCharacterFrequency builds a character frequency histogram over
// everything that isn't a bound symbol. This is used to bias minified name
// generation toward slightly better gzip compression. The win is small, but
// the computation is simple and cheap. Returns nil when identifiers aren't
// being minified.
func (p *parser) computeCharacterFrequency() *ast.CharFreq {
	// The histogram is only useful when identifiers will be renamed
	if !p.options.minifyIdentifiers {
		return nil
	}

	freq := &ast.CharFreq{}

	// Start with every byte in the source file
	freq.Scan(p.source.Contents, 1)

	// Comments don't survive into the output
	for _, commentRange := range p.allComments {
		freq.Scan(p.source.TextForRange(commentRange), -1)
	}

	// Neither do the paths of external imports
	for _, record := range p.importRecords {
		if !record.SourceIndex.IsValid() {
			freq.Scan(record.Path.Text, -1)
		}
	}

	// Symbols that will be minified are subtracted once per estimated use
	for _, symbol := range p.symbols {
		if symbol.Kind == ast.SymbolLocalCSS {
			freq.Scan(symbol.OriginalName, -int32(symbol.UseCountEstimate))
		}
	}

	return freq
}
// advance moves past the current token, saturating at the end of the list.
func (p *parser) advance() {
	if p.index >= len(p.tokens) {
		return
	}
	p.index++
}
// at returns the token at the given index. Out-of-range indices yield a
// synthetic end-of-file token located at the end of the source text.
func (p *parser) at(index int) css_lexer.Token {
	if index >= len(p.tokens) {
		endLoc := logger.Loc{Start: int32(len(p.source.Contents))}
		return css_lexer.Token{
			Kind:  css_lexer.TEndOfFile,
			Range: logger.Range{Loc: endLoc},
		}
	}
	return p.tokens[index]
}
// current returns the token at the parser's current position.
func (p *parser) current() css_lexer.Token {
	return p.at(p.index)
}
// next returns the token just after the current one without advancing.
func (p *parser) next() css_lexer.Token {
	return p.at(p.index + 1)
}
// raw returns the current token's source text exactly as written.
func (p *parser) raw() string {
	r := p.current().Range
	return p.source.Contents[r.Loc.Start:r.End()]
}
// decoded returns the current token's decoded text (see the lexer's
// "DecodedText" for what decoding entails).
func (p *parser) decoded() string {
	return p.current().DecodedText(p.source.Contents)
}
// peek reports whether the current token has the given kind, without
// advancing the parser.
func (p *parser) peek(kind css_lexer.T) bool {
	return kind == p.current().Kind
}
// eat advances past the current token and returns true when it has the given
// kind. Otherwise the position is left unchanged and false is returned.
func (p *parser) eat(kind css_lexer.T) bool {
	if !p.peek(kind) {
		return false
	}
	p.advance()
	return true
}
// expect is "expectWithMatchingLoc" without an associated opening-bracket
// location (the -1 sentinel disables the matching-bracket error message).
func (p *parser) expect(kind css_lexer.T) bool {
	return p.expectWithMatchingLoc(kind, logger.Loc{Start: -1})
}
// expectWithMatchingLoc is like "eat" except that a warning is logged when
// the current token doesn't have the expected kind. When expecting a closing
// brace/bracket/parenthesis, "matchingLoc" should point at the corresponding
// opening character so the message can reference it (pass Start == -1 when
// there is no such location).
func (p *parser) expectWithMatchingLoc(kind css_lexer.T, matchingLoc logger.Loc) bool {
	if p.eat(kind) {
		return true
	}
	t := p.current()
	if (t.Flags & css_lexer.DidWarnAboutSingleLineComment) != 0 {
		// This token already triggered a warning; don't pile on
		return false
	}

	var text string
	var suggestion string
	var notes []logger.MsgData

	expected := kind.String()
	if strings.HasPrefix(expected, "\"") && strings.HasSuffix(expected, "\"") {
		// The quoted token text doubles as a fix-it suggestion
		suggestion = expected[1 : len(expected)-1]
	}

	if (kind == css_lexer.TSemicolon || kind == css_lexer.TColon) && p.index > 0 && p.at(p.index-1).Kind == css_lexer.TWhitespace {
		// Have a nice error message for forgetting a trailing semicolon or colon
		text = fmt.Sprintf("Expected %s", expected)
		t = p.at(p.index - 1)
	} else if (kind == css_lexer.TCloseBrace || kind == css_lexer.TCloseBracket || kind == css_lexer.TCloseParen) &&
		matchingLoc.Start != -1 && int(matchingLoc.Start)+1 <= len(p.source.Contents) {
		// Have a nice error message for forgetting a closing brace/bracket/parenthesis
		c := p.source.Contents[matchingLoc.Start : matchingLoc.Start+1]
		text = fmt.Sprintf("Expected %s to go with %q", expected, c)
		notes = append(notes, p.tracker.MsgData(logger.Range{Loc: matchingLoc, Len: 1}, fmt.Sprintf("The unbalanced %q is here:", c)))
	} else {
		switch t.Kind {
		case css_lexer.TEndOfFile, css_lexer.TWhitespace:
			text = fmt.Sprintf("Expected %s but found %s", expected, t.Kind.String())
			t.Range.Len = 0
		case css_lexer.TBadURL, css_lexer.TUnterminatedString:
			text = fmt.Sprintf("Expected %s but found %s", expected, t.Kind.String())
		default:
			text = fmt.Sprintf("Expected %s but found %q", expected, p.raw())
		}
	}

	if t.Range.Loc.Start > p.prevError.Start {
		// Only warn once per location to avoid cascading diagnostics
		data := p.tracker.MsgData(t.Range, text)
		data.Location.Suggestion = suggestion
		p.log.AddMsgID(logger.MsgID_CSS_CSSSyntaxError, logger.Msg{Kind: logger.Warning, Data: data, Notes: notes})
		p.prevError = t.Range.Loc
	}
	return false
}
// unexpected logs a warning about the current token. Warnings at locations
// at or before the previous diagnostic are suppressed, as are warnings for
// tokens that already produced a single-line comment warning.
func (p *parser) unexpected() {
	if t := p.current(); t.Range.Loc.Start > p.prevError.Start && (t.Flags&css_lexer.DidWarnAboutSingleLineComment) == 0 {
		var text string
		switch t.Kind {
		case css_lexer.TEndOfFile, css_lexer.TWhitespace:
			text = fmt.Sprintf("Unexpected %s", t.Kind.String())
			t.Range.Len = 0 // point at a position rather than a span
		case css_lexer.TBadURL, css_lexer.TUnterminatedString:
			text = fmt.Sprintf("Unexpected %s", t.Kind.String())
		default:
			text = fmt.Sprintf("Unexpected %q", p.raw())
		}
		p.log.AddID(logger.MsgID_CSS_CSSSyntaxError, logger.Warning, &p.tracker, t.Range, text)
		p.prevError = t.Range.Loc
	}
}
// symbolForName returns the symbol entry for the CSS name "name", creating a
// new symbol on first use. Whether the symbol is local or global depends on
// the current "makeLocalSymbols" state; each mode has its own name-keyed
// scope so repeated uses of a name share one symbol. The symbol's estimated
// use count is incremented on every call.
func (p *parser) symbolForName(loc logger.Loc, name string) ast.LocRef {
	var kind ast.SymbolKind
	var scope map[string]ast.LocRef

	if p.makeLocalSymbols {
		kind = ast.SymbolLocalCSS
		scope = p.localScope
	} else {
		kind = ast.SymbolGlobalCSS
		scope = p.globalScope
	}

	entry, ok := scope[name]
	if !ok {
		// First use of this name in this scope: allocate a new symbol
		entry = ast.LocRef{
			Loc: loc,
			Ref: ast.Ref{
				SourceIndex: p.source.Index,
				InnerIndex:  uint32(len(p.symbols)),
			},
		}
		p.symbols = append(p.symbols, ast.Symbol{
			Kind:         kind,
			OriginalName: name,
			Link:         ast.InvalidRef,
		})
		scope[name] = entry
		if kind == ast.SymbolLocalCSS {
			// Local symbols are additionally tracked in declaration order
			p.localSymbols = append(p.localSymbols, entry)
		}
	}

	p.symbols[entry.Ref.InnerIndex].UseCountEstimate++
	return entry
}
// recordAtLayerRule records the layer name lists declared by an "@layer"
// rule, each prefixed with the enclosing layer path (if any). Nothing is
// recorded while an anonymous layer is active (anonLayerCount > 0).
func (p *parser) recordAtLayerRule(layers [][]string) {
	if p.anonLayerCount > 0 {
		return
	}

	for _, layer := range layers {
		// Prepend the enclosing layer path to produce the full layer name
		if prefix := p.enclosingLayer; len(prefix) > 0 {
			full := make([]string, 0, len(prefix)+len(layer))
			full = append(full, prefix...)
			full = append(full, layer...)
			layer = full
		}
		p.layersPostImport = append(p.layersPostImport, layer)
	}
}
// ruleContext describes the environment that a list of rules is parsed in.
type ruleContext struct {
	isTopLevel     bool // true only for the file's outermost rule list
	parseSelectors bool // parse qualified rules as selector rules when true
}
// parseListOfRules parses rules until the end of the file (at the top level)
// or a closing "}" (when nested). It also attaches preceding legal comments
// as comment rules, enforces "@charset"/"@import" ordering restrictions at
// the top level, and lowers CSS nesting when the target doesn't support it.
func (p *parser) parseListOfRules(context ruleContext) []css_ast.Rule {
	atRuleContext := atRuleContext{}
	if context.isTopLevel {
		// "@charset" and "@import" are only ever valid at the top of the file
		atRuleContext.charsetValidity = atRuleValid
		atRuleContext.importValidity = atRuleValid
		atRuleContext.isTopLevel = true
	}
	rules := []css_ast.Rule{}
	didFindAtImport := false

loop:
	for {
		if context.isTopLevel {
			p.nestingIsPresent = false
		}

		// If there are any legal comments immediately before the current token,
		// turn them all into comment rules and append them to the current rule list
		for p.legalCommentIndex < len(p.legalComments) {
			comment := p.legalComments[p.legalCommentIndex]
			if comment.TokenIndexAfter > uint32(p.index) {
				break
			}
			if comment.TokenIndexAfter == uint32(p.index) {
				rules = append(rules, css_ast.Rule{Loc: comment.Loc, Data: &css_ast.RComment{Text: comment.Text}})
			}
			p.legalCommentIndex++
		}

		switch p.current().Kind {
		case css_lexer.TEndOfFile:
			break loop

		case css_lexer.TCloseBrace:
			if !context.isTopLevel {
				break loop
			}

		case css_lexer.TWhitespace:
			p.advance()
			continue

		case css_lexer.TAtKeyword:
			rule := p.parseAtRule(atRuleContext)

			// Disallow "@charset" and "@import" after other rules
			if context.isTopLevel {
				switch r := rule.Data.(type) {
				case *css_ast.RAtCharset:
					// This doesn't invalidate anything because it always comes first

				case *css_ast.RAtImport:
					didFindAtImport = true
					if atRuleContext.charsetValidity == atRuleValid {
						atRuleContext.afterLoc = rule.Loc
						atRuleContext.charsetValidity = atRuleInvalidAfter
					}

				case *css_ast.RAtLayer:
					if atRuleContext.charsetValidity == atRuleValid {
						atRuleContext.afterLoc = rule.Loc
						atRuleContext.charsetValidity = atRuleInvalidAfter
					}

					// From the specification: "Note: No @layer rules are allowed between
					// @import and @namespace rules. Any @layer rule that comes after an
					// @import or @namespace rule will cause any subsequent @import or
					// @namespace rules to be ignored."
					if atRuleContext.importValidity == atRuleValid && (r.Rules != nil || didFindAtImport) {
						atRuleContext.afterLoc = rule.Loc
						atRuleContext.charsetValidity = atRuleInvalidAfter
						atRuleContext.importValidity = atRuleInvalidAfter
					}

				default:
					if atRuleContext.importValidity == atRuleValid {
						atRuleContext.afterLoc = rule.Loc
						atRuleContext.charsetValidity = atRuleInvalidAfter
						atRuleContext.importValidity = atRuleInvalidAfter
					}
				}
			}

			// Lower CSS nesting if it's not supported (but only at the top level)
			if p.nestingIsPresent && p.options.unsupportedCSSFeatures.Has(compat.Nesting) && context.isTopLevel {
				rules = p.lowerNestingInRule(rule, rules)
			} else {
				rules = append(rules, rule)
			}
			continue

		case css_lexer.TCDO, css_lexer.TCDC:
			// CDO ("<!--") and CDC ("-->") tokens are skipped at the top level
			if context.isTopLevel {
				p.advance()
				continue
			}
		}

		// Any other kind of rule ends the valid range for "@charset"/"@import"
		if atRuleContext.importValidity == atRuleValid {
			atRuleContext.afterLoc = p.current().Range.Loc
			atRuleContext.charsetValidity = atRuleInvalidAfter
			atRuleContext.importValidity = atRuleInvalidAfter
		}

		// Note: CSS recently changed to parse and discard declarations
		// here instead of treating them as the start of a qualified rule.
		// See also: https://github.com/w3c/csswg-drafts/issues/8834
		if !context.isTopLevel {
			if scan, index := p.scanForEndOfRule(); scan == endOfRuleSemicolon {
				tokens := p.convertTokens(p.tokens[p.index:index])
				rules = append(rules, css_ast.Rule{Loc: p.current().Range.Loc, Data: &css_ast.RBadDeclaration{Tokens: tokens}})
				p.index = index + 1
				continue
			}
		}

		var rule css_ast.Rule
		if context.parseSelectors {
			rule = p.parseSelectorRule(context.isTopLevel, parseSelectorOpts{})
		} else {
			rule = p.parseQualifiedRule(parseQualifiedRuleOpts{isTopLevel: context.isTopLevel})
		}

		// Lower CSS nesting if it's not supported (but only at the top level)
		if p.nestingIsPresent && p.options.unsupportedCSSFeatures.Has(compat.Nesting) && context.isTopLevel {
			rules = p.lowerNestingInRule(rule, rules)
		} else {
			rules = append(rules, rule)
		}
	}

	if p.options.minifySyntax {
		rules = p.mangleRules(rules, context.isTopLevel)
	}
	return rules
}
// listOfDeclarationsOpts carries context into "parseListOfDeclarations".
type listOfDeclarationsOpts struct {
	// Context for "composes" handling, if any
	composesContext *composesContext

	// When true, nested "& { ... }" rules with no other selector may be
	// flattened into the parent rule's declaration list during minification
	canInlineNoOpNesting bool
}
// parseListOfDeclarations parses the interior of a declaration block: a mix
// of declarations, nested selector rules (CSS nesting), and at-rules. The
// list ends at a closing "}" or the end of the file.
func (p *parser) parseListOfDeclarations(opts listOfDeclarationsOpts) (list []css_ast.Rule) {
	list = []css_ast.Rule{}
	foundNesting := false

	for {
		switch p.current().Kind {
		case css_lexer.TWhitespace, css_lexer.TSemicolon:
			p.advance()

		case css_lexer.TEndOfFile, css_lexer.TCloseBrace:
			// End of the block: post-process what was collected
			list = p.processDeclarations(list, opts.composesContext)
			if p.options.minifySyntax {
				list = p.mangleRules(list, false /* isTopLevel */)

				// Pull out all unnecessarily-nested declarations and stick them at the end
				if opts.canInlineNoOpNesting {
					// "a { & { x: y } }" => "a { x: y }"
					// "a { & { b: c } d: e }" => "a { d: e; b: c }"
					if foundNesting {
						var inlineDecls []css_ast.Rule
						n := 0
						for _, rule := range list {
							if rule, ok := rule.Data.(*css_ast.RSelector); ok && len(rule.Selectors) == 1 {
								if sel := rule.Selectors[0]; len(sel.Selectors) == 1 && sel.Selectors[0].IsSingleAmpersand() {
									inlineDecls = append(inlineDecls, rule.Rules...)
									continue
								}
							}
							// Compact the kept rules in place
							list[n] = rule
							n++
						}
						list = append(list[:n], inlineDecls...)
					}
				} else {
					// "a, b::before { & { x: y } }" => "a, b::before { & { x: y } }"
				}
			}
			return

		case css_lexer.TAtKeyword:
			if p.inSelectorSubtree > 0 {
				p.nestingIsPresent = true
			}
			list = append(list, p.parseAtRule(atRuleContext{
				isDeclarationList:    true,
				canInlineNoOpNesting: opts.canInlineNoOpNesting,
			}))

		// Reference: https://drafts.csswg.org/css-nesting-1/
		default:
			if scan, _ := p.scanForEndOfRule(); scan == endOfRuleOpenBrace {
				// An open brace here means this is a nested selector rule
				p.nestingIsPresent = true
				foundNesting = true
				rule := p.parseSelectorRule(false, parseSelectorOpts{
					isDeclarationContext: true,
					composesContext:      opts.composesContext,
				})

				// If this rule was a single ":global" or ":local", inline it here. This
				// is handled differently than a bare "&" with normal CSS nesting because
				// that would be inlined at the end of the parent rule's body instead,
				// which is probably unexpected (e.g. it would trip people up when trying
				// to write rules in a specific order).
				if sel, ok := rule.Data.(*css_ast.RSelector); ok && len(sel.Selectors) == 1 {
					if first := sel.Selectors[0]; len(first.Selectors) == 1 {
						if first := first.Selectors[0]; first.WasEmptyFromLocalOrGlobal && first.IsSingleAmpersand() {
							list = append(list, sel.Rules...)
							continue
						}
					}
				}

				list = append(list, rule)
			} else {
				list = append(list, p.parseDeclaration())
			}
		}
	}
}
// mangleRules applies "--minify-syntax" cleanups to a rule list: removing
// empty rules with no effect, shortening "@layer" rules, unwrapping "@media"
// rules that duplicate an enclosing condition, and merging adjacent selector
// rules with identical bodies. Non-top-level lists additionally go through
// duplicate-rule removal here; top-level lists are left for the linker so
// rules can be deduplicated across files.
func (p *parser) mangleRules(rules []css_ast.Rule, isTopLevel bool) []css_ast.Rule {
	// Remove empty rules
	mangledRules := make([]css_ast.Rule, 0, len(rules))
	var prevNonComment css_ast.R
next:
	for _, rule := range rules {
		// Track the previous non-comment rule so that adjacent selector rules
		// can still be merged when comments sit between them
		nextNonComment := rule.Data
		switch r := rule.Data.(type) {
		case *css_ast.RAtKeyframes:
			// Do not remove empty "@keyframe foo {}" rules. Even empty rules still
			// dispatch JavaScript animation events, so removing them changes
			// behavior: https://bugzilla.mozilla.org/show_bug.cgi?id=1004377.

		case *css_ast.RAtLayer:
			if len(r.Rules) == 0 && len(r.Names) > 0 {
				// Do not remove empty "@layer foo {}" rules. The specification says:
				// "Cascade layers are sorted by the order in which they first are
				// declared, with nested layers grouped within their parent layers
				// before any unlayered rules." So removing empty rules could change
				// the order in which they are first declared, and is therefore invalid.
				//
				// We can turn "@layer foo {}" into "@layer foo;" to be shorter. But
				// don't collapse anonymous "@layer {}" into "@layer;" because that is
				// a syntax error.
				r.Rules = nil
			} else if len(r.Rules) == 1 && len(r.Names) == 1 {
				// Only collapse layers if each layer has exactly one name
				if r2, ok := r.Rules[0].Data.(*css_ast.RAtLayer); ok && len(r2.Names) == 1 {
					// "@layer a { @layer b {} }" => "@layer a.b;"
					// "@layer a { @layer b { c {} } }" => "@layer a.b { c {} }"
					r.Names[0] = append(r.Names[0], r2.Names[0]...)
					r.Rules = r2.Rules
				}
			}

		case *css_ast.RKnownAt:
			if len(r.Rules) == 0 && atKnownRuleCanBeRemovedIfEmpty[r.AtToken] {
				continue
			}

		case *css_ast.RAtMedia:
			if len(r.Rules) == 0 {
				continue
			}

			// Unwrap "@media" rules that duplicate conditions from a parent "@media"
			// rule. This is unlikely to be authored manually but can be automatically
			// generated when using a CSS framework such as Tailwind.
			//
			//   @media (min-width: 1024px) {
			//     .md\:class {
			//       color: red;
			//     }
			//     @media (min-width: 1024px) {
			//       .md\:class {
			//         color: red;
			//       }
			//     }
			//   }
			//
			// This converts that code into the following:
			//
			//   @media (min-width: 1024px) {
			//     .md\:class {
			//       color: red;
			//     }
			//     .md\:class {
			//       color: red;
			//     }
			//   }
			//
			// Which can then be mangled further.
			for _, queries := range p.enclosingAtMedia {
				if css_ast.MediaQueriesEqual(r.Queries, queries, nil) {
					mangledRules = append(mangledRules, r.Rules...)
					continue next
				}
			}

		case *css_ast.RSelector:
			if len(r.Rules) == 0 {
				continue
			}

			// Merge adjacent selectors with the same content
			// "a { color: red; } b { color: red; }" => "a, b { color: red; }"
			if prevNonComment != nil {
				if r, ok := rule.Data.(*css_ast.RSelector); ok {
					if prev, ok := prevNonComment.(*css_ast.RSelector); ok && css_ast.RulesEqual(r.Rules, prev.Rules, nil) &&
						isSafeSelectors(r.Selectors) && isSafeSelectors(prev.Selectors) {
					nextSelector:
						for _, sel := range r.Selectors {
							for _, prevSel := range prev.Selectors {
								if sel.Equal(prevSel, nil) {
									// Don't add duplicate selectors more than once
									continue nextSelector
								}
							}
							prev.Selectors = append(prev.Selectors, sel)
						}
						continue
					}
				}
			}

		case *css_ast.RComment:
			// Comments are invisible to the adjacent-selector merge above
			nextNonComment = nil
		}

		if nextNonComment != nil {
			prevNonComment = nextNonComment
		}

		mangledRules = append(mangledRules, rule)
	}

	// Mangle non-top-level rules using a back-to-front pass. Top-level rules
	// will be mangled by the linker instead for cross-file rule mangling.
	if !isTopLevel {
		remover := MakeDeadRuleMangler(ast.SymbolMap{})
		mangledRules = remover.RemoveDeadRulesInPlace(p.source.Index, mangledRules, p.importRecords)
	}

	return mangledRules
}
// ruleEntry pairs a rule's data with the "RemoveDeadRulesInPlace" call that
// recorded it, so that later equality checks can look up which set of import
// records the rule's tokens refer to.
type ruleEntry struct {
	data        css_ast.R
	callCounter uint32
}
// hashEntry is a bucket of previously-seen rules that share the same hash
type hashEntry struct {
	rules []ruleEntry
}
// callEntry remembers the import records and source index that were passed
// to one invocation of "RemoveDeadRulesInPlace" (the caller may invoke it
// multiple times, e.g. once per file during linking)
type callEntry struct {
	importRecords []ast.ImportRecord
	sourceIndex   uint32
}
// DeadRuleRemover removes duplicate and never-matching rules across one or
// more calls to "RemoveDeadRulesInPlace". It uses rule hashes for fast
// candidate lookup and keeps a reusable cross-file equality check so that
// "url()" tokens can be compared across files with different import records.
type DeadRuleRemover struct {
	entries map[uint32]hashEntry
	calls   []callEntry
	check   css_ast.CrossFileEqualityCheck
}
// MakeDeadRuleMangler constructs a DeadRuleRemover that uses the provided
// symbol map when checking rules for equality across files.
func MakeDeadRuleMangler(symbols ast.SymbolMap) DeadRuleRemover {
	remover := DeadRuleRemover{
		check: css_ast.CrossFileEqualityCheck{Symbols: symbols},
	}
	remover.entries = map[uint32]hashEntry{}
	return remover
}
// RemoveDeadRulesInPlace filters "rules" from back to front, dropping rules
// whose selectors can never match anything as well as all but the last copy
// of each duplicate rule. Survivors are compacted into the tail of the input
// slice (preserving their relative order) and the resulting sub-slice is
// returned; the input slice's backing array is reused but rule data is never
// mutated.
func (remover *DeadRuleRemover) RemoveDeadRulesInPlace(sourceIndex uint32, rules []css_ast.Rule, importRecords []ast.ImportRecord) []css_ast.Rule {
	// The caller may call this function multiple times, each with a different
	// set of import records. Remember each set of import records for equality
	// checks later.
	callCounter := uint32(len(remover.calls))
	remover.calls = append(remover.calls, callEntry{importRecords, sourceIndex})

	// Remove duplicate rules, scanning from the back so we keep the last
	// duplicate. Note that the linker calls this, so we do not want to do
	// anything that modifies the rules themselves. One reason is that ASTs
	// are immutable at the linking stage. Another reason is that merging
	// CSS ASTs from separate files will mess up source maps because a single
	// AST cannot simultaneously represent offsets from multiple files.
	n := len(rules)
	start := n
skipRule:
	for i := n - 1; i >= 0; i-- {
		rule := rules[i]

		// Remove rules with selectors that don't apply to anything (e.g. ":is()")
		if r, ok := rule.Data.(*css_ast.RSelector); ok && allSelectorsAreDead(r.Selectors) {
			continue skipRule
		}

		// For duplicate rules, omit all but the last copy
		if hash, ok := rule.Data.Hash(); ok {
			entry := remover.entries[hash]
			for _, current := range entry.rules {
				var check *css_ast.CrossFileEqualityCheck

				// If this rule was from another file, then pass along both arrays
				// of import records so that the equality check for "url()" tokens
				// can use them to check for equality.
				if current.callCounter != callCounter {
					// Reuse the same memory allocation
					check = &remover.check
					call := remover.calls[current.callCounter]
					check.ImportRecordsA = importRecords
					check.ImportRecordsB = call.importRecords
					check.SourceIndexA = sourceIndex
					check.SourceIndexB = call.sourceIndex
				}

				if rule.Data.Equal(current.data, check) {
					continue skipRule
				}
			}
			entry.rules = append(entry.rules, ruleEntry{
				data:        rule.Data,
				callCounter: callCounter,
			})
			remover.entries[hash] = entry
		}

		// Keep this rule: compact it toward the end of the slice
		start--
		rules[start] = rule
	}

	return rules[start:]
}
// containsDeadSelectors reports whether any compound selector in this list
// contains a subclass selector that can never match anything. The only case
// recognized here is an empty ":is()" or ":where()" pseudo-class.
func containsDeadSelectors(selectors []css_ast.CompoundSelector) bool {
	for i := range selectors {
		for _, subclass := range selectors[i].SubclassSelectors {
			pseudo, ok := subclass.Data.(*css_ast.SSPseudoClassWithSelectorList)
			if !ok || len(pseudo.Selectors) != 0 {
				continue
			}

			// ":is()" and ":where()" never match anything when empty
			if pseudo.Kind == css_ast.PseudoClassIs || pseudo.Kind == css_ast.PseudoClassWhere {
				return true
			}
		}
	}
	return false
}
// allSelectorsAreDead reports whether every complex selector in the list is
// dead (see "containsDeadSelectors"), meaning the rule as a whole can never
// match anything and is safe to remove.
func allSelectorsAreDead(selectors []css_ast.ComplexSelector) bool {
	for i := range selectors {
		if !containsDeadSelectors(selectors[i].Selectors) {
			return false
		}
	}
	return true
}
// Set of HTML element names that are both non-deprecated and supported in
// IE 7 and up. Used by "isSafeSelectors" below when deciding whether merging
// rules that use type selectors is safe.
// Reference: https://developer.mozilla.org/en-US/docs/Web/HTML/Element
var nonDeprecatedElementsSupportedByIE7 = map[string]bool{
	"a":          true,
	"abbr":       true,
	"address":    true,
	"area":       true,
	"b":          true,
	"base":       true,
	"blockquote": true,
	"body":       true,
	"br":         true,
	"button":     true,
	"caption":    true,
	"cite":       true,
	"code":       true,
	"col":        true,
	"colgroup":   true,
	"dd":         true,
	"del":        true,
	"dfn":        true,
	"div":        true,
	"dl":         true,
	"dt":         true,
	"em":         true,
	"embed":      true,
	"fieldset":   true,
	"form":       true,
	"h1":         true,
	"h2":         true,
	"h3":         true,
	"h4":         true,
	"h5":         true,
	"h6":         true,
	"head":       true,
	"hr":         true,
	"html":       true,
	"i":          true,
	"iframe":     true,
	"img":        true,
	"input":      true,
	"ins":        true,
	"kbd":        true,
	"label":      true,
	"legend":     true,
	"li":         true,
	"link":       true,
	"map":        true,
	"menu":       true,
	"meta":       true,
	"noscript":   true,
	"object":     true,
	"ol":         true,
	"optgroup":   true,
	"option":     true,
	"p":          true,
	"param":      true,
	"pre":        true,
	"q":          true,
	"ruby":       true,
	"s":          true,
	"samp":       true,
	"script":     true,
	"select":     true,
	"small":      true,
	"span":       true,
	"strong":     true,
	"style":      true,
	"sub":        true,
	"sup":        true,
	"table":      true,
	"tbody":      true,
	"td":         true,
	"textarea":   true,
	"tfoot":      true,
	"th":         true,
	"thead":      true,
	"title":      true,
	"tr":         true,
	"u":          true,
	"ul":         true,
	"var":        true,
}
// This only returns true if all of these selectors are considered "safe" which
// means that they are very likely to work in any browser a user might reasonably
// be using. We do NOT want to merge adjacent qualified rules with the same body
// if any of the selectors are unsafe, since then browsers which don't support
// that particular feature would ignore the entire merged qualified rule:
//
// Input:
// a { color: red }
// b { color: red }
// input::-moz-placeholder { color: red }
//
// Valid output:
// a, b { color: red }
// input::-moz-placeholder { color: red }
//
// Invalid output:
// a, b, input::-moz-placeholder { color: red }
//
// This considers IE 7 and above to be a browser that a user could possibly use.
// Versions of IE less than 6 are not considered.
func isSafeSelectors(complexSelectors []css_ast.ComplexSelector) bool {
	for _, complex := range complexSelectors {
		for _, compound := range complex.Selectors {
			// The nesting selector is an extension: https://drafts.csswg.org/css-nesting-1/
			if len(compound.NestingSelectorLocs) > 0 {
				return false
			}

			// "Before Internet Explorer 10, the combinator only works in standards mode"
			// Reference: https://developer.mozilla.org/en-US/docs/Web/CSS/CSS_Selectors
			if compound.Combinator.Byte != 0 {
				return false
			}

			if t := compound.TypeSelector; t != nil {
				// Namespaces don't work in IE before version 9
				// Reference: https://developer.mozilla.org/en-US/docs/Web/CSS/Type_selectors
				if t.NamespacePrefix != nil {
					return false
				}

				// Reject elements that are either deprecated or not supported in IE 7
				if t.Name.Kind == css_lexer.TIdent && !nonDeprecatedElementsSupportedByIE7[t.Name.Text] {
					return false
				}
			}

			for _, ss := range compound.SubclassSelectors {
				switch s := ss.Data.(type) {
				case *css_ast.SSAttribute:
					// Case modifiers don't work in IE at all
					// Reference: https://developer.mozilla.org/en-US/docs/Web/CSS/Attribute_selectors
					if s.MatcherModifier != 0 {
						return false
					}

				case *css_ast.SSPseudoClass:
					// Only accept pseudo classes from a hard-coded list that's known
					// to work everywhere. For example, ":focus" doesn't work in IE 7.
					// Reference: https://developer.mozilla.org/en-US/docs/Web/CSS/Pseudo-classes
					isKnownSafe := false
					if s.Args == nil && !s.IsElement {
						switch s.Name {
						case "active", "first-child", "hover", "link", "visited":
							isKnownSafe = true
						}
					}
					if !isKnownSafe {
						return false
					}

				case *css_ast.SSPseudoClassWithSelectorList:
					// These definitely don't work in IE 7
					return false
				}
			}
		}
	}
	return true
}
// parseURLOrString parses a URL value in any of its syntactic forms: a
// quoted string, an unquoted "url(...)" token, or a "url(" function call
// containing a quoted string. On success it returns the decoded text, the
// source range of the token that held it, and true. On failure the parser's
// position is left unchanged and it returns false.
func (p *parser) parseURLOrString() (string, logger.Range, bool) {
	t := p.current()
	switch t.Kind {
	case css_lexer.TString:
		text := p.decoded()
		p.advance()
		return text, t.Range, true

	case css_lexer.TURL:
		text := p.decoded()
		p.advance()
		return text, t.Range, true

	case css_lexer.TFunction:
		if strings.EqualFold(p.decoded(), "url") {
			// Location of the "(" for the unbalanced-parenthesis error message
			matchingLoc := logger.Loc{Start: p.current().Range.End() - 1}

			// Look ahead without committing so failure leaves "p.index" untouched
			i := p.index + 1

			// Skip over whitespace
			for p.at(i).Kind == css_lexer.TWhitespace {
				i++
			}

			// Consume a string
			if p.at(i).Kind == css_lexer.TString {
				stringIndex := i
				i++

				// Skip over whitespace
				for p.at(i).Kind == css_lexer.TWhitespace {
					i++
				}

				// Consume a closing parenthesis
				if close := p.at(i).Kind; close == css_lexer.TCloseParen || close == css_lexer.TEndOfFile {
					t := p.at(stringIndex)
					text := t.DecodedText(p.source.Contents)
					p.index = i
					p.expectWithMatchingLoc(css_lexer.TCloseParen, matchingLoc)
					return text, t.Range, true
				}
			}
		}
	}

	return "", logger.Range{}, false
}
// expectURLOrString behaves like parseURLOrString but additionally reports
// an "expected URL" syntax error when no URL or string token is present.
func (p *parser) expectURLOrString() (url string, r logger.Range, ok bool) {
	if url, r, ok = p.parseURLOrString(); !ok {
		p.expect(css_lexer.TURL)
	}
	return url, r, ok
}
// atRuleKind classifies how the body of a recognized at-rule is parsed
// (see the "specialAtRules" table below)
type atRuleKind uint8

const (
	atRuleUnknown          atRuleKind = iota // Zero value: not present in "specialAtRules"
	atRuleDeclarations                       // Body parsed as a declaration list (e.g. "@font-face", "@page")
	atRuleInheritContext                     // Body inherits the surrounding parse context (e.g. "@media", "@supports")
	atRuleQualifiedOrEmpty                   // NOTE(review): presumably a qualified-rule body or a bare ";" — confirm against parseAtRule
	atRuleEmpty                              // NOTE(review): presumably no block at all (ends with ";") — confirm against parseAtRule
)
var specialAtRules = map[string]atRuleKind{
"media": atRuleInheritContext,
"supports": atRuleInheritContext,
"font-face": atRuleDeclarations,
"page": atRuleDeclarations,
// These go inside "@page": https://www.w3.org/TR/css-page-3/#syntax-page-selector
"bottom-center": atRuleDeclarations,
"bottom-left-corner": atRuleDeclarations,
"bottom-left": atRuleDeclarations,
"bottom-right-corner": atRuleDeclarations,
"bottom-right": atRuleDeclarations,
"left-bottom": atRuleDeclarations,
"left-middle": atRuleDeclarations,
"left-top": atRuleDeclarations,
"right-bottom": atRuleDeclarations,
"right-middle": atRuleDeclarations,
"right-top": atRuleDeclarations,
"top-center": atRuleDeclarations,
"top-left-corner": atRuleDeclarations,
"top-left": atRuleDeclarations,
"top-right-corner": atRuleDeclarations,
"top-right": atRuleDeclarations,
// These properties are very deprecated and appear to only be useful for
// mobile versions of internet explorer (which may no longer exist?), but
// they are used by the https://ant.design/ design system so we recognize
// them to avoid the warning.
//
// Documentation: https://developer.mozilla.org/en-US/docs/Web/CSS/@viewport
// Discussion: https://github.com/w3c/csswg-drafts/issues/4766
//
"viewport": atRuleDeclarations,
"-ms-viewport": atRuleDeclarations,
// This feature has been removed from the web because it's actively harmful.
// However, there is one exception where "@-moz-document url-prefix() {" is
// accepted by Firefox to basically be an "if Firefox" conditional rule.
//
// Documentation: https://developer.mozilla.org/en-US/docs/Web/CSS/@document
// Discussion: https://bugzilla.mozilla.org/show_bug.cgi?id=1035091
//
"document": atRuleInheritContext,
"-moz-document": atRuleInheritContext,
// This is a new feature that changes how the CSS rule cascade works. It can
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | true |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/css_parser/css_parser_media.go | internal/css_parser/css_parser_media.go | package css_parser
import (
"strings"
"github.com/evanw/esbuild/internal/compat"
"github.com/evanw/esbuild/internal/css_ast"
"github.com/evanw/esbuild/internal/css_lexer"
"github.com/evanw/esbuild/internal/logger"
)
// Reference: https://drafts.csswg.org/mediaqueries-4/
// parseMediaQueryListUntil parses a comma-separated "<media-query-list>"
// until end-of-file or until "stop" returns true for the current token kind.
// Queries that fail to parse are preserved verbatim as arbitrary token runs
// instead of being discarded, so invalid input round-trips through printing.
func (p *parser) parseMediaQueryListUntil(stop func(css_lexer.T) bool) []css_ast.MediaQuery {
	var queries []css_ast.MediaQuery
	p.eat(css_lexer.TWhitespace)

	for !p.peek(css_lexer.TEndOfFile) && !stop(p.current().Kind) {
		start := p.index
		query, ok := p.parseMediaQuery()
		if !ok {
			// If parsing failed, parse an arbitrary sequence of tokens instead
			p.index = start
			loc := p.current().Range.Loc
			for !p.peek(css_lexer.TEndOfFile) && !stop(p.current().Kind) && !p.peek(css_lexer.TComma) {
				p.parseComponentValue()
			}
			tokens := p.convertTokens(p.tokens[start:p.index])
			query = css_ast.MediaQuery{Loc: loc, Data: &css_ast.MQArbitraryTokens{Tokens: tokens}}
		}
		queries = append(queries, query)
		p.eat(css_lexer.TWhitespace)

		// Queries are separated by commas; stop when no comma follows
		if !p.eat(css_lexer.TComma) {
			break
		}
		p.eat(css_lexer.TWhitespace)
	}

	return queries
}
// parseMediaQuery parses a single "<media-query>": either a bare media
// condition (e.g. "(min-width: 100px)") or a media type with an optional
// "not"/"only" prefix and an optional "and <media-condition-without-or>"
// suffix (e.g. "only screen and (color)").
func (p *parser) parseMediaQuery() (css_ast.MediaQuery, bool) {
	loc := p.current().Range.Loc

	// Check for a media condition first
	if p.looksLikeMediaCondition() {
		return p.parseMediaCondition(mediaWithOr)
	}

	// Parse the media type and potentially the leading "not" or "only" keyword
	// (note: "mediaType" is only used after the TIdent check below succeeds)
	mediaType := p.decoded()
	if !p.peek(css_lexer.TIdent) {
		p.expect(css_lexer.TIdent)
		return css_ast.MediaQuery{}, false
	}
	op := css_ast.MQTypeOpNone
	if strings.EqualFold(mediaType, "not") {
		op = css_ast.MQTypeOpNot
	} else if strings.EqualFold(mediaType, "only") {
		op = css_ast.MQTypeOpOnly
	}
	if op != css_ast.MQTypeOpNone {
		// Consume the prefix keyword; the real media type follows
		p.advance()
		p.eat(css_lexer.TWhitespace)
		mediaType = p.decoded()
		if !p.peek(css_lexer.TIdent) {
			p.expect(css_lexer.TIdent)
			return css_ast.MediaQuery{}, false
		}
	}

	// The <media-type> production does not include the keywords "only", "not", "and", "or", and "layer".
	if strings.EqualFold(mediaType, "only") ||
		strings.EqualFold(mediaType, "not") ||
		strings.EqualFold(mediaType, "and") ||
		strings.EqualFold(mediaType, "or") ||
		strings.EqualFold(mediaType, "layer") {
		p.unexpected()
		return css_ast.MediaQuery{}, false
	}
	p.advance()
	p.eat(css_lexer.TWhitespace)

	// Potentially parse a chain of "and" operators
	var andOrNull css_ast.MediaQuery
	if p.peek(css_lexer.TIdent) && strings.EqualFold(p.decoded(), "and") {
		p.advance()
		p.eat(css_lexer.TWhitespace)
		var ok bool
		andOrNull, ok = p.parseMediaCondition(mediaWithoutOr)
		if !ok {
			return css_ast.MediaQuery{}, false
		}
	}

	return css_ast.MediaQuery{Loc: loc, Data: &css_ast.MQType{Op: op, Type: mediaType, AndOrNull: andOrNull}}, true
}
// looksLikeMediaCondition returns true when the current token could begin a
// "<media-condition>": an opening parenthesis, a function token, or the
// keyword "not" followed by whitespace and an opening parenthesis.
func (p *parser) looksLikeMediaCondition() bool {
	switch p.current().Kind {
	case css_lexer.TOpenParen, css_lexer.TFunction:
		return true

	case css_lexer.TIdent:
		return strings.EqualFold(p.decoded(), "not") &&
			p.next().Kind == css_lexer.TWhitespace &&
			p.at(p.index+2).Kind == css_lexer.TOpenParen
	}
	return false
}
// mediaOr controls whether the "or" combinator is allowed while parsing a
// media condition: the grammar distinguishes "<media-condition>" (allows
// "or") from "<media-condition-without-or>"
type mediaOr uint8

const (
	mediaWithOr mediaOr = iota
	mediaWithoutOr
)
// parseMediaCondition parses a "<media-condition>": either "not" followed by
// a parenthesized term, or a term optionally followed by a homogeneous chain
// of "and" or "or" operators ("or" is only permitted when "or" is
// "mediaWithOr"). Mixed "and"/"or" chains stop at the first non-matching
// keyword, as required by the grammar.
func (p *parser) parseMediaCondition(or mediaOr) (css_ast.MediaQuery, bool) {
	loc := p.current().Range.Loc

	// Handle a leading "not"
	if p.peek(css_lexer.TIdent) && strings.EqualFold(p.decoded(), "not") {
		p.advance()
		p.eat(css_lexer.TWhitespace)
		if inner, ok := p.parseMediaInParens(); !ok {
			return css_ast.MediaQuery{}, false
		} else {
			return p.maybeSimplifyMediaNot(loc, inner), true
		}
	}

	// Parse the first term
	first, ok := p.parseMediaInParens()
	if !ok {
		return css_ast.MediaQuery{}, false
	}
	p.eat(css_lexer.TWhitespace)

	// Potentially parse a chain of "and" or "or" operators
	if p.peek(css_lexer.TIdent) {
		if keyword := p.decoded(); strings.EqualFold(keyword, "and") || (or == mediaWithOr && strings.EqualFold(keyword, "or")) {
			// "or" is the only two-letter keyword that can reach here
			op := css_ast.MQBinaryOpAnd
			if len(keyword) == 2 {
				op = css_ast.MQBinaryOpOr
			}
			inner := p.appendMediaTerm([]css_ast.MediaQuery{}, first, op)
			for {
				// Consume the "and"/"or" keyword, then the next term
				p.advance()
				p.eat(css_lexer.TWhitespace)
				next, ok := p.parseMediaInParens()
				if !ok {
					return css_ast.MediaQuery{}, false
				}
				inner = p.appendMediaTerm(inner, next, op)
				p.eat(css_lexer.TWhitespace)

				// Only continue while the exact same keyword repeats
				if !p.peek(css_lexer.TIdent) || !strings.EqualFold(p.decoded(), keyword) {
					break
				}
			}
			return css_ast.MediaQuery{Loc: loc, Data: &css_ast.MQBinary{Op: op, Terms: inner}}, true
		}
	}

	return first, true
}
// appendMediaTerm appends "term" to "inner". When minifying, a nested binary
// expression with the same operator is flattened instead:
// "(a and b) and c" => "a and b and c", and likewise for "or".
func (p *parser) appendMediaTerm(inner []css_ast.MediaQuery, term css_ast.MediaQuery, op css_ast.MQBinaryOp) []css_ast.MediaQuery {
	if p.options.minifySyntax {
		if binary, ok := term.Data.(*css_ast.MQBinary); ok && binary.Op == op {
			return append(inner, binary.Terms...)
		}
	}
	return append(inner, term)
}
// parseMediaInParens parses a "<media-in-parens>" term: either a nested
// parenthesized media condition, a media feature such as "(min-width: 1px)"
// or the range form "(width < 100px)", or an arbitrary parenthesized (or
// function-call) token sequence kept verbatim. Range-form features are
// lowered to legacy "min-"/"max-" syntax when the target environment does
// not support media ranges.
func (p *parser) parseMediaInParens() (css_ast.MediaQuery, bool) {
	p.eat(css_lexer.TWhitespace)
	start := p.index

	// Consume the opening token
	isFunction := p.eat(css_lexer.TFunction)
	if !isFunction && !p.expect(css_lexer.TOpenParen) {
		return css_ast.MediaQuery{}, false
	}
	p.eat(css_lexer.TWhitespace)

	// Handle a media condition
	if !isFunction && p.looksLikeMediaCondition() {
		if inner, ok := p.parseMediaCondition(mediaWithOr); !ok {
			return css_ast.MediaQuery{}, false
		} else {
			p.eat(css_lexer.TWhitespace)
			if !p.expect(css_lexer.TCloseParen) {
				return css_ast.MediaQuery{}, false
			}
			return inner, ok
		}
	}

	// Scan over the remaining tokens
	for !p.peek(css_lexer.TCloseParen) && !p.peek(css_lexer.TEndOfFile) {
		p.parseComponentValue()
	}
	end := p.index
	if !p.expect(css_lexer.TCloseParen) {
		return css_ast.MediaQuery{}, false
	}
	tokens := p.convertTokens(p.tokens[start:end])
	loc := tokens[0].Loc

	// Potentially pattern-match the tokens inside the parentheses
	if !isFunction && len(tokens) == 1 {
		if children := tokens[0].Children; children != nil {
			if term, ok := parsePlainOrBooleanMediaFeature(*children); ok {
				return css_ast.MediaQuery{Loc: loc, Data: term}, true
			}
			if term, ok := parseRangeMediaFeature(*children); ok {
				if p.options.unsupportedCSSFeatures.Has(compat.MediaRange) {
					// Lower each side of the range into legacy "min-"/"max-" form,
					// joining both sides with "and" when the range is double-sided
					var terms []css_ast.MediaQuery
					if term.BeforeCmp != css_ast.MQCmpNone {
						terms = append(terms, lowerMediaRange(term.NameLoc, term.Name, term.BeforeCmp.Reverse(), term.Before))
					}
					if term.AfterCmp != css_ast.MQCmpNone {
						terms = append(terms, lowerMediaRange(term.NameLoc, term.Name, term.AfterCmp, term.After))
					}
					if len(terms) == 1 {
						return terms[0], true
					} else {
						return css_ast.MediaQuery{Loc: loc, Data: &css_ast.MQBinary{Op: css_ast.MQBinaryOpAnd, Terms: terms}}, true
					}
				}
				return css_ast.MediaQuery{Loc: loc, Data: term}, true
			}
		}
	}

	return css_ast.MediaQuery{Loc: loc, Data: &css_ast.MQArbitraryTokens{Tokens: tokens}}, true
}
// lowerMediaRange converts one comparison from a range-style media feature
// into the equivalent legacy "min-"/"max-" syntax:
//
//	"foo <= v" => "(max-foo: v)"
//	"foo >= v" => "(min-foo: v)"
//	"foo < v"  => "not (min-foo: v)"
//	"foo > v"  => "not (max-foo: v)"
//	"foo = v"  => "(foo: v)"
func lowerMediaRange(loc logger.Loc, name string, cmp css_ast.MQCmp, value []css_ast.Token) css_ast.MediaQuery {
	prefix := ""
	negate := false
	switch cmp {
	case css_ast.MQCmpLe:
		prefix = "max-"
	case css_ast.MQCmpGe:
		prefix = "min-"
	case css_ast.MQCmpLt:
		// Strict inequalities have no legacy form, so negate the opposite bound
		prefix, negate = "min-", true
	case css_ast.MQCmpGt:
		prefix, negate = "max-", true
	}

	query := css_ast.MediaQuery{Loc: loc, Data: &css_ast.MQPlainOrBoolean{Name: prefix + name, ValueOrNil: value}}
	if negate {
		return css_ast.MediaQuery{Loc: loc, Data: &css_ast.MQNot{Inner: query}}
	}
	return query
}
// parsePlainOrBooleanMediaFeature matches a boolean media feature (a single
// identifier such as "(color)") or a plain one (identifier, colon, value
// such as "(min-width: 100px)"). Any other token shape returns false.
func parsePlainOrBooleanMediaFeature(tokens []css_ast.Token) (*css_ast.MQPlainOrBoolean, bool) {
	switch {
	case len(tokens) == 1 && tokens[0].Kind == css_lexer.TIdent:
		// A lone identifier is a boolean feature
		return &css_ast.MQPlainOrBoolean{Name: tokens[0].Text}, true

	case len(tokens) >= 3 && tokens[0].Kind == css_lexer.TIdent && tokens[1].Kind == css_lexer.TColon:
		// "name: value" with nothing left over is a plain feature
		if value, rest := scanMediaValue(tokens[2:]); len(rest) == 0 {
			return &css_ast.MQPlainOrBoolean{Name: tokens[0].Text, ValueOrNil: value}, true
		}
	}
	return nil, false
}
// parseRangeMediaFeature matches the range form of a media feature: either
// "name cmp value", "value cmp name", or the double-sided form
// "value cmp name cmp value" where both comparisons point in the same
// direction. Exactly one side must be a single identifier (the feature
// name); anything else returns false.
func parseRangeMediaFeature(tokens []css_ast.Token) (*css_ast.MQRange, bool) {
	if first, tokens := scanMediaValue(tokens); len(first) > 0 {
		if firstCmp, tokens := scanMediaComparison(tokens); firstCmp != css_ast.MQCmpNone {
			if second, tokens := scanMediaValue(tokens); len(second) > 0 {
				if len(tokens) == 0 {
					// Single-sided: one operand must be the feature name
					if name, nameLoc, ok := isSingleIdent(first); ok {
						// "name cmp value"
						return &css_ast.MQRange{
							Name:     name,
							NameLoc:  nameLoc,
							AfterCmp: firstCmp,
							After:    second,
						}, true
					} else if name, nameLoc, ok := isSingleIdent(second); ok {
						// "value cmp name"
						return &css_ast.MQRange{
							Before:    first,
							BeforeCmp: firstCmp,
							Name:      name,
							NameLoc:   nameLoc,
						}, true
					}
				} else if name, nameLoc, ok := isSingleIdent(second); ok {
					// Double-sided: "value cmp name cmp value"
					if secondCmp, tokens := scanMediaComparison(tokens); secondCmp != css_ast.MQCmpNone {
						// Both comparisons must point in the same direction
						if f, s := firstCmp.Dir(), secondCmp.Dir(); (f < 0 && s < 0) || (f > 0 && s > 0) {
							if third, tokens := scanMediaValue(tokens); len(third) > 0 && len(tokens) == 0 {
								return &css_ast.MQRange{
									Before:    first,
									BeforeCmp: firstCmp,
									Name:      name,
									NameLoc:   nameLoc,
									AfterCmp:  secondCmp,
									After:     third,
								}, true
							}
						}
					}
				}
			}
		}
	}
	return nil, false
}
// maybeSimplifyMediaNot wraps "inner" in a "not" node. When minifying, it
// first tries to fold the negation into "inner" instead: double negations
// cancel, De Morgan's law rewrites a binary of all-negated terms, and a
// one-sided non-equality range comparison absorbs the "not" by flipping
// its comparison operator.
func (p *parser) maybeSimplifyMediaNot(loc logger.Loc, inner css_ast.MediaQuery) css_ast.MediaQuery {
	if p.options.minifySyntax {
		switch data := inner.Data.(type) {
		case *css_ast.MQNot:
			// "not (not a)" => "a"
			// "not (not (not a))" => "not a"
			return data.Inner

		case *css_ast.MQBinary:
			// "not ((not a) and (not b))" => "a or b"
			// "not ((not a) or (not b))" => "a and b"
			terms := make([]css_ast.MediaQuery, 0, len(data.Terms))
			for _, term := range data.Terms {
				if not, ok := term.Data.(*css_ast.MQNot); ok {
					terms = append(terms, not.Inner)
				} else {
					break
				}
			}
			// Only applies when every term was itself negated
			if len(terms) == len(data.Terms) {
				// XOR with 1 swaps the "and" and "or" operator values
				data.Op ^= 1
				data.Terms = terms
				return inner
			}

		case *css_ast.MQRange:
			// A one-sided, non-equality comparison absorbs the "not" by flipping
			if (data.BeforeCmp == css_ast.MQCmpNone && data.AfterCmp != css_ast.MQCmpEq) ||
				(data.AfterCmp == css_ast.MQCmpNone && data.BeforeCmp != css_ast.MQCmpEq) {
				data.BeforeCmp = data.BeforeCmp.Flip()
				data.AfterCmp = data.AfterCmp.Flip()
				return inner
			}
		}
	}

	return css_ast.MediaQuery{Loc: loc, Data: &css_ast.MQNot{Inner: inner}}
}
// isSingleIdent returns the text and location of the token when "tokens" is
// exactly one identifier token, and false otherwise.
func isSingleIdent(tokens []css_ast.Token) (string, logger.Loc, bool) {
	if len(tokens) != 1 || tokens[0].Kind != css_lexer.TIdent {
		return "", logger.Loc{}, false
	}
	t := tokens[0]
	return t.Text, t.Loc, true
}
// scanMediaComparison consumes a leading "=", "<", "<=", ">", or ">="
// operator from "tokens" and returns the comparison plus the remaining
// tokens. A two-character operator only counts when there is no whitespace
// between its two delimiter tokens. Returns MQCmpNone and the input
// unchanged when no comparison is present.
func scanMediaComparison(tokens []css_ast.Token) (css_ast.MQCmp, []css_ast.Token) {
	if len(tokens) == 0 {
		return css_ast.MQCmpNone, tokens
	}

	// True when an "=" immediately follows with no whitespace on either side
	followedByEquals := func() bool {
		return len(tokens) >= 2 && tokens[1].Kind == css_lexer.TDelimEquals &&
			(tokens[0].Whitespace&css_ast.WhitespaceAfter) == 0 &&
			(tokens[1].Whitespace&css_ast.WhitespaceBefore) == 0
	}

	switch tokens[0].Kind {
	case css_lexer.TDelimEquals:
		return css_ast.MQCmpEq, tokens[1:]

	case css_lexer.TDelimLessThan:
		// Handle "<=" or "<"
		if followedByEquals() {
			return css_ast.MQCmpLe, tokens[2:]
		}
		return css_ast.MQCmpLt, tokens[1:]

	case css_lexer.TDelimGreaterThan:
		// Handle ">=" or ">"
		if followedByEquals() {
			return css_ast.MQCmpGe, tokens[2:]
		}
		return css_ast.MQCmpGt, tokens[1:]
	}
	return css_ast.MQCmpNone, tokens
}
// scanMediaValue consumes a leading media feature value from "tokens": a
// dimension, an identifier, a number, or a ratio of the form
// "<number> / <number>". It returns the consumed value (with whitespace
// flags cleared at its endpoints, mutating the input slice) followed by the
// remaining tokens; the value is empty when nothing matches.
func scanMediaValue(tokens []css_ast.Token) ([]css_ast.Token, []css_ast.Token) {
	n := 0
	if len(tokens) > 0 {
		switch tokens[0].Kind {
		case css_lexer.TDimension, css_lexer.TIdent:
			n = 1

		case css_lexer.TNumber:
			n = 1
			// Potentially recognize a ratio which is "<number> / <number>"
			if len(tokens) >= 3 && tokens[1].Kind == css_lexer.TDelimSlash && tokens[2].Kind == css_lexer.TNumber {
				n = 3
			}
		}
	}

	// Trim whitespace at the endpoints
	if n != 0 {
		tokens[0].Whitespace &^= css_ast.WhitespaceBefore
		tokens[n-1].Whitespace &^= css_ast.WhitespaceAfter
	}
	return tokens[:n], tokens[n:]
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/css_parser/css_decls_transform.go | internal/css_parser/css_decls_transform.go | package css_parser
import (
"strings"
"github.com/evanw/esbuild/internal/css_ast"
"github.com/evanw/esbuild/internal/css_lexer"
)
// turnPercentIntoNumberIfShorter rewrites a percentage token as the
// equivalent plain number — the value with its decimal point shifted two
// places left via "shiftDot" — but only when that form is strictly shorter
// than the original text. Non-percentage tokens are left untouched.
func turnPercentIntoNumberIfShorter(t *css_ast.Token) {
	if t.Kind != css_lexer.TPercentage {
		return
	}
	shifted, ok := shiftDot(t.PercentageValue(), -2)
	if ok && len(shifted) < len(t.Text) {
		t.Kind = css_lexer.TNumber
		t.Text = shifted
	}
}
// https://www.w3.org/TR/css-transforms-1/#two-d-transform-functions
// https://drafts.csswg.org/css-transforms-2/#transform-functions
func (p *parser) mangleTransforms(tokens []css_ast.Token) []css_ast.Token {
for i := range tokens {
if token := &tokens[i]; token.Kind == css_lexer.TFunction {
if args := *token.Children; css_ast.TokensAreCommaSeparated(args) {
n := len(args)
switch strings.ToLower(token.Text) {
////////////////////////////////////////////////////////////////////////////////
// 2D transforms
case "matrix":
// specifies a 2D transformation in the form of a transformation
// matrix of the six values a, b, c, d, e, f.
if n == 11 {
// | a c 0 e |
// | b d 0 f |
// | 0 0 1 0 |
// | 0 0 0 1 |
a, b, c, d, e, f := args[0], args[2], args[4], args[6], args[8], args[10]
if b.IsZero() && c.IsZero() && e.IsZero() && f.IsZero() {
// | a 0 0 0 |
// | 0 d 0 0 |
// | 0 0 1 0 |
// | 0 0 0 1 |
if a.EqualIgnoringWhitespace(d) {
// "matrix(a, 0, 0, a, 0, 0)" => "scale(a)"
token.Text = "scale"
*token.Children = args[:1]
} else if d.IsOne() {
// "matrix(a, 0, 0, 1, 0, 0)" => "scaleX(a)"
token.Text = "scaleX"
*token.Children = args[:1]
} else if a.IsOne() {
// "matrix(1, 0, 0, d, 0, 0)" => "scaleY(d)"
token.Text = "scaleY"
*token.Children = args[6:7]
} else {
// "matrix(a, 0, 0, d, 0, 0)" => "scale(a, d)"
token.Text = "scale"
*token.Children = append(args[:2], d)
}
// Note: A "matrix" cannot be directly converted into a "translate"
// because "translate" requires units while "matrix" requires no
// units. I'm not sure exactly what the semantics are so I'm not
// sure if you can just add "px" or not. Even if that did work,
// you still couldn't substitute values containing "var()" since
// units would still not be substituted in that case.
}
}
case "translate":
// specifies a 2D translation by the vector [tx, ty], where tx is the
// first translation-value parameter and ty is the optional second
// translation-value parameter. If <ty> is not provided, ty has zero
// as a value.
if n == 1 {
args[0].TurnLengthOrPercentageIntoNumberIfZero()
} else if n == 3 {
tx, ty := &args[0], &args[2]
tx.TurnLengthOrPercentageIntoNumberIfZero()
ty.TurnLengthOrPercentageIntoNumberIfZero()
if ty.IsZero() {
// "translate(tx, 0)" => "translate(tx)"
*token.Children = args[:1]
} else if tx.IsZero() {
// "translate(0, ty)" => "translateY(ty)"
token.Text = "translateY"
*token.Children = args[2:]
}
}
case "translatex":
// specifies a translation by the given amount in the X direction.
if n == 1 {
// "translateX(tx)" => "translate(tx)"
token.Text = "translate"
args[0].TurnLengthOrPercentageIntoNumberIfZero()
}
case "translatey":
// specifies a translation by the given amount in the Y direction.
if n == 1 {
args[0].TurnLengthOrPercentageIntoNumberIfZero()
}
case "scale":
// specifies a 2D scale operation by the [sx,sy] scaling vector
// described by the 2 parameters. If the second parameter is not
// provided, it takes a value equal to the first. For example,
// scale(1, 1) would leave an element unchanged, while scale(2, 2)
// would cause it to appear twice as long in both the X and Y axes,
// or four times its typical geometric size.
if n == 1 {
turnPercentIntoNumberIfShorter(&args[0])
} else if n == 3 {
sx, sy := &args[0], &args[2]
turnPercentIntoNumberIfShorter(sx)
turnPercentIntoNumberIfShorter(sy)
if sx.EqualIgnoringWhitespace(*sy) {
// "scale(s, s)" => "scale(s)"
*token.Children = args[:1]
} else if sy.IsOne() {
// "scale(s, 1)" => "scaleX(s)"
token.Text = "scaleX"
*token.Children = args[:1]
} else if sx.IsOne() {
// "scale(1, s)" => "scaleY(s)"
token.Text = "scaleY"
*token.Children = args[2:]
}
}
case "scalex":
// specifies a 2D scale operation using the [sx,1] scaling vector,
// where sx is given as the parameter.
if n == 1 {
turnPercentIntoNumberIfShorter(&args[0])
}
case "scaley":
// specifies a 2D scale operation using the [1,sy] scaling vector,
// where sy is given as the parameter.
if n == 1 {
turnPercentIntoNumberIfShorter(&args[0])
}
case "rotate":
// specifies a 2D rotation by the angle specified in the parameter
// about the origin of the element, as defined by the
// transform-origin property. For example, rotate(90deg) would
// cause elements to appear rotated one-quarter of a turn in the
// clockwise direction.
if n == 1 {
args[0].TurnLengthIntoNumberIfZero()
}
// Note: This is considered a 2D transform even though it's specified
// in terms of a 3D transform because it doesn't trigger Safari's 3D
// transform bugs.
case "rotatez":
// same as rotate3d(0, 0, 1, <angle>), which is a 3d transform
// equivalent to the 2d transform rotate(<angle>).
if n == 1 {
// "rotateZ(angle)" => "rotate(angle)"
token.Text = "rotate"
args[0].TurnLengthIntoNumberIfZero()
}
case "skew":
// specifies a 2D skew by [ax,ay] for X and Y. If the second
// parameter is not provided, it has a zero value.
if n == 1 {
args[0].TurnLengthIntoNumberIfZero()
} else if n == 3 {
ax, ay := &args[0], &args[2]
ax.TurnLengthIntoNumberIfZero()
ay.TurnLengthIntoNumberIfZero()
if ay.IsZero() {
// "skew(ax, 0)" => "skew(ax)"
*token.Children = args[:1]
}
}
case "skewx":
// specifies a 2D skew transformation along the X axis by the given
// angle.
if n == 1 {
// "skewX(ax)" => "skew(ax)"
token.Text = "skew"
args[0].TurnLengthIntoNumberIfZero()
}
case "skewy":
// specifies a 2D skew transformation along the Y axis by the given
// angle.
if n == 1 {
args[0].TurnLengthIntoNumberIfZero()
}
////////////////////////////////////////////////////////////////////////////////
// 3D transforms
// Note: Safari has a bug where 3D transforms render differently than
// other transforms. This means we should not minify a 3D transform
// into a 2D transform or it will cause a rendering difference in
// Safari.
case "matrix3d":
// specifies a 3D transformation as a 4x4 homogeneous matrix of 16
// values in column-major order.
if n == 31 {
// | m0 m4 m8 m12 |
// | m1 m5 m9 m13 |
// | m2 m6 m10 m14 |
// | m3 m7 m11 m15 |
mask := uint32(0)
for i := 0; i < 16; i++ {
if arg := args[i*2]; arg.IsZero() {
mask |= 1 << i
} else if arg.IsOne() {
mask |= (1 << 16) << i
}
}
const onlyScale = 0b1000_0000_0000_0000_0111_1011_1101_1110
if (mask & onlyScale) == onlyScale {
// | m0 0 0 0 |
// | 0 m5 0 0 |
// | 0 0 m10 0 |
// | 0 0 0 1 |
sx, sy := args[0], args[10]
if sx.IsOne() && sy.IsOne() {
token.Text = "scaleZ"
*token.Children = args[20:21]
} else {
token.Text = "scale3d"
*token.Children = append(append(args[0:2], args[10:12]...), args[20])
}
}
// Note: A "matrix3d" cannot be directly converted into a "translate3d"
// because "translate3d" requires units while "matrix3d" requires no
// units. I'm not sure exactly what the semantics are so I'm not
// sure if you can just add "px" or not. Even if that did work,
// you still couldn't substitute values containing "var()" since
// units would still not be substituted in that case.
}
case "translate3d":
// specifies a 3D translation by the vector [tx,ty,tz], with tx,
// ty and tz being the first, second and third translation-value
// parameters respectively.
if n == 5 {
tx, ty, tz := &args[0], &args[2], &args[4]
tx.TurnLengthOrPercentageIntoNumberIfZero()
ty.TurnLengthOrPercentageIntoNumberIfZero()
tz.TurnLengthIntoNumberIfZero()
if tx.IsZero() && ty.IsZero() {
// "translate3d(0, 0, tz)" => "translateZ(tz)"
token.Text = "translateZ"
*token.Children = args[4:]
}
}
case "translatez":
// specifies a 3D translation by the vector [0,0,tz] with the given
// amount in the Z direction.
if n == 1 {
args[0].TurnLengthIntoNumberIfZero()
}
case "scale3d":
// specifies a 3D scale operation by the [sx,sy,sz] scaling vector
// described by the 3 parameters.
if n == 5 {
sx, sy, sz := &args[0], &args[2], &args[4]
turnPercentIntoNumberIfShorter(sx)
turnPercentIntoNumberIfShorter(sy)
turnPercentIntoNumberIfShorter(sz)
if sx.IsOne() && sy.IsOne() {
// "scale3d(1, 1, sz)" => "scaleZ(sz)"
token.Text = "scaleZ"
*token.Children = args[4:]
}
}
case "scalez":
// specifies a 3D scale operation using the [1,1,sz] scaling vector,
// where sz is given as the parameter.
if n == 1 {
turnPercentIntoNumberIfShorter(&args[0])
}
case "rotate3d":
// specifies a 3D rotation by the angle specified in last parameter
// about the [x,y,z] direction vector described by the first three
// parameters. A direction vector that cannot be normalized, such as
// [0,0,0], will cause the rotation to not be applied.
if n == 7 {
x, y, z, angle := &args[0], &args[2], &args[4], &args[6]
angle.TurnLengthIntoNumberIfZero()
if x.IsOne() && y.IsZero() && z.IsZero() {
// "rotate3d(1, 0, 0, angle)" => "rotateX(angle)"
token.Text = "rotateX"
*token.Children = args[6:]
} else if x.IsZero() && y.IsOne() && z.IsZero() {
// "rotate3d(0, 1, 0, angle)" => "rotateY(angle)"
token.Text = "rotateY"
*token.Children = args[6:]
}
}
case "rotatex":
// same as rotate3d(1, 0, 0, <angle>).
if n == 1 {
args[0].TurnLengthIntoNumberIfZero()
}
case "rotatey":
// same as rotate3d(0, 1, 0, <angle>).
if n == 1 {
args[0].TurnLengthIntoNumberIfZero()
}
case "perspective":
// specifies a perspective projection matrix. This matrix scales
// points in X and Y based on their Z value, scaling points with
// positive Z values away from the origin, and those with negative Z
// values towards the origin. Points on the z=0 plane are unchanged.
// The parameter represents the distance of the z=0 plane from the
// viewer.
if n == 1 {
args[0].TurnLengthIntoNumberIfZero()
}
}
// Trim whitespace at the ends
if args := *token.Children; len(args) > 0 {
args[0].Whitespace &= ^css_ast.WhitespaceBefore
args[len(args)-1].Whitespace &= ^css_ast.WhitespaceAfter
}
}
}
}
return tokens
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/css_parser/css_decls_list_style.go | internal/css_parser/css_decls_list_style.go | package css_parser
import (
"strings"
"github.com/evanw/esbuild/internal/css_ast"
"github.com/evanw/esbuild/internal/css_lexer"
)
// list-style-image: <image> | none
// <image>: <url> | <gradient>
// <url>: <url()> | <src()>
// <gradient>: <linear-gradient()> | <repeating-linear-gradient()> | <radial-gradient()> | <repeating-radial-gradient()>
//
// list-style-type: <counter-style> | <string> | none (where the string is a literal bullet marker)
// <counter-style>: <counter-style-name> | <symbols()>
// <counter-style-name>: not: decimal | disc | square | circle | disclosure-open | disclosure-closed | <wide keyword>
// when parsing a <custom-ident> with conflicts, only parse one if no other thing can claim it
// processListStyleShorthand scans the value of a "list-style" shorthand and,
// when one token can be unambiguously identified as the <counter-style>
// <custom-ident>, converts that identifier into a renamable CSS modules
// symbol (TSymbol). It returns without changing anything whenever the value
// could be interpreted some other way (see the grammar notes above).
func (p *parser) processListStyleShorthand(tokens []css_ast.Token) {
	// The shorthand takes between one and three space-separated values
	if len(tokens) < 1 || len(tokens) > 3 {
		return
	}
	foundImage := false
	foundPosition := false
	typeIndex := -1
	noneCount := 0
	for i, t := range tokens {
		switch t.Kind {
		case css_lexer.TString:
			// "list-style-type" is definitely not a <custom-ident>
			return
		case css_lexer.TURL:
			if !foundImage {
				foundImage = true
				continue
			}
		case css_lexer.TFunction:
			if !foundImage {
				switch strings.ToLower(t.Text) {
				// Fix: this previously listed "radial-linear-gradient", which is
				// not a CSS function. The grammar notes above list
				// "repeating-radial-gradient" as the fourth gradient form.
				case "src", "linear-gradient", "repeating-linear-gradient", "radial-gradient", "repeating-radial-gradient":
					foundImage = true
					continue
				}
			}
		case css_lexer.TIdent:
			lower := strings.ToLower(t.Text)
			// Note: If "none" is present, it's ambiguous whether it applies to
			// "list-style-image" or "list-style-type". To resolve ambiguity it's
			// applied at the end to whichever property isn't otherwise set.
			if lower == "none" {
				noneCount++
				continue
			}
			if !foundPosition && (lower == "inside" || lower == "outside") {
				foundPosition = true
				continue
			}
			if typeIndex == -1 {
				if cssWideAndReservedKeywords[lower] || predefinedCounterStyles[lower] {
					// "list-style-type" is definitely not a <custom-ident>
					return
				}
				typeIndex = i
				continue
			}
		}
		// Bail if we hit an unexpected token
		return
	}
	if typeIndex != -1 {
		// The first "none" applies to "list-style-image" if it's missing
		if !foundImage && noneCount > 0 {
			noneCount--
		}
		if noneCount > 0 {
			// "list-style-type" is "none", not a <custom-ident>
			return
		}
		if t := &tokens[typeIndex]; t.Kind == css_lexer.TIdent {
			t.Kind = css_lexer.TSymbol
			t.PayloadIndex = p.symbolForName(t.Loc, t.Text).Ref.InnerIndex
		}
	}
}
// processListStyleType converts a "list-style-type" identifier into a
// renamable CSS modules symbol (TSymbol) unless it's "none", a CSS-wide or
// reserved keyword, or one of the predefined counter style names, all of
// which have fixed meanings and must be left untouched.
func (p *parser) processListStyleType(t *css_ast.Token) {
	if t.Kind != css_lexer.TIdent {
		return
	}
	lower := strings.ToLower(t.Text)
	if lower == "none" || cssWideAndReservedKeywords[lower] || predefinedCounterStyles[lower] {
		return
	}
	t.Kind = css_lexer.TSymbol
	t.PayloadIndex = p.symbolForName(t.Loc, t.Text).Ref.InnerIndex
}
// https://drafts.csswg.org/css-counter-styles-3/#predefined-counters
//
// Identifiers in this set name counter styles that are predefined by the
// specification. The functions above check identifiers against this set so
// that these names are never treated as renamable CSS modules symbols.
var predefinedCounterStyles = map[string]bool{
	// 6.1. Numeric:
	"arabic-indic": true,
	"armenian": true,
	"bengali": true,
	"cambodian": true,
	"cjk-decimal": true,
	"decimal-leading-zero": true,
	"decimal": true,
	"devanagari": true,
	"georgian": true,
	"gujarati": true,
	"gurmukhi": true,
	"hebrew": true,
	"kannada": true,
	"khmer": true,
	"lao": true,
	"lower-armenian": true,
	"lower-roman": true,
	"malayalam": true,
	"mongolian": true,
	"myanmar": true,
	"oriya": true,
	"persian": true,
	"tamil": true,
	"telugu": true,
	"thai": true,
	"tibetan": true,
	"upper-armenian": true,
	"upper-roman": true,
	// 6.2. Alphabetic:
	"hiragana-iroha": true,
	"hiragana": true,
	"katakana-iroha": true,
	"katakana": true,
	"lower-alpha": true,
	"lower-greek": true,
	"lower-latin": true,
	"upper-alpha": true,
	"upper-latin": true,
	// 6.3. Symbolic:
	"circle": true,
	"disc": true,
	"disclosure-closed": true,
	"disclosure-open": true,
	"square": true,
	// 6.4. Fixed:
	"cjk-earthly-branch": true,
	"cjk-heavenly-stem": true,
	// 7.1.1. Japanese:
	"japanese-formal": true,
	"japanese-informal": true,
	// 7.1.2. Korean:
	"korean-hangul-formal": true,
	"korean-hanja-formal": true,
	"korean-hanja-informal": true,
	// 7.1.3. Chinese:
	"simp-chinese-formal": true,
	"simp-chinese-informal": true,
	"trad-chinese-formal": true,
	"trad-chinese-informal": true,
	// 7.2. Ethiopic Numeric Counter Style:
	"ethiopic-numeric": true,
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/css_parser/css_decls_border_radius.go | internal/css_parser/css_decls_border_radius.go | package css_parser
import (
"github.com/evanw/esbuild/internal/css_ast"
"github.com/evanw/esbuild/internal/css_lexer"
"github.com/evanw/esbuild/internal/logger"
)
// Indices into "borderRadiusTracker.corners" below. The order matches the
// order in which "expandTokenQuad" produces the four corner values.
const (
	borderRadiusTopLeft = iota
	borderRadiusTopRight
	borderRadiusBottomRight
	borderRadiusBottomLeft
)
// borderRadiusCorner records the most recently seen radius value for a single
// corner of "border-radius", plus enough information to locate and possibly
// delete the declaration that value came from
type borderRadiusCorner struct {
	firstToken css_ast.Token // The radius token before the "/" separator
	secondToken css_ast.Token // The radius token after the "/" separator; equal to "firstToken" when there was no "/"
	unitSafety unitSafetyTracker // Tracks whether the units used are safe to minify
	ruleIndex uint32 // The index of the originating rule in the rules array
	wasSingleRule bool // True if the originating rule was just for this side
}
// borderRadiusTracker accumulates "border-radius" information across multiple
// declarations in the same rule so they can be collapsed into one shorthand
// declaration by "compactRules"
type borderRadiusTracker struct {
	corners [4]borderRadiusCorner // Indexed by the "borderRadius*" constants above
	important bool // True if all active rules were flagged as "!important"
}
// updateCorner records "updated" as the latest value for the given corner.
// When the corner already has a known value whose originating declaration is
// now redundant (both old and new units are browser-safe, and a multi-corner
// rule's contribution isn't being replaced by a single-corner rule), the old
// declaration is deleted from the rules array since the new value wins.
//
// Note: The parameter was renamed from "new", which shadowed Go's
// predeclared "new" builtin.
func (borderRadius *borderRadiusTracker) updateCorner(rules []css_ast.Rule, corner int, updated borderRadiusCorner) {
	if old := borderRadius.corners[corner]; old.firstToken.Kind != css_lexer.TEndOfFile &&
		(!updated.wasSingleRule || old.wasSingleRule) &&
		old.unitSafety.status == unitSafe && updated.unitSafety.status == unitSafe {
		rules[old.ruleIndex] = css_ast.Rule{}
	}
	borderRadius.corners[corner] = updated
}
// mangleCorners processes a "border-radius" shorthand declaration. The value
// may contain a single "/" separating the first radii from the second radii.
// Each corner's radii are recorded in the tracker and "compactRules" is then
// called to try collapsing all tracked declarations into one shorthand. Any
// unexpected syntax resets the tracker instead.
func (borderRadius *borderRadiusTracker) mangleCorners(rules []css_ast.Rule, decl *css_ast.RDeclaration, minifyWhitespace bool) {
	// Reset if we see a change in the "!important" flag
	if borderRadius.important != decl.Important {
		borderRadius.corners = [4]borderRadiusCorner{}
		borderRadius.important = decl.Important
	}
	tokens := decl.Value
	beforeSplit := len(tokens)
	afterSplit := len(tokens)
	// Search for the single slash if present
	for i, t := range tokens {
		if t.Kind == css_lexer.TDelimSlash {
			if beforeSplit == len(tokens) {
				beforeSplit = i
				afterSplit = i + 1
			} else {
				// Multiple slashes are an error
				borderRadius.corners = [4]borderRadiusCorner{}
				return
			}
		}
	}
	// Use a single tracker for the whole rule
	unitSafety := unitSafetyTracker{}
	for _, t := range tokens[:beforeSplit] {
		unitSafety.includeUnitOf(t)
	}
	for _, t := range tokens[afterSplit:] {
		unitSafety.includeUnitOf(t)
	}
	// Expand each radii group into one value per corner
	firstRadii, firstRadiiOk := expandTokenQuad(tokens[:beforeSplit], "")
	lastRadii, lastRadiiOk := expandTokenQuad(tokens[afterSplit:], "")
	// Stop now if the pattern wasn't matched
	if !firstRadiiOk || (beforeSplit < afterSplit && !lastRadiiOk) {
		borderRadius.corners = [4]borderRadiusCorner{}
		return
	}
	// Handle the first radii
	for corner, t := range firstRadii {
		// Only collapse "0unit" into "0" when the unit is browser-safe
		if unitSafety.status == unitSafe {
			t.TurnLengthIntoNumberIfZero()
		}
		borderRadius.updateCorner(rules, corner, borderRadiusCorner{
			firstToken: t,
			secondToken: t,
			unitSafety: unitSafety,
			ruleIndex: uint32(len(rules) - 1),
		})
	}
	// Handle the last radii
	if lastRadiiOk {
		for corner, t := range lastRadii {
			if unitSafety.status == unitSafe {
				t.TurnLengthIntoNumberIfZero()
			}
			borderRadius.corners[corner].secondToken = t
		}
	}
	// Success
	borderRadius.compactRules(rules, decl.KeyRange, minifyWhitespace)
}
// mangleCorner processes a longhand declaration for a single corner (indexed
// by one of the "borderRadius*" constants). The value must be one or two
// numeric tokens, otherwise the tracker is reset. The tokens are also
// minified in place ("0unit" => "0" when the unit is safe, and a repeated
// second value is dropped) before "compactRules" tries to merge everything
// tracked so far into one "border-radius" shorthand.
func (borderRadius *borderRadiusTracker) mangleCorner(rules []css_ast.Rule, decl *css_ast.RDeclaration, minifyWhitespace bool, corner int) {
	// Reset if we see a change in the "!important" flag
	if borderRadius.important != decl.Important {
		borderRadius.corners = [4]borderRadiusCorner{}
		borderRadius.important = decl.Important
	}
	if tokens := decl.Value; (len(tokens) == 1 && tokens[0].Kind.IsNumeric()) ||
		(len(tokens) == 2 && tokens[0].Kind.IsNumeric() && tokens[1].Kind.IsNumeric()) {
		firstToken := tokens[0]
		secondToken := firstToken
		if len(tokens) == 2 {
			secondToken = tokens[1]
		}
		// Check to see if these units are safe to use in every browser
		unitSafety := unitSafetyTracker{}
		unitSafety.includeUnitOf(firstToken)
		unitSafety.includeUnitOf(secondToken)
		// Only collapse "0unit" into "0" if the unit is safe
		if unitSafety.status == unitSafe && firstToken.TurnLengthIntoNumberIfZero() {
			tokens[0] = firstToken
		}
		if len(tokens) == 2 {
			if unitSafety.status == unitSafe && secondToken.TurnLengthIntoNumberIfZero() {
				tokens[1] = secondToken
			}
			// If both tokens are equal, merge them into one
			if firstToken.EqualIgnoringWhitespace(secondToken) {
				tokens[0].Whitespace &= ^css_ast.WhitespaceAfter
				decl.Value = tokens[:1]
			}
		}
		borderRadius.updateCorner(rules, corner, borderRadiusCorner{
			firstToken: firstToken,
			secondToken: secondToken,
			unitSafety: unitSafety,
			ruleIndex: uint32(len(rules) - 1),
			wasSingleRule: true,
		})
		borderRadius.compactRules(rules, decl.KeyRange, minifyWhitespace)
	} else {
		// Unexpected syntax: give up on merging this rule into a shorthand
		borderRadius.corners = [4]borderRadiusCorner{}
	}
}
// compactRules attempts to replace the tracked corner declarations with one
// combined "border-radius" declaration. This only proceeds when all four
// corners have known values with mutually-compatible units. The combined
// declaration takes the earliest contributing rule's location and is written
// into the bottom-left corner's rule slot; the other contributing rule slots
// are cleared.
func (borderRadius *borderRadiusTracker) compactRules(rules []css_ast.Rule, keyRange logger.Range, minifyWhitespace bool) {
	// All tokens must be present
	if eof := css_lexer.TEndOfFile; borderRadius.corners[0].firstToken.Kind == eof || borderRadius.corners[1].firstToken.Kind == eof ||
		borderRadius.corners[2].firstToken.Kind == eof || borderRadius.corners[3].firstToken.Kind == eof {
		return
	}
	// All tokens must have the same unit
	for _, side := range borderRadius.corners[1:] {
		if !side.unitSafety.isSafeWith(borderRadius.corners[0].unitSafety) {
			return
		}
	}
	// Generate the most minimal representation
	tokens := compactTokenQuad(
		borderRadius.corners[0].firstToken,
		borderRadius.corners[1].firstToken,
		borderRadius.corners[2].firstToken,
		borderRadius.corners[3].firstToken,
		minifyWhitespace,
	)
	secondTokens := compactTokenQuad(
		borderRadius.corners[0].secondToken,
		borderRadius.corners[1].secondToken,
		borderRadius.corners[2].secondToken,
		borderRadius.corners[3].secondToken,
		minifyWhitespace,
	)
	// Only emit the "/" and the second radii when they differ from the first
	if !css_ast.TokensEqualIgnoringWhitespace(tokens, secondTokens) {
		var whitespace css_ast.WhitespaceFlags
		if !minifyWhitespace {
			whitespace = css_ast.WhitespaceBefore | css_ast.WhitespaceAfter
		}
		tokens = append(tokens, css_ast.Token{
			Loc: tokens[len(tokens)-1].Loc,
			Kind: css_lexer.TDelimSlash,
			Text: "/",
			Whitespace: whitespace,
		})
		tokens = append(tokens, secondTokens...)
	}
	// Remove all of the existing declarations
	var minLoc logger.Loc
	for i, corner := range borderRadius.corners {
		if loc := rules[corner.ruleIndex].Loc; i == 0 || loc.Start < minLoc.Start {
			minLoc = loc
		}
		rules[corner.ruleIndex] = css_ast.Rule{}
	}
	// Insert the combined declaration where the last rule was
	rules[borderRadius.corners[3].ruleIndex] = css_ast.Rule{Loc: minLoc, Data: &css_ast.RDeclaration{
		Key: css_ast.DBorderRadius,
		KeyText: "border-radius",
		Value: tokens,
		KeyRange: keyRange,
		Important: borderRadius.important,
	}}
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/css_parser/css_parser_selector.go | internal/css_parser/css_parser_selector.go | package css_parser
import (
"fmt"
"strings"
"github.com/evanw/esbuild/internal/css_ast"
"github.com/evanw/esbuild/internal/css_lexer"
"github.com/evanw/esbuild/internal/logger"
)
// Options controlling how "parseSelectorList" and its helpers behave
type parseSelectorOpts struct {
	composesContext *composesContext // Context for CSS modules "composes" handling — semantics not visible in this file; see the declaration site
	pseudoClassKind css_ast.PseudoClassKind // The enclosing pseudo-class, used in the error message for "onlyOneComplexSelector"
	isDeclarationContext bool // True when the selector list appears inside a declaration block (affects leading "&" removal during minification)
	stopOnCloseParen bool // True to stop parsing at ")" instead of "{" (used for pseudo-class arguments)
	onlyOneComplexSelector bool // True to report an error if the list contains a comma
	isForgivingSelectorList bool // True to allow an empty selector list (e.g. for ":is()" and ":where()")
	noLeadingCombinator bool // True to suppress parsing of a leading combinator (a CSS nesting extension)
}
// parseSelectorList parses a comma-separated list of complex selectors.
// It returns "ok == false" when parsing failed (an error has already been
// logged in that case). When minifying, duplicate selectors are dropped and
// redundant leading "&" nesting selectors are removed.
func (p *parser) parseSelectorList(opts parseSelectorOpts) (list []css_ast.ComplexSelector, ok bool) {
	// Potentially parse an empty list for ":is()" and ":where()"
	if opts.isForgivingSelectorList && opts.stopOnCloseParen && p.peek(css_lexer.TCloseParen) {
		ok = true
		return
	}
	// Parse the first selector
	sel, good := p.parseComplexSelector(parseComplexSelectorOpts{
		parseSelectorOpts: opts,
		isFirst: true,
	})
	if !good {
		return
	}
	list = p.flattenLocalAndGlobalSelectors(list, sel)
	// Parse the remaining selectors
	if opts.onlyOneComplexSelector {
		// A comma is not allowed here, so a trailing comma is an error
		if t := p.current(); t.Kind == css_lexer.TComma {
			p.prevError = t.Range.Loc
			kind := fmt.Sprintf(":%s(...)", opts.pseudoClassKind.String())
			p.log.AddIDWithNotes(logger.MsgID_CSS_CSSSyntaxError, logger.Warning, &p.tracker, t.Range,
				fmt.Sprintf("Unexpected \",\" inside %q", kind),
				[]logger.MsgData{{Text: fmt.Sprintf("Different CSS tools behave differently in this case, so esbuild doesn't allow it. "+
					"Either remove this comma or split this selector up into multiple comma-separated %q selectors instead.", kind)}})
			return
		}
	} else {
	skip:
		for {
			p.eat(css_lexer.TWhitespace)
			if !p.eat(css_lexer.TComma) {
				break
			}
			p.eat(css_lexer.TWhitespace)
			sel, good := p.parseComplexSelector(parseComplexSelectorOpts{
				parseSelectorOpts: opts,
			})
			if !good {
				return
			}
			// Omit duplicate selectors
			if p.options.minifySyntax {
				for _, existing := range list {
					if sel.Equal(existing, nil) {
						continue skip
					}
				}
			}
			list = p.flattenLocalAndGlobalSelectors(list, sel)
		}
	}
	// Remove the leading ampersand when minifying and it can be implied:
	//
	//   "a { & b {} }" => "a { b {} }"
	//
	// It can't be implied if it's not at the beginning, if there are multiple of
	// them, or if the selector list is inside of a pseudo-class selector:
	//
	//   "a { b & {} }"
	//   "a { & b & {} }"
	//   "a { :has(& b) {} }"
	//
	if p.options.minifySyntax && !opts.stopOnCloseParen {
		for i := 1; i < len(list); i++ {
			if analyzeLeadingAmpersand(list[i], opts.isDeclarationContext) != cannotRemoveLeadingAmpersand {
				list[i].Selectors = list[i].Selectors[1:]
			}
		}
		switch analyzeLeadingAmpersand(list[0], opts.isDeclarationContext) {
		case canAlwaysRemoveLeadingAmpersand:
			list[0].Selectors = list[0].Selectors[1:]
		case canRemoveLeadingAmpersandIfNotFirst:
			// Swap the first selector with a later one that doesn't need to be
			// first, then drop the "&" from the now-not-first selector
			for i := 1; i < len(list); i++ {
				if sel := list[i].Selectors[0]; len(sel.NestingSelectorLocs) == 0 && (sel.Combinator.Byte != 0 || sel.TypeSelector == nil) {
					list[0].Selectors = list[0].Selectors[1:]
					list[0], list[i] = list[i], list[0]
					break
				}
			}
		}
	}
	ok = true
	return
}
// mergeCompoundSelectors folds "source" into "target". This is used when
// flattening ":local(...)" and ":global(...)" annotations: the compound
// selector inside the annotation is merged into the compound selector the
// annotation was attached to (e.g. ".foo:local(.bar)" becomes ".foo.bar").
func mergeCompoundSelectors(target *css_ast.CompoundSelector, source css_ast.CompoundSelector) {
	// ".foo:local(&)" => "&.foo"
	if len(source.NestingSelectorLocs) > 0 && len(target.NestingSelectorLocs) == 0 {
		target.NestingSelectorLocs = source.NestingSelectorLocs
	}
	if source.TypeSelector != nil {
		if target.TypeSelector == nil {
			// ".foo:local(div)" => "div.foo"
			target.TypeSelector = source.TypeSelector
		} else {
			// "div:local(span)" => "div:is(span)"
			//
			// Note: All other implementations of this (Lightning CSS, PostCSS, and
			// Webpack) do something really weird here. They do this instead:
			//
			//   "div:local(span)" => "divspan"
			//
			// But that just seems so obviously wrong that I'm not going to do that.
			target.SubclassSelectors = append(target.SubclassSelectors, css_ast.SubclassSelector{
				Range: source.TypeSelector.Range(),
				Data: &css_ast.SSPseudoClassWithSelectorList{
					Kind: css_ast.PseudoClassIs,
					Selectors: []css_ast.ComplexSelector{{Selectors: []css_ast.CompoundSelector{{TypeSelector: source.TypeSelector}}}},
				},
			})
		}
	}
	// ".foo:local(.bar)" => ".foo.bar"
	target.SubclassSelectors = append(target.SubclassSelectors, source.SubclassSelectors...)
}
// Reports whether any compound selector within the given complex selector
// carries a CSS modules ":local" or ":global" annotation, either as a bare
// pseudo-class or as a pseudo-class that takes a selector list argument
func containsLocalOrGlobalSelector(sel css_ast.ComplexSelector) bool {
	for _, compound := range sel.Selectors {
		for _, subclass := range compound.SubclassSelectors {
			switch data := subclass.Data.(type) {
			case *css_ast.SSPseudoClass:
				if data.Name == "local" || data.Name == "global" {
					return true
				}
			case *css_ast.SSPseudoClassWithSelectorList:
				if data.Kind == css_ast.PseudoClassLocal || data.Kind == css_ast.PseudoClassGlobal {
					return true
				}
			}
		}
	}
	return false
}
// This handles the ":local()" and ":global()" annotations from CSS modules
func (p *parser) flattenLocalAndGlobalSelectors(list []css_ast.ComplexSelector, sel css_ast.ComplexSelector) []css_ast.ComplexSelector {
// Only do the work to flatten the whole list if there's a ":local" or a ":global"
if p.options.symbolMode != symbolModeDisabled && containsLocalOrGlobalSelector(sel) {
var selectors []css_ast.CompoundSelector
for _, s := range sel.Selectors {
oldSubclassSelectors := s.SubclassSelectors
s.SubclassSelectors = make([]css_ast.SubclassSelector, 0, len(oldSubclassSelectors))
for _, ss := range oldSubclassSelectors {
switch pseudo := ss.Data.(type) {
case *css_ast.SSPseudoClass:
if pseudo.Name == "global" || pseudo.Name == "local" {
// Remove bare ":global" and ":local" pseudo-classes
continue
}
case *css_ast.SSPseudoClassWithSelectorList:
if pseudo.Kind == css_ast.PseudoClassGlobal || pseudo.Kind == css_ast.PseudoClassLocal {
inner := pseudo.Selectors[0].Selectors
// Replace this pseudo-class with all inner compound selectors.
// The first inner compound selector is merged with the compound
// selector before it and the last inner compound selector is
// merged with the compound selector after it:
//
// "div:local(.a .b):hover" => "div.a b:hover"
//
// This behavior is really strange since this is not how anything
// involving pseudo-classes in real CSS works at all. However, all
// other implementations (Lightning CSS, PostCSS, and Webpack) are
// consistent with this strange behavior, so we do it too.
if inner[0].Combinator.Byte == 0 {
mergeCompoundSelectors(&s, inner[0])
inner = inner[1:]
} else {
// "div:local(+ .foo):hover" => "div + .foo:hover"
}
if n := len(inner); n > 0 {
if !s.IsInvalidBecauseEmpty() {
// Don't add this selector if it consisted only of a bare ":global" or ":local"
selectors = append(selectors, s)
}
selectors = append(selectors, inner[:n-1]...)
s = inner[n-1]
}
continue
}
}
s.SubclassSelectors = append(s.SubclassSelectors, ss)
}
if !s.IsInvalidBecauseEmpty() {
// Don't add this selector if it consisted only of a bare ":global" or ":local"
selectors = append(selectors, s)
}
}
if len(selectors) == 0 {
// Treat a bare ":global" or ":local" as a bare "&" nesting selector
selectors = append(selectors, css_ast.CompoundSelector{
NestingSelectorLocs: []logger.Loc{sel.Selectors[0].Range().Loc},
WasEmptyFromLocalOrGlobal: true,
})
// Make sure we report that nesting is present so that it can be lowered
p.nestingIsPresent = true
}
sel.Selectors = selectors
}
return append(list, sel)
}
// leadingAmpersand classifies whether a complex selector's leading "&"
// nesting selector can be removed during minification (the classification is
// computed by "analyzeLeadingAmpersand" below)
type leadingAmpersand uint8

const (
	cannotRemoveLeadingAmpersand leadingAmpersand = iota
	canAlwaysRemoveLeadingAmpersand
	canRemoveLeadingAmpersandIfNotFirst
)
// analyzeLeadingAmpersand classifies whether the leading "&" of the given
// complex selector can be dropped during minification without changing what
// the selector matches. For example:
//
//   "& + div {}" => "+ div {}"
//   "& div {}"   => "div {}"
//
// A lone "&" ("& {}") and a "&" followed by another "&" compound selector
// must always be kept. A "&" followed by a bare type selector in declaration
// context may only be dropped when the selector isn't first in its list.
func analyzeLeadingAmpersand(sel css_ast.ComplexSelector, isDeclarationContext bool) leadingAmpersand {
	// A selector that is just "&" by itself keeps its ampersand
	if len(sel.Selectors) < 2 || !sel.Selectors[0].IsSingleAmpersand() {
		return cannotRemoveLeadingAmpersand
	}
	second := sel.Selectors[1]
	switch {
	case second.Combinator.Byte == 0 && len(second.NestingSelectorLocs) > 0:
		// ".foo { & &.bar {} }" => ".foo { & &.bar {} }"
		return cannotRemoveLeadingAmpersand
	case second.Combinator.Byte != 0 || second.TypeSelector == nil || !isDeclarationContext:
		// "& + div {}" => "+ div {}"
		// "& div {}" => "div {}"
		// ".foo { & + div {} }" => ".foo { + div {} }"
		// ".foo { & + &.bar {} }" => ".foo { + &.bar {} }"
		// ".foo { & :hover {} }" => ".foo { :hover {} }"
		return canAlwaysRemoveLeadingAmpersand
	default:
		// ".foo { & div {} }"
		// ".foo { .bar, & div {} }" => ".foo { .bar, div {} }"
		return canRemoveLeadingAmpersandIfNotFirst
	}
}
// Options for "parseComplexSelector" and "parseCompoundSelector"
type parseComplexSelectorOpts struct {
	parseSelectorOpts
	isFirst bool // True when parsing the first complex selector in a selector list
}
// parseComplexSelector parses one complex selector: a sequence of compound
// selectors joined by combinators. Parsing stops (without consuming the
// token) at end-of-file, a comma, or "{" (or ")" when "stopOnCloseParen" is
// set). It returns "ok == false" when a compound selector failed to parse.
func (p *parser) parseComplexSelector(opts parseComplexSelectorOpts) (result css_ast.ComplexSelector, ok bool) {
	// This is an extension: https://drafts.csswg.org/css-nesting-1/
	var combinator css_ast.Combinator
	if !opts.noLeadingCombinator {
		combinator = p.parseCombinator()
		if combinator.Byte != 0 {
			p.nestingIsPresent = true
			p.eat(css_lexer.TWhitespace)
		}
	}
	// Parent
	sel, good := p.parseCompoundSelector(parseComplexSelectorOpts{
		parseSelectorOpts: opts.parseSelectorOpts,
		isFirst: opts.isFirst,
	})
	if !good {
		return
	}
	sel.Combinator = combinator
	result.Selectors = append(result.Selectors, sel)
	stop := css_lexer.TOpenBrace
	if opts.stopOnCloseParen {
		stop = css_lexer.TCloseParen
	}
	for {
		p.eat(css_lexer.TWhitespace)
		if p.peek(css_lexer.TEndOfFile) || p.peek(css_lexer.TComma) || p.peek(stop) {
			break
		}
		// Optional combinator
		combinator := p.parseCombinator()
		if combinator.Byte != 0 {
			p.eat(css_lexer.TWhitespace)
		}
		// Child
		sel, good := p.parseCompoundSelector(parseComplexSelectorOpts{
			parseSelectorOpts: opts.parseSelectorOpts,
		})
		if !good {
			return
		}
		sel.Combinator = combinator
		result.Selectors = append(result.Selectors, sel)
	}
	ok = true
	return
}
// Captures the current token as a "NameToken" (its kind, source range, and
// decoded text) without advancing the parser
func (p *parser) nameToken() css_ast.NameToken {
	token := p.current()
	return css_ast.NameToken{
		Kind: token.Kind,
		Range: token.Range,
		Text: p.decoded(),
	}
}
// parseCompoundSelector parses one compound selector: an optional leading "&"
// nesting selector, an optional (possibly namespaced) type selector, and any
// number of subclass selectors ("#id", ".class", "[attr]", pseudo-classes
// and pseudo-elements, or additional "&" tokens). It logs an error and
// returns "ok == false" for invalid syntax, including the banned "&div"
// form from CSS nesting.
func (p *parser) parseCompoundSelector(opts parseComplexSelectorOpts) (sel css_ast.CompoundSelector, ok bool) {
	startLoc := p.current().Range.Loc
	// This is an extension: https://drafts.csswg.org/css-nesting-1/
	hasLeadingNestingSelector := p.peek(css_lexer.TDelimAmpersand)
	if hasLeadingNestingSelector {
		p.nestingIsPresent = true
		sel.NestingSelectorLocs = append(sel.NestingSelectorLocs, startLoc)
		p.advance()
	}
	// Parse the type selector
	typeSelectorLoc := p.current().Range.Loc
	switch p.current().Kind {
	case css_lexer.TDelimBar, css_lexer.TIdent, css_lexer.TDelimAsterisk:
		nsName := css_ast.NamespacedName{}
		if !p.peek(css_lexer.TDelimBar) {
			nsName.Name = p.nameToken()
			p.advance()
		} else {
			// Hack: Create an empty "identifier" to represent this
			nsName.Name.Kind = css_lexer.TIdent
		}
		if p.eat(css_lexer.TDelimBar) {
			if !p.peek(css_lexer.TIdent) && !p.peek(css_lexer.TDelimAsterisk) {
				p.expect(css_lexer.TIdent)
				return
			}
			prefix := nsName.Name
			nsName.NamespacePrefix = &prefix
			nsName.Name = p.nameToken()
			p.advance()
		}
		sel.TypeSelector = &nsName
	}
	// Parse the subclass selectors
subclassSelectors:
	for {
		subclassToken := p.current()
		switch subclassToken.Kind {
		case css_lexer.THash:
			// Only a hash token flagged as an ID forms a valid "#id" selector
			if (subclassToken.Flags & css_lexer.IsID) == 0 {
				break subclassSelectors
			}
			nameLoc := logger.Loc{Start: subclassToken.Range.Loc.Start + 1}
			name := p.decoded()
			sel.SubclassSelectors = append(sel.SubclassSelectors, css_ast.SubclassSelector{
				Range: subclassToken.Range,
				Data: &css_ast.SSHash{
					Name: p.symbolForName(nameLoc, name),
				},
			})
			p.advance()
		case css_lexer.TDelimDot:
			p.advance()
			nameRange := p.current().Range
			name := p.decoded()
			sel.SubclassSelectors = append(sel.SubclassSelectors, css_ast.SubclassSelector{
				Range: logger.Range{Loc: subclassToken.Range.Loc, Len: nameRange.End() - subclassToken.Range.Loc.Start},
				Data: &css_ast.SSClass{
					Name: p.symbolForName(nameRange.Loc, name),
				},
			})
			if !p.expect(css_lexer.TIdent) {
				return
			}
		case css_lexer.TOpenBracket:
			attr, r := p.parseAttributeSelector()
			if r.Len == 0 {
				return
			}
			sel.SubclassSelectors = append(sel.SubclassSelectors, css_ast.SubclassSelector{
				Range: r,
				Data: &attr,
			})
		case css_lexer.TColon:
			if p.next().Kind == css_lexer.TColon {
				// Special-case the start of the pseudo-element selector section
				for p.peek(css_lexer.TColon) {
					firstColonLoc := p.current().Range.Loc
					isElement := p.next().Kind == css_lexer.TColon
					if isElement {
						p.advance()
					}
					pseudo, r := p.parsePseudoClassSelector(firstColonLoc, isElement)
					// https://www.w3.org/TR/selectors-4/#single-colon-pseudos
					// The four Level 2 pseudo-elements (::before, ::after, ::first-line,
					// and ::first-letter) may, for legacy reasons, be represented using
					// the <pseudo-class-selector> grammar, with only a single ":"
					// character at their start.
					if p.options.minifySyntax && isElement {
						if pseudo, ok := pseudo.(*css_ast.SSPseudoClass); ok && len(pseudo.Args) == 0 {
							switch pseudo.Name {
							case "before", "after", "first-line", "first-letter":
								pseudo.IsElement = false
							}
						}
					}
					sel.SubclassSelectors = append(sel.SubclassSelectors, css_ast.SubclassSelector{
						Range: r,
						Data: pseudo,
					})
				}
				break subclassSelectors
			}
			pseudo, r := p.parsePseudoClassSelector(subclassToken.Range.Loc, false)
			sel.SubclassSelectors = append(sel.SubclassSelectors, css_ast.SubclassSelector{
				Range: r,
				Data: pseudo,
			})
		case css_lexer.TDelimAmpersand:
			// This is an extension: https://drafts.csswg.org/css-nesting-1/
			p.nestingIsPresent = true
			sel.NestingSelectorLocs = append(sel.NestingSelectorLocs, subclassToken.Range.Loc)
			p.advance()
		default:
			break subclassSelectors
		}
	}
	// The compound selector must be non-empty
	if sel.IsInvalidBecauseEmpty() {
		p.unexpected()
		return
	}
	// Note: "&div {}" was originally valid, but is now an invalid selector:
	// https://github.com/w3c/csswg-drafts/issues/8662#issuecomment-1514977935.
	// This is because SASS already uses that syntax to mean something very
	// different, so that syntax has been removed to avoid mistakes.
	if hasLeadingNestingSelector && sel.TypeSelector != nil {
		r := logger.Range{Loc: typeSelectorLoc, Len: p.at(p.index-1).Range.End() - typeSelectorLoc.Start}
		text := sel.TypeSelector.Name.Text
		if sel.TypeSelector.NamespacePrefix != nil {
			text = fmt.Sprintf("%s|%s", sel.TypeSelector.NamespacePrefix.Text, text)
		}
		var howToFix string
		suggestion := p.source.TextForRange(r)
		if opts.isFirst {
			suggestion = fmt.Sprintf(":is(%s)", suggestion)
			howToFix = "You can wrap this selector in \":is(...)\" as a workaround. "
		} else {
			r = logger.Range{Loc: startLoc, Len: r.End() - startLoc.Start}
			suggestion += "&"
			howToFix = "You can move the \"&\" to the end of this selector as a workaround. "
		}
		msg := logger.Msg{
			Kind: logger.Warning,
			Data: p.tracker.MsgData(r, fmt.Sprintf("Cannot use type selector %q directly after nesting selector \"&\"", text)),
			Notes: []logger.MsgData{{Text: "CSS nesting syntax does not allow the \"&\" selector to come before a type selector. " +
				howToFix +
				"This restriction exists to avoid problems with SASS nesting, where the same syntax means something very different " +
				"that has no equivalent in real CSS (appending a suffix to the parent selector)."}},
		}
		msg.Data.Location.Suggestion = suggestion
		p.log.AddMsgID(logger.MsgID_CSS_CSSSyntaxError, msg)
		return
	}
	// The type selector must always come first
	switch p.current().Kind {
	case css_lexer.TDelimBar, css_lexer.TIdent, css_lexer.TDelimAsterisk:
		p.unexpected()
		return
	}
	ok = true
	return
}
// parseAttributeSelector parses an attribute selector starting at "[". It
// returns the parsed selector along with its source range; the early-return
// failure paths leave the returned range zero-valued, so callers use
// "r.Len == 0" to detect a parse failure.
func (p *parser) parseAttributeSelector() (attr css_ast.SSAttribute, r logger.Range) {
	matchingLoc := p.current().Range.Loc
	p.advance()
	// Parse the namespaced name
	switch p.current().Kind {
	case css_lexer.TDelimBar, css_lexer.TDelimAsterisk:
		// "[|x]"
		// "[*|x]"
		if p.peek(css_lexer.TDelimAsterisk) {
			prefix := p.nameToken()
			p.advance()
			attr.NamespacedName.NamespacePrefix = &prefix
		} else {
			// "[|attr]" is equivalent to "[attr]". From the specification:
			// "In keeping with the Namespaces in the XML recommendation, default
			// namespaces do not apply to attributes, therefore attribute selectors
			// without a namespace component apply only to attributes that have no
			// namespace (equivalent to |attr)."
		}
		if !p.expect(css_lexer.TDelimBar) {
			return
		}
		attr.NamespacedName.Name = p.nameToken()
		if !p.expect(css_lexer.TIdent) {
			return
		}
	default:
		// "[x]"
		// "[x|y]"
		attr.NamespacedName.Name = p.nameToken()
		if !p.expect(css_lexer.TIdent) {
			return
		}
		// A "|" here is a namespace separator unless it's part of a "|=" matcher
		if p.next().Kind != css_lexer.TDelimEquals && p.eat(css_lexer.TDelimBar) {
			prefix := attr.NamespacedName.Name
			attr.NamespacedName.NamespacePrefix = &prefix
			attr.NamespacedName.Name = p.nameToken()
			if !p.expect(css_lexer.TIdent) {
				return
			}
		}
	}
	// Parse the optional matcher operator
	p.eat(css_lexer.TWhitespace)
	if p.eat(css_lexer.TDelimEquals) {
		attr.MatcherOp = "="
	} else {
		switch p.current().Kind {
		case css_lexer.TDelimTilde:
			attr.MatcherOp = "~="
		case css_lexer.TDelimBar:
			attr.MatcherOp = "|="
		case css_lexer.TDelimCaret:
			attr.MatcherOp = "^="
		case css_lexer.TDelimDollar:
			attr.MatcherOp = "$="
		case css_lexer.TDelimAsterisk:
			attr.MatcherOp = "*="
		}
		if attr.MatcherOp != "" {
			p.advance()
			if !p.expect(css_lexer.TDelimEquals) {
				return
			}
		}
	}
	// Parse the optional matcher value
	if attr.MatcherOp != "" {
		p.eat(css_lexer.TWhitespace)
		if !p.peek(css_lexer.TString) && !p.peek(css_lexer.TIdent) {
			p.unexpected()
		}
		attr.MatcherValue = p.decoded()
		p.advance()
		p.eat(css_lexer.TWhitespace)
		// Parse the optional single-letter case-sensitivity modifier
		if p.peek(css_lexer.TIdent) {
			if modifier := p.decoded(); len(modifier) == 1 {
				if c := modifier[0]; c == 'i' || c == 'I' || c == 's' || c == 'S' {
					attr.MatcherModifier = c
					p.advance()
				}
			}
		}
	}
	closeRange := p.current().Range
	if !p.expectWithMatchingLoc(css_lexer.TCloseBracket, matchingLoc) {
		closeRange.Len = 0
	}
	r = logger.Range{Loc: matchingLoc, Len: closeRange.End() - matchingLoc.Start}
	return
}
// parsePseudoClassSelector parses a pseudo-class (":hover", ":is(...)") or
// pseudo-element ("::before") selector. "loc" is the location of the leading
// ":" and "isElement" is true for "::" pseudo-elements. Known functional
// pseudo-classes that take a selector list or an "An+B" index are parsed
// structurally; anything else falls back to a raw token list. Returns the
// parsed selector and its full source range.
func (p *parser) parsePseudoClassSelector(loc logger.Loc, isElement bool) (css_ast.SS, logger.Range) {
	p.advance()

	if p.peek(css_lexer.TFunction) {
		text := p.decoded()
		// Location of the "(" at the end of the function token, used to point
		// back at it if the closing ")" is missing
		matchingLoc := logger.Loc{Start: p.current().Range.End() - 1}
		p.advance()

		// Potentially parse a pseudo-class with a selector list
		if !isElement {
			var kind css_ast.PseudoClassKind
			local := p.makeLocalSymbols
			ok := true
			switch text {
			case "global":
				kind = css_ast.PseudoClassGlobal
				if p.options.symbolMode != symbolModeDisabled {
					local = false
				}
			case "has":
				kind = css_ast.PseudoClassHas
			case "is":
				kind = css_ast.PseudoClassIs
			case "local":
				kind = css_ast.PseudoClassLocal
				if p.options.symbolMode != symbolModeDisabled {
					local = true
				}
			case "not":
				kind = css_ast.PseudoClassNot
			case "nth-child":
				kind = css_ast.PseudoClassNthChild
			case "nth-last-child":
				kind = css_ast.PseudoClassNthLastChild
			case "nth-of-type":
				kind = css_ast.PseudoClassNthOfType
			case "nth-last-of-type":
				kind = css_ast.PseudoClassNthLastOfType
			case "where":
				kind = css_ast.PseudoClassWhere
			default:
				ok = false
			}

			if ok {
				// Remember the token index so we can rewind and fall back to
				// the raw token parse below if the structured parse fails
				old := p.index

				if kind.HasNthIndex() {
					p.eat(css_lexer.TWhitespace)

					// Parse the "An+B" syntax
					if index, ok := p.parseNthIndex(); ok {
						var selectors []css_ast.ComplexSelector

						// Parse the optional "of" clause (only valid for
						// ":nth-child" and ":nth-last-child")
						if (kind == css_ast.PseudoClassNthChild || kind == css_ast.PseudoClassNthLastChild) &&
							p.peek(css_lexer.TIdent) && strings.EqualFold(p.decoded(), "of") {
							p.advance()
							p.eat(css_lexer.TWhitespace)

							// Contain the effects of ":local" and ":global"
							oldLocal := p.makeLocalSymbols
							selectors, ok = p.parseSelectorList(parseSelectorOpts{
								stopOnCloseParen:    true,
								noLeadingCombinator: true,
							})
							p.makeLocalSymbols = oldLocal
						}

						// "2n+0" => "2n"
						if p.options.minifySyntax {
							index.Minify()
						}

						// Match the closing ")"
						if ok {
							closeRange := p.current().Range
							if !p.expectWithMatchingLoc(css_lexer.TCloseParen, matchingLoc) {
								closeRange.Len = 0
							}
							return &css_ast.SSPseudoClassWithSelectorList{Kind: kind, Selectors: selectors, Index: index},
								logger.Range{Loc: loc, Len: closeRange.End() - loc.Start}
						}
					}
				} else {
					p.eat(css_lexer.TWhitespace)

					// ":local" forces local names and ":global" forces global names
					oldLocal := p.makeLocalSymbols
					p.makeLocalSymbols = local
					selectors, ok := p.parseSelectorList(parseSelectorOpts{
						pseudoClassKind:         kind,
						stopOnCloseParen:        true,
						onlyOneComplexSelector:  kind == css_ast.PseudoClassGlobal || kind == css_ast.PseudoClassLocal,
						isForgivingSelectorList: kind == css_ast.PseudoClassIs || kind == css_ast.PseudoClassWhere,
					})
					p.makeLocalSymbols = oldLocal

					// Match the closing ")"
					if ok {
						closeRange := p.current().Range
						if !p.expectWithMatchingLoc(css_lexer.TCloseParen, matchingLoc) {
							closeRange.Len = 0
						}
						return &css_ast.SSPseudoClassWithSelectorList{Kind: kind, Selectors: selectors},
							logger.Range{Loc: loc, Len: closeRange.End() - loc.Start}
					}
				}

				// The structured parse failed: rewind and fall through below
				p.index = old
			}
		}

		// Fall back to treating the function argument as raw tokens
		args := p.convertTokens(p.parseAnyValue())
		closeRange := p.current().Range
		if !p.expectWithMatchingLoc(css_lexer.TCloseParen, matchingLoc) {
			closeRange.Len = 0
		}
		return &css_ast.SSPseudoClass{IsElement: isElement, Name: text, Args: args},
			logger.Range{Loc: loc, Len: closeRange.End() - loc.Start}
	}

	// Non-functional pseudo-class or pseudo-element (e.g. ":hover", "::before")
	nameRange := p.current().Range
	name := p.decoded()
	sel := css_ast.SSPseudoClass{IsElement: isElement}
	if p.expect(css_lexer.TIdent) {
		sel.Name = name

		// ":local .local_name :global .global_name {}"
		// ":local { .local_name { :global { .global_name {} } }"
		if p.options.symbolMode != symbolModeDisabled {
			switch name {
			case "local":
				p.makeLocalSymbols = true
			case "global":
				p.makeLocalSymbols = false
			}
		}
	} else {
		nameRange.Len = 0
	}
	return &sel, logger.Range{Loc: loc, Len: nameRange.End() - loc.Start}
}
// parseAnyValue consumes a balanced run of raw tokens and returns them
// without interpretation. Consumption stops at an unbalanced ")", "]", or
// "}", at a top-level ";" or "!", or at the end of the file. An error is
// reported if no tokens were consumed at all.
func (p *parser) parseAnyValue() []css_lexer.Token {
	// Reference: https://drafts.csswg.org/css-syntax-3/#typedef-declaration-value

	p.stack = p.stack[:0] // Reuse allocated memory
	start := p.index

loop:
	for {
		switch p.current().Kind {
		case css_lexer.TCloseParen, css_lexer.TCloseBracket, css_lexer.TCloseBrace:
			// Only stop on a closing bracket that doesn't match an open one
			last := len(p.stack) - 1
			if last < 0 || !p.peek(p.stack[last]) {
				break loop
			}
			p.stack = p.stack[:last]

		case css_lexer.TSemicolon, css_lexer.TDelimExclamation:
			// ";" and "!" only terminate the value at the top nesting level
			if len(p.stack) == 0 {
				break loop
			}

		case css_lexer.TOpenParen, css_lexer.TFunction:
			p.stack = append(p.stack, css_lexer.TCloseParen)

		case css_lexer.TOpenBracket:
			p.stack = append(p.stack, css_lexer.TCloseBracket)

		case css_lexer.TOpenBrace:
			p.stack = append(p.stack, css_lexer.TCloseBrace)

		case css_lexer.TEndOfFile:
			break loop
		}

		p.advance()
	}

	tokens := p.tokens[start:p.index]
	if len(tokens) == 0 {
		p.unexpected()
	}
	return tokens
}
// parseCombinator parses an optional selector combinator (">", "+", or "~").
// The zero Combinator is returned when the current token is not one of these
// (the descendant combinator has no explicit token).
func (p *parser) parseCombinator() css_ast.Combinator {
	token := p.current()
	var combinator byte
	switch token.Kind {
	case css_lexer.TDelimGreaterThan:
		combinator = '>'
	case css_lexer.TDelimPlus:
		combinator = '+'
	case css_lexer.TDelimTilde:
		combinator = '~'
	default:
		return css_ast.Combinator{}
	}
	p.advance()
	return css_ast.Combinator{Loc: token.Range.Loc, Byte: combinator}
}
// parseInteger validates that "text" consists entirely of ASCII digits and
// returns it with any leading zeros removed. A run of only zeros becomes
// "0". Returns false for an empty string or any non-digit character (note
// that signs are not accepted here; callers strip them beforehand).
func parseInteger(text string) (string, bool) {
	if text == "" {
		return "", false
	}

	// Reject anything that isn't an ASCII digit
	for i := 0; i < len(text); i++ {
		if c := text[i]; c < '0' || c > '9' {
			return "", false
		}
	}

	// Canonicalize by dropping leading zeros ("007" => "7", "000" => "0")
	trimmed := strings.TrimLeft(text, "0")
	if trimmed == "" {
		return "0", true
	}
	return trimmed, true
}
// parseNthIndex parses the "An+B" microsyntax used by ":nth-child" and
// friends (e.g. "odd", "5", "2n", "-n+3", "2n + 1"). On success it returns
// the parsed index with trailing whitespace consumed; on failure it reports
// an error and returns false.
func (p *parser) parseNthIndex() (css_ast.NthIndex, bool) {
	type sign uint8
	const (
		none sign = iota
		negative
		positive
	)

	// Reference: https://drafts.csswg.org/css-syntax-3/#anb-microsyntax
	t0 := p.current()
	text0 := p.decoded()

	// Handle "even" and "odd"
	if t0.Kind == css_lexer.TIdent && (text0 == "even" || text0 == "odd") {
		p.advance()
		p.eat(css_lexer.TWhitespace)
		return css_ast.NthIndex{B: text0}, true
	}

	// Handle a single number (just the "B" part, e.g. ":nth-child(5)")
	if t0.Kind == css_lexer.TNumber {
		bNeg := false
		if strings.HasPrefix(text0, "-") {
			bNeg = true
			text0 = text0[1:]
		} else {
			text0 = strings.TrimPrefix(text0, "+")
		}
		if b, ok := parseInteger(text0); ok {
			if bNeg {
				b = "-" + b
			}
			p.advance()
			p.eat(css_lexer.TWhitespace)
			return css_ast.NthIndex{B: b}, true
		}
		p.unexpected()
		return css_ast.NthIndex{}, false
	}

	// A "+" before the "An" part arrives as its own delimiter token
	aSign := none
	if p.eat(css_lexer.TDelimPlus) {
		aSign = positive
		t0 = p.current()
		text0 = p.decoded()
	}

	// Everything from here must be able to contain an "n"
	// (an identifier like "n"/"2n-1" or a dimension like "2n")
	if t0.Kind != css_lexer.TIdent && t0.Kind != css_lexer.TDimension {
		p.unexpected()
		return css_ast.NthIndex{}, false
	}

	// Check for a leading sign
	if aSign == none {
		if strings.HasPrefix(text0, "-") {
			aSign = negative
			text0 = text0[1:]
		} else {
			text0 = strings.TrimPrefix(text0, "+")
		}
	}

	// The string must contain an "n"
	n := strings.IndexByte(text0, 'n')
	if n < 0 {
		p.unexpected()
		return css_ast.NthIndex{}, false
	}

	// Parse the number before the "n" (a bare "n" means a coefficient of 1)
	var a string
	if n == 0 {
		if aSign == negative {
			a = "-1"
		} else {
			a = "1"
		}
	} else if aInt, ok := parseInteger(text0[:n]); ok {
		if aSign == negative {
			aInt = "-" + aInt
		}
		a = aInt
	} else {
		p.unexpected()
		return css_ast.NthIndex{}, false
	}
	text0 = text0[n+1:]

	// Parse the stuff after the "n" (e.g. the "-1" in a single "2n-1" token)
	bSign := none
	if strings.HasPrefix(text0, "-") {
		text0 = text0[1:]
		if b, ok := parseInteger(text0); ok {
			p.advance()
			p.eat(css_lexer.TWhitespace)
			return css_ast.NthIndex{A: a, B: "-" + b}, true
		}
		// Remember the "-" so a following number token is treated as negative
		bSign = negative
	}

	// Any other trailing characters after the "n" are invalid
	if text0 != "" {
		p.unexpected()
		return css_ast.NthIndex{}, false
	}
	p.advance()
	p.eat(css_lexer.TWhitespace)

	// Parse an optional sign delimiter (the "+" or "-" in "2n + 1")
	if bSign == none {
		if p.eat(css_lexer.TDelimMinus) {
			bSign = negative
			p.eat(css_lexer.TWhitespace)
		} else if p.eat(css_lexer.TDelimPlus) {
			bSign = positive
			p.eat(css_lexer.TWhitespace)
		}
	}

	// Parse an optional trailing number (the "B" part)
	t1 := p.current()
	text1 := p.decoded()
	if t1.Kind == css_lexer.TNumber {
		if bSign == none {
			if strings.HasPrefix(text1, "-") {
				bSign = negative
				text1 = text1[1:]
			} else if strings.HasPrefix(text1, "+") {
				text1 = text1[1:]
			}
		}
		if b, ok := parseInteger(text1); ok {
			if bSign == negative {
				b = "-" + b
			}
			p.advance()
			p.eat(css_lexer.TWhitespace)
			return css_ast.NthIndex{A: a, B: b}, true
		}
	}

	// If there is a trailing sign, then there must also be a trailing number
	if bSign != none {
		p.expect(css_lexer.TNumber)
		return css_ast.NthIndex{}, false
	}

	return css_ast.NthIndex{A: a}, true
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/css_parser/css_nesting.go | internal/css_parser/css_nesting.go | package css_parser
import (
"fmt"
"github.com/evanw/esbuild/internal/compat"
"github.com/evanw/esbuild/internal/css_ast"
"github.com/evanw/esbuild/internal/logger"
)
// lowerNestingInRule transforms CSS nesting inside "rule" into equivalent
// flat (non-nested) rules, appending them to "results" and returning the
// updated slice. This is the top-level entry point for lowering nesting for
// browsers that don't support it.
func (p *parser) lowerNestingInRule(rule css_ast.Rule, results []css_ast.Rule) []css_ast.Rule {
	switch r := rule.Data.(type) {
	case *css_ast.RSelector:
		// Replacement used for a top-level "&" (see the comment below)
		scope := func(loc logger.Loc) css_ast.ComplexSelector {
			return css_ast.ComplexSelector{
				Selectors: []css_ast.CompoundSelector{{
					SubclassSelectors: []css_ast.SubclassSelector{{
						Range: logger.Range{Loc: loc},
						Data:  &css_ast.SSPseudoClass{Name: "scope"},
					}},
				}},
			}
		}

		// Two parent selector lists are maintained because nested style rules
		// and nested at-rules substitute different things (see below)
		parentSelectorsWithPseudo := make([]css_ast.ComplexSelector, 0, len(r.Selectors))
		parentSelectorsNoPseudo := make([]css_ast.ComplexSelector, 0, len(r.Selectors))
		for i, sel := range r.Selectors {
			// Top-level "&" should be replaced with ":scope" to avoid recursion.
			// From https://www.w3.org/TR/css-nesting-1/#nest-selector:
			//
			//   "When used in the selector of a nested style rule, the nesting
			//   selector represents the elements matched by the parent rule. When
			//   used in any other context, it represents the same elements as
			//   :scope in that context (unless otherwise defined)."
			//
			substituted := make([]css_ast.CompoundSelector, 0, len(sel.Selectors))
			for _, x := range sel.Selectors {
				substituted = p.substituteAmpersandsInCompoundSelector(x, scope, substituted, keepLeadingCombinator)
			}
			r.Selectors[i] = css_ast.ComplexSelector{Selectors: substituted}

			// Filter out pseudo elements because they are ignored by nested style
			// rules. This is because pseudo-elements are not valid within :is():
			// https://www.w3.org/TR/selectors-4/#matches-pseudo. This restriction
			// may be relaxed in the future, but this restriction has shipped so
			// we're stuck with it: https://github.com/w3c/csswg-drafts/issues/7433.
			//
			// Note: This is only for the parent selector list that is used to
			// substitute "&" within child rules. Do not filter out the pseudo
			// element from the top-level selector list.
			if !sel.UsesPseudoElement() {
				parentSelectorsNoPseudo = append(parentSelectorsNoPseudo, css_ast.ComplexSelector{Selectors: substituted})
			}

			// This filtering is only done conditionally because it seems to only
			// apply sometimes. Specifically it doesn't seem to apply when the
			// nested rule is an at-rule. So we use the unfiltered list in that
			// case. See: https://github.com/evanw/esbuild/issues/4265
			parentSelectorsWithPseudo = append(parentSelectorsWithPseudo, css_ast.ComplexSelector{Selectors: substituted})
		}

		// Emit this selector before its nested children
		start := len(results)
		results = append(results, rule)

		// Lower all children and filter out ones that become empty
		context := lowerNestingContext{
			parentSelectorsWithPseudo: parentSelectorsWithPseudo,
			parentSelectorsNoPseudo:   parentSelectorsNoPseudo,
			loweredRules:              results,
		}
		r.Rules = p.lowerNestingInRulesAndReturnRemaining(r.Rules, &context)

		// Omit this selector entirely if it's now empty
		if len(r.Rules) == 0 {
			copy(context.loweredRules[start:], context.loweredRules[start+1:])
			context.loweredRules = context.loweredRules[:len(context.loweredRules)-1]
		}

		return context.loweredRules

	// Recurse into known at-rules, layers, and media queries so that nested
	// style rules anywhere inside them are also lowered
	case *css_ast.RKnownAt:
		var rules []css_ast.Rule
		for _, child := range r.Rules {
			rules = p.lowerNestingInRule(child, rules)
		}
		r.Rules = rules

	case *css_ast.RAtLayer:
		var rules []css_ast.Rule
		for _, child := range r.Rules {
			rules = p.lowerNestingInRule(child, rules)
		}
		r.Rules = rules

	case *css_ast.RAtMedia:
		var rules []css_ast.Rule
		for _, child := range r.Rules {
			rules = p.lowerNestingInRule(child, rules)
		}
		r.Rules = rules
	}

	// Non-selector rules are passed through unchanged
	return append(results, rule)
}
// Lower every child rule and return only the ones that still have content
// afterwards. The input slice's backing array is reused for the result.
func (p *parser) lowerNestingInRulesAndReturnRemaining(rules []css_ast.Rule, context *lowerNestingContext) []css_ast.Rule {
	remaining := rules[:0]
	for _, child := range rules {
		if lowered := p.lowerNestingInRuleWithContext(child, context); lowered.Data != nil {
			remaining = append(remaining, lowered)
		}
	}
	return remaining
}
// compoundSelectorTermCount returns the number of subclass selector terms in
// "sel", counting recursively through any nested selector lists inside
// pseudo-classes such as ":is(...)".
func compoundSelectorTermCount(sel css_ast.CompoundSelector) int {
	total := len(sel.SubclassSelectors)
	for _, ss := range sel.SubclassSelectors {
		if list, ok := ss.Data.(*css_ast.SSPseudoClassWithSelectorList); ok {
			total += complexSelectorTermCount(list.Selectors)
		}
	}
	return total
}
// complexSelectorTermCount returns the total number of selector terms across
// all complex selectors in the list. Used to bound nesting expansion.
func complexSelectorTermCount(selectors []css_ast.ComplexSelector) int {
	total := 0
	for _, complex := range selectors {
		for _, compound := range complex.Selectors {
			total += compoundSelectorTermCount(compound)
		}
	}
	return total
}
// addExpansionError reports that lowering nesting at "loc" was aborted
// because it would have generated a rule with "n" selectors, which exceeds
// the internal expansion limit.
func (p *parser) addExpansionError(loc logger.Loc, n int) {
	note := fmt.Sprintf("CSS nesting expansion was terminated because a rule was generated with %d selectors. "+
		"This limit exists to prevent esbuild from using too much time and/or memory. "+
		"Please change your CSS to use fewer levels of nesting.", n)
	p.log.AddErrorWithNotes(&p.tracker, logger.Range{Loc: loc}, "CSS nesting is causing too much expansion",
		[]logger.MsgData{{Text: note}})
}
// lowerNestingContext carries the state threaded through the nesting
// lowering pass for one parent style rule.
type lowerNestingContext struct {
	// Parent selectors used when hoisting nested at-rules (pseudo-element
	// selectors are kept; see the comments in "lowerNestingInRule")
	parentSelectorsWithPseudo []css_ast.ComplexSelector

	// Parent selectors used when substituting "&" in nested style rules
	// (selectors that use pseudo elements are filtered out)
	parentSelectorsNoPseudo []css_ast.ComplexSelector

	// The flattened output rules generated so far
	loweredRules []css_ast.Rule
}
// lowerNestingInRuleWithContext lowers a single rule that is nested inside a
// parent style rule. Generated flat rules are appended to
// "context.loweredRules". The returned rule is what should remain in place
// of the original nested rule; an empty Rule (nil Data) means the rule was
// fully hoisted out of its parent.
func (p *parser) lowerNestingInRuleWithContext(rule css_ast.Rule, context *lowerNestingContext) css_ast.Rule {
	switch r := rule.Data.(type) {
	case *css_ast.RSelector:
		// Remember the old sizes so the expansion limits below only fire when
		// the substitution actually grew the selector list
		oldSelectorsLen := len(r.Selectors)
		oldSelectorsComplexity := complexSelectorTermCount(r.Selectors)

		// "a { & b {} }" => "a b {}"
		// "a { &b {} }" => "a:is(b) {}"
		// "a { &:hover {} }" => "a:hover {}"
		// ".x { &b {} }" => "b.x {}"
		// "a, b { .c, d {} }" => ":is(a, b) :is(.c, d) {}"
		// "a, b { &.c, & d, e & {} }" => ":is(a, b).c, :is(a, b) d, e :is(a, b) {}"

		// Pass 1: Canonicalize and analyze our selectors
		for i := range r.Selectors {
			sel := &r.Selectors[i]

			// Inject the implicit "&" now for simplicity later on
			if sel.IsRelative() {
				sel.Selectors = append([]css_ast.CompoundSelector{{NestingSelectorLocs: []logger.Loc{rule.Loc}}}, sel.Selectors...)
			}
		}

		// Pass 2: Substitute "&" for the parent selector
		if !p.options.unsupportedCSSFeatures.Has(compat.IsPseudoClass) || len(context.parentSelectorsNoPseudo) <= 1 {
			// If we can use ":is", or we don't have to because there's only one
			// parent selector, or we are using ":is()" to match zero parent selectors
			// (even if ":is" is unsupported), then substituting "&" for the parent
			// selector is easy.
			for i := range r.Selectors {
				complex := &r.Selectors[i]
				results := make([]css_ast.CompoundSelector, 0, len(complex.Selectors))
				parent := p.multipleComplexSelectorsToSingleComplexSelector(context.parentSelectorsNoPseudo)
				for _, compound := range complex.Selectors {
					results = p.substituteAmpersandsInCompoundSelector(compound, parent, results, keepLeadingCombinator)
				}
				complex.Selectors = results
			}
		} else {
			// Otherwise if we can't use ":is", the transform is more complicated.
			// Avoiding ":is" can lead to a combinatorial explosion of cases so we
			// want to avoid this if possible. For example:
			//
			//   .first, .second, .third {
			//     & > & {
			//       color: red;
			//     }
			//   }
			//
			// If we can use ":is" (the easy case above) then we can do this:
			//
			//   :is(.first, .second, .third) > :is(.first, .second, .third) {
			//     color: red;
			//   }
			//
			// But if we can't use ":is" then we have to do this instead:
			//
			//   .first > .first,
			//   .first > .second,
			//   .first > .third,
			//   .second > .first,
			//   .second > .second,
			//   .second > .third,
			//   .third > .first,
			//   .third > .second,
			//   .third > .third {
			//     color: red;
			//   }
			//
			// That combinatorial explosion is what the loop below implements. Note
			// that PostCSS's implementation of nesting gets this wrong. It generates
			// this instead:
			//
			//   .first > .first,
			//   .second > .second,
			//   .third > .third {
			//     color: red;
			//   }
			//
			// That's not equivalent, so that's an incorrect transformation.
			var selectors []css_ast.ComplexSelector
			var indices []int
			for {
				// Every time we encounter another "&", add another dimension
				offset := 0
				parent := func(loc logger.Loc) css_ast.ComplexSelector {
					if offset == len(indices) {
						indices = append(indices, 0)
					}
					index := indices[offset]
					offset++
					return context.parentSelectorsNoPseudo[index]
				}

				// Do the substitution for this particular combination
				for i := range r.Selectors {
					complex := r.Selectors[i]
					results := make([]css_ast.CompoundSelector, 0, len(complex.Selectors))
					for _, compound := range complex.Selectors {
						results = p.substituteAmpersandsInCompoundSelector(compound, parent, results, keepLeadingCombinator)
					}
					complex.Selectors = results
					selectors = append(selectors, complex)
					offset = 0
				}

				// Do addition with carry on the indices across dimensions
				carry := len(indices)
				for carry > 0 {
					index := &indices[carry-1]
					if *index+1 < len(context.parentSelectorsNoPseudo) {
						*index++
						break
					}
					*index = 0
					carry--
				}
				if carry == 0 {
					break
				}
			}
			r.Selectors = selectors
		}

		// Put limits on the combinatorial explosion to avoid using too much time and/or memory
		if n := len(r.Selectors); n > oldSelectorsLen && n > 0xFF00 {
			p.addExpansionError(rule.Loc, n)
			return css_ast.Rule{}
		}
		if n := complexSelectorTermCount(r.Selectors); n > oldSelectorsComplexity && n > 0xFF00 {
			p.addExpansionError(rule.Loc, n)
			return css_ast.Rule{}
		}

		// Lower all child rules using our newly substituted selector
		context.loweredRules = p.lowerNestingInRule(rule, context.loweredRules)
		return css_ast.Rule{}

	case *css_ast.RKnownAt:
		// Lower all children and filter out ones that become empty
		childContext := lowerNestingContext{
			parentSelectorsWithPseudo: context.parentSelectorsWithPseudo,
			parentSelectorsNoPseudo:   context.parentSelectorsNoPseudo,
		}
		r.Rules = p.lowerNestingInRulesAndReturnRemaining(r.Rules, &childContext)

		// "div { @supports (color: red) { color: red } }" => "@supports (color: red) { div { color: red } }"
		if len(r.Rules) > 0 {
			childContext.loweredRules = append([]css_ast.Rule{{Loc: rule.Loc, Data: &css_ast.RSelector{
				Selectors: context.parentSelectorsWithPseudo,
				Rules:     r.Rules,
			}}}, childContext.loweredRules...)
		}

		// "div { @supports (color: red) { &:hover { color: red } } }" => "@supports (color: red) { div:hover { color: red } }"
		if len(childContext.loweredRules) > 0 {
			r.Rules = childContext.loweredRules
			context.loweredRules = append(context.loweredRules, rule)
		}

		return css_ast.Rule{}

	case *css_ast.RAtMedia:
		// Lower all children and filter out ones that become empty
		childContext := lowerNestingContext{
			parentSelectorsWithPseudo: context.parentSelectorsWithPseudo,
			parentSelectorsNoPseudo:   context.parentSelectorsNoPseudo,
		}
		r.Rules = p.lowerNestingInRulesAndReturnRemaining(r.Rules, &childContext)

		// "div { @media screen { color: red } }" => "@media screen { div { color: red } }"
		if len(r.Rules) > 0 {
			childContext.loweredRules = append([]css_ast.Rule{{Loc: rule.Loc, Data: &css_ast.RSelector{
				Selectors: context.parentSelectorsWithPseudo,
				Rules:     r.Rules,
			}}}, childContext.loweredRules...)
		}

		// "div { @media screen { &:hover { color: red } } }" => "@media screen { div:hover { color: red } }"
		if len(childContext.loweredRules) > 0 {
			r.Rules = childContext.loweredRules
			context.loweredRules = append(context.loweredRules, rule)
		}

		return css_ast.Rule{}

	case *css_ast.RAtLayer:
		// Lower all children and filter out ones that become empty
		childContext := lowerNestingContext{
			parentSelectorsWithPseudo: context.parentSelectorsWithPseudo,
			parentSelectorsNoPseudo:   context.parentSelectorsNoPseudo,
		}
		r.Rules = p.lowerNestingInRulesAndReturnRemaining(r.Rules, &childContext)

		// "div { @layer foo { color: red } }" => "@layer foo { div { color: red } }"
		if len(r.Rules) > 0 {
			childContext.loweredRules = append([]css_ast.Rule{{Loc: rule.Loc, Data: &css_ast.RSelector{
				Selectors: context.parentSelectorsWithPseudo,
				Rules:     r.Rules,
			}}}, childContext.loweredRules...)
		}

		// "div { @layer foo { &:hover { color: red } } }" => "@layer foo { div:hover { color: red } }"
		// "div { @layer foo {} }" => "@layer foo {}" (layers have side effects, so don't remove empty ones)
		r.Rules = childContext.loweredRules
		context.loweredRules = append(context.loweredRules, rule)
		return css_ast.Rule{}
	}

	// Anything else (e.g. declarations) stays inside the parent rule
	return rule
}
// leadingCombinatorStrip controls whether the leading combinator of the
// replacement selector is kept or dropped when substituting "&".
type leadingCombinatorStrip uint8

const (
	keepLeadingCombinator leadingCombinatorStrip = iota
	stripLeadingCombinator
)
// substituteAmpersandsInCompoundSelector replaces every "&" in "sel" with
// the selector produced by "replacementFn", appending the resulting compound
// selectors to "results" and returning the updated slice. It also recurses
// into nested selector lists inside pseudo-classes such as ":is(...)".
func (p *parser) substituteAmpersandsInCompoundSelector(
	sel css_ast.CompoundSelector,
	replacementFn func(logger.Loc) css_ast.ComplexSelector,
	results []css_ast.CompoundSelector,
	strip leadingCombinatorStrip,
) []css_ast.CompoundSelector {
	for _, nestingSelectorLoc := range sel.NestingSelectorLocs {
		replacement := replacementFn(nestingSelectorLoc)

		// Convert the replacement to a single compound selector
		var single css_ast.CompoundSelector
		if sel.Combinator.Byte == 0 && (len(replacement.Selectors) == 1 || len(results) == 0) {
			// ".foo { :hover & {} }" => ":hover .foo {}"
			// ".foo .bar { &:hover {} }" => ".foo .bar:hover {}"
			// Splice all but the last compound selector directly into the
			// output, then merge the last one into "sel" below
			last := len(replacement.Selectors) - 1
			results = append(results, replacement.Selectors[:last]...)
			single = replacement.Selectors[last]
			if strip == stripLeadingCombinator {
				single.Combinator = css_ast.Combinator{}
			}
			sel.Combinator = single.Combinator
		} else if len(replacement.Selectors) == 1 {
			// ".foo { > &:hover {} }" => ".foo > .foo:hover {}"
			single = replacement.Selectors[0]
			if strip == stripLeadingCombinator {
				single.Combinator = css_ast.Combinator{}
			}
		} else {
			// ".foo .bar { :hover & {} }" => ":hover :is(.foo .bar) {}"
			// ".foo .bar { > &:hover {} }" => ".foo .bar > :is(.foo .bar):hover {}"
			// The multi-part parent must be wrapped in ":is(...)", which may
			// not be supported by the target environment
			p.reportNestingWithGeneratedPseudoClassIs(nestingSelectorLoc)
			single = css_ast.CompoundSelector{
				SubclassSelectors: []css_ast.SubclassSelector{{
					Range: logger.Range{Loc: nestingSelectorLoc},
					Data: &css_ast.SSPseudoClassWithSelectorList{
						Kind:      css_ast.PseudoClassIs,
						Selectors: []css_ast.ComplexSelector{replacement.Clone()},
					},
				}},
			}
		}

		var subclassSelectorPrefix []css_ast.SubclassSelector

		// Insert the type selector. If both "sel" and the replacement have
		// one, "sel"'s type selector is demoted into an ":is(...)" since a
		// compound selector can only contain a single type selector.
		if single.TypeSelector != nil {
			if sel.TypeSelector != nil {
				p.reportNestingWithGeneratedPseudoClassIs(nestingSelectorLoc)
				subclassSelectorPrefix = append(subclassSelectorPrefix, css_ast.SubclassSelector{
					Range: sel.TypeSelector.Range(),
					Data: &css_ast.SSPseudoClassWithSelectorList{
						Kind:      css_ast.PseudoClassIs,
						Selectors: []css_ast.ComplexSelector{{Selectors: []css_ast.CompoundSelector{{TypeSelector: sel.TypeSelector}}}},
					},
				})
			}
			sel.TypeSelector = single.TypeSelector
		}

		// Insert the subclass selectors
		subclassSelectorPrefix = append(subclassSelectorPrefix, single.SubclassSelectors...)

		// Write the changes back
		if len(subclassSelectorPrefix) > 0 {
			sel.SubclassSelectors = append(subclassSelectorPrefix, sel.SubclassSelectors...)
		}
	}
	sel.NestingSelectorLocs = nil

	// "div { :is(&.foo) {} }" => ":is(div.foo) {}"
	for _, ss := range sel.SubclassSelectors {
		if class, ok := ss.Data.(*css_ast.SSPseudoClassWithSelectorList); ok {
			outer := make([]css_ast.ComplexSelector, 0, len(class.Selectors))
			for _, complex := range class.Selectors {
				inner := make([]css_ast.CompoundSelector, 0, len(complex.Selectors))
				for _, sel := range complex.Selectors {
					inner = p.substituteAmpersandsInCompoundSelector(sel, replacementFn, inner, stripLeadingCombinator)
				}
				outer = append(outer, css_ast.ComplexSelector{Selectors: inner})
			}
			class.Selectors = outer
		}
	}

	return append(results, sel)
}
// Turn the list of selectors into a single selector by wrapping lists
// without a single element with ":is(...)". Note that this may result
// in an empty ":is()" selector (which matches nothing).
//
// The returned closure produces the replacement selector for a "&" found at
// the given location. The selectors are cloned up front so repeated calls
// can safely share the same data.
func (p *parser) multipleComplexSelectorsToSingleComplexSelector(selectors []css_ast.ComplexSelector) func(logger.Loc) css_ast.ComplexSelector {
	// A single selector can be used directly without an ":is(...)" wrapper
	if len(selectors) == 1 {
		return func(logger.Loc) css_ast.ComplexSelector {
			return selectors[0]
		}
	}

	var leadingCombinator css_ast.Combinator
	clones := make([]css_ast.ComplexSelector, len(selectors))
	for i, sel := range selectors {
		// "> a, > b" => "> :is(a, b)" (the caller should have already checked that all leading combinators are the same)
		leadingCombinator = sel.Selectors[0].Combinator
		clones[i] = sel.Clone()
	}

	return func(loc logger.Loc) css_ast.ComplexSelector {
		return css_ast.ComplexSelector{
			Selectors: []css_ast.CompoundSelector{{
				Combinator: leadingCombinator,
				SubclassSelectors: []css_ast.SubclassSelector{{
					Range: logger.Range{Loc: loc},
					Data: &css_ast.SSPseudoClassWithSelectorList{
						Kind:      css_ast.PseudoClassIs,
						Selectors: clones,
					},
				}},
			}},
		}
	}
}
// reportNestingWithGeneratedPseudoClassIs warns (once per source location)
// when lowering CSS nesting had to generate an ":is(...)" selector even
// though the target environment doesn't support ":is".
func (p *parser) reportNestingWithGeneratedPseudoClassIs(nestingSelectorLoc logger.Loc) {
	if !p.options.unsupportedCSSFeatures.Has(compat.IsPseudoClass) {
		return
	}

	// Only warn at each location once
	if _, didWarn := p.nestingWarnings[nestingSelectorLoc]; didWarn {
		return
	}
	if p.nestingWarnings == nil {
		p.nestingWarnings = make(map[logger.Loc]struct{})
	}
	p.nestingWarnings[nestingSelectorLoc] = struct{}{}

	text := "Transforming this CSS nesting syntax is not supported in the configured target environment"
	if p.options.originalTargetEnv != "" {
		text = fmt.Sprintf("%s (%s)", text, p.options.originalTargetEnv)
	}
	r := logger.Range{Loc: nestingSelectorLoc, Len: 1}
	p.log.AddIDWithNotes(logger.MsgID_CSS_UnsupportedCSSNesting, logger.Warning, &p.tracker, r, text, []logger.MsgData{{
		Text: "The nesting transform for this case must generate an \":is(...)\" but the configured target environment does not support the \":is\" pseudo-class."}})
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/css_parser/css_parser_test.go | internal/css_parser/css_parser_test.go | package css_parser
import (
"fmt"
"strings"
"testing"
"github.com/evanw/esbuild/internal/ast"
"github.com/evanw/esbuild/internal/compat"
"github.com/evanw/esbuild/internal/config"
"github.com/evanw/esbuild/internal/css_printer"
"github.com/evanw/esbuild/internal/logger"
"github.com/evanw/esbuild/internal/test"
)
// expectPrintedCommon parses "contents" with the given loader and options,
// then asserts that both the collected log output and the printed CSS match
// the expected strings. All expectPrinted* helpers delegate to this.
func expectPrintedCommon(t *testing.T, name string, contents string, expected string, expectedLog string, loader config.Loader, options config.Options) {
	t.Helper()
	t.Run(name, func(t *testing.T) {
		t.Helper()
		log := logger.NewDeferLog(logger.DeferLogNoVerboseOrDebug, nil)
		tree := Parse(log, test.SourceForTest(contents), OptionsFromConfig(loader, &options))

		// Collect all log messages into one string for comparison
		var logText strings.Builder
		for _, msg := range log.Done() {
			logText.WriteString(msg.String(logger.OutputOptions{}, logger.TerminalInfo{}))
		}
		test.AssertEqualWithDiff(t, logText.String(), expectedLog)

		// Print the parsed tree back to CSS and compare
		symbols := ast.NewSymbolMap(1)
		symbols.SymbolsForSource[0] = tree.Symbols
		printed := css_printer.Print(tree, symbols, css_printer.Options{
			MinifyWhitespace: options.MinifyWhitespace,
		})
		test.AssertEqualWithDiff(t, string(printed.CSS), expected)
	})
}
func expectPrinted(t *testing.T, contents string, expected string, expectedLog string) {
t.Helper()
expectPrintedCommon(t, contents, contents, expected, expectedLog, config.LoaderCSS, config.Options{})
}
// expectPrintedLocal checks printing with the local (CSS modules) loader.
func expectPrintedLocal(t *testing.T, contents string, expected string, expectedLog string) {
	t.Helper()
	name := contents + " [local]"
	expectPrintedCommon(t, name, contents, expected, expectedLog, config.LoaderLocalCSS, config.Options{})
}
func expectPrintedLower(t *testing.T, contents string, expected string, expectedLog string) {
t.Helper()
expectPrintedCommon(t, contents+" [lower]", contents, expected, expectedLog, config.LoaderCSS, config.Options{
UnsupportedCSSFeatures: ^compat.CSSFeature(0),
})
}
// expectPrintedLowerUnsupported checks printing with a caller-specified set
// of unsupported CSS features lowered.
func expectPrintedLowerUnsupported(t *testing.T, unsupportedCSSFeatures compat.CSSFeature, contents string, expected string, expectedLog string) {
	t.Helper()
	options := config.Options{UnsupportedCSSFeatures: unsupportedCSSFeatures}
	expectPrintedCommon(t, contents+" [lower]", contents, expected, expectedLog, config.LoaderCSS, options)
}
func expectPrintedWithAllPrefixes(t *testing.T, contents string, expected string, expectedLog string) {
t.Helper()
expectPrintedCommon(t, contents+" [prefixed]", contents, expected, expectedLog, config.LoaderCSS, config.Options{
CSSPrefixData: compat.CSSPrefixData(map[compat.Engine]compat.Semver{
compat.Chrome: {Parts: []int{0}},
compat.Edge: {Parts: []int{0}},
compat.Firefox: {Parts: []int{0}},
compat.IE: {Parts: []int{0}},
compat.IOS: {Parts: []int{0}},
compat.Opera: {Parts: []int{0}},
compat.Safari: {Parts: []int{0}},
}),
})
}
func expectPrintedMinify(t *testing.T, contents string, expected string, expectedLog string) {
t.Helper()
expectPrintedCommon(t, contents+" [minify]", contents, expected, expectedLog, config.LoaderCSS, config.Options{
MinifyWhitespace: true,
})
}
func expectPrintedMangle(t *testing.T, contents string, expected string, expectedLog string) {
t.Helper()
expectPrintedCommon(t, contents+" [mangle]", contents, expected, expectedLog, config.LoaderCSS, config.Options{
MinifySyntax: true,
})
}
func expectPrintedLowerMangle(t *testing.T, contents string, expected string, expectedLog string) {
t.Helper()
expectPrintedCommon(t, contents+" [lower, mangle]", contents, expected, expectedLog, config.LoaderCSS, config.Options{
UnsupportedCSSFeatures: ^compat.CSSFeature(0),
MinifySyntax: true,
})
}
func expectPrintedMangleMinify(t *testing.T, contents string, expected string, expectedLog string) {
t.Helper()
expectPrintedCommon(t, contents+" [mangle, minify]", contents, expected, expectedLog, config.LoaderCSS, config.Options{
MinifySyntax: true,
MinifyWhitespace: true,
})
}
func expectPrintedLowerMinify(t *testing.T, contents string, expected string, expectedLog string) {
t.Helper()
expectPrintedCommon(t, contents+" [lower, minify]", contents, expected, expectedLog, config.LoaderCSS, config.Options{
UnsupportedCSSFeatures: ^compat.CSSFeature(0),
MinifyWhitespace: true,
})
}
// TestSingleLineComment checks that "//" comments (which are not valid CSS)
// are passed through while emitting a warning per comment.
func TestSingleLineComment(t *testing.T) {
	warning := "<stdin>: WARNING: Comments in CSS use \"/* ... */\" instead of \"//\"\n"
	expectPrinted(t, "a, // a\nb // b\n{}", "a, // a b // b {\n}\n", warning+warning)
	expectPrinted(t, "a, ///// a /////\n{}", "a, ///// a ///// {\n}\n", warning)
}
// TestEscapes covers parsing and printing of CSS backslash escape
// sequences (hex escapes like "\65" and single-character escapes like
// "\,") across every token type that can contain them: identifiers,
// hashes, functions, strings, URLs, at-keywords, dimensions,
// declarations, at-rules, and selectors.
func TestEscapes(t *testing.T) {
	// TIdent
	expectPrinted(t, "a { value: id\\65nt }", "a {\n value: ident;\n}\n", "")
	expectPrinted(t, "a { value: \\69 dent }", "a {\n value: ident;\n}\n", "")
	expectPrinted(t, "a { value: \\69dent }", "a {\n value: \u69DEnt;\n}\n", "")
	expectPrinted(t, "a { value: \\2cx }", "a {\n value: \\,x;\n}\n", "")
	expectPrinted(t, "a { value: \\,x }", "a {\n value: \\,x;\n}\n", "")
	expectPrinted(t, "a { value: x\\2c }", "a {\n value: x\\,;\n}\n", "")
	expectPrinted(t, "a { value: x\\, }", "a {\n value: x\\,;\n}\n", "")
	expectPrinted(t, "a { value: x\\0 }", "a {\n value: x\uFFFD;\n}\n", "")
	expectPrinted(t, "a { value: x\\1 }", "a {\n value: x\\\x01;\n}\n", "")
	expectPrinted(t, "a { value: x\x00 }", "a {\n value: x\uFFFD;\n}\n", "")
	expectPrinted(t, "a { value: x\x01 }", "a {\n value: x\x01;\n}\n", "")
	// THash
	expectPrinted(t, "a { value: #0h\\61sh }", "a {\n value: #0hash;\n}\n", "")
	expectPrinted(t, "a { value: #\\30hash }", "a {\n value: #0hash;\n}\n", "")
	expectPrinted(t, "a { value: #\\2cx }", "a {\n value: #\\,x;\n}\n", "")
	expectPrinted(t, "a { value: #\\,x }", "a {\n value: #\\,x;\n}\n", "")
	// THashID
	expectPrinted(t, "a { value: #h\\61sh }", "a {\n value: #hash;\n}\n", "")
	expectPrinted(t, "a { value: #\\68 ash }", "a {\n value: #hash;\n}\n", "")
	expectPrinted(t, "a { value: #\\68ash }", "a {\n value: #\u068Ash;\n}\n", "")
	expectPrinted(t, "a { value: #x\\2c }", "a {\n value: #x\\,;\n}\n", "")
	expectPrinted(t, "a { value: #x\\, }", "a {\n value: #x\\,;\n}\n", "")
	// TFunction
	expectPrinted(t, "a { value: f\\6e() }", "a {\n value: fn();\n}\n", "")
	expectPrinted(t, "a { value: \\66n() }", "a {\n value: fn();\n}\n", "")
	expectPrinted(t, "a { value: \\2cx() }", "a {\n value: \\,x();\n}\n", "")
	expectPrinted(t, "a { value: \\,x() }", "a {\n value: \\,x();\n}\n", "")
	expectPrinted(t, "a { value: x\\2c() }", "a {\n value: x\\,();\n}\n", "")
	expectPrinted(t, "a { value: x\\,() }", "a {\n value: x\\,();\n}\n", "")
	// TString
	expectPrinted(t, "a { value: 'a\\62 c' }", "a {\n value: \"abc\";\n}\n", "")
	expectPrinted(t, "a { value: 'a\\62c' }", "a {\n value: \"a\u062C\";\n}\n", "")
	expectPrinted(t, "a { value: '\\61 bc' }", "a {\n value: \"abc\";\n}\n", "")
	expectPrinted(t, "a { value: '\\61bc' }", "a {\n value: \"\u61BC\";\n}\n", "")
	expectPrinted(t, "a { value: '\\2c' }", "a {\n value: \",\";\n}\n", "")
	expectPrinted(t, "a { value: '\\,' }", "a {\n value: \",\";\n}\n", "")
	expectPrinted(t, "a { value: '\\0' }", "a {\n value: \"\uFFFD\";\n}\n", "")
	expectPrinted(t, "a { value: '\\1' }", "a {\n value: \"\x01\";\n}\n", "")
	expectPrinted(t, "a { value: '\x00' }", "a {\n value: \"\uFFFD\";\n}\n", "")
	expectPrinted(t, "a { value: '\x01' }", "a {\n value: \"\x01\";\n}\n", "")
	// TURL
	expectPrinted(t, "a { value: url(a\\62 c) }", "a {\n value: url(abc);\n}\n", "")
	expectPrinted(t, "a { value: url(a\\62c) }", "a {\n value: url(a\u062C);\n}\n", "")
	expectPrinted(t, "a { value: url(\\61 bc) }", "a {\n value: url(abc);\n}\n", "")
	expectPrinted(t, "a { value: url(\\61bc) }", "a {\n value: url(\u61BC);\n}\n", "")
	expectPrinted(t, "a { value: url(\\2c) }", "a {\n value: url(,);\n}\n", "")
	expectPrinted(t, "a { value: url(\\,) }", "a {\n value: url(,);\n}\n", "")
	// TAtKeyword
	expectPrinted(t, "a { value: @k\\65yword }", "a {\n value: @keyword;\n}\n", "")
	expectPrinted(t, "a { value: @\\6b eyword }", "a {\n value: @keyword;\n}\n", "")
	expectPrinted(t, "a { value: @\\6beyword }", "a {\n value: @\u06BEyword;\n}\n", "")
	expectPrinted(t, "a { value: @\\2cx }", "a {\n value: @\\,x;\n}\n", "")
	expectPrinted(t, "a { value: @\\,x }", "a {\n value: @\\,x;\n}\n", "")
	expectPrinted(t, "a { value: @x\\2c }", "a {\n value: @x\\,;\n}\n", "")
	expectPrinted(t, "a { value: @x\\, }", "a {\n value: @x\\,;\n}\n", "")
	// TDimension
	expectPrinted(t, "a { value: 10\\65m }", "a {\n value: 10em;\n}\n", "")
	expectPrinted(t, "a { value: 10p\\32x }", "a {\n value: 10p2x;\n}\n", "")
	expectPrinted(t, "a { value: 10e\\32x }", "a {\n value: 10\\65 2x;\n}\n", "")
	expectPrinted(t, "a { value: 10e-\\32x }", "a {\n value: 10\\65-2x;\n}\n", "")
	expectPrinted(t, "a { value: 10E\\32x }", "a {\n value: 10\\45 2x;\n}\n", "")
	expectPrinted(t, "a { value: 10E-\\32x }", "a {\n value: 10\\45-2x;\n}\n", "")
	expectPrinted(t, "a { value: 10e1e\\32x }", "a {\n value: 10e1e2x;\n}\n", "")
	expectPrinted(t, "a { value: 10e1e-\\32x }", "a {\n value: 10e1e-2x;\n}\n", "")
	expectPrinted(t, "a { value: 10e1E\\32x }", "a {\n value: 10e1E2x;\n}\n", "")
	expectPrinted(t, "a { value: 10e1E-\\32x }", "a {\n value: 10e1E-2x;\n}\n", "")
	expectPrinted(t, "a { value: 10E1e\\32x }", "a {\n value: 10E1e2x;\n}\n", "")
	expectPrinted(t, "a { value: 10E1e-\\32x }", "a {\n value: 10E1e-2x;\n}\n", "")
	expectPrinted(t, "a { value: 10E1E\\32x }", "a {\n value: 10E1E2x;\n}\n", "")
	expectPrinted(t, "a { value: 10E1E-\\32x }", "a {\n value: 10E1E-2x;\n}\n", "")
	expectPrinted(t, "a { value: 10\\32x }", "a {\n value: 10\\32x;\n}\n", "")
	expectPrinted(t, "a { value: 10\\2cx }", "a {\n value: 10\\,x;\n}\n", "")
	expectPrinted(t, "a { value: 10\\,x }", "a {\n value: 10\\,x;\n}\n", "")
	expectPrinted(t, "a { value: 10x\\2c }", "a {\n value: 10x\\,;\n}\n", "")
	expectPrinted(t, "a { value: 10x\\, }", "a {\n value: 10x\\,;\n}\n", "")
	// This must remain unescaped. See https://github.com/evanw/esbuild/issues/2677
	expectPrinted(t, "@font-face { unicode-range: U+0e2e-0e2f }", "@font-face {\n unicode-range: U+0e2e-0e2f;\n}\n", "")
	// RDeclaration
	colorWarning := "<stdin>: WARNING: \",olor\" is not a known CSS property\nNOTE: Did you mean \"color\" instead?\n"
	expectPrintedMangle(t, "a { c\\6flor: #f00 }", "a {\n color: red;\n}\n", "")
	expectPrintedMangle(t, "a { \\63olor: #f00 }", "a {\n color: red;\n}\n", "")
	expectPrintedMangle(t, "a { \\2color: #f00 }", "a {\n \\,olor: #f00;\n}\n", colorWarning)
	expectPrintedMangle(t, "a { \\,olor: #f00 }", "a {\n \\,olor: #f00;\n}\n", colorWarning)
	// RUnknownAt
	expectPrinted(t, "@unknown;", "@unknown;\n", "")
	expectPrinted(t, "@u\\6eknown;", "@unknown;\n", "")
	expectPrinted(t, "@\\75nknown;", "@unknown;\n", "")
	expectPrinted(t, "@u\\2cnknown;", "@u\\,nknown;\n", "")
	expectPrinted(t, "@u\\,nknown;", "@u\\,nknown;\n", "")
	expectPrinted(t, "@\\2cunknown;", "@\\,unknown;\n", "")
	expectPrinted(t, "@\\,unknown;", "@\\,unknown;\n", "")
	// RAtKeyframes
	expectPrinted(t, "@k\\65yframes abc { from {} }", "@keyframes abc {\n from {\n }\n}\n", "")
	expectPrinted(t, "@keyframes \\61 bc { from {} }", "@keyframes abc {\n from {\n }\n}\n", "")
	expectPrinted(t, "@keyframes a\\62 c { from {} }", "@keyframes abc {\n from {\n }\n}\n", "")
	expectPrinted(t, "@keyframes abc { \\66rom {} }", "@keyframes abc {\n from {\n }\n}\n", "")
	expectPrinted(t, "@keyframes a\\2c c { \\66rom {} }", "@keyframes a\\,c {\n from {\n }\n}\n", "")
	expectPrinted(t, "@keyframes a\\,c { \\66rom {} }", "@keyframes a\\,c {\n from {\n }\n}\n", "")
	// RAtNamespace
	namespaceWarning := "<stdin>: WARNING: \"@namespace\" rules are not supported\n"
	expectPrinted(t, "@n\\61mespace ns 'path';", "@namespace ns \"path\";\n", namespaceWarning)
	expectPrinted(t, "@namespace \\6es 'path';", "@namespace ns \"path\";\n", namespaceWarning)
	expectPrinted(t, "@namespace ns 'p\\61th';", "@namespace ns \"path\";\n", namespaceWarning)
	expectPrinted(t, "@namespace \\2cs 'p\\61th';", "@namespace \\,s \"path\";\n", namespaceWarning)
	expectPrinted(t, "@namespace \\,s 'p\\61th';", "@namespace \\,s \"path\";\n", namespaceWarning)
	// CompoundSelector
	expectPrinted(t, "* {}", "* {\n}\n", "")
	expectPrinted(t, "*|div {}", "*|div {\n}\n", "")
	expectPrinted(t, "\\2a {}", "\\* {\n}\n", "")
	expectPrinted(t, "\\2a|div {}", "\\*|div {\n}\n", "")
	expectPrinted(t, "\\2d {}", "\\- {\n}\n", "")
	expectPrinted(t, "\\2d- {}", "-- {\n}\n", "")
	expectPrinted(t, "-\\2d {}", "-- {\n}\n", "")
	expectPrinted(t, "\\2d 123 {}", "\\-123 {\n}\n", "")
	// SSHash
	expectPrinted(t, "#h\\61sh {}", "#hash {\n}\n", "")
	expectPrinted(t, "#\\2chash {}", "#\\,hash {\n}\n", "")
	expectPrinted(t, "#\\,hash {}", "#\\,hash {\n}\n", "")
	expectPrinted(t, "#\\2d {}", "#\\- {\n}\n", "")
	expectPrinted(t, "#\\2d- {}", "#-- {\n}\n", "")
	expectPrinted(t, "#-\\2d {}", "#-- {\n}\n", "")
	expectPrinted(t, "#\\2d 123 {}", "#\\-123 {\n}\n", "")
	expectPrinted(t, "#\\61hash {}", "#ahash {\n}\n", "")
	expectPrinted(t, "#\\30hash {}", "#\\30hash {\n}\n", "")
	expectPrinted(t, "#0\\2chash {}", "#0\\,hash {\n}\n", "<stdin>: WARNING: Unexpected \"#0\\\\2chash\"\n")
	expectPrinted(t, "#0\\,hash {}", "#0\\,hash {\n}\n", "<stdin>: WARNING: Unexpected \"#0\\\\,hash\"\n")
	// SSClass
	expectPrinted(t, ".cl\\61ss {}", ".class {\n}\n", "")
	expectPrinted(t, ".\\2c class {}", ".\\,class {\n}\n", "")
	expectPrinted(t, ".\\,class {}", ".\\,class {\n}\n", "")
	// SSPseudoClass
	expectPrinted(t, ":pseudocl\\61ss {}", ":pseudoclass {\n}\n", "")
	expectPrinted(t, ":pseudo\\2c class {}", ":pseudo\\,class {\n}\n", "")
	expectPrinted(t, ":pseudo\\,class {}", ":pseudo\\,class {\n}\n", "")
	expectPrinted(t, ":pseudo(cl\\61ss) {}", ":pseudo(class) {\n}\n", "")
	expectPrinted(t, ":pseudo(cl\\2css) {}", ":pseudo(cl\\,ss) {\n}\n", "")
	expectPrinted(t, ":pseudo(cl\\,ss) {}", ":pseudo(cl\\,ss) {\n}\n", "")
	// SSAttribute
	expectPrinted(t, "[\\61ttr] {}", "[attr] {\n}\n", "")
	expectPrinted(t, "[\\2c attr] {}", "[\\,attr] {\n}\n", "")
	expectPrinted(t, "[\\,attr] {}", "[\\,attr] {\n}\n", "")
	expectPrinted(t, "[attr\\7e=x] {}", "[attr\\~=x] {\n}\n", "")
	expectPrinted(t, "[attr\\~=x] {}", "[attr\\~=x] {\n}\n", "")
	expectPrinted(t, "[attr=\\2c] {}", "[attr=\",\"] {\n}\n", "")
	expectPrinted(t, "[attr=\\,] {}", "[attr=\",\"] {\n}\n", "")
	expectPrinted(t, "[attr=\"-\"] {}", "[attr=\"-\"] {\n}\n", "")
	expectPrinted(t, "[attr=\"--\"] {}", "[attr=--] {\n}\n", "")
	expectPrinted(t, "[attr=\"-a\"] {}", "[attr=-a] {\n}\n", "")
	expectPrinted(t, "[\\6es|attr] {}", "[ns|attr] {\n}\n", "")
	expectPrinted(t, "[ns|\\61ttr] {}", "[ns|attr] {\n}\n", "")
	expectPrinted(t, "[\\2cns|attr] {}", "[\\,ns|attr] {\n}\n", "")
	expectPrinted(t, "[ns|\\2c attr] {}", "[ns|\\,attr] {\n}\n", "")
	expectPrinted(t, "[*|attr] {}", "[*|attr] {\n}\n", "")
	expectPrinted(t, "[\\2a|attr] {}", "[\\*|attr] {\n}\n", "")
}
// TestString covers string token handling: escaped newlines inside
// strings are dropped, hex escapes are decoded, unescaped newlines
// terminate the string with warnings, and out-of-range or invalid
// escape sequences degrade gracefully.
func TestString(t *testing.T) {
	expectPrinted(t, "a:after { content: 'a\\\rb' }", "a:after {\n content: \"ab\";\n}\n", "")
	expectPrinted(t, "a:after { content: 'a\\\nb' }", "a:after {\n content: \"ab\";\n}\n", "")
	expectPrinted(t, "a:after { content: 'a\\\fb' }", "a:after {\n content: \"ab\";\n}\n", "")
	expectPrinted(t, "a:after { content: 'a\\\r\nb' }", "a:after {\n content: \"ab\";\n}\n", "")
	expectPrinted(t, "a:after { content: 'a\\62 c' }", "a:after {\n content: \"abc\";\n}\n", "")
	// A bare newline inside a string is an "unterminated string token"
	expectPrinted(t, "a:after { content: '\r' }", "a:after {\n content: '\n ' }\n ;\n}\n",
		`<stdin>: WARNING: Unterminated string token
<stdin>: WARNING: Expected "}" to go with "{"
<stdin>: NOTE: The unbalanced "{" is here:
<stdin>: WARNING: Unterminated string token
`)
	expectPrinted(t, "a:after { content: '\n' }", "a:after {\n content: '\n ' }\n ;\n}\n",
		`<stdin>: WARNING: Unterminated string token
<stdin>: WARNING: Expected "}" to go with "{"
<stdin>: NOTE: The unbalanced "{" is here:
<stdin>: WARNING: Unterminated string token
`)
	expectPrinted(t, "a:after { content: '\f' }", "a:after {\n content: '\n ' }\n ;\n}\n",
		`<stdin>: WARNING: Unterminated string token
<stdin>: WARNING: Expected "}" to go with "{"
<stdin>: NOTE: The unbalanced "{" is here:
<stdin>: WARNING: Unterminated string token
`)
	expectPrinted(t, "a:after { content: '\r\n' }", "a:after {\n content: '\n ' }\n ;\n}\n",
		`<stdin>: WARNING: Unterminated string token
<stdin>: WARNING: Expected "}" to go with "{"
<stdin>: NOTE: The unbalanced "{" is here:
<stdin>: WARNING: Unterminated string token
`)
	expectPrinted(t, "a:after { content: '\\1010101' }", "a:after {\n content: \"\U001010101\";\n}\n", "")
	expectPrinted(t, "a:after { content: '\\invalid' }", "a:after {\n content: \"invalid\";\n}\n", "")
}
// TestNumber checks that numeric tokens are printed verbatim without
// mangling, and that mangling normalizes them ("0.0" -> "0",
// "0.1" -> ".1", ".10" -> ".1") while preserving any sign. The same
// cases are run for plain numbers, percentages, and dimension-like
// suffixes via "ext".
func TestNumber(t *testing.T) {
	for _, ext := range []string{"", "%", "px+"} {
		expectPrinted(t, "a { width: .0"+ext+"; }", "a {\n width: .0"+ext+";\n}\n", "")
		expectPrinted(t, "a { width: .00"+ext+"; }", "a {\n width: .00"+ext+";\n}\n", "")
		expectPrinted(t, "a { width: .10"+ext+"; }", "a {\n width: .10"+ext+";\n}\n", "")
		expectPrinted(t, "a { width: 0."+ext+"; }", "a {\n width: 0."+ext+";\n}\n", "")
		expectPrinted(t, "a { width: 0.0"+ext+"; }", "a {\n width: 0.0"+ext+";\n}\n", "")
		expectPrinted(t, "a { width: 0.1"+ext+"; }", "a {\n width: 0.1"+ext+";\n}\n", "")
		expectPrinted(t, "a { width: +.0"+ext+"; }", "a {\n width: +.0"+ext+";\n}\n", "")
		expectPrinted(t, "a { width: +.00"+ext+"; }", "a {\n width: +.00"+ext+";\n}\n", "")
		expectPrinted(t, "a { width: +.10"+ext+"; }", "a {\n width: +.10"+ext+";\n}\n", "")
		expectPrinted(t, "a { width: +0."+ext+"; }", "a {\n width: +0."+ext+";\n}\n", "")
		expectPrinted(t, "a { width: +0.0"+ext+"; }", "a {\n width: +0.0"+ext+";\n}\n", "")
		expectPrinted(t, "a { width: +0.1"+ext+"; }", "a {\n width: +0.1"+ext+";\n}\n", "")
		expectPrinted(t, "a { width: -.0"+ext+"; }", "a {\n width: -.0"+ext+";\n}\n", "")
		expectPrinted(t, "a { width: -.00"+ext+"; }", "a {\n width: -.00"+ext+";\n}\n", "")
		expectPrinted(t, "a { width: -.10"+ext+"; }", "a {\n width: -.10"+ext+";\n}\n", "")
		expectPrinted(t, "a { width: -0."+ext+"; }", "a {\n width: -0."+ext+";\n}\n", "")
		expectPrinted(t, "a { width: -0.0"+ext+"; }", "a {\n width: -0.0"+ext+";\n}\n", "")
		expectPrinted(t, "a { width: -0.1"+ext+"; }", "a {\n width: -0.1"+ext+";\n}\n", "")
		expectPrintedMangle(t, "a { width: .0"+ext+"; }", "a {\n width: 0"+ext+";\n}\n", "")
		expectPrintedMangle(t, "a { width: .00"+ext+"; }", "a {\n width: 0"+ext+";\n}\n", "")
		expectPrintedMangle(t, "a { width: .10"+ext+"; }", "a {\n width: .1"+ext+";\n}\n", "")
		expectPrintedMangle(t, "a { width: 0."+ext+"; }", "a {\n width: 0"+ext+";\n}\n", "")
		expectPrintedMangle(t, "a { width: 0.0"+ext+"; }", "a {\n width: 0"+ext+";\n}\n", "")
		expectPrintedMangle(t, "a { width: 0.1"+ext+"; }", "a {\n width: .1"+ext+";\n}\n", "")
		expectPrintedMangle(t, "a { width: +.0"+ext+"; }", "a {\n width: +0"+ext+";\n}\n", "")
		expectPrintedMangle(t, "a { width: +.00"+ext+"; }", "a {\n width: +0"+ext+";\n}\n", "")
		expectPrintedMangle(t, "a { width: +.10"+ext+"; }", "a {\n width: +.1"+ext+";\n}\n", "")
		expectPrintedMangle(t, "a { width: +0."+ext+"; }", "a {\n width: +0"+ext+";\n}\n", "")
		expectPrintedMangle(t, "a { width: +0.0"+ext+"; }", "a {\n width: +0"+ext+";\n}\n", "")
		expectPrintedMangle(t, "a { width: +0.1"+ext+"; }", "a {\n width: +.1"+ext+";\n}\n", "")
		expectPrintedMangle(t, "a { width: -.0"+ext+"; }", "a {\n width: -0"+ext+";\n}\n", "")
		expectPrintedMangle(t, "a { width: -.00"+ext+"; }", "a {\n width: -0"+ext+";\n}\n", "")
		expectPrintedMangle(t, "a { width: -.10"+ext+"; }", "a {\n width: -.1"+ext+";\n}\n", "")
		expectPrintedMangle(t, "a { width: -0."+ext+"; }", "a {\n width: -0"+ext+";\n}\n", "")
		expectPrintedMangle(t, "a { width: -0.0"+ext+"; }", "a {\n width: -0"+ext+";\n}\n", "")
		expectPrintedMangle(t, "a { width: -0.1"+ext+"; }", "a {\n width: -.1"+ext+";\n}\n", "")
	}
}
// TestURL checks that url() tokens print in the minimal unquoted form:
// quotes and any surrounding whitespace (space, tab, CR, LF, FF, CRLF)
// are dropped when the URL is the only content, but quoting is kept when
// extra tokens follow the string.
func TestURL(t *testing.T) {
	// Quoted and unquoted forms normalize to unquoted
	expectPrinted(t, "a { background: url(foo.png) }", "a {\n background: url(foo.png);\n}\n", "")
	expectPrinted(t, "a { background: url('foo.png') }", "a {\n background: url(foo.png);\n}\n", "")
	expectPrinted(t, "a { background: url(\"foo.png\") }", "a {\n background: url(foo.png);\n}\n", "")
	// Whitespace after the quoted URL is dropped
	expectPrinted(t, "a { background: url(\"foo.png\" ) }", "a {\n background: url(foo.png);\n}\n", "")
	expectPrinted(t, "a { background: url(\"foo.png\"\t) }", "a {\n background: url(foo.png);\n}\n", "")
	expectPrinted(t, "a { background: url(\"foo.png\"\r) }", "a {\n background: url(foo.png);\n}\n", "")
	expectPrinted(t, "a { background: url(\"foo.png\"\n) }", "a {\n background: url(foo.png);\n}\n", "")
	expectPrinted(t, "a { background: url(\"foo.png\"\f) }", "a {\n background: url(foo.png);\n}\n", "")
	expectPrinted(t, "a { background: url(\"foo.png\"\r\n) }", "a {\n background: url(foo.png);\n}\n", "")
	// Whitespace before the quoted URL is dropped
	expectPrinted(t, "a { background: url( \"foo.png\") }", "a {\n background: url(foo.png);\n}\n", "")
	expectPrinted(t, "a { background: url(\t\"foo.png\") }", "a {\n background: url(foo.png);\n}\n", "")
	expectPrinted(t, "a { background: url(\r\"foo.png\") }", "a {\n background: url(foo.png);\n}\n", "")
	expectPrinted(t, "a { background: url(\n\"foo.png\") }", "a {\n background: url(foo.png);\n}\n", "")
	expectPrinted(t, "a { background: url(\f\"foo.png\") }", "a {\n background: url(foo.png);\n}\n", "")
	expectPrinted(t, "a { background: url(\r\n\"foo.png\") }", "a {\n background: url(foo.png);\n}\n", "")
	expectPrinted(t, "a { background: url( \"foo.png\" ) }", "a {\n background: url(foo.png);\n}\n", "")
	// Extra tokens after the string keep the quoted form
	expectPrinted(t, "a { background: url(\"foo.png\" extra-stuff) }", "a {\n background: url(\"foo.png\" extra-stuff);\n}\n", "")
	expectPrinted(t, "a { background: url( \"foo.png\" extra-stuff ) }", "a {\n background: url(\"foo.png\" extra-stuff);\n}\n", "")
}
// TestHexColor checks printing of hex colors. Without mangling they are
// preserved verbatim (including case); with mangling they are lowercased
// and shortened losslessly: "#RRGGBB" -> "#RGB" and "#RRGGBBAA" ->
// "#RGBA" when each byte has matching nibbles, and a fully-opaque "ff"
// alpha is dropped entirely.
func TestHexColor(t *testing.T) {
	// "#RGBA"
	expectPrinted(t, "a { color: #1234 }", "a {\n color: #1234;\n}\n", "")
	expectPrinted(t, "a { color: #123f }", "a {\n color: #123f;\n}\n", "")
	expectPrinted(t, "a { color: #abcd }", "a {\n color: #abcd;\n}\n", "")
	expectPrinted(t, "a { color: #abcf }", "a {\n color: #abcf;\n}\n", "")
	expectPrinted(t, "a { color: #ABCD }", "a {\n color: #ABCD;\n}\n", "")
	expectPrinted(t, "a { color: #ABCF }", "a {\n color: #ABCF;\n}\n", "")
	expectPrintedMangle(t, "a { color: #1234 }", "a {\n color: #1234;\n}\n", "")
	expectPrintedMangle(t, "a { color: #123f }", "a {\n color: #123;\n}\n", "")
	expectPrintedMangle(t, "a { color: #abcd }", "a {\n color: #abcd;\n}\n", "")
	expectPrintedMangle(t, "a { color: #abcf }", "a {\n color: #abc;\n}\n", "")
	expectPrintedMangle(t, "a { color: #ABCD }", "a {\n color: #abcd;\n}\n", "")
	expectPrintedMangle(t, "a { color: #ABCF }", "a {\n color: #abc;\n}\n", "")
	// "#RRGGBB"
	expectPrinted(t, "a { color: #112233 }", "a {\n color: #112233;\n}\n", "")
	expectPrinted(t, "a { color: #122233 }", "a {\n color: #122233;\n}\n", "")
	expectPrinted(t, "a { color: #112333 }", "a {\n color: #112333;\n}\n", "")
	expectPrinted(t, "a { color: #112234 }", "a {\n color: #112234;\n}\n", "")
	expectPrintedMangle(t, "a { color: #112233 }", "a {\n color: #123;\n}\n", "")
	expectPrintedMangle(t, "a { color: #122233 }", "a {\n color: #122233;\n}\n", "")
	expectPrintedMangle(t, "a { color: #112333 }", "a {\n color: #112333;\n}\n", "")
	expectPrintedMangle(t, "a { color: #112234 }", "a {\n color: #112234;\n}\n", "")
	expectPrinted(t, "a { color: #aabbcc }", "a {\n color: #aabbcc;\n}\n", "")
	expectPrinted(t, "a { color: #abbbcc }", "a {\n color: #abbbcc;\n}\n", "")
	expectPrinted(t, "a { color: #aabccc }", "a {\n color: #aabccc;\n}\n", "")
	expectPrinted(t, "a { color: #aabbcd }", "a {\n color: #aabbcd;\n}\n", "")
	expectPrintedMangle(t, "a { color: #aabbcc }", "a {\n color: #abc;\n}\n", "")
	expectPrintedMangle(t, "a { color: #abbbcc }", "a {\n color: #abbbcc;\n}\n", "")
	expectPrintedMangle(t, "a { color: #aabccc }", "a {\n color: #aabccc;\n}\n", "")
	expectPrintedMangle(t, "a { color: #aabbcd }", "a {\n color: #aabbcd;\n}\n", "")
	expectPrinted(t, "a { color: #AABBCC }", "a {\n color: #AABBCC;\n}\n", "")
	expectPrinted(t, "a { color: #ABBBCC }", "a {\n color: #ABBBCC;\n}\n", "")
	expectPrinted(t, "a { color: #AABCCC }", "a {\n color: #AABCCC;\n}\n", "")
	expectPrinted(t, "a { color: #AABBCD }", "a {\n color: #AABBCD;\n}\n", "")
	expectPrintedMangle(t, "a { color: #AABBCC }", "a {\n color: #abc;\n}\n", "")
	expectPrintedMangle(t, "a { color: #ABBBCC }", "a {\n color: #abbbcc;\n}\n", "")
	expectPrintedMangle(t, "a { color: #AABCCC }", "a {\n color: #aabccc;\n}\n", "")
	expectPrintedMangle(t, "a { color: #AABBCD }", "a {\n color: #aabbcd;\n}\n", "")
	// "#RRGGBBAA"
	expectPrinted(t, "a { color: #11223344 }", "a {\n color: #11223344;\n}\n", "")
	expectPrinted(t, "a { color: #12223344 }", "a {\n color: #12223344;\n}\n", "")
	expectPrinted(t, "a { color: #11233344 }", "a {\n color: #11233344;\n}\n", "")
	expectPrinted(t, "a { color: #11223444 }", "a {\n color: #11223444;\n}\n", "")
	expectPrinted(t, "a { color: #11223345 }", "a {\n color: #11223345;\n}\n", "")
	expectPrintedMangle(t, "a { color: #11223344 }", "a {\n color: #1234;\n}\n", "")
	expectPrintedMangle(t, "a { color: #12223344 }", "a {\n color: #12223344;\n}\n", "")
	expectPrintedMangle(t, "a { color: #11233344 }", "a {\n color: #11233344;\n}\n", "")
	expectPrintedMangle(t, "a { color: #11223444 }", "a {\n color: #11223444;\n}\n", "")
	expectPrintedMangle(t, "a { color: #11223345 }", "a {\n color: #11223345;\n}\n", "")
	expectPrinted(t, "a { color: #aabbccdd }", "a {\n color: #aabbccdd;\n}\n", "")
	expectPrinted(t, "a { color: #abbbccdd }", "a {\n color: #abbbccdd;\n}\n", "")
	expectPrinted(t, "a { color: #aabcccdd }", "a {\n color: #aabcccdd;\n}\n", "")
	expectPrinted(t, "a { color: #aabbcddd }", "a {\n color: #aabbcddd;\n}\n", "")
	expectPrinted(t, "a { color: #aabbccde }", "a {\n color: #aabbccde;\n}\n", "")
	expectPrintedMangle(t, "a { color: #aabbccdd }", "a {\n color: #abcd;\n}\n", "")
	expectPrintedMangle(t, "a { color: #abbbccdd }", "a {\n color: #abbbccdd;\n}\n", "")
	expectPrintedMangle(t, "a { color: #aabcccdd }", "a {\n color: #aabcccdd;\n}\n", "")
	expectPrintedMangle(t, "a { color: #aabbcddd }", "a {\n color: #aabbcddd;\n}\n", "")
	expectPrintedMangle(t, "a { color: #aabbccde }", "a {\n color: #aabbccde;\n}\n", "")
	expectPrinted(t, "a { color: #AABBCCDD }", "a {\n color: #AABBCCDD;\n}\n", "")
	expectPrinted(t, "a { color: #ABBBCCDD }", "a {\n color: #ABBBCCDD;\n}\n", "")
	expectPrinted(t, "a { color: #AABCCCDD }", "a {\n color: #AABCCCDD;\n}\n", "")
	expectPrinted(t, "a { color: #AABBCDDD }", "a {\n color: #AABBCDDD;\n}\n", "")
	expectPrinted(t, "a { color: #AABBCCDE }", "a {\n color: #AABBCCDE;\n}\n", "")
	expectPrintedMangle(t, "a { color: #AABBCCDD }", "a {\n color: #abcd;\n}\n", "")
	expectPrintedMangle(t, "a { color: #ABBBCCDD }", "a {\n color: #abbbccdd;\n}\n", "")
	expectPrintedMangle(t, "a { color: #AABCCCDD }", "a {\n color: #aabcccdd;\n}\n", "")
	expectPrintedMangle(t, "a { color: #AABBCDDD }", "a {\n color: #aabbcddd;\n}\n", "")
	expectPrintedMangle(t, "a { color: #AABBCCDE }", "a {\n color: #aabbccde;\n}\n", "")
	// "#RRGGBBFF"
	expectPrinted(t, "a { color: #112233ff }", "a {\n color: #112233ff;\n}\n", "")
	expectPrinted(t, "a { color: #122233ff }", "a {\n color: #122233ff;\n}\n", "")
	expectPrinted(t, "a { color: #112333ff }", "a {\n color: #112333ff;\n}\n", "")
	expectPrinted(t, "a { color: #112234ff }", "a {\n color: #112234ff;\n}\n", "")
	expectPrinted(t, "a { color: #112233ef }", "a {\n color: #112233ef;\n}\n", "")
	expectPrintedMangle(t, "a { color: #112233ff }", "a {\n color: #123;\n}\n", "")
	expectPrintedMangle(t, "a { color: #122233ff }", "a {\n color: #122233;\n}\n", "")
	expectPrintedMangle(t, "a { color: #112333ff }", "a {\n color: #112333;\n}\n", "")
	expectPrintedMangle(t, "a { color: #112234ff }", "a {\n color: #112234;\n}\n", "")
	expectPrintedMangle(t, "a { color: #112233ef }", "a {\n color: #112233ef;\n}\n", "")
	expectPrinted(t, "a { color: #aabbccff }", "a {\n color: #aabbccff;\n}\n", "")
	expectPrinted(t, "a { color: #abbbccff }", "a {\n color: #abbbccff;\n}\n", "")
	expectPrinted(t, "a { color: #aabcccff }", "a {\n color: #aabcccff;\n}\n", "")
	expectPrinted(t, "a { color: #aabbcdff }", "a {\n color: #aabbcdff;\n}\n", "")
	expectPrinted(t, "a { color: #aabbccef }", "a {\n color: #aabbccef;\n}\n", "")
	expectPrintedMangle(t, "a { color: #aabbccff }", "a {\n color: #abc;\n}\n", "")
	expectPrintedMangle(t, "a { color: #abbbccff }", "a {\n color: #abbbcc;\n}\n", "")
	expectPrintedMangle(t, "a { color: #aabcccff }", "a {\n color: #aabccc;\n}\n", "")
	expectPrintedMangle(t, "a { color: #aabbcdff }", "a {\n color: #aabbcd;\n}\n", "")
	expectPrintedMangle(t, "a { color: #aabbccef }", "a {\n color: #aabbccef;\n}\n", "")
	expectPrinted(t, "a { color: #AABBCCFF }", "a {\n color: #AABBCCFF;\n}\n", "")
	expectPrinted(t, "a { color: #ABBBCCFF }", "a {\n color: #ABBBCCFF;\n}\n", "")
	expectPrinted(t, "a { color: #AABCCCFF }", "a {\n color: #AABCCCFF;\n}\n", "")
	expectPrinted(t, "a { color: #AABBCDFF }", "a {\n color: #AABBCDFF;\n}\n", "")
	expectPrinted(t, "a { color: #AABBCCEF }", "a {\n color: #AABBCCEF;\n}\n", "")
	expectPrintedMangle(t, "a { color: #AABBCCFF }", "a {\n color: #abc;\n}\n", "")
	expectPrintedMangle(t, "a { color: #ABBBCCFF }", "a {\n color: #abbbcc;\n}\n", "")
	expectPrintedMangle(t, "a { color: #AABCCCFF }", "a {\n color: #aabccc;\n}\n", "")
	expectPrintedMangle(t, "a { color: #AABBCDFF }", "a {\n color: #aabbcd;\n}\n", "")
	expectPrintedMangle(t, "a { color: #AABBCCEF }", "a {\n color: #aabbccef;\n}\n", "")
}
func TestColorFunctions(t *testing.T) {
expectPrinted(t, "a { color: color(display-p3 0.5 0.0 0.0%) }", "a {\n color: color(display-p3 0.5 0.0 0.0%);\n}\n", "")
expectPrinted(t, "a { color: color(display-p3 0.5 0.0 0.0% / 0.5) }", "a {\n color: color(display-p3 0.5 0.0 0.0% / 0.5);\n}\n", "")
// Check minification of tokens
expectPrintedMangle(t, "a { color: color(display-p3 0.5 0.0 0.0%) }", "a {\n color: color(display-p3 .5 0 0%);\n}\n", "")
expectPrintedMangle(t, "a { color: color(display-p3 0.5 0.0 0.0% / 0.5) }", "a {\n color: color(display-p3 .5 0 0% / .5);\n}\n", "")
// Check out-of-range colors
expectPrintedLower(t, "a { before: 0; color: color(display-p3 1 0 0); after: 1 }",
"a {\n before: 0;\n color: #ff0f0e;\n color: color(display-p3 1 0 0);\n after: 1;\n}\n", "")
expectPrintedLowerMangle(t, "a { before: 0; color: color(display-p3 1 0 0); after: 1 }",
"a {\n before: 0;\n color: #ff0f0e;\n color: color(display-p3 1 0 0);\n after: 1;\n}\n", "")
expectPrintedLower(t, "a { before: 0; color: color(display-p3 1 0 0 / 0.5); after: 1 }",
"a {\n before: 0;\n color: rgba(255, 15, 14, .5);\n color: color(display-p3 1 0 0 / 0.5);\n after: 1;\n}\n", "")
expectPrintedLowerMangle(t, "a { before: 0; color: color(display-p3 1 0 0 / 0.5); after: 1 }",
"a {\n before: 0;\n color: rgba(255, 15, 14, .5);\n color: color(display-p3 1 0 0 / .5);\n after: 1;\n}\n", "")
expectPrintedLower(t, "a { before: 0; background: color(display-p3 1 0 0); after: 1 }",
"a {\n before: 0;\n background: #ff0f0e;\n background: color(display-p3 1 0 0);\n after: 1;\n}\n", "")
expectPrintedLowerMangle(t, "a { before: 0; background: color(display-p3 1 0 0); after: 1 }",
"a {\n before: 0;\n background: #ff0f0e;\n background: color(display-p3 1 0 0);\n after: 1;\n}\n", "")
expectPrintedLower(t, "a { before: 0; background: color(display-p3 1 0 0 / 0.5); after: 1 }",
"a {\n before: 0;\n background: rgba(255, 15, 14, .5);\n background: color(display-p3 1 0 0 / 0.5);\n after: 1;\n}\n", "")
expectPrintedLowerMangle(t, "a { before: 0; background: color(display-p3 1 0 0 / 0.5); after: 1 }",
"a {\n before: 0;\n background: rgba(255, 15, 14, .5);\n background: color(display-p3 1 0 0 / .5);\n after: 1;\n}\n", "")
expectPrintedLower(t, "a { before: 0; box-shadow: 1px color(display-p3 1 0 0); after: 1 }",
"a {\n before: 0;\n box-shadow: 1px #ff0f0e;\n box-shadow: 1px color(display-p3 1 0 0);\n after: 1;\n}\n", "")
expectPrintedLowerMangle(t, "a { before: 0; box-shadow: 1px color(display-p3 1 0 0); after: 1 }",
"a {\n before: 0;\n box-shadow: 1px #ff0f0e;\n box-shadow: 1px color(display-p3 1 0 0);\n after: 1;\n}\n", "")
expectPrintedLower(t, "a { before: 0; box-shadow: 1px color(display-p3 1 0 0 / 0.5); after: 1 }",
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | true |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/css_parser/css_decls_color.go | internal/css_parser/css_decls_color.go | package css_parser
import (
"fmt"
"math"
"strconv"
"strings"
"github.com/evanw/esbuild/internal/compat"
"github.com/evanw/esbuild/internal/css_ast"
"github.com/evanw/esbuild/internal/css_lexer"
"github.com/evanw/esbuild/internal/helpers"
)
// These names are shorter than their hex codes, so substituting them
// saves bytes when minifying. Keys are colors packed as 0xRRGGBBAA;
// alpha is always 0xff here since named colors are fully opaque.
var shortColorName = map[uint32]string{
	0x000080ff: "navy",
	0x008000ff: "green",
	0x008080ff: "teal",
	0x4b0082ff: "indigo",
	0x800000ff: "maroon",
	0x800080ff: "purple",
	0x808000ff: "olive",
	0x808080ff: "gray",
	0xa0522dff: "sienna",
	0xa52a2aff: "brown",
	0xc0c0c0ff: "silver",
	0xcd853fff: "peru",
	0xd2b48cff: "tan",
	0xda70d6ff: "orchid",
	0xdda0ddff: "plum",
	0xee82eeff: "violet",
	0xf0e68cff: "khaki",
	0xf0ffffff: "azure",
	0xf5deb3ff: "wheat",
	0xf5f5dcff: "beige",
	0xfa8072ff: "salmon",
	0xfaf0e6ff: "linen",
	0xff0000ff: "red",
	0xff6347ff: "tomato",
	0xff7f50ff: "coral",
	0xffa500ff: "orange",
	0xffc0cbff: "pink",
	0xffd700ff: "gold",
	0xffe4c4ff: "bisque",
	0xfffafaff: "snow",
	0xfffff0ff: "ivory",
}
// colorNameToHex maps each CSS named color to its value packed as
// 0xRRGGBBAA. Every entry has an alpha of 0xff (named colors are all
// fully opaque).
var colorNameToHex = map[string]uint32{
	"black": 0x000000ff,
	"silver": 0xc0c0c0ff,
	"gray": 0x808080ff,
	"white": 0xffffffff,
	"maroon": 0x800000ff,
	"red": 0xff0000ff,
	"purple": 0x800080ff,
	"fuchsia": 0xff00ffff,
	"green": 0x008000ff,
	"lime": 0x00ff00ff,
	"olive": 0x808000ff,
	"yellow": 0xffff00ff,
	"navy": 0x000080ff,
	"blue": 0x0000ffff,
	"teal": 0x008080ff,
	"aqua": 0x00ffffff,
	"orange": 0xffa500ff,
	"aliceblue": 0xf0f8ffff,
	"antiquewhite": 0xfaebd7ff,
	"aquamarine": 0x7fffd4ff,
	"azure": 0xf0ffffff,
	"beige": 0xf5f5dcff,
	"bisque": 0xffe4c4ff,
	"blanchedalmond": 0xffebcdff,
	"blueviolet": 0x8a2be2ff,
	"brown": 0xa52a2aff,
	"burlywood": 0xdeb887ff,
	"cadetblue": 0x5f9ea0ff,
	"chartreuse": 0x7fff00ff,
	"chocolate": 0xd2691eff,
	"coral": 0xff7f50ff,
	"cornflowerblue": 0x6495edff,
	"cornsilk": 0xfff8dcff,
	"crimson": 0xdc143cff,
	"cyan": 0x00ffffff,
	"darkblue": 0x00008bff,
	"darkcyan": 0x008b8bff,
	"darkgoldenrod": 0xb8860bff,
	"darkgray": 0xa9a9a9ff,
	"darkgreen": 0x006400ff,
	"darkgrey": 0xa9a9a9ff,
	"darkkhaki": 0xbdb76bff,
	"darkmagenta": 0x8b008bff,
	"darkolivegreen": 0x556b2fff,
	"darkorange": 0xff8c00ff,
	"darkorchid": 0x9932ccff,
	"darkred": 0x8b0000ff,
	"darksalmon": 0xe9967aff,
	"darkseagreen": 0x8fbc8fff,
	"darkslateblue": 0x483d8bff,
	"darkslategray": 0x2f4f4fff,
	"darkslategrey": 0x2f4f4fff,
	"darkturquoise": 0x00ced1ff,
	"darkviolet": 0x9400d3ff,
	"deeppink": 0xff1493ff,
	"deepskyblue": 0x00bfffff,
	"dimgray": 0x696969ff,
	"dimgrey": 0x696969ff,
	"dodgerblue": 0x1e90ffff,
	"firebrick": 0xb22222ff,
	"floralwhite": 0xfffaf0ff,
	"forestgreen": 0x228b22ff,
	"gainsboro": 0xdcdcdcff,
	"ghostwhite": 0xf8f8ffff,
	"gold": 0xffd700ff,
	"goldenrod": 0xdaa520ff,
	"greenyellow": 0xadff2fff,
	"grey": 0x808080ff,
	"honeydew": 0xf0fff0ff,
	"hotpink": 0xff69b4ff,
	"indianred": 0xcd5c5cff,
	"indigo": 0x4b0082ff,
	"ivory": 0xfffff0ff,
	"khaki": 0xf0e68cff,
	"lavender": 0xe6e6faff,
	"lavenderblush": 0xfff0f5ff,
	"lawngreen": 0x7cfc00ff,
	"lemonchiffon": 0xfffacdff,
	"lightblue": 0xadd8e6ff,
	"lightcoral": 0xf08080ff,
	"lightcyan": 0xe0ffffff,
	"lightgoldenrodyellow": 0xfafad2ff,
	"lightgray": 0xd3d3d3ff,
	"lightgreen": 0x90ee90ff,
	"lightgrey": 0xd3d3d3ff,
	"lightpink": 0xffb6c1ff,
	"lightsalmon": 0xffa07aff,
	"lightseagreen": 0x20b2aaff,
	"lightskyblue": 0x87cefaff,
	"lightslategray": 0x778899ff,
	"lightslategrey": 0x778899ff,
	"lightsteelblue": 0xb0c4deff,
	"lightyellow": 0xffffe0ff,
	"limegreen": 0x32cd32ff,
	"linen": 0xfaf0e6ff,
	"magenta": 0xff00ffff,
	"mediumaquamarine": 0x66cdaaff,
	"mediumblue": 0x0000cdff,
	"mediumorchid": 0xba55d3ff,
	"mediumpurple": 0x9370dbff,
	"mediumseagreen": 0x3cb371ff,
	"mediumslateblue": 0x7b68eeff,
	"mediumspringgreen": 0x00fa9aff,
	"mediumturquoise": 0x48d1ccff,
	"mediumvioletred": 0xc71585ff,
	"midnightblue": 0x191970ff,
	"mintcream": 0xf5fffaff,
	"mistyrose": 0xffe4e1ff,
	"moccasin": 0xffe4b5ff,
	"navajowhite": 0xffdeadff,
	"oldlace": 0xfdf5e6ff,
	"olivedrab": 0x6b8e23ff,
	"orangered": 0xff4500ff,
	"orchid": 0xda70d6ff,
	"palegoldenrod": 0xeee8aaff,
	"palegreen": 0x98fb98ff,
	"paleturquoise": 0xafeeeeff,
	"palevioletred": 0xdb7093ff,
	"papayawhip": 0xffefd5ff,
	"peachpuff": 0xffdab9ff,
	"peru": 0xcd853fff,
	"pink": 0xffc0cbff,
	"plum": 0xdda0ddff,
	"powderblue": 0xb0e0e6ff,
	"rosybrown": 0xbc8f8fff,
	"royalblue": 0x4169e1ff,
	"saddlebrown": 0x8b4513ff,
	"salmon": 0xfa8072ff,
	"sandybrown": 0xf4a460ff,
	"seagreen": 0x2e8b57ff,
	"seashell": 0xfff5eeff,
	"sienna": 0xa0522dff,
	"skyblue": 0x87ceebff,
	"slateblue": 0x6a5acdff,
	"slategray": 0x708090ff,
	"slategrey": 0x708090ff,
	"snow": 0xfffafaff,
	"springgreen": 0x00ff7fff,
	"steelblue": 0x4682b4ff,
	"tan": 0xd2b48cff,
	"thistle": 0xd8bfd8ff,
	"tomato": 0xff6347ff,
	"turquoise": 0x40e0d0ff,
	"violet": 0xee82eeff,
	"wheat": 0xf5deb3ff,
	"whitesmoke": 0xf5f5f5ff,
	"yellowgreen": 0x9acd32ff,
	"rebeccapurple": 0x663399ff,
}
// parseHex interprets text as a case-insensitive hexadecimal number. It
// reports failure if any character is not a hex digit. Note that an empty
// string successfully parses as zero; callers are expected to validate
// the length themselves.
func parseHex(text string) (uint32, bool) {
	var result uint32
	for i := 0; i < len(text); i++ {
		c := text[i]
		var digit uint32
		switch {
		case c >= '0' && c <= '9':
			digit = uint32(c - '0')
		case c >= 'a' && c <= 'f':
			digit = uint32(c-'a') + 10
		case c >= 'A' && c <= 'F':
			digit = uint32(c-'A') + 10
		default:
			return 0, false
		}
		result = result<<4 | digit
	}
	return result, true
}
// 0xAABBCCDD => 0xABCD
//
// compactHex keeps the high nibble of each byte. It is only a lossless
// transform when both nibbles of every byte are equal, which callers
// verify by round-tripping through expandHex.
func compactHex(v uint32) uint32 {
	return ((v >> 12) & 0xFF00) | ((v >> 4) & 0x00FF)
}
// 0xABCD => 0xAABBCCDD
//
// expandHex duplicates each nibble of a 16-bit value into a full byte,
// the inverse of compactHex for values in its image.
func expandHex(v uint32) uint32 {
	a := (v >> 12) & 0xF
	b := (v >> 8) & 0xF
	c := (v >> 4) & 0xF
	d := v & 0xF
	return a*0x11000000 | b*0x00110000 | c*0x00001100 | d*0x00000011
}
// Accessors for the individual channels of a color packed as 0xRRGGBBAA.
func hexR(v uint32) int { return int(v >> 24) }
func hexG(v uint32) int { return int((v >> 16) & 255) }
func hexB(v uint32) int { return int((v >> 8) & 255) }
func hexA(v uint32) int { return int(v & 255) }
// floatToStringForColor formats a number with at most three decimal
// places, then strips any trailing zeros and a dangling decimal point
// (e.g. 0.500 => "0.5", 1.000 => "1").
func floatToStringForColor(a float64) string {
	text := fmt.Sprintf("%.03f", a)
	text = strings.TrimRight(text, "0")
	text = strings.TrimSuffix(text, ".")
	return text
}
// degreesForAngle converts a CSS angle token into degrees. A bare number
// is treated as already being in degrees. Unknown units and unparsable
// numbers report failure.
func degreesForAngle(token css_ast.Token) (float64, bool) {
	switch token.Kind {
	case css_lexer.TNumber:
		value, err := strconv.ParseFloat(token.Text, 64)
		if err != nil {
			return 0, false
		}
		return value, true

	case css_lexer.TDimension:
		value, err := strconv.ParseFloat(token.DimensionValue(), 64)
		if err != nil {
			return 0, false
		}
		var scale float64
		switch token.DimensionUnit() {
		case "deg":
			scale = 1
		case "grad":
			scale = 360.0 / 400.0
		case "rad":
			scale = 180.0 / math.Pi
		case "turn":
			scale = 360.0
		default:
			return 0, false
		}
		return value * scale, true
	}
	return 0, false
}
// lowerAlphaPercentageToNumber rewrites a percentage alpha token into the
// equivalent number token (e.g. "50%" => "0.5"). Any other token kind, or
// an unparsable percentage, is returned unchanged.
func lowerAlphaPercentageToNumber(token css_ast.Token) css_ast.Token {
	if token.Kind != css_lexer.TPercentage {
		return token
	}
	value, err := strconv.ParseFloat(token.Text[:len(token.Text)-1], 64)
	if err != nil {
		return token
	}
	token.Kind = css_lexer.TNumber
	token.Text = floatToStringForColor(value / 100.0)
	return token
}
// Convert newer color syntax to older color syntax for older browsers
//
// Lowers "#RGBA"/"#RRGGBBAA" hashes, "rebeccapurple", modern space-
// separated "rgb()/hsl()" syntax, "hwb()", and the "color()/lab()/lch()/
// oklab()/oklch()" functions depending on which features are unsupported.
// When minifying, any parsable color is also re-printed in its shortest
// encoding. "wouldClipColor" (may be nil) is set instead of gamut-mapping
// when an out-of-sRGB color would otherwise be clipped.
func (p *parser) lowerAndMinifyColor(token css_ast.Token, wouldClipColor *bool) css_ast.Token {
	text := token.Text

	switch token.Kind {
	case css_lexer.THash:
		if p.options.unsupportedCSSFeatures.Has(compat.HexRGBA) {
			switch len(text) {
			case 4:
				// "#1234" => "rgba(1, 2, 3, 0.004)"
				if hex, ok := parseHex(text); ok {
					hex = expandHex(hex)
					return p.tryToGenerateColor(token, parsedColor{hex: hex}, nil)
				}

			case 8:
				// "#12345678" => "rgba(18, 52, 86, 0.47)"
				if hex, ok := parseHex(text); ok {
					return p.tryToGenerateColor(token, parsedColor{hex: hex}, nil)
				}
			}
		}

	case css_lexer.TIdent:
		// "rebeccapurple" is newer than the other color keywords
		if p.options.unsupportedCSSFeatures.Has(compat.RebeccaPurple) && strings.EqualFold(text, "rebeccapurple") {
			token.Kind = css_lexer.THash
			token.Text = "663399"
		}

	case css_lexer.TFunction:
		switch strings.ToLower(text) {
		case "rgb", "rgba", "hsl", "hsla":
			if p.options.unsupportedCSSFeatures.Has(compat.Modern_RGB_HSL) {
				args := *token.Children
				removeAlpha := false
				addAlpha := false

				// "hsl(1deg, 2%, 3%)" => "hsl(1, 2%, 3%)"
				if (text == "hsl" || text == "hsla") && len(args) > 0 {
					if degrees, ok := degreesForAngle(args[0]); ok {
						args[0].Kind = css_lexer.TNumber
						args[0].Text = floatToStringForColor(degrees)
					}
				}

				// These check for "IsNumeric" to reject "var()" since a single "var()"
				// can substitute for multiple tokens and that messes up pattern matching
				switch len(args) {
				case 3:
					// "rgba(1 2 3)" => "rgb(1, 2, 3)"
					// "hsla(1 2% 3%)" => "hsl(1, 2%, 3%)"
					if args[0].Kind.IsNumeric() && args[1].Kind.IsNumeric() && args[2].Kind.IsNumeric() {
						removeAlpha = true
						args[0].Whitespace = 0
						args[1].Whitespace = 0
						commaToken := p.commaToken(token.Loc)
						token.Children = &[]css_ast.Token{
							args[0], commaToken,
							args[1], commaToken,
							args[2],
						}
					}

				case 5:
					// "rgba(1, 2, 3)" => "rgb(1, 2, 3)"
					// "hsla(1, 2%, 3%)" => "hsl(1%, 2%, 3%)"
					if args[0].Kind.IsNumeric() && args[1].Kind == css_lexer.TComma &&
						args[2].Kind.IsNumeric() && args[3].Kind == css_lexer.TComma &&
						args[4].Kind.IsNumeric() {
						removeAlpha = true
						break
					}

					// "rgb(1 2 3 / 4%)" => "rgba(1, 2, 3, 0.04)"
					// "hsl(1 2% 3% / 4%)" => "hsla(1, 2%, 3%, 0.04)"
					if args[0].Kind.IsNumeric() && args[1].Kind.IsNumeric() && args[2].Kind.IsNumeric() &&
						args[3].Kind == css_lexer.TDelimSlash && args[4].Kind.IsNumeric() {
						addAlpha = true
						args[0].Whitespace = 0
						args[1].Whitespace = 0
						args[2].Whitespace = 0
						commaToken := p.commaToken(token.Loc)
						token.Children = &[]css_ast.Token{
							args[0], commaToken,
							args[1], commaToken,
							args[2], commaToken,
							lowerAlphaPercentageToNumber(args[4]),
						}
					}

				case 7:
					// "rgb(1%, 2%, 3%, 4%)" => "rgba(1%, 2%, 3%, 0.04)"
					// "hsl(1, 2%, 3%, 4%)" => "hsla(1, 2%, 3%, 0.04)"
					if args[0].Kind.IsNumeric() && args[1].Kind == css_lexer.TComma &&
						args[2].Kind.IsNumeric() && args[3].Kind == css_lexer.TComma &&
						args[4].Kind.IsNumeric() && args[5].Kind == css_lexer.TComma &&
						args[6].Kind.IsNumeric() {
						addAlpha = true
						args[6] = lowerAlphaPercentageToNumber(args[6])
					}
				}

				// Rename the function if the argument count changed meaning
				if removeAlpha {
					if strings.EqualFold(text, "rgba") {
						token.Text = "rgb"
					} else if strings.EqualFold(text, "hsla") {
						token.Text = "hsl"
					}
				} else if addAlpha {
					if strings.EqualFold(text, "rgb") {
						token.Text = "rgba"
					} else if strings.EqualFold(text, "hsl") {
						token.Text = "hsla"
					}
				}
			}

		case "hwb":
			if p.options.unsupportedCSSFeatures.Has(compat.HWB) {
				if color, ok := parseColor(token); ok {
					return p.tryToGenerateColor(token, color, wouldClipColor)
				}
			}

		case "color", "lab", "lch", "oklab", "oklch":
			if p.options.unsupportedCSSFeatures.Has(compat.ColorFunctions) {
				if color, ok := parseColor(token); ok {
					return p.tryToGenerateColor(token, color, wouldClipColor)
				}
			}
		}
	}

	// When minifying, try to parse the color and print it back out. This minifies
	// the color because we always print it out using the shortest encoding.
	if p.options.minifySyntax {
		if hex, ok := parseColor(token); ok {
			token = p.tryToGenerateColor(token, hex, wouldClipColor)
		}
	}

	return token
}
// parsedColor is a color parsed from a CSS token. It is either a packed
// 0xRRGGBBAA value (when hasColorSpace is false) or an XYZ triple plus an
// alpha byte stored in the low byte of "hex" (when hasColorSpace is true).
type parsedColor struct {
	x, y, z       F64    // color if hasColorSpace == true
	hex           uint32 // color and alpha if hasColorSpace == false, alpha if hasColorSpace == true
	hasColorSpace bool
}
// looksLikeColor is a cheap check for whether this token could be a CSS
// color: a known color keyword, a valid-length hex hash, or one of the
// color function names.
func looksLikeColor(token css_ast.Token) bool {
	switch token.Kind {
	case css_lexer.TIdent:
		_, found := colorNameToHex[strings.ToLower(token.Text)]
		return found

	case css_lexer.THash:
		n := len(token.Text)
		if n != 3 && n != 4 && n != 6 && n != 8 {
			return false
		}
		_, ok := parseHex(token.Text)
		return ok

	case css_lexer.TFunction:
		switch strings.ToLower(token.Text) {
		case
			"color-mix",
			"color",
			"hsl",
			"hsla",
			"hwb",
			"lab",
			"lch",
			"oklab",
			"oklch",
			"rgb",
			"rgba":
			return true
		}
	}
	return false
}
// parseColor attempts to parse a single CSS token as a color. It handles
// color keywords, hex hashes, the legacy and modern "rgb()/hsl()" forms,
// "hwb()", "color()" with its predefined color spaces, and the
// "lab()/lch()/oklab()/oklch()" functions. Pattern matching is strict:
// any argument shape that doesn't match one of the expected forms (e.g.
// due to "var()" substitution) reports failure.
func parseColor(token css_ast.Token) (parsedColor, bool) {
	text := token.Text

	switch token.Kind {
	case css_lexer.TIdent:
		if hex, ok := colorNameToHex[strings.ToLower(text)]; ok {
			return parsedColor{hex: hex}, true
		}

	case css_lexer.THash:
		switch len(text) {
		case 3:
			// "#123"
			if hex, ok := parseHex(text); ok {
				return parsedColor{hex: (expandHex(hex) << 8) | 0xFF}, true
			}

		case 4:
			// "#1234"
			if hex, ok := parseHex(text); ok {
				return parsedColor{hex: expandHex(hex)}, true
			}

		case 6:
			// "#112233"
			if hex, ok := parseHex(text); ok {
				return parsedColor{hex: (hex << 8) | 0xFF}, true
			}

		case 8:
			// "#11223344"
			if hex, ok := parseHex(text); ok {
				return parsedColor{hex: hex}, true
			}
		}

	case css_lexer.TFunction:
		lowerText := strings.ToLower(text)
		switch lowerText {
		case "rgb", "rgba":
			args := *token.Children
			var r, g, b, a css_ast.Token

			// Pick apart the legacy comma-separated and modern
			// space-separated argument shapes
			switch len(args) {
			case 3:
				// "rgb(1 2 3)"
				r, g, b = args[0], args[1], args[2]

			case 5:
				// "rgba(1, 2, 3)"
				if args[1].Kind == css_lexer.TComma && args[3].Kind == css_lexer.TComma {
					r, g, b = args[0], args[2], args[4]
					break
				}

				// "rgb(1 2 3 / 4%)"
				if args[3].Kind == css_lexer.TDelimSlash {
					r, g, b, a = args[0], args[1], args[2], args[4]
				}

			case 7:
				// "rgb(1%, 2%, 3%, 4%)"
				if args[1].Kind == css_lexer.TComma && args[3].Kind == css_lexer.TComma && args[5].Kind == css_lexer.TComma {
					r, g, b, a = args[0], args[2], args[4], args[6]
				}
			}

			if r, ok := parseColorByte(r, 1); ok {
				if g, ok := parseColorByte(g, 1); ok {
					if b, ok := parseColorByte(b, 1); ok {
						if a, ok := parseAlphaByte(a); ok {
							return parsedColor{hex: (r << 24) | (g << 16) | (b << 8) | a}, true
						}
					}
				}
			}

		case "hsl", "hsla":
			args := *token.Children
			var h, s, l, a css_ast.Token

			switch len(args) {
			case 3:
				// "hsl(1 2 3)"
				h, s, l = args[0], args[1], args[2]

			case 5:
				// "hsla(1, 2, 3)"
				if args[1].Kind == css_lexer.TComma && args[3].Kind == css_lexer.TComma {
					h, s, l = args[0], args[2], args[4]
					break
				}

				// "hsl(1 2 3 / 4%)"
				if args[3].Kind == css_lexer.TDelimSlash {
					h, s, l, a = args[0], args[1], args[2], args[4]
				}

			case 7:
				// "hsl(1%, 2%, 3%, 4%)"
				if args[1].Kind == css_lexer.TComma && args[3].Kind == css_lexer.TComma && args[5].Kind == css_lexer.TComma {
					h, s, l, a = args[0], args[2], args[4], args[6]
				}
			}

			// HSL => RGB
			if h, ok := degreesForAngle(h); ok {
				if s, ok := s.ClampedFractionForPercentage(); ok {
					if l, ok := l.ClampedFractionForPercentage(); ok {
						if a, ok := parseAlphaByte(a); ok {
							r, g, b := hslToRgb(helpers.NewF64(h), helpers.NewF64(s), helpers.NewF64(l))
							return parsedColor{hex: packRGBA(r, g, b, a)}, true
						}
					}
				}
			}

		case "hwb":
			args := *token.Children
			var h, s, l, a css_ast.Token

			switch len(args) {
			case 3:
				// "hwb(1 2 3)"
				h, s, l = args[0], args[1], args[2]

			case 5:
				// "hwb(1 2 3 / 4%)"
				if args[3].Kind == css_lexer.TDelimSlash {
					h, s, l, a = args[0], args[1], args[2], args[4]
				}
			}

			// HWB => RGB
			if h, ok := degreesForAngle(h); ok {
				if white, ok := s.ClampedFractionForPercentage(); ok {
					if black, ok := l.ClampedFractionForPercentage(); ok {
						if a, ok := parseAlphaByte(a); ok {
							r, g, b := hwbToRgb(helpers.NewF64(h), helpers.NewF64(white), helpers.NewF64(black))
							return parsedColor{hex: packRGBA(r, g, b, a)}, true
						}
					}
				}
			}

		case "color":
			args := *token.Children
			var colorSpace, alpha css_ast.Token

			switch len(args) {
			case 4:
				// "color(xyz 1 2 3)"
				colorSpace = args[0]

			case 6:
				// "color(xyz 1 2 3 / 50%)"
				if args[4].Kind == css_lexer.TDelimSlash {
					colorSpace, alpha = args[0], args[5]
				}
			}

			// Convert each predefined color space to XYZ (D65)
			if colorSpace.Kind == css_lexer.TIdent {
				if v0, ok := args[1].NumberOrFractionForPercentage(1, 0); ok {
					if v1, ok := args[2].NumberOrFractionForPercentage(1, 0); ok {
						if v2, ok := args[3].NumberOrFractionForPercentage(1, 0); ok {
							if a, ok := parseAlphaByte(alpha); ok {
								v0, v1, v2 := helpers.NewF64(v0), helpers.NewF64(v1), helpers.NewF64(v2)
								switch strings.ToLower(colorSpace.Text) {
								case "a98-rgb":
									r, g, b := lin_a98rgb(v0, v1, v2)
									x, y, z := lin_a98rgb_to_xyz(r, g, b)
									return parsedColor{hasColorSpace: true, x: x, y: y, z: z, hex: a}, true

								case "display-p3":
									r, g, b := lin_p3(v0, v1, v2)
									x, y, z := lin_p3_to_xyz(r, g, b)
									return parsedColor{hasColorSpace: true, x: x, y: y, z: z, hex: a}, true

								case "prophoto-rgb":
									r, g, b := lin_prophoto(v0, v1, v2)
									x, y, z := lin_prophoto_to_xyz(r, g, b)
									x, y, z = d50_to_d65(x, y, z)
									return parsedColor{hasColorSpace: true, x: x, y: y, z: z, hex: a}, true

								case "rec2020":
									r, g, b := lin_2020(v0, v1, v2)
									x, y, z := lin_2020_to_xyz(r, g, b)
									return parsedColor{hasColorSpace: true, x: x, y: y, z: z, hex: a}, true

								case "srgb":
									r, g, b := lin_srgb(v0, v1, v2)
									x, y, z := lin_srgb_to_xyz(r, g, b)
									return parsedColor{hasColorSpace: true, x: x, y: y, z: z, hex: a}, true

								case "srgb-linear":
									x, y, z := lin_srgb_to_xyz(v0, v1, v2)
									return parsedColor{hasColorSpace: true, x: x, y: y, z: z, hex: a}, true

								case "xyz", "xyz-d65":
									return parsedColor{hasColorSpace: true, x: v0, y: v1, z: v2, hex: a}, true

								case "xyz-d50":
									x, y, z := d50_to_d65(v0, v1, v2)
									return parsedColor{hasColorSpace: true, x: x, y: y, z: z, hex: a}, true
								}
							}
						}
					}
				}
			}

		case "lab", "lch", "oklab", "oklch":
			args := *token.Children
			var v0, v1, v2, alpha css_ast.Token

			switch len(args) {
			case 3:
				// "lab(1 2 3)"
				v0, v1, v2 = args[0], args[1], args[2]

			case 5:
				// "lab(1 2 3 / 50%)"
				if args[3].Kind == css_lexer.TDelimSlash {
					v0, v1, v2, alpha = args[0], args[1], args[2], args[4]
				}
			}

			// A zero kind means the argument shape above didn't match
			if v0.Kind != css_lexer.T(0) {
				if alpha, ok := parseAlphaByte(alpha); ok {
					switch lowerText {
					case "lab":
						if v0, ok := v0.NumberOrFractionForPercentage(100, 0); ok {
							if v1, ok := v1.NumberOrFractionForPercentage(125, css_ast.AllowAnyPercentage); ok {
								if v2, ok := v2.NumberOrFractionForPercentage(125, css_ast.AllowAnyPercentage); ok {
									v0, v1, v2 := helpers.NewF64(v0), helpers.NewF64(v1), helpers.NewF64(v2)
									x, y, z := lab_to_xyz(v0, v1, v2)
									x, y, z = d50_to_d65(x, y, z)
									return parsedColor{hasColorSpace: true, x: x, y: y, z: z, hex: alpha}, true
								}
							}
						}

					case "lch":
						if v0, ok := v0.NumberOrFractionForPercentage(100, 0); ok {
							if v1, ok := v1.NumberOrFractionForPercentage(125, css_ast.AllowPercentageAbove100); ok {
								if v2, ok := degreesForAngle(v2); ok {
									v0, v1, v2 := helpers.NewF64(v0), helpers.NewF64(v1), helpers.NewF64(v2)
									l, a, b := lch_to_lab(v0, v1, v2)
									x, y, z := lab_to_xyz(l, a, b)
									x, y, z = d50_to_d65(x, y, z)
									return parsedColor{hasColorSpace: true, x: x, y: y, z: z, hex: alpha}, true
								}
							}
						}

					case "oklab":
						if v0, ok := v0.NumberOrFractionForPercentage(1, 0); ok {
							if v1, ok := v1.NumberOrFractionForPercentage(0.4, css_ast.AllowAnyPercentage); ok {
								if v2, ok := v2.NumberOrFractionForPercentage(0.4, css_ast.AllowAnyPercentage); ok {
									v0, v1, v2 := helpers.NewF64(v0), helpers.NewF64(v1), helpers.NewF64(v2)
									x, y, z := oklab_to_xyz(v0, v1, v2)
									return parsedColor{hasColorSpace: true, x: x, y: y, z: z, hex: alpha}, true
								}
							}
						}

					case "oklch":
						if v0, ok := v0.NumberOrFractionForPercentage(1, 0); ok {
							if v1, ok := v1.NumberOrFractionForPercentage(0.4, css_ast.AllowPercentageAbove100); ok {
								if v2, ok := degreesForAngle(v2); ok {
									v0, v1, v2 := helpers.NewF64(v0), helpers.NewF64(v1), helpers.NewF64(v2)
									l, a, b := oklch_to_oklab(v0, v1, v2)
									x, y, z := oklab_to_xyz(l, a, b)
									return parsedColor{hasColorSpace: true, x: x, y: y, z: z, hex: alpha}, true
								}
							}
						}
					}
				}
			}
		}
	}

	return parsedColor{}, false
}
// Reference: https://drafts.csswg.org/css-color/#hwb-to-rgb
func hwbToRgb(hue F64, white F64, black F64) (r F64, g F64, b F64) {
	sum := white.Add(black)

	// When white + black covers everything, the result is pure gray
	if sum.Value() >= 1 {
		gray := white.Div(sum)
		return gray, gray, gray
	}

	// Otherwise mix white into the fully-saturated hue
	r, g, b = hslToRgb(hue, helpers.NewF64(1), helpers.NewF64(0.5))
	scale := sum.Neg().AddConst(1)
	r = scale.Mul(r).Add(white)
	g = scale.Mul(g).Add(white)
	b = scale.Mul(b).Add(white)
	return
}
// Reference https://drafts.csswg.org/css-color/#hsl-to-rgb
func hslToRgb(hue F64, sat F64, light F64) (r F64, g F64, b F64) {
	// Normalize the hue to a fraction of a full turn
	hue = hue.DivConst(360.0)

	var high F64
	if light.Value() > 0.5 {
		high = light.Add(sat).Sub(light.Mul(sat))
	} else {
		high = sat.AddConst(1).Mul(light)
	}
	low := light.MulConst(2).Sub(high)

	r = hueToRgb(low, high, hue.AddConst(1.0/3.0))
	g = hueToRgb(low, high, hue)
	b = hueToRgb(low, high, hue.SubConst(1.0/3.0))
	return
}
// hueToRgb computes one channel of the HSL-to-RGB conversion from the two
// intermediate values "t1" and "t2" and a (possibly out-of-range) hue.
func hueToRgb(t1 F64, t2 F64, hue F64) F64 {
	// Wrap the hue into [0, 1) and scale it to six segments
	hue = hue.Sub(hue.Floor()).MulConst(6)

	switch h := hue.Value(); {
	case h < 1:
		return helpers.Lerp(t1, t2, hue)
	case h < 3:
		return t2
	case h < 4:
		return helpers.Lerp(t1, t2, hue.Neg().AddConst(4))
	default:
		return t1
	}
}
// packRGBA rounds three 0-to-1 channel values to bytes and packs them
// together with the alpha byte into a 0xRRGGBBAA value.
func packRGBA(rf F64, gf F64, bf F64, a uint32) uint32 {
	result := floatToByte(rf.Value()) << 24
	result |= floatToByte(gf.Value()) << 16
	result |= floatToByte(bf.Value()) << 8
	return result | a
}
// floatToByte maps a 0-to-1 fraction to a 0-to-255 byte, rounding to the
// nearest integer and clamping out-of-range inputs.
func floatToByte(f float64) uint32 {
	rounded := int(math.Round(f * 255))
	switch {
	case rounded < 0:
		return 0
	case rounded > 255:
		return 255
	}
	return uint32(rounded)
}
// parseAlphaByte parses an optional alpha token into a 0-to-255 byte. The
// zero token kind means the alpha was omitted, which is fully opaque.
func parseAlphaByte(token css_ast.Token) (uint32, bool) {
	if token.Kind != css_lexer.T(0) {
		return parseColorByte(token, 255)
	}
	return 255, true
}
// parseColorByte parses a number or percentage token into a 0-to-255
// byte. Plain numbers are multiplied by "scale" before rounding while
// percentages are always scaled so that 100% maps to 255; out-of-range
// results are clamped.
func parseColorByte(token css_ast.Token, scale float64) (uint32, bool) {
	value := 0
	ok := false

	switch token.Kind {
	case css_lexer.TNumber:
		if f, err := strconv.ParseFloat(token.Text, 64); err == nil {
			value = int(math.Round(f * scale))
			ok = true
		}

	case css_lexer.TPercentage:
		if f, err := strconv.ParseFloat(token.PercentageValue(), 64); err == nil {
			value = int(math.Round(f * (255.0 / 100.0)))
			ok = true
		}
	}

	if value < 0 {
		value = 0
	} else if value > 255 {
		value = 255
	}
	return uint32(value), ok
}
// tryToConvertToHexWithoutClipping converts an XYZ (D65) color plus an
// alpha byte into a packed 0xRRGGBBAA value, but only if the sRGB result
// would not be clipped. The half-byte tolerance on each side of the 0-to-1
// range allows values that round to an in-range byte anyway.
func tryToConvertToHexWithoutClipping(x F64, y F64, z F64, a uint32) (uint32, bool) {
	r, g, b := gam_srgb(xyz_to_lin_srgb(x, y, z))
	if r.Value() < -0.5/255 || r.Value() > 255.5/255 ||
		g.Value() < -0.5/255 || g.Value() > 255.5/255 ||
		b.Value() < -0.5/255 || b.Value() > 255.5/255 {
		return 0, false
	}
	return packRGBA(r, g, b, a), true
}
// tryToGenerateColor prints a parsed color back out as a token using the
// shortest supported encoding: a color keyword, "#rgb", "#rrggbb",
// "#rgba"/"#rrggbbaa" (when hex-with-alpha is supported), or an "rgba()"
// function as the fallback. If the color lies outside sRGB, it is either
// reported via "wouldClipColor" (when non-nil) or gamut-mapped into sRGB.
func (p *parser) tryToGenerateColor(token css_ast.Token, color parsedColor, wouldClipColor *bool) css_ast.Token {
	// Note: Do NOT remove color information from fully transparent colors.
	// Safari behaves differently than other browsers for color interpolation:
	// https://css-tricks.com/thing-know-gradients-transparent-black/

	// Attempt to convert other color spaces to sRGB, and only continue if the
	// result (rounded to the nearest byte) will be in the 0-to-1 sRGB range
	var hex uint32
	if !color.hasColorSpace {
		hex = color.hex
	} else if result, ok := tryToConvertToHexWithoutClipping(color.x, color.y, color.z, color.hex); ok {
		hex = result
	} else if wouldClipColor != nil {
		// Let the caller decide what to do about the clipped color
		*wouldClipColor = true
		return token
	} else {
		// Otherwise, map the color into the sRGB gamut
		r, g, b := gamut_mapping_xyz_to_srgb(color.x, color.y, color.z)
		hex = packRGBA(r, g, b, color.hex)
	}

	if hexA(hex) == 255 {
		// Fully opaque: use a keyword or an RGB hash
		token.Children = nil
		if name, ok := shortColorName[hex]; ok && p.options.minifySyntax {
			token.Kind = css_lexer.TIdent
			token.Text = name
		} else {
			token.Kind = css_lexer.THash
			hex >>= 8
			compact := compactHex(hex)
			if p.options.minifySyntax && hex == expandHex(compact) {
				token.Text = fmt.Sprintf("%03x", compact)
			} else {
				token.Text = fmt.Sprintf("%06x", hex)
			}
		}
	} else if !p.options.unsupportedCSSFeatures.Has(compat.HexRGBA) {
		// Transparent, and hex-with-alpha is supported
		token.Children = nil
		token.Kind = css_lexer.THash
		compact := compactHex(hex)
		if p.options.minifySyntax && hex == expandHex(compact) {
			token.Text = fmt.Sprintf("%04x", compact)
		} else {
			token.Text = fmt.Sprintf("%08x", hex)
		}
	} else {
		// Transparent, but hex-with-alpha is unsupported: emit "rgba(...)"
		// using the precomputed alpha fraction table to format the alpha
		token.Kind = css_lexer.TFunction
		token.Text = "rgba"
		commaToken := p.commaToken(token.Loc)
		index := hexA(hex) * 4
		alpha := alphaFractionTable[index : index+4]
		if space := strings.IndexByte(alpha, ' '); space != -1 {
			alpha = alpha[:space]
		}
		token.Children = &[]css_ast.Token{
			{Loc: token.Loc, Kind: css_lexer.TNumber, Text: strconv.Itoa(hexR(hex))}, commaToken,
			{Loc: token.Loc, Kind: css_lexer.TNumber, Text: strconv.Itoa(hexG(hex))}, commaToken,
			{Loc: token.Loc, Kind: css_lexer.TNumber, Text: strconv.Itoa(hexB(hex))}, commaToken,
			{Loc: token.Loc, Kind: css_lexer.TNumber, Text: alpha},
		}
	}

	return token
}
// Every four characters in this table is the fraction for that index
//
// Entry i is the shortest decimal form of i/255 rounded to at most three
// digits, space-padded to exactly four characters; the reader slices off
// four characters at index alpha*4 and trims at the first space.
//
// NOTE(review): each entry must be exactly four characters wide and the
// whole table exactly 256*4 = 1024 characters long, otherwise the slice
// "alphaFractionTable[index : index+4]" is wrong or out of range. The
// spacing below looks like it may have lost padding spaces (e.g. after
// "0" and ".5" and at the very end) — verify the literal's length and
// entry alignment against the upstream source before relying on it.
const alphaFractionTable string = "" +
	"0 .004.008.01 .016.02 .024.027.03 .035.04 .043.047.05 .055.06 " +
	".063.067.07 .075.08 .082.086.09 .094.098.1 .106.11 .114.118.12 " +
	".125.13 .133.137.14 .145.15 .153.157.16 .165.17 .173.176.18 .184" +
	".19 .192.196.2 .204.208.21 .216.22 .224.227.23 .235.24 .243.247" +
	".25 .255.26 .263.267.27 .275.28 .282.286.29 .294.298.3 .306.31 " +
	".314.318.32 .325.33 .333.337.34 .345.35 .353.357.36 .365.37 .373" +
	".376.38 .384.39 .392.396.4 .404.408.41 .416.42 .424.427.43 .435" +
	".44 .443.447.45 .455.46 .463.467.47 .475.48 .482.486.49 .494.498" +
	".5 .506.51 .514.518.52 .525.53 .533.537.54 .545.55 .553.557.56 " +
	".565.57 .573.576.58 .584.59 .592.596.6 .604.608.61 .616.62 .624" +
	".627.63 .635.64 .643.647.65 .655.66 .663.667.67 .675.68 .682.686" +
	".69 .694.698.7 .706.71 .714.718.72 .725.73 .733.737.74 .745.75 " +
	".753.757.76 .765.77 .773.776.78 .784.79 .792.796.8 .804.808.81 " +
	".816.82 .824.827.83 .835.84 .843.847.85 .855.86 .863.867.87 .875" +
	".88 .882.886.89 .894.898.9 .906.91 .914.918.92 .925.93 .933.937" +
	".94 .945.95 .953.957.96 .965.97 .973.976.98 .984.99 .992.9961 "
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/css_parser/css_decls_box_shadow.go | internal/css_parser/css_decls_box_shadow.go | package css_parser
import (
"strings"
"github.com/evanw/esbuild/internal/css_ast"
"github.com/evanw/esbuild/internal/css_lexer"
)
// lowerAndMangleBoxShadow processes the tokens of a single shadow (one
// comma-separated entry of a "box-shadow" value): it lowers any modern
// color syntax, and when minifying it shortens zero lengths and drops a
// trailing zero blur-radius/spread-radius. It also normalizes whitespace
// flags between tokens.
func (p *parser) lowerAndMangleBoxShadow(tokens []css_ast.Token, wouldClipColor *bool) []css_ast.Token {
	insetCount := 0
	colorCount := 0
	numbersBegin := 0
	numbersCount := 0
	numbersDone := false
	foundUnexpectedToken := false

	for i, t := range tokens {
		if t.Kind == css_lexer.TNumber || t.Kind == css_lexer.TDimension {
			if numbersDone {
				// Track if we found a non-number in between two numbers
				foundUnexpectedToken = true
			}
			if p.options.minifySyntax && t.TurnLengthIntoNumberIfZero() {
				// "0px" => "0"
				tokens[i] = t
			}
			if numbersCount == 0 {
				// Track the index of the first number
				numbersBegin = i
			}
			numbersCount++
		} else {
			if numbersCount != 0 {
				// Track when we find a non-number after a number
				numbersDone = true
			}
			if looksLikeColor(t) {
				colorCount++
				tokens[i] = p.lowerAndMinifyColor(t, wouldClipColor)
			} else if t.Kind == css_lexer.TIdent && strings.EqualFold(t.Text, "inset") {
				insetCount++
			} else {
				// Track if we found a token other than a number, a color, or "inset"
				foundUnexpectedToken = true
			}
		}
	}

	// If everything looks like a valid rule, trim trailing zeros off the numbers.
	// There are three valid configurations of numbers:
	//
	//   offset-x | offset-y
	//   offset-x | offset-y | blur-radius
	//   offset-x | offset-y | blur-radius | spread-radius
	//
	// If omitted, blur-radius and spread-radius are implied to be zero.
	if p.options.minifySyntax && insetCount <= 1 && colorCount <= 1 && numbersCount > 2 && numbersCount <= 4 && !foundUnexpectedToken {
		numbersEnd := numbersBegin + numbersCount
		for numbersCount > 2 && tokens[numbersBegin+numbersCount-1].IsZero() {
			numbersCount--
		}
		tokens = append(tokens[:numbersBegin+numbersCount], tokens[numbersEnd:]...)
	}

	// Set the whitespace flags
	for i := range tokens {
		var whitespace css_ast.WhitespaceFlags
		if i > 0 || !p.options.minifyWhitespace {
			whitespace |= css_ast.WhitespaceBefore
		}
		if i+1 < len(tokens) {
			whitespace |= css_ast.WhitespaceAfter
		}
		tokens[i].Whitespace = whitespace
	}
	return tokens
}
// lowerAndMangleBoxShadows splits a "box-shadow" value on top-level
// commas, processes each shadow individually, and compacts the results
// back into the same token slice (which may shrink).
func (p *parser) lowerAndMangleBoxShadows(tokens []css_ast.Token, wouldClipColor *bool) []css_ast.Token {
	n := len(tokens)
	end := 0
	i := 0

	for i < n {
		// Find the comma or the end of the token list
		comma := i
		for comma < n && tokens[comma].Kind != css_lexer.TComma {
			comma++
		}

		// Mangle this individual shadow
		end += copy(tokens[end:], p.lowerAndMangleBoxShadow(tokens[i:comma], wouldClipColor))

		// Skip over the comma
		if comma < n {
			tokens[end] = tokens[comma]
			end++
			comma++
		}
		i = comma
	}

	return tokens[:end]
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/css_parser/css_decls_composes.go | internal/css_parser/css_decls_composes.go | package css_parser
import (
"fmt"
"strings"
"github.com/evanw/esbuild/internal/ast"
"github.com/evanw/esbuild/internal/css_ast"
"github.com/evanw/esbuild/internal/css_lexer"
"github.com/evanw/esbuild/internal/logger"
)
// composesContext carries the state needed to process a CSS modules
// "composes" declaration: the local class selectors it applies to, where
// those selectors appeared, and (when non-empty) the range of a problem
// that makes "composes" invalid in this position.
type composesContext struct {
	parentRefs   []ast.Ref
	parentRange  logger.Range
	problemRange logger.Range
}
// handleComposesPragma processes the value of a "composes" declaration.
// The value is a list of identifiers, optionally followed by a trailing
// "from <file>" clause (string or URL, which records an import) or a
// "from global" clause (which makes the names global symbols). Any other
// token shape is reported as a syntax error.
func (p *parser) handleComposesPragma(context composesContext, tokens []css_ast.Token) {
	type nameWithLoc struct {
		loc  logger.Loc
		text string
	}
	var names []nameWithLoc
	fromGlobal := false

	for i, t := range tokens {
		if t.Kind == css_lexer.TIdent {
			// Check for a "from" clause at the end
			if strings.EqualFold(t.Text, "from") && i+2 == len(tokens) {
				last := tokens[i+1]

				// A string or a URL is an external file
				if last.Kind == css_lexer.TString || last.Kind == css_lexer.TURL {
					var importRecordIndex uint32
					if last.Kind == css_lexer.TString {
						// A string creates a fresh import record
						importRecordIndex = uint32(len(p.importRecords))
						p.importRecords = append(p.importRecords, ast.ImportRecord{
							Kind:  ast.ImportComposesFrom,
							Path:  logger.Path{Text: last.Text},
							Range: p.source.RangeOfString(last.Loc),
						})
					} else {
						// A URL token already has an import record; retag it
						importRecordIndex = last.PayloadIndex
						p.importRecords[importRecordIndex].Kind = ast.ImportComposesFrom
					}
					// Record every collected name as imported from that file
					for _, parentRef := range context.parentRefs {
						composes := p.composes[parentRef]
						for _, name := range names {
							composes.ImportedNames = append(composes.ImportedNames, css_ast.ImportedComposesName{
								ImportRecordIndex: importRecordIndex,
								Alias:             name.text,
								AliasLoc:          name.loc,
							})
						}
					}
					return
				}

				// An identifier must be "global"
				if last.Kind == css_lexer.TIdent {
					if strings.EqualFold(last.Text, "global") {
						fromGlobal = true
						break
					}
					p.log.AddID(logger.MsgID_CSS_CSSSyntaxError, logger.Warning, &p.tracker, css_lexer.RangeOfIdentifier(p.source, last.Loc),
						fmt.Sprintf("\"composes\" declaration uses invalid location %q", last.Text))
					p.prevError = t.Loc
					return
				}
			}

			names = append(names, nameWithLoc{t.Loc, t.Text})
			continue
		}

		// Any unexpected tokens are a syntax error
		var text string
		switch t.Kind {
		case css_lexer.TURL, css_lexer.TBadURL, css_lexer.TString, css_lexer.TUnterminatedString:
			text = fmt.Sprintf("Unexpected %s", t.Kind.String())
		default:
			text = fmt.Sprintf("Unexpected %q", t.Text)
		}
		p.log.AddID(logger.MsgID_CSS_CSSSyntaxError, logger.Warning, &p.tracker, logger.Range{Loc: t.Loc}, text)
		p.prevError = t.Loc
		return
	}

	// If we get here, all of these names are not references to another file
	old := p.makeLocalSymbols
	if fromGlobal {
		// "from global" makes these names global instead of local
		p.makeLocalSymbols = false
	}
	for _, parentRef := range context.parentRefs {
		composes := p.composes[parentRef]
		for _, name := range names {
			composes.Names = append(composes.Names, p.symbolForName(name.loc, name.text))
		}
	}
	p.makeLocalSymbols = old
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/css_parser/css_decls_box.go | internal/css_parser/css_decls_box.go | package css_parser
import (
"strings"
"github.com/evanw/esbuild/internal/css_ast"
"github.com/evanw/esbuild/internal/css_lexer"
"github.com/evanw/esbuild/internal/logger"
)
// Indices into "boxTracker.sides", in CSS shorthand order (clockwise
// starting from the top).
const (
	boxTop = iota
	boxRight
	boxBottom
	boxLeft
)
// boxSide records the most recent declaration seen for one side of a
// box-like property group ("margin", "padding", or "inset").
type boxSide struct {
	token         css_ast.Token
	unitSafety    unitSafetyTracker
	ruleIndex     uint32 // The index of the originating rule in the rules array
	wasSingleRule bool   // True if the originating rule was just for this side
}
// boxTracker accumulates per-side declarations of a box-like property
// group so that consecutive declarations can be collapsed into a single
// shorthand ("margin", "padding", or "inset") declaration.
type boxTracker struct {
	keyText   string
	sides     [4]boxSide
	allowAuto bool // If true, allow the "auto" keyword
	important bool // True if all active rules were flagged as "!important"
	key       css_ast.D
}
// unitSafetyStatus classifies how safely a set of lengths can be merged
// into one shorthand without changing which rules the browser accepts.
type unitSafetyStatus uint8

const (
	unitSafe         unitSafetyStatus = iota // "margin: 0 1px 2cm 3%;"
	unitUnsafeSingle                         // "margin: 0 1vw 2vw 3vw;"
	unitUnsafeMixed                          // "margin: 0 1vw 2vh 3ch;"
)
// We can only compact rules together if they have the same unit safety level.
// We want to avoid a situation where the browser treats some of the original
// rules as valid and others as invalid.
//
//   Safe:
//     top: 1px; left: 0; bottom: 1px; right: 0;
//     top: 1Q; left: 2Q; bottom: 3Q; right: 4Q;
//
//   Unsafe:
//     top: 1vh; left: 2vw; bottom: 3vh; right: 4vw;
//     top: 1Q; left: 2Q; bottom: 3Q; right: 0;
//     inset: 1Q 0 0 0; top: 0;
type unitSafetyTracker struct {
	unit   string // Only meaningful when status == unitUnsafeSingle
	status unitSafetyStatus
}
// isSafeWith reports whether lengths tracked by "a" can be merged with
// lengths tracked by "b" without risking a validity mismatch: the status
// must agree, "mixed" is never safe, and a single unsafe unit must match.
func (a unitSafetyTracker) isSafeWith(b unitSafetyTracker) bool {
	if a.status != b.status || a.status == unitUnsafeMixed {
		return false
	}
	return a.status != unitUnsafeSingle || a.unit == b.unit
}
// includeUnitOf folds the unit of the given token into the tracker,
// possibly downgrading the safety status. Note that the unmatched cases
// fall through to the "unitUnsafeMixed" assignment at the bottom.
func (t *unitSafetyTracker) includeUnitOf(token css_ast.Token) {
	switch token.Kind {
	case css_lexer.TNumber:
		// Only a unitless zero is compatible with every unit; any other
		// bare number falls through and poisons the tracker
		if token.Text == "0" {
			return
		}

	case css_lexer.TPercentage:
		// Percentages are always safe to combine
		return

	case css_lexer.TDimension:
		if token.DimensionUnitIsSafeLength() {
			return
		} else if unit := token.DimensionUnit(); t.status == unitSafe {
			// First unusual unit: remember it and allow more of the same
			t.status = unitUnsafeSingle
			t.unit = unit
			return
		} else if t.status == unitUnsafeSingle && t.unit == unit {
			return
		}
	}

	t.status = unitUnsafeMixed
}
// updateSide records a new declaration for one side. If an earlier
// declaration for the same side is now fully overridden — and both the
// old and new values are unit-safe, and we aren't replacing a shorthand's
// side with a single-side rule — the earlier rule is deleted in place.
func (box *boxTracker) updateSide(rules []css_ast.Rule, side int, new boxSide) {
	if old := box.sides[side]; old.token.Kind != css_lexer.TEndOfFile &&
		(!new.wasSingleRule || old.wasSingleRule) &&
		old.unitSafety.status == unitSafe && new.unitSafety.status == unitSafe {
		rules[old.ruleIndex] = css_ast.Rule{}
	}
	box.sides[side] = new
}
// mangleSides processes a shorthand declaration ("margin: ..." etc.) by
// expanding its 1-to-4 values into all four sides and then attempting to
// compact the accumulated sides into a single shorthand rule. The current
// declaration is assumed to be the last entry in "rules".
func (box *boxTracker) mangleSides(rules []css_ast.Rule, decl *css_ast.RDeclaration, minifyWhitespace bool) {
	// Reset if we see a change in the "!important" flag
	if box.important != decl.Important {
		box.sides = [4]boxSide{}
		box.important = decl.Important
	}

	allowedIdent := ""
	if box.allowAuto {
		allowedIdent = "auto"
	}
	if quad, ok := expandTokenQuad(decl.Value, allowedIdent); ok {
		// Use a single tracker for the whole rule
		unitSafety := unitSafetyTracker{}
		for _, t := range quad {
			if !box.allowAuto || t.Kind.IsNumeric() {
				unitSafety.includeUnitOf(t)
			}
		}
		for side, t := range quad {
			if unitSafety.status == unitSafe {
				t.TurnLengthIntoNumberIfZero()
			}
			box.updateSide(rules, side, boxSide{
				token:      t,
				ruleIndex:  uint32(len(rules) - 1),
				unitSafety: unitSafety,
			})
		}
		box.compactRules(rules, decl.KeyRange, minifyWhitespace)
	} else {
		// An unrecognized value shape invalidates all tracked state
		box.sides = [4]boxSide{}
	}
}
// mangleSide processes a single-side declaration ("margin-top: ..." etc.)
// by recording its value for that side and then attempting to compact the
// accumulated sides. The current declaration is assumed to be the last
// entry in "rules".
func (box *boxTracker) mangleSide(rules []css_ast.Rule, decl *css_ast.RDeclaration, minifyWhitespace bool, side int) {
	// Reset if we see a change in the "!important" flag
	if box.important != decl.Important {
		box.sides = [4]boxSide{}
		box.important = decl.Important
	}

	if tokens := decl.Value; len(tokens) == 1 {
		if t := tokens[0]; t.Kind.IsNumeric() || (t.Kind == css_lexer.TIdent && box.allowAuto && strings.EqualFold(t.Text, "auto")) {
			unitSafety := unitSafetyTracker{}
			if !box.allowAuto || t.Kind.IsNumeric() {
				unitSafety.includeUnitOf(t)
			}
			if unitSafety.status == unitSafe && t.TurnLengthIntoNumberIfZero() {
				// "0px" => "0"
				tokens[0] = t
			}
			box.updateSide(rules, side, boxSide{
				token:         t,
				ruleIndex:     uint32(len(rules) - 1),
				wasSingleRule: true,
				unitSafety:    unitSafety,
			})
			box.compactRules(rules, decl.KeyRange, minifyWhitespace)
			return
		}
	}

	// An unrecognized value shape invalidates all tracked state
	box.sides = [4]boxSide{}
}
// compactRules replaces the individual declarations for all four sides
// with a single shorthand declaration once every side has been seen and
// the units are mutually safe. The originating rules are blanked out and
// the shorthand is inserted where the last of them was, carrying the
// earliest source location.
func (box *boxTracker) compactRules(rules []css_ast.Rule, keyRange logger.Range, minifyWhitespace bool) {
	// Don't compact if the shorthand form is unsupported
	if box.key == css_ast.DUnknown {
		return
	}

	// All tokens must be present
	if eof := css_lexer.TEndOfFile; box.sides[0].token.Kind == eof || box.sides[1].token.Kind == eof ||
		box.sides[2].token.Kind == eof || box.sides[3].token.Kind == eof {
		return
	}

	// All tokens must have the same unit
	for _, side := range box.sides[1:] {
		if !side.unitSafety.isSafeWith(box.sides[0].unitSafety) {
			return
		}
	}

	// Generate the most minimal representation
	tokens := compactTokenQuad(
		box.sides[0].token,
		box.sides[1].token,
		box.sides[2].token,
		box.sides[3].token,
		minifyWhitespace,
	)

	// Remove all of the existing declarations
	var minLoc logger.Loc
	for i, side := range box.sides {
		if loc := rules[side.ruleIndex].Loc; i == 0 || loc.Start < minLoc.Start {
			minLoc = loc
		}
		rules[side.ruleIndex] = css_ast.Rule{}
	}

	// Insert the combined declaration where the last rule was
	rules[box.sides[3].ruleIndex] = css_ast.Rule{Loc: minLoc, Data: &css_ast.RDeclaration{
		Key:       box.key,
		KeyText:   box.keyText,
		Value:     tokens,
		KeyRange:  keyRange,
		Important: box.important,
	}}
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/css_parser/css_decls.go | internal/css_parser/css_decls.go | package css_parser
import (
"strings"
"github.com/evanw/esbuild/internal/compat"
"github.com/evanw/esbuild/internal/css_ast"
"github.com/evanw/esbuild/internal/css_lexer"
"github.com/evanw/esbuild/internal/logger"
)
// commaToken returns a "," token at the given location. Unless whitespace
// is being minified, the comma is marked as being followed by a space.
func (p *parser) commaToken(loc logger.Loc) css_ast.Token {
	t := css_ast.Token{
		Loc:  loc,
		Kind: css_lexer.TComma,
		Text: ",",
	}
	if !p.options.minifyWhitespace {
		t.Whitespace = css_ast.WhitespaceAfter
	}
	return t
}
// expandTokenQuad expands the 1-to-4 values of a box shorthand into the
// full [top, right, bottom, left] quad using the standard CSS repetition
// rules. Every value must be numeric (or equal to "allowedIdent" when
// that is non-empty) so that a single "var()" can't masquerade as
// multiple values.
func expandTokenQuad(tokens []css_ast.Token, allowedIdent string) (result [4]css_ast.Token, ok bool) {
	n := len(tokens)
	if n < 1 || n > 4 {
		return
	}

	// Don't do this if we encounter any unexpected tokens such as "var()"
	for _, t := range tokens {
		if !t.Kind.IsNumeric() && (t.Kind != css_lexer.TIdent || allowedIdent == "" || t.Text != allowedIdent) {
			return
		}
	}

	switch n {
	case 1:
		result = [4]css_ast.Token{tokens[0], tokens[0], tokens[0], tokens[0]}
	case 2:
		result = [4]css_ast.Token{tokens[0], tokens[1], tokens[0], tokens[1]}
	case 3:
		result = [4]css_ast.Token{tokens[0], tokens[1], tokens[2], tokens[1]}
	default:
		result = [4]css_ast.Token{tokens[0], tokens[1], tokens[2], tokens[3]}
	}
	ok = true
	return
}
// compactTokenQuad is the inverse of expandTokenQuad: given the four side
// values of a box-like shorthand (top, right, bottom, left), it drops
// trailing values that are implied by the CSS shorthand expansion rules
// and then normalizes the whitespace flags between the surviving tokens.
func compactTokenQuad(a css_ast.Token, b css_ast.Token, c css_ast.Token, d css_ast.Token, minifyWhitespace bool) []css_ast.Token {
	tokens := []css_ast.Token{a, b, c, d}

	// Determine the shortest equivalent form: left may be dropped if it
	// equals right, then bottom if it equals top, then right if it equals top
	keep := 4
	if d.EqualIgnoringWhitespace(b) {
		keep = 3
		if c.EqualIgnoringWhitespace(a) {
			keep = 2
			if b.EqualIgnoringWhitespace(a) {
				keep = 1
			}
		}
	}
	tokens = tokens[:keep]

	// Rewrite the whitespace flags so values are space-separated, with no
	// leading space before the first value when minifying
	for i := range tokens {
		var ws css_ast.WhitespaceFlags
		if i > 0 || !minifyWhitespace {
			ws |= css_ast.WhitespaceBefore
		}
		if i != len(tokens)-1 {
			ws |= css_ast.WhitespaceAfter
		}
		tokens[i].Whitespace = ws
	}
	return tokens
}
// processDeclarations post-processes the declarations in a rule body. It
// handles the CSS modules "composes" directive, lowers/minifies colors and
// gradients, mangles box-like shorthands (margin, padding, inset,
// border-radius) and font-related properties when syntax minification is
// enabled, inserts vendor-prefixed fallback declarations, and duplicates
// declarations whose colors would need clipping so that a clipped fallback
// copy precedes the unclipped original. Rules removed along the way become
// zero values and are compacted out at the end when minifying.
func (p *parser) processDeclarations(rules []css_ast.Rule, composesContext *composesContext) (rewrittenRules []css_ast.Rule) {
	// Trackers that merge longhand side declarations back into shorthands
	margin := boxTracker{key: css_ast.DMargin, keyText: "margin", allowAuto: true}
	padding := boxTracker{key: css_ast.DPadding, keyText: "padding", allowAuto: false}
	inset := boxTracker{key: css_ast.DInset, keyText: "inset", allowAuto: true}
	borderRadius := borderRadiusTracker{}
	rewrittenRules = make([]css_ast.Rule, 0, len(rules))
	didWarnAboutComposes := false
	wouldClipColorFlag := false
	var declarationKeys map[string]struct{} // built lazily, only if a prefixable property is seen

	// Don't automatically generate the "inset" property if it's not supported
	if p.options.unsupportedCSSFeatures.Has(compat.InsetProperty) {
		inset.key = css_ast.DUnknown
		inset.keyText = ""
	}

	// If this is a local class selector, track which CSS properties it declares.
	// This is used to warn when CSS "composes" is used incorrectly.
	if composesContext != nil {
		for _, ref := range composesContext.parentRefs {
			composes, ok := p.composes[ref]
			if !ok {
				composes = &css_ast.Composes{}
				p.composes[ref] = composes
			}
			properties := composes.Properties
			if properties == nil {
				properties = make(map[string]logger.Loc)
				composes.Properties = properties
			}
			for _, rule := range rules {
				if decl, ok := rule.Data.(*css_ast.RDeclaration); ok && decl.Key != css_ast.DComposes {
					properties[decl.KeyText] = decl.KeyRange.Loc
				}
			}
		}
	}

	for i := 0; i < len(rules); i++ {
		rule := rules[i]
		rewrittenRules = append(rewrittenRules, rule)
		decl, ok := rule.Data.(*css_ast.RDeclaration)
		if !ok {
			continue
		}

		// If the previous loop iteration would have clipped a color, we will
		// duplicate it and insert the clipped copy before the unclipped copy
		var wouldClipColor *bool
		if wouldClipColorFlag {
			// Second pass over this declaration: clone it (clipping enabled
			// because wouldClipColor stays nil) and swap the clone in front
			// of the original copy appended on the previous pass
			wouldClipColorFlag = false
			clone := *decl
			clone.Value = css_ast.CloneTokensWithoutImportRecords(clone.Value)
			decl = &clone
			rule.Data = decl
			n := len(rewrittenRules) - 2
			rewrittenRules = append(rewrittenRules[:n], rule, rewrittenRules[n])
		} else {
			wouldClipColor = &wouldClipColorFlag
		}

		switch decl.Key {
		case css_ast.DComposes:
			// Only process "composes" directives if we're in "local-css" or
			// "global-css" mode. In these cases, "composes" directives will always
			// be removed (because they are being processed) even if they contain
			// errors. Otherwise we leave "composes" directives there untouched and
			// don't check them for errors.
			if p.options.symbolMode != symbolModeDisabled {
				if composesContext == nil {
					if !didWarnAboutComposes {
						didWarnAboutComposes = true
						p.log.AddID(logger.MsgID_CSS_CSSSyntaxError, logger.Warning, &p.tracker, decl.KeyRange, "\"composes\" is not valid here")
					}
				} else if composesContext.problemRange.Len > 0 {
					if !didWarnAboutComposes {
						didWarnAboutComposes = true
						p.log.AddIDWithNotes(logger.MsgID_CSS_CSSSyntaxError, logger.Warning, &p.tracker, decl.KeyRange, "\"composes\" only works inside single class selectors",
							[]logger.MsgData{p.tracker.MsgData(composesContext.problemRange, "The parent selector is not a single class selector because of the syntax here:")})
					}
				} else {
					p.handleComposesPragma(*composesContext, decl.Value)
				}
				// Drop the "composes" declaration that was just appended
				rewrittenRules = rewrittenRules[:len(rewrittenRules)-1]
			}

		case css_ast.DBackground:
			for i, t := range decl.Value {
				t = p.lowerAndMinifyColor(t, wouldClipColor)
				t = p.lowerAndMinifyGradient(t, wouldClipColor)
				decl.Value[i] = t
			}

		case css_ast.DBackgroundImage,
			css_ast.DBorderImage,
			css_ast.DMaskImage:

			for i, t := range decl.Value {
				t = p.lowerAndMinifyGradient(t, wouldClipColor)
				decl.Value[i] = t
			}

		case css_ast.DBackgroundColor,
			css_ast.DBorderBlockEndColor,
			css_ast.DBorderBlockStartColor,
			css_ast.DBorderBottomColor,
			css_ast.DBorderColor,
			css_ast.DBorderInlineEndColor,
			css_ast.DBorderInlineStartColor,
			css_ast.DBorderLeftColor,
			css_ast.DBorderRightColor,
			css_ast.DBorderTopColor,
			css_ast.DCaretColor,
			css_ast.DColor,
			css_ast.DColumnRuleColor,
			css_ast.DFill,
			css_ast.DFloodColor,
			css_ast.DLightingColor,
			css_ast.DOutlineColor,
			css_ast.DStopColor,
			css_ast.DStroke,
			css_ast.DTextDecorationColor,
			css_ast.DTextEmphasisColor:

			if len(decl.Value) == 1 {
				decl.Value[0] = p.lowerAndMinifyColor(decl.Value[0], wouldClipColor)
			}

		case css_ast.DTransform:
			if p.options.minifySyntax {
				decl.Value = p.mangleTransforms(decl.Value)
			}

		case css_ast.DBoxShadow:
			decl.Value = p.lowerAndMangleBoxShadows(decl.Value, wouldClipColor)

		// Container name
		case css_ast.DContainer:
			p.processContainerShorthand(decl.Value)
		case css_ast.DContainerName:
			p.processContainerName(decl.Value)

		// Animation name
		case css_ast.DAnimation:
			p.processAnimationShorthand(decl.Value)
		case css_ast.DAnimationName:
			p.processAnimationName(decl.Value)

		// List style
		case css_ast.DListStyle:
			p.processListStyleShorthand(decl.Value)
		case css_ast.DListStyleType:
			if len(decl.Value) == 1 {
				p.processListStyleType(&decl.Value[0])
			}

		// Font
		case css_ast.DFont:
			if p.options.minifySyntax {
				decl.Value = p.mangleFont(decl.Value)
			}
		case css_ast.DFontFamily:
			if p.options.minifySyntax {
				if value, ok := p.mangleFontFamily(decl.Value); ok {
					decl.Value = value
				}
			}
		case css_ast.DFontWeight:
			if len(decl.Value) == 1 && p.options.minifySyntax {
				decl.Value[0] = p.mangleFontWeight(decl.Value[0])
			}

		// Margin
		case css_ast.DMargin:
			if p.options.minifySyntax {
				margin.mangleSides(rewrittenRules, decl, p.options.minifyWhitespace)
			}
		case css_ast.DMarginTop:
			if p.options.minifySyntax {
				margin.mangleSide(rewrittenRules, decl, p.options.minifyWhitespace, boxTop)
			}
		case css_ast.DMarginRight:
			if p.options.minifySyntax {
				margin.mangleSide(rewrittenRules, decl, p.options.minifyWhitespace, boxRight)
			}
		case css_ast.DMarginBottom:
			if p.options.minifySyntax {
				margin.mangleSide(rewrittenRules, decl, p.options.minifyWhitespace, boxBottom)
			}
		case css_ast.DMarginLeft:
			if p.options.minifySyntax {
				margin.mangleSide(rewrittenRules, decl, p.options.minifyWhitespace, boxLeft)
			}

		// Padding
		case css_ast.DPadding:
			if p.options.minifySyntax {
				padding.mangleSides(rewrittenRules, decl, p.options.minifyWhitespace)
			}
		case css_ast.DPaddingTop:
			if p.options.minifySyntax {
				padding.mangleSide(rewrittenRules, decl, p.options.minifyWhitespace, boxTop)
			}
		case css_ast.DPaddingRight:
			if p.options.minifySyntax {
				padding.mangleSide(rewrittenRules, decl, p.options.minifyWhitespace, boxRight)
			}
		case css_ast.DPaddingBottom:
			if p.options.minifySyntax {
				padding.mangleSide(rewrittenRules, decl, p.options.minifyWhitespace, boxBottom)
			}
		case css_ast.DPaddingLeft:
			if p.options.minifySyntax {
				padding.mangleSide(rewrittenRules, decl, p.options.minifyWhitespace, boxLeft)
			}

		// Inset
		case css_ast.DInset:
			// Lower "inset" to "top"/"right"/"bottom"/"left" when unsupported
			if p.options.unsupportedCSSFeatures.Has(compat.InsetProperty) {
				if decls, ok := p.lowerInset(rule.Loc, decl); ok {
					rewrittenRules = rewrittenRules[:len(rewrittenRules)-1]
					for i := range decls {
						rewrittenRules = append(rewrittenRules, decls[i])
						if p.options.minifySyntax {
							inset.mangleSide(rewrittenRules, decls[i].Data.(*css_ast.RDeclaration), p.options.minifyWhitespace, i)
						}
					}
					break
				}
			}
			if p.options.minifySyntax {
				inset.mangleSides(rewrittenRules, decl, p.options.minifyWhitespace)
			}
		case css_ast.DTop:
			if p.options.minifySyntax {
				inset.mangleSide(rewrittenRules, decl, p.options.minifyWhitespace, boxTop)
			}
		case css_ast.DRight:
			if p.options.minifySyntax {
				inset.mangleSide(rewrittenRules, decl, p.options.minifyWhitespace, boxRight)
			}
		case css_ast.DBottom:
			if p.options.minifySyntax {
				inset.mangleSide(rewrittenRules, decl, p.options.minifyWhitespace, boxBottom)
			}
		case css_ast.DLeft:
			if p.options.minifySyntax {
				inset.mangleSide(rewrittenRules, decl, p.options.minifyWhitespace, boxLeft)
			}

		// Border radius
		case css_ast.DBorderRadius:
			if p.options.minifySyntax {
				borderRadius.mangleCorners(rewrittenRules, decl, p.options.minifyWhitespace)
			}
		case css_ast.DBorderTopLeftRadius:
			if p.options.minifySyntax {
				borderRadius.mangleCorner(rewrittenRules, decl, p.options.minifyWhitespace, borderRadiusTopLeft)
			}
		case css_ast.DBorderTopRightRadius:
			if p.options.minifySyntax {
				borderRadius.mangleCorner(rewrittenRules, decl, p.options.minifyWhitespace, borderRadiusTopRight)
			}
		case css_ast.DBorderBottomRightRadius:
			if p.options.minifySyntax {
				borderRadius.mangleCorner(rewrittenRules, decl, p.options.minifyWhitespace, borderRadiusBottomRight)
			}
		case css_ast.DBorderBottomLeftRadius:
			if p.options.minifySyntax {
				borderRadius.mangleCorner(rewrittenRules, decl, p.options.minifyWhitespace, borderRadiusBottomLeft)
			}
		}

		// Insert vendor-prefixed fallbacks before this declaration if required
		// by the configured prefix data
		if prefixes, ok := p.options.cssPrefixData[decl.Key]; ok {
			if declarationKeys == nil {
				// Only generate this map if it's needed
				declarationKeys = make(map[string]struct{})
				for _, rule := range rules {
					if decl, ok := rule.Data.(*css_ast.RDeclaration); ok {
						declarationKeys[decl.KeyText] = struct{}{}
					}
				}
			}
			if (prefixes & compat.WebkitPrefix) != 0 {
				rewrittenRules = p.insertPrefixedDeclaration(rewrittenRules, "-webkit-", rule.Loc, decl, declarationKeys)
			}
			if (prefixes & compat.KhtmlPrefix) != 0 {
				rewrittenRules = p.insertPrefixedDeclaration(rewrittenRules, "-khtml-", rule.Loc, decl, declarationKeys)
			}
			if (prefixes & compat.MozPrefix) != 0 {
				rewrittenRules = p.insertPrefixedDeclaration(rewrittenRules, "-moz-", rule.Loc, decl, declarationKeys)
			}
			if (prefixes & compat.MsPrefix) != 0 {
				rewrittenRules = p.insertPrefixedDeclaration(rewrittenRules, "-ms-", rule.Loc, decl, declarationKeys)
			}
			if (prefixes & compat.OPrefix) != 0 {
				rewrittenRules = p.insertPrefixedDeclaration(rewrittenRules, "-o-", rule.Loc, decl, declarationKeys)
			}
		}

		// If this loop iteration would have clipped a color, the out-of-gamut
		// colors will not be clipped and this flag will be set. We then set up the
		// next iteration of the loop to duplicate this rule and process it again
		// with color clipping enabled.
		if wouldClipColorFlag {
			if p.options.unsupportedCSSFeatures.Has(compat.ColorFunctions) {
				// Only do this if there was no previous instance of that property so
				// we avoid overwriting any manually-specified fallback values
				for j := len(rewrittenRules) - 2; j >= 0; j-- {
					if prev, ok := rewrittenRules[j].Data.(*css_ast.RDeclaration); ok && prev.Key == decl.Key {
						wouldClipColorFlag = false
						break
					}
				}
				if wouldClipColorFlag {
					// If the code above would have clipped a color outside of the sRGB gamut,
					// process this rule again so we can generate the clipped version next time
					i -= 1
					continue
				}
			}
			wouldClipColorFlag = false
		}
	}

	// Compact removed rules (trackers leave replaced rules as zero values)
	if p.options.minifySyntax {
		end := 0
		for _, rule := range rewrittenRules {
			if rule.Data != nil {
				rewrittenRules[end] = rule
				end++
			}
		}
		rewrittenRules = rewrittenRules[:end]
	}

	return
}
// insertPrefixedDeclaration potentially inserts a vendor-prefixed copy of
// "decl" (which must be the last element of "rules") directly before it,
// returning the updated slice. Nothing is inserted when an equivalent
// prefixed declaration is already present in this rule body, or when the
// prefix doesn't apply to this particular value. For some properties the
// prefix is applied to the value rather than the property name.
func (p *parser) insertPrefixedDeclaration(rules []css_ast.Rule, prefix string, loc logger.Loc, decl *css_ast.RDeclaration, declarationKeys map[string]struct{}) []css_ast.Rule {
	keyText := prefix + decl.KeyText

	// Don't insert a prefixed declaration if there already is one
	if _, ok := declarationKeys[keyText]; ok {
		// We found a previous declaration with a matching prefixed property.
		// The value is ignored, which matches the behavior of "autoprefixer".
		return rules
	}

	// Additional special cases for when the prefix applies
	switch decl.Key {
	case css_ast.DBackgroundClip:
		// The prefix is only needed for "background-clip: text"
		if len(decl.Value) != 1 || decl.Value[0].Kind != css_lexer.TIdent || !strings.EqualFold(decl.Value[0].Text, "text") {
			return rules
		}

	case css_ast.DPosition:
		// The prefix is only needed for "position: sticky"
		if len(decl.Value) != 1 || decl.Value[0].Kind != css_lexer.TIdent || !strings.EqualFold(decl.Value[0].Text, "sticky") {
			return rules
		}

	case css_ast.DWidth, css_ast.DMinWidth, css_ast.DMaxWidth,
		css_ast.DHeight, css_ast.DMinHeight, css_ast.DMaxHeight:
		// The prefix is only needed for "width: stretch"
		if len(decl.Value) != 1 || decl.Value[0].Kind != css_lexer.TIdent || !strings.EqualFold(decl.Value[0].Text, "stretch") {
			return rules
		}
	}

	// Clone the value so the fallback can be edited independently
	value := css_ast.CloneTokensWithoutImportRecords(decl.Value)

	// Additional special cases for how to transform the contents
	switch decl.Key {
	case css_ast.DPosition:
		// The prefix applies to the value, not the property
		keyText = decl.KeyText
		// NOTE(review): the value is hardcoded to "-webkit-sticky", which
		// assumes cssPrefixData only ever requests the "-webkit-" prefix
		// for "position" — confirm against the prefix data tables
		value[0].Text = "-webkit-sticky"

	case css_ast.DWidth, css_ast.DMinWidth, css_ast.DMaxWidth,
		css_ast.DHeight, css_ast.DMinHeight, css_ast.DMaxHeight:
		// The prefix applies to the value, not the property
		keyText = decl.KeyText

		// This currently only applies to "stretch" (already checked above)
		switch prefix {
		case "-webkit-":
			value[0].Text = "-webkit-fill-available"
		case "-moz-":
			value[0].Text = "-moz-available"
		}

	case css_ast.DUserSelect:
		// The prefix applies to the value as well as the property
		if prefix == "-moz-" && len(value) == 1 && value[0].Kind == css_lexer.TIdent && strings.EqualFold(value[0].Text, "none") {
			value[0].Text = "-moz-none"
		}

	case css_ast.DMaskComposite:
		// WebKit uses different names for these values
		if prefix == "-webkit-" {
			for i, token := range value {
				if token.Kind == css_lexer.TIdent {
					switch token.Text {
					case "add":
						value[i].Text = "source-over"
					case "subtract":
						value[i].Text = "source-out"
					case "intersect":
						value[i].Text = "source-in"
					case "exclude":
						value[i].Text = "xor"
					}
				}
			}
		}
	}

	// If we didn't change the key, manually search for a previous duplicate rule
	// (the "declarationKeys" fast path above only works for a changed key)
	if keyText == decl.KeyText {
		for _, rule := range rules {
			if prevDecl, ok := rule.Data.(*css_ast.RDeclaration); ok && prevDecl.KeyText == keyText && css_ast.TokensEqual(prevDecl.Value, value, nil) {
				return rules
			}
		}
	}

	// Overwrite the latest declaration with the prefixed declaration
	rules[len(rules)-1] = css_ast.Rule{Loc: loc, Data: &css_ast.RDeclaration{
		KeyText:   keyText,
		KeyRange:  decl.KeyRange,
		Value:     value,
		Important: decl.Important,
	}}

	// Re-add the latest declaration after the inserted declaration
	rules = append(rules, css_ast.Rule{Loc: loc, Data: decl})
	return rules
}
// lowerInset expands the "inset" shorthand into the four longhand
// declarations "top", "right", "bottom", and "left" for targets that
// don't support "inset". It fails (ok == false) when the value can't be
// expanded (e.g. it contains something like "var()").
func (p *parser) lowerInset(loc logger.Loc, decl *css_ast.RDeclaration) ([]css_ast.Rule, bool) {
	tokens, ok := expandTokenQuad(decl.Value, "")
	if !ok {
		return nil, false
	}

	// Each longhand holds a single token, so trailing whitespace is only
	// kept when not minifying
	mask := ^css_ast.WhitespaceAfter
	if p.options.minifyWhitespace {
		mask = 0
	}
	for i := range tokens {
		tokens[i].Whitespace &= mask
	}

	// Build the four longhand declarations in top/right/bottom/left order
	decls := [4]css_ast.RDeclaration{
		{KeyText: "top", Key: css_ast.DTop},
		{KeyText: "right", Key: css_ast.DRight},
		{KeyText: "bottom", Key: css_ast.DBottom},
		{KeyText: "left", Key: css_ast.DLeft},
	}
	rules := make([]css_ast.Rule, 0, 4)
	for i := range decls {
		decls[i].KeyRange = decl.KeyRange
		decls[i].Value = tokens[i : i+1]
		decls[i].Important = decl.Important
		rules = append(rules, css_ast.Rule{Loc: loc, Data: &decls[i]})
	}
	return rules, true
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/css_parser/css_reduce_calc.go | internal/css_parser/css_reduce_calc.go | package css_parser
import (
"fmt"
"math"
"strconv"
"strings"
"github.com/evanw/esbuild/internal/css_ast"
"github.com/evanw/esbuild/internal/css_lexer"
"github.com/evanw/esbuild/internal/logger"
)
// tryToReduceCalcExpression attempts to simplify the contents of a
// "calc()" function token. If the math inside can be parsed and at least
// partially evaluated, an equivalent (usually shorter) token is returned.
// Otherwise the original token is returned unchanged.
func (p *parser) tryToReduceCalcExpression(token css_ast.Token) css_ast.Token {
	term := tryToParseCalcTerm(*token.Children)
	if term == nil {
		return token
	}

	whitespace := css_ast.WhitespaceBefore | css_ast.WhitespaceAfter
	if p.options.minifyWhitespace {
		whitespace = 0
	}

	result, ok := term.partiallySimplify().convertToToken(whitespace)
	if !ok {
		return token
	}

	// A parenthesized group at the top level turns back into "calc(...)"
	if result.Kind == css_lexer.TOpenParen {
		result.Kind = css_lexer.TFunction
		result.Text = "calc"
	}
	result.Loc = token.Loc
	result.Whitespace = css_ast.WhitespaceBefore | css_ast.WhitespaceAfter
	return result
}
// calcTermWithOp is a single operand in a sum or product together with the
// location of the operator token that precedes it (the first operand of a
// chain carries no operator).
type calcTermWithOp struct {
	data  calcTerm
	opLoc logger.Loc
}

// See: https://www.w3.org/TR/css-values-4/#calc-internal
type calcTerm interface {
	convertToToken(whitespace css_ast.WhitespaceFlags) (css_ast.Token, bool)
	partiallySimplify() calcTerm
}

// calcSum is a "+"/"-" chain; subtraction is represented by wrapping the
// operand in a calcNegate node (see tryToParseCalcTerm).
type calcSum struct {
	terms []calcTermWithOp
}

// calcProduct is a "*"/"/" chain; division is represented by wrapping the
// operand in a calcInvert node (see tryToParseCalcTerm).
type calcProduct struct {
	terms []calcTermWithOp
}

// calcNegate represents the negation (0 - x) of its single child term.
type calcNegate struct {
	term calcTermWithOp
}

// calcInvert represents the reciprocal (1 / x) of its single child term.
type calcInvert struct {
	term calcTermWithOp
}

// calcNumeric is a numeric leaf: a plain number (unit == ""), a
// percentage (unit == "%"), or a dimension such as "10px".
type calcNumeric struct {
	unit   string
	number float64
	loc    logger.Loc
}

// calcValue is an opaque leaf that wraps any other token. The
// isInvalidPlusOrMinus flag marks a "+" or "-" token that is missing the
// whitespace required around those operators and therefore must not be
// treated as an operator.
type calcValue struct {
	token                css_ast.Token
	isInvalidPlusOrMinus bool
}
// floatToStringForCalc prints a float in the shortest form suitable for a
// "calc()" expression. It fails (ok == false) for NaN and infinities, and
// for any value that doesn't round-trip exactly through the
// 5-decimal-place representation used here.
func floatToStringForCalc(a float64) (string, bool) {
	// Handle non-finite cases
	if math.IsNaN(a) || math.IsInf(a, 0) {
		return "", false
	}

	// Format with 5 decimal places, then strip trailing zeros along with
	// any decimal point they expose
	text := strings.TrimRight(fmt.Sprintf("%.05f", a), "0")
	text = strings.TrimSuffix(text, ".")

	// Shorten "0.5" to ".5" and "-0.5" to "-.5"
	switch {
	case strings.HasPrefix(text, "0."):
		text = text[1:]
	case strings.HasPrefix(text, "-0."):
		text = "-" + text[2:]
	}

	// Bail if the number is not exactly represented
	if number, err := strconv.ParseFloat(text, 64); err != nil || number != a {
		return "", false
	}
	return text, true
}
// convertToToken serializes a sum node as a parenthesized token whose
// children are the operands joined by " + " and " - " operators. Negate
// nodes and negative numeric operands are rendered via the "-" operator.
// The caller re-labels a top-level "(" result as a "calc(" function.
// Specification: https://www.w3.org/TR/css-values-4/#calc-serialize
func (c *calcSum) convertToToken(whitespace css_ast.WhitespaceFlags) (css_ast.Token, bool) {
	tokens := make([]css_ast.Token, 0, len(c.terms)*2)

	// ALGORITHM DEVIATION: Avoid parenthesizing product nodes inside sum nodes
	if product, ok := c.terms[0].data.(*calcProduct); ok {
		token, ok := product.convertToToken(whitespace)
		if !ok {
			return css_ast.Token{}, false
		}
		// Splice the product's children in directly instead of nesting "(...)"
		tokens = append(tokens, *token.Children...)
	} else {
		token, ok := c.terms[0].data.convertToToken(whitespace)
		if !ok {
			return css_ast.Token{}, false
		}
		tokens = append(tokens, token)
	}

	for _, term := range c.terms[1:] {
		// If child is a Negate node, append " - " to s, then serialize the Negate’s child and append the result to s.
		if negate, ok := term.data.(*calcNegate); ok {
			token, ok := negate.term.data.convertToToken(whitespace)
			if !ok {
				return css_ast.Token{}, false
			}
			tokens = append(tokens, css_ast.Token{
				Loc:        term.opLoc,
				Kind:       css_lexer.TDelimMinus,
				Text:       "-",
				Whitespace: css_ast.WhitespaceBefore | css_ast.WhitespaceAfter,
			}, token)
			continue
		}

		// If child is a negative numeric value, append " - " to s, then serialize the negation of child as normal and append the result to s.
		if numeric, ok := term.data.(*calcNumeric); ok && numeric.number < 0 {
			clone := *numeric
			clone.number = -clone.number
			token, ok := clone.convertToToken(whitespace)
			if !ok {
				return css_ast.Token{}, false
			}
			tokens = append(tokens, css_ast.Token{
				Loc:        term.opLoc,
				Kind:       css_lexer.TDelimMinus,
				Text:       "-",
				Whitespace: css_ast.WhitespaceBefore | css_ast.WhitespaceAfter,
			}, token)
			continue
		}

		// Otherwise, append " + " to s, then serialize child and append the result to s.
		// Note: whitespace around "+" and "-" is mandatory in calc syntax,
		// so it's added unconditionally here
		tokens = append(tokens, css_ast.Token{
			Loc:        term.opLoc,
			Kind:       css_lexer.TDelimPlus,
			Text:       "+",
			Whitespace: css_ast.WhitespaceBefore | css_ast.WhitespaceAfter,
		})

		// ALGORITHM DEVIATION: Avoid parenthesizing product nodes inside sum nodes
		if product, ok := term.data.(*calcProduct); ok {
			token, ok := product.convertToToken(whitespace)
			if !ok {
				return css_ast.Token{}, false
			}
			tokens = append(tokens, *token.Children...)
		} else {
			token, ok := term.data.convertToToken(whitespace)
			if !ok {
				return css_ast.Token{}, false
			}
			tokens = append(tokens, token)
		}
	}

	return css_ast.Token{
		Loc:      tokens[0].Loc,
		Kind:     css_lexer.TOpenParen,
		Text:     "(",
		Children: &tokens,
	}, true
}
// convertToToken serializes a product node as a parenthesized token whose
// children are the operands joined by "*" and "/" operators. Invert nodes
// are rendered via the "/" operator. The caller re-labels a top-level "("
// result as a "calc(" function.
// Specification: https://www.w3.org/TR/css-values-4/#calc-serialize
func (c *calcProduct) convertToToken(whitespace css_ast.WhitespaceFlags) (css_ast.Token, bool) {
	tokens := make([]css_ast.Token, 0, len(c.terms)*2)
	token, ok := c.terms[0].data.convertToToken(whitespace)
	if !ok {
		return css_ast.Token{}, false
	}
	tokens = append(tokens, token)

	for _, term := range c.terms[1:] {
		// If child is an Invert node, append " / " to s, then serialize the Invert’s child and append the result to s.
		if invert, ok := term.data.(*calcInvert); ok {
			token, ok := invert.term.data.convertToToken(whitespace)
			if !ok {
				return css_ast.Token{}, false
			}
			tokens = append(tokens, css_ast.Token{
				Loc:        term.opLoc,
				Kind:       css_lexer.TDelimSlash,
				Text:       "/",
				Whitespace: whitespace,
			}, token)
			continue
		}

		// Otherwise, append " * " to s, then serialize child and append the result to s.
		// Note: unlike "+"/"-", whitespace around "*" and "/" is optional,
		// so the caller-provided flags are used here
		token, ok := term.data.convertToToken(whitespace)
		if !ok {
			return css_ast.Token{}, false
		}
		tokens = append(tokens, css_ast.Token{
			Loc:        term.opLoc,
			Kind:       css_lexer.TDelimAsterisk,
			Text:       "*",
			Whitespace: whitespace,
		}, token)
	}

	return css_ast.Token{
		Loc:      tokens[0].Loc,
		Kind:     css_lexer.TOpenParen,
		Text:     "(",
		Children: &tokens,
	}, true
}
// convertToToken serializes a Negate node as "(-1 * x)" since calc syntax
// has no unary negation operator.
// Specification: https://www.w3.org/TR/css-values-4/#calc-serialize
func (c *calcNegate) convertToToken(whitespace css_ast.WhitespaceFlags) (css_ast.Token, bool) {
	token, ok := c.term.data.convertToToken(whitespace)
	if !ok {
		return css_ast.Token{}, false
	}
	return css_ast.Token{
		Kind: css_lexer.TOpenParen,
		Text: "(",
		Children: &[]css_ast.Token{
			{Loc: c.term.opLoc, Kind: css_lexer.TNumber, Text: "-1"},
			// Fix: this operator token has the text "*", so its kind must be
			// TDelimAsterisk (it was previously mislabeled as TDelimSlash,
			// which consumers that dispatch on Kind would misread as division)
			{Loc: c.term.opLoc, Kind: css_lexer.TDelimAsterisk, Text: "*", Whitespace: css_ast.WhitespaceBefore | css_ast.WhitespaceAfter},
			token,
		},
	}, true
}
// convertToToken serializes an Invert node as "(1 / x)" since calc syntax
// has no dedicated reciprocal operator.
// Specification: https://www.w3.org/TR/css-values-4/#calc-serialize
func (c *calcInvert) convertToToken(whitespace css_ast.WhitespaceFlags) (css_ast.Token, bool) {
	inner, ok := c.term.data.convertToToken(whitespace)
	if !ok {
		return css_ast.Token{}, false
	}
	children := []css_ast.Token{
		{Loc: c.term.opLoc, Kind: css_lexer.TNumber, Text: "1"},
		{Loc: c.term.opLoc, Kind: css_lexer.TDelimSlash, Text: "/", Whitespace: css_ast.WhitespaceBefore | css_ast.WhitespaceAfter},
		inner,
	}
	return css_ast.Token{
		Kind:     css_lexer.TOpenParen,
		Text:     "(",
		Children: &children,
	}, true
}
// convertToToken turns a numeric calc term back into a CSS token: a plain
// number, a percentage, or a dimension, depending on the unit. It fails
// when the number can't be printed exactly (see floatToStringForCalc).
func (c *calcNumeric) convertToToken(whitespace css_ast.WhitespaceFlags) (css_ast.Token, bool) {
	text, ok := floatToStringForCalc(c.number)
	if !ok {
		return css_ast.Token{}, false
	}
	switch c.unit {
	case "":
		return css_ast.Token{
			Loc:  c.loc,
			Kind: css_lexer.TNumber,
			Text: text,
		}, true
	case "%":
		return css_ast.Token{
			Loc:  c.loc,
			Kind: css_lexer.TPercentage,
			Text: text + "%",
		}, true
	default:
		return css_ast.Token{
			Loc:        c.loc,
			Kind:       css_lexer.TDimension,
			Text:       text + c.unit,
			UnitOffset: uint16(len(text)),
		}, true
	}
}
// convertToToken returns the wrapped token with its whitespace flags
// cleared, since the serializer re-adds whitespace around operators.
func (c *calcValue) convertToToken(whitespace css_ast.WhitespaceFlags) (css_ast.Token, bool) {
	token := c.token
	token.Whitespace = 0
	return token, true
}
// partiallySimplify flattens nested sum nodes and merges numeric operands
// that share the same unit (numbers with numbers, "%" with "%", "px" with
// "px", ...). If only one operand remains, it is returned directly.
// Specification: https://www.w3.org/TR/css-values-4/#calc-simplification
func (c *calcSum) partiallySimplify() calcTerm {
	// For each of root’s children that are Sum nodes, replace them with their children.
	terms := make([]calcTermWithOp, 0, len(c.terms))
	for _, term := range c.terms {
		term.data = term.data.partiallySimplify()
		if sum, ok := term.data.(*calcSum); ok {
			terms = append(terms, sum.terms...)
		} else {
			terms = append(terms, term)
		}
	}

	// For each set of root’s children that are numeric values with identical units, remove
	// those children and replace them with a single numeric value containing the sum of the
	// removed nodes, and with the same unit. (E.g. combine numbers, combine percentages,
	// combine px values, etc.)
	for i := 0; i < len(terms); i++ {
		term := terms[i]
		if numeric, ok := term.data.(*calcNumeric); ok {
			// Compact matching numerics after index i into this one in place;
			// "end" tracks where non-matching terms are kept
			end := i + 1
			for j := end; j < len(terms); j++ {
				term2 := terms[j]
				if numeric2, ok := term2.data.(*calcNumeric); ok && strings.EqualFold(numeric2.unit, numeric.unit) {
					numeric.number += numeric2.number
				} else {
					terms[end] = term2
					end++
				}
			}
			terms = terms[:end]
		}
	}

	// If root has only a single child at this point, return the child.
	if len(terms) == 1 {
		return terms[0].data
	}

	// Otherwise, return root.
	c.terms = terms
	return c
}
// partiallySimplify flattens nested product nodes, folds unitless number
// factors together (and into a single dimension/percentage factor when
// possible), and rewrites a factor as a division when the reciprocal
// prints shorter. If only one operand remains, it is returned directly.
// Specification: https://www.w3.org/TR/css-values-4/#calc-simplification
func (c *calcProduct) partiallySimplify() calcTerm {
	// For each of root’s children that are Product nodes, replace them with their children.
	terms := make([]calcTermWithOp, 0, len(c.terms))
	for _, term := range c.terms {
		term.data = term.data.partiallySimplify()
		if product, ok := term.data.(*calcProduct); ok {
			terms = append(terms, product.terms...)
		} else {
			terms = append(terms, term)
		}
	}

	// If root has multiple children that are numbers (not percentages or dimensions), remove
	// them and replace them with a single number containing the product of the removed nodes.
	for i, term := range terms {
		if numeric, ok := term.data.(*calcNumeric); ok && numeric.unit == "" {
			// Fold later unitless numbers into this one, compacting the rest
			end := i + 1
			for j := end; j < len(terms); j++ {
				term2 := terms[j]
				if numeric2, ok := term2.data.(*calcNumeric); ok && numeric2.unit == "" {
					numeric.number *= numeric2.number
				} else {
					terms[end] = term2
					end++
				}
			}
			terms = terms[:end]
			break
		}
	}

	// If root contains only numeric values and/or Invert nodes containing numeric values,
	// and multiplying the types of all the children (noting that the type of an Invert
	// node is the inverse of its child’s type) results in a type that matches any of the
	// types that a math function can resolve to, return the result of multiplying all the
	// values of the children (noting that the value of an Invert node is the reciprocal
	// of its child’s value), expressed in the result’s canonical unit.
	if len(terms) == 2 {
		// Right now, only handle the case of two numbers, one of which has no unit
		if first, ok := terms[0].data.(*calcNumeric); ok {
			if second, ok := terms[1].data.(*calcNumeric); ok {
				if first.unit == "" {
					second.number *= first.number
					return second
				}
				if second.unit == "" {
					first.number *= second.number
					return first
				}
			}
		}
	}

	// ALGORITHM DEVIATION: Divide instead of multiply if the reciprocal is shorter
	for i := 1; i < len(terms); i++ {
		if numeric, ok := terms[i].data.(*calcNumeric); ok {
			reciprocal := 1 / numeric.number
			if multiply, ok := floatToStringForCalc(numeric.number); ok {
				if divide, ok := floatToStringForCalc(reciprocal); ok && len(divide) < len(multiply) {
					// e.g. "* .333333" is longer than "/ 3"
					numeric.number = reciprocal
					terms[i].data = &calcInvert{term: calcTermWithOp{
						data:  numeric,
						opLoc: terms[i].opLoc,
					}}
				}
			}
		}
	}

	// If root has only a single child at this point, return the child.
	if len(terms) == 1 {
		return terms[0].data
	}

	// Otherwise, return root.
	c.terms = terms
	return c
}
// partiallySimplify folds a negation into its operand where possible.
// Specification: https://www.w3.org/TR/css-values-4/#calc-simplification
func (c *calcNegate) partiallySimplify() calcTerm {
	c.term.data = c.term.data.partiallySimplify()

	switch child := c.term.data.(type) {
	case *calcNumeric:
		// Negating a numeric value just flips its sign (0 - value)
		child.number = -child.number
		return child
	case *calcNegate:
		// Double negation cancels out
		return child.term.data
	}
	return c
}
// partiallySimplify folds an inversion into its operand where possible.
// Specification: https://www.w3.org/TR/css-values-4/#calc-simplification
func (c *calcInvert) partiallySimplify() calcTerm {
	c.term.data = c.term.data.partiallySimplify()

	switch child := c.term.data.(type) {
	case *calcNumeric:
		// Only a plain number (not a percentage or dimension) can be
		// replaced by its reciprocal
		if child.unit == "" {
			child.number = 1 / child.number
			return child
		}
	case *calcInvert:
		// Double inversion cancels out
		return child.term.data
	}
	return c
}
// partiallySimplify is a no-op for a numeric leaf: it's already in its
// simplest form.
func (c *calcNumeric) partiallySimplify() calcTerm {
	return c
}
// partiallySimplify is a no-op for an opaque leaf token: its contents
// can't be evaluated here.
func (c *calcValue) partiallySimplify() calcTerm {
	return c
}
// tryToParseCalcTerm parses a flat token slice (the contents of "calc()"
// or of a parenthesized group) into a calc term tree, or returns nil if
// the tokens can't safely be interpreted (e.g. they contain "var()", or
// don't reduce to a single well-formed expression).
// Specification: https://www.w3.org/TR/css-values-4/#calc-internal
func tryToParseCalcTerm(tokens []css_ast.Token) calcTerm {
	// First convert each token into a leaf term. Operators remain wrapped
	// as calcValue leaves until the run-collection passes below.
	terms := make([]calcTermWithOp, len(tokens))
	for i, token := range tokens {
		var term calcTerm
		if token.Kind == css_lexer.TFunction && strings.EqualFold(token.Text, "var") {
			// Using "var()" should bail because it can expand to any number of tokens
			return nil
		} else if token.Kind == css_lexer.TOpenParen || (token.Kind == css_lexer.TFunction && strings.EqualFold(token.Text, "calc")) {
			// Parenthesized groups and nested "calc()" are parsed recursively
			term = tryToParseCalcTerm(*token.Children)
			if term == nil {
				return nil
			}
		} else if token.Kind == css_lexer.TNumber {
			if number, err := strconv.ParseFloat(token.Text, 64); err == nil {
				term = &calcNumeric{loc: token.Loc, number: number}
			} else {
				term = &calcValue{token: token}
			}
		} else if token.Kind == css_lexer.TPercentage {
			if number, err := strconv.ParseFloat(token.PercentageValue(), 64); err == nil {
				term = &calcNumeric{loc: token.Loc, number: number, unit: "%"}
			} else {
				term = &calcValue{token: token}
			}
		} else if token.Kind == css_lexer.TDimension {
			if number, err := strconv.ParseFloat(token.DimensionValue(), 64); err == nil {
				term = &calcNumeric{loc: token.Loc, number: number, unit: token.DimensionUnit()}
			} else {
				term = &calcValue{token: token}
			}
		} else if token.Kind == css_lexer.TIdent && strings.EqualFold(token.Text, "Infinity") {
			term = &calcNumeric{loc: token.Loc, number: math.Inf(1)}
		} else if token.Kind == css_lexer.TIdent && strings.EqualFold(token.Text, "-Infinity") {
			term = &calcNumeric{loc: token.Loc, number: math.Inf(-1)}
		} else if token.Kind == css_lexer.TIdent && strings.EqualFold(token.Text, "NaN") {
			term = &calcNumeric{loc: token.Loc, number: math.NaN()}
		} else {
			term = &calcValue{
				token: token,

				// From the specification: "In addition, whitespace is required on both sides of the
				// + and - operators. (The * and / operators can be used without white space around them.)"
				isInvalidPlusOrMinus: i > 0 && i+1 < len(tokens) &&
					(token.Kind == css_lexer.TDelimPlus || token.Kind == css_lexer.TDelimMinus) &&
					(((token.Whitespace&css_ast.WhitespaceBefore) == 0 && (tokens[i-1].Whitespace&css_ast.WhitespaceAfter) == 0) ||
						(token.Whitespace&css_ast.WhitespaceAfter) == 0 && (tokens[i+1].Whitespace&css_ast.WhitespaceBefore) == 0),
			}
		}
		terms[i].data = term
	}

	// Collect children into Product and Invert nodes. A valid expression
	// alternates operand, operator, operand, ... so operators are scanned
	// at odd indices and each run of "*"/"/" operators is replaced by one
	// calcProduct node (division operands get wrapped in calcInvert).
	first := 1
	for first+1 < len(terms) {
		// If this is a "*" or "/" operator
		if value, ok := terms[first].data.(*calcValue); ok && (value.token.Kind == css_lexer.TDelimAsterisk || value.token.Kind == css_lexer.TDelimSlash) {
			// Scan over the run
			last := first
			for last+3 < len(terms) {
				if value, ok := terms[last+2].data.(*calcValue); ok && (value.token.Kind == css_lexer.TDelimAsterisk || value.token.Kind == css_lexer.TDelimSlash) {
					last += 2
				} else {
					break
				}
			}

			// Generate a node for the run
			product := calcProduct{terms: make([]calcTermWithOp, (last-first)/2+2)}
			for i := range product.terms {
				term := terms[first+i*2-1]
				if i > 0 {
					op := terms[first+i*2-2].data.(*calcValue).token
					term.opLoc = op.Loc
					if op.Kind == css_lexer.TDelimSlash {
						term.data = &calcInvert{term: term}
					}
				}
				product.terms[i] = term
			}

			// Replace the run with a single node
			terms[first-1].data = &product
			terms = append(terms[:first], terms[last+2:]...)
			continue
		}
		first++
	}

	// Collect children into Sum and Negate nodes. Same structure as above,
	// but runs of "+"/"-" (with the mandatory surrounding whitespace) are
	// replaced by calcSum nodes, subtraction operands wrapped in calcNegate.
	first = 1
	for first+1 < len(terms) {
		// If this is a "+" or "-" operator
		if value, ok := terms[first].data.(*calcValue); ok && !value.isInvalidPlusOrMinus &&
			(value.token.Kind == css_lexer.TDelimPlus || value.token.Kind == css_lexer.TDelimMinus) {
			// Scan over the run
			last := first
			for last+3 < len(terms) {
				if value, ok := terms[last+2].data.(*calcValue); ok && !value.isInvalidPlusOrMinus &&
					(value.token.Kind == css_lexer.TDelimPlus || value.token.Kind == css_lexer.TDelimMinus) {
					last += 2
				} else {
					break
				}
			}

			// Generate a node for the run
			sum := calcSum{terms: make([]calcTermWithOp, (last-first)/2+2)}
			for i := range sum.terms {
				term := terms[first+i*2-1]
				if i > 0 {
					op := terms[first+i*2-2].data.(*calcValue).token
					term.opLoc = op.Loc
					if op.Kind == css_lexer.TDelimMinus {
						term.data = &calcNegate{term: term}
					}
				}
				sum.terms[i] = term
			}

			// Replace the run with a single node
			terms[first-1].data = &sum
			terms = append(terms[:first], terms[last+2:]...)
			continue
		}
		first++
	}

	// This only succeeds if everything reduces to a single term
	if len(terms) == 1 {
		return terms[0].data
	}
	return nil
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/css_parser/css_decls_font_weight.go | internal/css_parser/css_decls_font_weight.go | package css_parser
import (
"strings"
"github.com/evanw/esbuild/internal/css_ast"
"github.com/evanw/esbuild/internal/css_lexer"
)
// mangleFontWeight shortens a "font-weight" keyword to its shorter numeric
// equivalent ("normal" becomes "400" and "bold" becomes "700"). Tokens that
// are not identifiers, and unrecognized keywords, are returned unchanged.
func (p *parser) mangleFontWeight(token css_ast.Token) css_ast.Token {
	if token.Kind != css_lexer.TIdent {
		return token
	}
	lowered := strings.ToLower(token.Text)
	if lowered == "normal" {
		token.Text = "400"
		token.Kind = css_lexer.TNumber
	} else if lowered == "bold" {
		token.Text = "700"
		token.Kind = css_lexer.TNumber
	}
	return token
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/css_parser/css_decls_font.go | internal/css_parser/css_decls_font.go | package css_parser
import (
"strconv"
"strings"
"github.com/evanw/esbuild/internal/css_ast"
"github.com/evanw/esbuild/internal/css_lexer"
)
// Specification: https://drafts.csswg.org/css-fonts/#font-prop
// [ <font-style> || <font-variant-css2> || <font-weight> || <font-stretch-css3> ]? <font-size> [ / <line-height> ]? <font-family>
//
// mangleFont attempts to minify the token list of a "font" shorthand
// property. It is deliberately conservative: whenever anything unrecognized
// or invalid is encountered, the original "tokens" slice is returned
// unchanged so that unknown syntax is always preserved verbatim.
func (p *parser) mangleFont(tokens []css_ast.Token) []css_ast.Token {
	var result []css_ast.Token

	// Scan up to the font size
	pos := 0
	for ; pos < len(tokens); pos++ {
		token := tokens[pos]
		if isFontSize(token) {
			break
		}

		switch token.Kind {
		case css_lexer.TIdent:
			switch strings.ToLower(token.Text) {
			case "normal":
				// "All subproperties of the font property are first reset to their initial values"
				// This implies that "normal" doesn't do anything. Also all of the optional values
				// contain "normal" as an option and they are unordered so it's impossible to say
				// what property "normal" corresponds to. Just drop these tokens to save space.
				continue

			// <font-style>
			case "italic":
				// Empty case body: Go switch cases don't fall through, so this
				// exits the switch and reaches the shared append below.
			case "oblique":
				// "oblique" may be followed by an angle, which must be kept with it
				if pos+1 < len(tokens) && tokens[pos+1].IsAngle() {
					result = append(result, token, tokens[pos+1])
					pos++
					continue
				}

			// <font-variant-css2>
			case "small-caps":

			// <font-weight>
			case "bold", "bolder", "lighter":
				// "bold" can additionally be shortened to "700" here
				result = append(result, p.mangleFontWeight(token))
				continue

			// <font-stretch-css3>
			case "ultra-condensed", "extra-condensed", "condensed", "semi-condensed",
				"semi-expanded", "expanded", "extra-expanded", "ultra-expanded":

			default:
				// All other tokens are unrecognized, so we bail if we hit one
				return tokens
			}
			// Keep the recognized keyword as-is. This line is reached by the
			// empty cases above ("italic", "small-caps", the stretch keywords,
			// and "oblique" without a following angle).
			result = append(result, token)

		case css_lexer.TNumber:
			// A bare number in this position is validated against the
			// <font-weight> range from the specification:
			// "Only values greater than or equal to 1, and less than or equal to
			// 1000, are valid, and all other values are invalid."
			if value, err := strconv.ParseFloat(token.Text, 64); err != nil || value < 1 || value > 1000 {
				return tokens
			}
			result = append(result, token)

		default:
			// All other tokens are unrecognized, so we bail if we hit one
			return tokens
		}
	}

	// <font-size>
	if pos == len(tokens) {
		// We ran out of tokens without ever finding a font size, so this is
		// not a valid "font" shorthand
		return tokens
	}
	result = append(result, tokens[pos])
	pos++

	// / <line-height>
	if pos < len(tokens) && tokens[pos].Kind == css_lexer.TDelimSlash {
		if pos+1 == len(tokens) {
			// A trailing "/" with no line-height after it is invalid
			return tokens
		}
		result = append(result, tokens[pos], tokens[pos+1])
		pos += 2

		// Remove the whitespace around the "/" character
		if p.options.minifyWhitespace {
			result[len(result)-3].Whitespace &= ^css_ast.WhitespaceAfter
			result[len(result)-2].Whitespace = 0
			result[len(result)-1].Whitespace &= ^css_ast.WhitespaceBefore
		}
	}

	// <font-family>
	if family, ok := p.mangleFontFamily(tokens[pos:]); ok {
		// Make sure an unquoted family name stays separated from what came before it
		if len(result) > 0 && len(family) > 0 && family[0].Kind != css_lexer.TString {
			family[0].Whitespace |= css_ast.WhitespaceBefore
		}
		return append(result, family...)
	}
	// The font family was unrecognized, so leave everything alone
	return tokens
}
// fontSizeKeywords is the set of identifier values that the "font-size"
// property accepts as keywords. It's consulted by "isFontSize" to detect
// the <font-size> term inside the "font" shorthand.
var fontSizeKeywords = map[string]bool{
	// <absolute-size>: https://drafts.csswg.org/css-fonts/#valdef-font-size-absolute-size
	"xx-small":  true,
	"x-small":   true,
	"small":     true,
	"medium":    true,
	"large":     true,
	"x-large":   true,
	"xx-large":  true,
	"xxx-large": true,

	// <relative-size>: https://drafts.csswg.org/css-fonts/#valdef-font-size-relative-size
	"larger":  true,
	"smaller": true,
}
// Specification: https://drafts.csswg.org/css-fonts/#font-size-prop
//
// isFontSize reports whether this token can be the <font-size> term of the
// "font" shorthand: either a <length-percentage> or one of the size keywords.
func isFontSize(token css_ast.Token) bool {
	switch token.Kind {
	case css_lexer.TDimension, css_lexer.TPercentage:
		// <length-percentage>
		return true
	case css_lexer.TIdent:
		// <absolute-size> or <relative-size> (case-insensitive)
		return fontSizeKeywords[strings.ToLower(token.Text)]
	default:
		return false
	}
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/css_printer/css_printer_test.go | internal/css_printer/css_printer_test.go | package css_printer
import (
"strings"
"testing"
"github.com/evanw/esbuild/internal/ast"
"github.com/evanw/esbuild/internal/config"
"github.com/evanw/esbuild/internal/css_parser"
"github.com/evanw/esbuild/internal/logger"
"github.com/evanw/esbuild/internal/test"
)
// expectPrintedCommon parses "contents" as CSS, asserts that parsing produced
// no errors, prints the resulting AST with the given options, and asserts
// that the printed output equals "expected". Each case runs as a subtest.
func expectPrintedCommon(t *testing.T, name string, contents string, expected string, options Options) {
	t.Helper()
	t.Run(name, func(t *testing.T) {
		t.Helper()
		log := logger.NewDeferLog(logger.DeferLogNoVerboseOrDebug, nil)
		parseOptions := css_parser.OptionsFromConfig(config.LoaderCSS, &config.Options{
			MinifyWhitespace: options.MinifyWhitespace,
		})
		tree := css_parser.Parse(log, test.SourceForTest(contents), parseOptions)

		// Collect any parse errors and require that there are none
		var errText strings.Builder
		for _, msg := range log.Done() {
			if msg.Kind == logger.Error {
				errText.WriteString(msg.String(logger.OutputOptions{}, logger.TerminalInfo{}))
			}
		}
		test.AssertEqualWithDiff(t, errText.String(), "")

		symbols := ast.NewSymbolMap(1)
		symbols.SymbolsForSource[0] = tree.Symbols
		result := Print(tree, symbols, options)
		test.AssertEqualWithDiff(t, string(result.CSS), expected)
	})
}
// expectPrinted checks pretty-printed (default options) output.
func expectPrinted(t *testing.T, contents string, expected string) {
	t.Helper()
	var options Options
	expectPrintedCommon(t, contents, contents, expected, options)
}
// expectPrintedMinify checks whitespace-minified output.
func expectPrintedMinify(t *testing.T, contents string, expected string) {
	t.Helper()
	options := Options{MinifyWhitespace: true}
	expectPrintedCommon(t, contents+" [minified]", contents, expected, options)
}
// expectPrintedASCII checks output with non-ASCII characters escaped.
func expectPrintedASCII(t *testing.T, contents string, expected string) {
	t.Helper()
	options := Options{ASCIIOnly: true}
	expectPrintedCommon(t, contents+" [ascii]", contents, expected, options)
}
// expectPrintedString checks how a raw string value is quoted by the printer.
func expectPrintedString(t *testing.T, stringValue string, expected string) {
	t.Helper()
	t.Run(stringValue, func(t *testing.T) {
		t.Helper()
		var p printer
		p.printQuoted(stringValue, 0)
		test.AssertEqualWithDiff(t, string(p.css), expected)
	})
}
// TestStringQuote covers quote-character selection (prefer the quote that
// needs fewer escapes), escaping of backslashes and control characters, and
// defensive escaping of "</style>"-like sequences for inline HTML safety.
func TestStringQuote(t *testing.T) {
	expectPrintedString(t, "", "\"\"")
	expectPrintedString(t, "foo", "\"foo\"")
	expectPrintedString(t, "f\"o", "'f\"o'")
	expectPrintedString(t, "f'\"'o", "\"f'\\\"'o\"")
	expectPrintedString(t, "f\"'\"o", "'f\"\\'\"o'")
	expectPrintedString(t, "f\\o", "\"f\\\\o\"")
	expectPrintedString(t, "f\ro", "\"f\\do\"")
	expectPrintedString(t, "f\no", "\"f\\ao\"")
	expectPrintedString(t, "f\fo", "\"f\\co\"")
	expectPrintedString(t, "f\r\no", "\"f\\d\\ao\"")
	expectPrintedString(t, "f\r0", "\"f\\d 0\"")
	expectPrintedString(t, "f\n0", "\"f\\a 0\"")
	expectPrintedString(t, "f\n ", "\"f\\a \"")
	expectPrintedString(t, "f\n\t", "\"f\\a \t\"")
	expectPrintedString(t, "f\nf", "\"f\\a f\"")
	expectPrintedString(t, "f\nF", "\"f\\a F\"")
	expectPrintedString(t, "f\ng", "\"f\\ag\"")
	expectPrintedString(t, "f\nG", "\"f\\aG\"")
	expectPrintedString(t, "f\x01o", "\"f\x01o\"")
	expectPrintedString(t, "f\to", "\"f\to\"")
	expectPrintedString(t, "</script>", "\"</script>\"")
	expectPrintedString(t, "</style>", "\"<\\/style>\"")
	expectPrintedString(t, "</style", "\"<\\/style\"")
	expectPrintedString(t, "</STYLE", "\"<\\/STYLE\"")
	expectPrintedString(t, "</StYlE", "\"<\\/StYlE\"")
	expectPrintedString(t, ">/style", "\">/style\"")
	expectPrintedString(t, ">/STYLE", "\">/STYLE\"")
	expectPrintedString(t, ">/StYlE", "\">/StYlE\"")
}
// TestURLQuote covers when a url() token can stay unquoted (escaping spaces
// and parentheses) versus when it must fall back to a quoted string.
func TestURLQuote(t *testing.T) {
	expectPrinted(t, "* { background: url('foo') }", "* {\n background: url(foo);\n}\n")
	expectPrinted(t, "* { background: url('f o') }", "* {\n background: url(f\\ o);\n}\n")
	// NOTE(review): this case appears identical to the one above but expects a
	// different output — the inner whitespace was likely collapsed when this
	// file was extracted; verify the original inputs against upstream.
	expectPrinted(t, "* { background: url('f o') }", "* {\n background: url(\"f o\");\n}\n")
	expectPrinted(t, "* { background: url('foo)') }", "* {\n background: url(foo\\));\n}\n")
	expectPrinted(t, "* { background: url('(foo') }", "* {\n background: url(\\(foo);\n}\n")
	expectPrinted(t, "* { background: url('(foo)') }", "* {\n background: url(\"(foo)\");\n}\n")
	expectPrinted(t, "* { background: url('\"foo\"') }", "* {\n background: url('\"foo\"');\n}\n")
}
// TestImportant covers normalization of "!important" spacing and case.
func TestImportant(t *testing.T) {
	expectPrinted(t, "a { b: c!important }", "a {\n b: c !important;\n}\n")
	expectPrinted(t, "a { b: c!important; }", "a {\n b: c !important;\n}\n")
	expectPrinted(t, "a { b: c! important }", "a {\n b: c !important;\n}\n")
	expectPrinted(t, "a { b: c! important; }", "a {\n b: c !important;\n}\n")
	expectPrinted(t, "a { b: c ! important }", "a {\n b: c !important;\n}\n")
	expectPrinted(t, "a { b: c ! important; }", "a {\n b: c !important;\n}\n")
	expectPrinted(t, "a { b: c !IMPORTANT; }", "a {\n b: c !important;\n}\n")
	expectPrinted(t, "a { b: c !ImPoRtAnT; }", "a {\n b: c !important;\n}\n")
	expectPrintedMinify(t, "a { b: c !important }", "a{b:c!important}")
}
// TestSelector covers combinator whitespace, unknown pseudo-class argument
// printing, and preservation of invalid-but-parsed pseudo forms.
func TestSelector(t *testing.T) {
	expectPrintedMinify(t, "a + b c > d ~ e{}", "a+b c>d~e{}")

	expectPrinted(t, ":unknown( x (a+b), 'c' ) {}", ":unknown(x (a+b), \"c\") {\n}\n")
	expectPrinted(t, ":unknown( x (a-b), 'c' ) {}", ":unknown(x (a-b), \"c\") {\n}\n")
	expectPrinted(t, ":unknown( x (a,b), 'c' ) {}", ":unknown(x (a, b), \"c\") {\n}\n")
	expectPrinted(t, ":unknown( x ( a + b ), 'c' ) {}", ":unknown(x (a + b), \"c\") {\n}\n")
	expectPrinted(t, ":unknown( x ( a - b ), 'c' ) {}", ":unknown(x (a - b), \"c\") {\n}\n")
	expectPrinted(t, ":unknown( x ( a , b ), 'c' ) {}", ":unknown(x (a, b), \"c\") {\n}\n")

	expectPrintedMinify(t, ":unknown( x (a+b), 'c' ) {}", ":unknown(x (a+b),\"c\"){}")
	expectPrintedMinify(t, ":unknown( x (a-b), 'c' ) {}", ":unknown(x (a-b),\"c\"){}")
	expectPrintedMinify(t, ":unknown( x (a,b), 'c' ) {}", ":unknown(x (a,b),\"c\"){}")
	expectPrintedMinify(t, ":unknown( x ( a + b ), 'c' ) {}", ":unknown(x (a + b),\"c\"){}")
	expectPrintedMinify(t, ":unknown( x ( a - b ), 'c' ) {}", ":unknown(x (a - b),\"c\"){}")
	expectPrintedMinify(t, ":unknown( x ( a , b ), 'c' ) {}", ":unknown(x (a,b),\"c\"){}")

	// ":foo()" is a parse error, but should ideally still be preserved so they don't accidentally become valid
	expectPrinted(t, ":is {}", ":is {\n}\n")
	expectPrinted(t, ":is() {}", ":is() {\n}\n")
	expectPrinted(t, ":hover {}", ":hover {\n}\n")
	expectPrinted(t, ":hover() {}", ":hover() {\n}\n")
	expectPrintedMinify(t, ":is {}", ":is{}")
	expectPrintedMinify(t, ":is() {}", ":is(){}")
	expectPrintedMinify(t, ":hover {}", ":hover{}")
	expectPrintedMinify(t, ":hover() {}", ":hover(){}")
}
// TestNestedSelector covers whitespace around the nesting selector "&".
func TestNestedSelector(t *testing.T) {
	expectPrintedMinify(t, "a { &b {} }", "a{&b{}}")
	expectPrintedMinify(t, "a { & b {} }", "a{& b{}}")
	expectPrintedMinify(t, "a { & :b {} }", "a{& :b{}}")
	expectPrintedMinify(t, "& a & b & c {}", "& a & b & c{}")
}
// TestBadQualifiedRules covers printing of malformed qualified rules such as
// stray semicolons and "$"-prefixed selectors, which are kept as written.
func TestBadQualifiedRules(t *testing.T) {
	expectPrinted(t, ";", "; {\n}\n")
	expectPrinted(t, "$bad: rule;", "$bad: rule; {\n}\n")
	expectPrinted(t, "a {}; b {};", "a {\n}\n; b {\n}\n; {\n}\n")
	expectPrinted(t, "a { div.major { color: blue } color: red }", "a {\n div.major {\n color: blue;\n }\n color: red;\n}\n")
	expectPrinted(t, "a { div:hover { color: blue } color: red }", "a {\n div:hover {\n color: blue;\n }\n color: red;\n}\n")
	expectPrinted(t, "a { div:hover { color: blue }; color: red }", "a {\n div:hover {\n color: blue;\n }\n color: red;\n}\n")
	expectPrinted(t, "$bad{ color: red }", "$bad {\n color: red;\n}\n")
	expectPrinted(t, "$bad { color: red }", "$bad {\n color: red;\n}\n")
	expectPrinted(t, "$bad foo{ color: red }", "$bad foo {\n color: red;\n}\n")
	expectPrinted(t, "$bad foo { color: red }", "$bad foo {\n color: red;\n}\n")
	expectPrintedMinify(t, "$bad{ color: red }", "$bad{color:red}")
	expectPrintedMinify(t, "$bad { color: red }", "$bad{color:red}")
	expectPrintedMinify(t, "$bad foo{ color: red }", "$bad foo{color:red}")
	expectPrintedMinify(t, "$bad foo { color: red }", "$bad foo{color:red}")
}
// TestDeclaration covers whitespace handling inside unknown declaration
// values and pretty-printing of long comma-separated value lists.
func TestDeclaration(t *testing.T) {
	expectPrinted(t, "* { unknown: x (a+b) }", "* {\n unknown: x (a+b);\n}\n")
	expectPrinted(t, "* { unknown: x (a-b) }", "* {\n unknown: x (a-b);\n}\n")
	expectPrinted(t, "* { unknown: x (a,b) }", "* {\n unknown: x (a, b);\n}\n")
	expectPrinted(t, "* { unknown: x ( a + b ) }", "* {\n unknown: x (a + b);\n}\n")
	expectPrinted(t, "* { unknown: x ( a - b ) }", "* {\n unknown: x (a - b);\n}\n")
	expectPrinted(t, "* { unknown: x ( a , b ) }", "* {\n unknown: x (a, b);\n}\n")

	expectPrintedMinify(t, "* { unknown: x (a+b) }", "*{unknown:x (a+b)}")
	expectPrintedMinify(t, "* { unknown: x (a-b) }", "*{unknown:x (a-b)}")
	expectPrintedMinify(t, "* { unknown: x (a,b) }", "*{unknown:x (a,b)}")
	expectPrintedMinify(t, "* { unknown: x ( a + b ) }", "*{unknown:x (a + b)}")
	expectPrintedMinify(t, "* { unknown: x ( a - b ) }", "*{unknown:x (a - b)}")
	expectPrintedMinify(t, "* { unknown: x ( a , b ) }", "*{unknown:x (a,b)}")

	// Pretty-print long lists in declarations
	expectPrinted(t, "a { b: c, d }", "a {\n b: c, d;\n}\n")
	expectPrinted(t, "a { b: c, (d, e) }", "a {\n b: c, (d, e);\n}\n")
	expectPrinted(t, "a { b: c, d, e }", "a {\n b:\n c,\n d,\n e;\n}\n")
	expectPrinted(t, "a { b: c, (d, e), f }", "a {\n b:\n c,\n (d, e),\n f;\n}\n")
	expectPrintedMinify(t, "a { b: c, d }", "a{b:c,d}")
	expectPrintedMinify(t, "a { b: c, (d, e) }", "a{b:c,(d,e)}")
	expectPrintedMinify(t, "a { b: c, d, e }", "a{b:c,d,e}")
	expectPrintedMinify(t, "a { b: c, (d, e), f }", "a{b:c,(d,e),f}")
}
// TestVerbatimWhitespace covers custom properties ("--x"), whose values keep
// their whitespace verbatim even when minifying, plus "@supports" conditions
// that contain custom-property syntax.
func TestVerbatimWhitespace(t *testing.T) {
	expectPrinted(t, "*{--x:}", "* {\n --x:;\n}\n")
	expectPrinted(t, "*{--x: }", "* {\n --x: ;\n}\n")
	expectPrinted(t, "* { --x:; }", "* {\n --x:;\n}\n")
	expectPrinted(t, "* { --x: ; }", "* {\n --x: ;\n}\n")
	expectPrintedMinify(t, "*{--x:}", "*{--x:}")
	expectPrintedMinify(t, "*{--x: }", "*{--x: }")
	expectPrintedMinify(t, "* { --x:; }", "*{--x:}")
	expectPrintedMinify(t, "* { --x: ; }", "*{--x: }")

	expectPrinted(t, "*{--x:!important}", "* {\n --x:!important;\n}\n")
	expectPrinted(t, "*{--x: !important}", "* {\n --x: !important;\n}\n")
	expectPrinted(t, "*{ --x:!important }", "* {\n --x:!important;\n}\n")
	expectPrinted(t, "*{ --x: !important }", "* {\n --x: !important;\n}\n")
	expectPrinted(t, "* { --x:!important; }", "* {\n --x:!important;\n}\n")
	expectPrinted(t, "* { --x: !important; }", "* {\n --x: !important;\n}\n")
	expectPrinted(t, "* { --x:! important ; }", "* {\n --x:!important;\n}\n")
	expectPrinted(t, "* { --x: ! important ; }", "* {\n --x: !important;\n}\n")
	expectPrintedMinify(t, "*{--x:!important}", "*{--x:!important}")
	expectPrintedMinify(t, "*{--x: !important}", "*{--x: !important}")
	expectPrintedMinify(t, "*{ --x:!important }", "*{--x:!important}")
	expectPrintedMinify(t, "*{ --x: !important }", "*{--x: !important}")
	expectPrintedMinify(t, "* { --x:!important; }", "*{--x:!important}")
	expectPrintedMinify(t, "* { --x: !important; }", "*{--x: !important}")
	expectPrintedMinify(t, "* { --x:! important ; }", "*{--x:!important}")
	expectPrintedMinify(t, "* { --x: ! important ; }", "*{--x: !important}")

	expectPrinted(t, "* { --x:y; }", "* {\n --x:y;\n}\n")
	expectPrinted(t, "* { --x: y; }", "* {\n --x: y;\n}\n")
	expectPrinted(t, "* { --x:y ; }", "* {\n --x:y ;\n}\n")
	expectPrinted(t, "* { --x:y, ; }", "* {\n --x:y, ;\n}\n")
	expectPrinted(t, "* { --x: var(y,); }", "* {\n --x: var(y,);\n}\n")
	expectPrinted(t, "* { --x: var(y, ); }", "* {\n --x: var(y, );\n}\n")
	expectPrintedMinify(t, "* { --x:y; }", "*{--x:y}")
	expectPrintedMinify(t, "* { --x: y; }", "*{--x: y}")
	expectPrintedMinify(t, "* { --x:y ; }", "*{--x:y }")
	expectPrintedMinify(t, "* { --x:y, ; }", "*{--x:y, }")
	expectPrintedMinify(t, "* { --x: var(y,); }", "*{--x: var(y,)}")
	expectPrintedMinify(t, "* { --x: var(y, ); }", "*{--x: var(y, )}")

	expectPrinted(t, "* { --x:(y); }", "* {\n --x:(y);\n}\n")
	expectPrinted(t, "* { --x:(y) ; }", "* {\n --x:(y) ;\n}\n")
	expectPrinted(t, "* { --x: (y); }", "* {\n --x: (y);\n}\n")
	expectPrinted(t, "* { --x:(y ); }", "* {\n --x:(y );\n}\n")
	expectPrinted(t, "* { --x:( y); }", "* {\n --x:( y);\n}\n")
	expectPrintedMinify(t, "* { --x:(y); }", "*{--x:(y)}")
	expectPrintedMinify(t, "* { --x:(y) ; }", "*{--x:(y) }")
	expectPrintedMinify(t, "* { --x: (y); }", "*{--x: (y)}")
	expectPrintedMinify(t, "* { --x:(y ); }", "*{--x:(y )}")
	expectPrintedMinify(t, "* { --x:( y); }", "*{--x:( y)}")

	expectPrinted(t, "* { --x:f(y); }", "* {\n --x:f(y);\n}\n")
	expectPrinted(t, "* { --x:f(y) ; }", "* {\n --x:f(y) ;\n}\n")
	expectPrinted(t, "* { --x: f(y); }", "* {\n --x: f(y);\n}\n")
	expectPrinted(t, "* { --x:f(y ); }", "* {\n --x:f(y );\n}\n")
	expectPrinted(t, "* { --x:f( y); }", "* {\n --x:f( y);\n}\n")
	expectPrintedMinify(t, "* { --x:f(y); }", "*{--x:f(y)}")
	expectPrintedMinify(t, "* { --x:f(y) ; }", "*{--x:f(y) }")
	expectPrintedMinify(t, "* { --x: f(y); }", "*{--x: f(y)}")
	expectPrintedMinify(t, "* { --x:f(y ); }", "*{--x:f(y )}")
	expectPrintedMinify(t, "* { --x:f( y); }", "*{--x:f( y)}")

	expectPrinted(t, "* { --x:[y]; }", "* {\n --x:[y];\n}\n")
	expectPrinted(t, "* { --x:[y] ; }", "* {\n --x:[y] ;\n}\n")
	expectPrinted(t, "* { --x: [y]; }", "* {\n --x: [y];\n}\n")
	expectPrinted(t, "* { --x:[y ]; }", "* {\n --x:[y ];\n}\n")
	expectPrinted(t, "* { --x:[ y]; }", "* {\n --x:[ y];\n}\n")
	expectPrintedMinify(t, "* { --x:[y]; }", "*{--x:[y]}")
	expectPrintedMinify(t, "* { --x:[y] ; }", "*{--x:[y] }")
	expectPrintedMinify(t, "* { --x: [y]; }", "*{--x: [y]}")
	expectPrintedMinify(t, "* { --x:[y ]; }", "*{--x:[y ]}")
	expectPrintedMinify(t, "* { --x:[ y]; }", "*{--x:[ y]}")

	// Note: These cases now behave like qualified rules
	expectPrinted(t, "* { --x:{y}; }", "* {\n --x: {\n y;\n }\n}\n")
	expectPrinted(t, "* { --x:{y} ; }", "* {\n --x: {\n y;\n }\n}\n")
	expectPrinted(t, "* { --x: {y}; }", "* {\n --x: {\n y;\n }\n}\n")
	expectPrinted(t, "* { --x:{y }; }", "* {\n --x: {\n y;\n }\n}\n")
	expectPrinted(t, "* { --x:{ y}; }", "* {\n --x: {\n y;\n }\n}\n")

	// Note: These cases now behave like qualified rules
	expectPrintedMinify(t, "* { --x:{y}; }", "*{--x:{y}}")
	expectPrintedMinify(t, "* { --x:{y} ; }", "*{--x:{y}}")
	expectPrintedMinify(t, "* { --x: {y}; }", "*{--x:{y}}")
	expectPrintedMinify(t, "* { --x:{y }; }", "*{--x:{y}}")
	expectPrintedMinify(t, "* { --x:{ y}; }", "*{--x:{y}}")

	expectPrintedMinify(t, "@supports ( --x : y , z ) { a { color: red; } }", "@supports ( --x : y , z ){a{color:red}}")
	expectPrintedMinify(t, "@supports ( --x : ) { a { color: red; } }", "@supports ( --x : ){a{color:red}}")
	expectPrintedMinify(t, "@supports (--x: ) { a { color: red; } }", "@supports (--x: ){a{color:red}}")
	expectPrintedMinify(t, "@supports ( --x y , z ) { a { color: red; } }", "@supports (--x y,z){a{color:red}}")
	expectPrintedMinify(t, "@supports ( --x ) { a { color: red; } }", "@supports (--x){a{color:red}}")
	expectPrintedMinify(t, "@supports ( ) { a { color: red; } }", "@supports (){a{color:red}}")
	expectPrintedMinify(t, "@supports ( . --x : y , z ) { a { color: red; } }", "@supports (. --x : y,z){a{color:red}}")
}
// TestAtRule covers printing of unknown at-rules with and without blocks.
func TestAtRule(t *testing.T) {
	expectPrintedMinify(t, "@unknown;", "@unknown;")
	expectPrintedMinify(t, "@unknown x;", "@unknown x;")
	expectPrintedMinify(t, "@unknown{}", "@unknown{}")
	expectPrintedMinify(t, "@unknown{\na: b;\nc: d;\n}", "@unknown{a: b; c: d;}")

	expectPrinted(t, "@unknown x{}", "@unknown x {}\n")
	expectPrinted(t, "@unknown x {}", "@unknown x {}\n")
	expectPrintedMinify(t, "@unknown x{}", "@unknown x{}")
	expectPrintedMinify(t, "@unknown x {}", "@unknown x{}")

	expectPrinted(t, "@unknown x ( a + b ) ;", "@unknown x (a + b);\n")
	expectPrinted(t, "@unknown x ( a - b ) ;", "@unknown x (a - b);\n")
	expectPrinted(t, "@unknown x ( a , b ) ;", "@unknown x (a, b);\n")
	expectPrintedMinify(t, "@unknown x ( a + b ) ;", "@unknown x (a + b);")
	expectPrintedMinify(t, "@unknown x ( a - b ) ;", "@unknown x (a - b);")
	expectPrintedMinify(t, "@unknown x ( a , b ) ;", "@unknown x (a,b);")
}
// TestAtCharset checks that "@charset" keeps its mandatory space and quotes.
func TestAtCharset(t *testing.T) {
	expectPrinted(t, "@charset \"UTF-8\";", "@charset \"UTF-8\";\n")
	expectPrintedMinify(t, "@charset \"UTF-8\";", "@charset \"UTF-8\";")
}
// TestAtImport covers "@import" path normalization (url() becomes a string)
// and the printing of layer/supports/media import conditions.
func TestAtImport(t *testing.T) {
	expectPrinted(t, "@import\"foo.css\";", "@import \"foo.css\";\n")
	expectPrinted(t, "@import \"foo.css\";", "@import \"foo.css\";\n")
	expectPrinted(t, "@import url(foo.css);", "@import \"foo.css\";\n")
	expectPrinted(t, "@import url(\"foo.css\");", "@import \"foo.css\";\n")
	expectPrinted(t, "@import url(\"foo.css\") (a < 1);", "@import \"foo.css\" (a < 1);\n")
	expectPrinted(t, "@import url(\"foo.css\") supports(foo) (a < 1);", "@import \"foo.css\" supports(foo) (a < 1);\n")
	expectPrinted(t, "@import url(\"foo.css\") layer;", "@import \"foo.css\" layer;\n")
	expectPrinted(t, "@import url(\"foo.css\") layer (a < 1);", "@import \"foo.css\" layer (a < 1);\n")
	expectPrinted(t, "@import url(\"foo.css\") layer supports(foo);", "@import \"foo.css\" layer supports(foo);\n")
	expectPrinted(t, "@import url(\"foo.css\") layer supports(foo) (a < 1);", "@import \"foo.css\" layer supports(foo) (a < 1);\n")

	expectPrintedMinify(t, "@import\"foo.css\";", "@import\"foo.css\";")
	expectPrintedMinify(t, "@import \"foo.css\";", "@import\"foo.css\";")
	expectPrintedMinify(t, "@import url(foo.css);", "@import\"foo.css\";")
	expectPrintedMinify(t, "@import url(\"foo.css\");", "@import\"foo.css\";")
	expectPrintedMinify(t, "@import url(\"foo.css\") (a < 1);", "@import\"foo.css\"(a<1);")
	expectPrintedMinify(t, "@import url(\"foo.css\") supports(foo) (a < 1);", "@import\"foo.css\"supports(foo) (a<1);")
	expectPrintedMinify(t, "@import url(\"foo.css\") layer;", "@import\"foo.css\"layer;")
	expectPrintedMinify(t, "@import url(\"foo.css\") layer (a < 1);", "@import\"foo.css\"layer (a<1);")
	expectPrintedMinify(t, "@import url(\"foo.css\") layer supports(foo);", "@import\"foo.css\"layer supports(foo);")
	expectPrintedMinify(t, "@import url(\"foo.css\") layer supports(foo) (a < 1);", "@import\"foo.css\"layer supports(foo) (a<1);")
}
// TestAtKeyframes covers minified "@keyframes" with percentage and
// from/to keyframe selectors.
func TestAtKeyframes(t *testing.T) {
	expectPrintedMinify(t, "@keyframes name { 0%, 50% { color: red } 25%, 75% { color: blue } }",
		"@keyframes name{0%,50%{color:red}25%,75%{color:blue}}")
	expectPrintedMinify(t, "@keyframes name { from { color: red } to { color: blue } }",
		"@keyframes name{from{color:red}to{color:blue}}")
}
// TestAtMedia covers "@media" query printing, including which spaces around
// "and"/"or"/"not" and range comparisons can be removed when minifying.
func TestAtMedia(t *testing.T) {
	expectPrinted(t, "@media screen { div { color: red } }", "@media screen {\n div {\n color: red;\n }\n}\n")
	expectPrinted(t, "@media screen{div{color:red}}", "@media screen {\n div {\n color: red;\n }\n}\n")
	expectPrintedMinify(t, "@media screen { div { color: red } }", "@media screen{div{color:red}}")
	expectPrintedMinify(t, "@media screen{div{color:red}}", "@media screen{div{color:red}}")
	expectPrintedMinify(t, "@media (a) {div{color:red}}", "@media(a){div{color:red}}")
	expectPrintedMinify(t, "@media (a) or (b) {div{color:red}}", "@media(a)or (b){div{color:red}}")
	expectPrintedMinify(t, "@media (a) and (b) {div{color:red}}", "@media(a)and (b){div{color:red}}")
	expectPrintedMinify(t, "@media not a {div{color:red}}", "@media not a{div{color:red}}")
	expectPrintedMinify(t, "@media not a and (b) and (c) {div{color:red}}", "@media not a and (b)and (c){div{color:red}}")
	expectPrintedMinify(t, "@media not (a) {div{color:red}}", "@media not (a){div{color:red}}")
	expectPrintedMinify(t, "@media not ( (a) or (b) ) {div{color:red}}", "@media not ((a)or (b)){div{color:red}}")
	expectPrintedMinify(t, "@media not ( (a) and (b) ) {div{color:red}}", "@media not ((a)and (b)){div{color:red}}")
	expectPrintedMinify(t, "@media (width < 2px) {div{color:red}}", "@media(width<2px){div{color:red}}")
	expectPrintedMinify(t, "@media (1px < width) {div{color:red}}", "@media(1px<width){div{color:red}}")
	expectPrintedMinify(t, "@media (1px < width < 2px) {div{color:red}}", "@media(1px<width<2px){div{color:red}}")
	expectPrintedMinify(t, "@media junk(1, 2, 3) {div{color:red}}", "@media junk(1,2,3){div{color:red}}")
}
// TestAtFontFace covers "@font-face" printing with quoted family names and
// url()/format() source descriptors.
func TestAtFontFace(t *testing.T) {
	expectPrinted(t, "@font-face { font-family: 'Open Sans'; src: url('OpenSans.woff') format('woff') }",
		"@font-face {\n font-family: \"Open Sans\";\n src: url(OpenSans.woff) format(\"woff\");\n}\n")
	expectPrintedMinify(t, "@font-face { font-family: 'Open Sans'; src: url('OpenSans.woff') format('woff') }",
		"@font-face{font-family:\"Open Sans\";src:url(OpenSans.woff) format(\"woff\")}")
}
// TestAtPage covers "@page" with and without a page-selector pseudo-class.
func TestAtPage(t *testing.T) {
	expectPrinted(t, "@page { margin: 1cm }", "@page {\n margin: 1cm;\n}\n")
	expectPrinted(t, "@page :first { margin: 1cm }", "@page :first {\n margin: 1cm;\n}\n")
	expectPrintedMinify(t, "@page { margin: 1cm }", "@page{margin:1cm}")
	expectPrintedMinify(t, "@page :first { margin: 1cm }", "@page :first{margin:1cm}")
}
// TestMsGridColumnsWhitespace covers the legacy "-ms-grid-columns" repeat
// syntax, where whitespace between ")" and "[" would change the meaning.
func TestMsGridColumnsWhitespace(t *testing.T) {
	// Must not insert a space between the "]" and the "("
	expectPrinted(t, "div { -ms-grid-columns: (1fr)[3] }", "div {\n -ms-grid-columns: (1fr)[3];\n}\n")
	expectPrinted(t, "div { -ms-grid-columns: 1fr (20px 1fr)[3] }", "div {\n -ms-grid-columns: 1fr (20px 1fr)[3];\n}\n")
	expectPrintedMinify(t, "div { -ms-grid-columns: (1fr)[3] }", "div{-ms-grid-columns:(1fr)[3]}")
	expectPrintedMinify(t, "div { -ms-grid-columns: 1fr (20px 1fr)[3] }", "div{-ms-grid-columns:1fr (20px 1fr)[3]}")
}
// TestASCII covers ASCII-only output: non-ASCII code points in identifiers,
// strings, and URLs are escaped as "\XXXXXX", including the trailing-space
// rules that disambiguate escapes from following hex digits.
func TestASCII(t *testing.T) {
	expectPrintedASCII(t, "* { background: url(🐈) }", "* {\n background: url(\\1f408);\n}\n")
	expectPrintedASCII(t, "* { background: url(🐈6) }", "* {\n background: url(\\1f408 6);\n}\n")
	expectPrintedASCII(t, "* { background: url('🐈') }", "* {\n background: url(\\1f408);\n}\n")
	expectPrintedASCII(t, "* { background: url('🐈6') }", "* {\n background: url(\\1f408 6);\n}\n")
	expectPrintedASCII(t, "* { background: url('(🐈)') }", "* {\n background: url(\"(\\1f408)\");\n}\n")
	expectPrintedASCII(t, "* { background: url('(🐈6)') }", "* {\n background: url(\"(\\1f408 6)\");\n}\n")

	expectPrintedASCII(t, "div { 🐈: 🐈('🐈') }", "div {\n \\1f408: \\1f408(\"\\1f408\");\n}\n")
	expectPrintedASCII(t, "div { 🐈 : 🐈 ('🐈 ') }", "div {\n \\1f408: \\1f408 (\"\\1f408 \");\n}\n")
	expectPrintedASCII(t, "div { 🐈6: 🐈6('🐈6') }", "div {\n \\1f408 6: \\1f408 6(\"\\1f408 6\");\n}\n")

	expectPrintedASCII(t, "@🐈;", "@\\1f408;\n")
	expectPrintedASCII(t, "@🐈 {}", "@\\1f408 {}\n")
	expectPrintedASCII(t, "@🐈 x {}", "@\\1f408 x {}\n")

	expectPrintedASCII(t, "#🐈#x {}", "#\\1f408#x {\n}\n")
	expectPrintedASCII(t, "#🐈 #x {}", "#\\1f408 #x {\n}\n")
	expectPrintedASCII(t, "#🐈::x {}", "#\\1f408::x {\n}\n")
	expectPrintedASCII(t, "#🐈 ::x {}", "#\\1f408 ::x {\n}\n")
	expectPrintedASCII(t, ".🐈.x {}", ".\\1f408.x {\n}\n")
	expectPrintedASCII(t, ".🐈 .x {}", ".\\1f408 .x {\n}\n")
	expectPrintedASCII(t, ".🐈::x {}", ".\\1f408::x {\n}\n")
	expectPrintedASCII(t, ".🐈 ::x {}", ".\\1f408 ::x {\n}\n")
	expectPrintedASCII(t, "🐈|🐈.x {}", "\\1f408|\\1f408.x {\n}\n")
	expectPrintedASCII(t, "🐈|🐈 .x {}", "\\1f408|\\1f408 .x {\n}\n")
	expectPrintedASCII(t, "🐈|🐈::x {}", "\\1f408|\\1f408::x {\n}\n")
	expectPrintedASCII(t, "🐈|🐈 ::x {}", "\\1f408|\\1f408 ::x {\n}\n")
	expectPrintedASCII(t, "::🐈:x {}", "::\\1f408:x {\n}\n")
	expectPrintedASCII(t, "::🐈 :x {}", "::\\1f408 :x {\n}\n")
	expectPrintedASCII(t, "[🐈] {}", "[\\1f408] {\n}\n")
	expectPrintedASCII(t, "[🐈=🐈] {}", "[\\1f408=\\1f408] {\n}\n")
	expectPrintedASCII(t, "[🐈|🐈=🐈] {}", "[\\1f408|\\1f408=\\1f408] {\n}\n")

	// A space must be consumed after an escaped code point even with six digits
	expectPrintedASCII(t, ".\\10FFF abc:after { content: '\\10FFF abc' }", ".\\10fff abc:after {\n content: \"\\10fff abc\";\n}\n")
	expectPrintedASCII(t, ".\U00010FFFabc:after { content: '\U00010FFFabc' }", ".\\10fff abc:after {\n content: \"\\10fff abc\";\n}\n")
	expectPrintedASCII(t, ".\\10FFFFabc:after { content: '\\10FFFFabc' }", ".\\10ffffabc:after {\n content: \"\\10ffffabc\";\n}\n")
	expectPrintedASCII(t, ".\\10FFFF abc:after { content: '\\10FFFF abc' }", ".\\10ffffabc:after {\n content: \"\\10ffffabc\";\n}\n")
	expectPrintedASCII(t, ".\U0010FFFFabc:after { content: '\U0010FFFFabc' }", ".\\10ffffabc:after {\n content: \"\\10ffffabc\";\n}\n")

	// This character should always be escaped
	expectPrinted(t, ".\\FEFF:after { content: '\uFEFF' }", ".\\feff:after {\n content: \"\\feff\";\n}\n")
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/css_printer/css_printer.go | internal/css_printer/css_printer.go | package css_printer
import (
"fmt"
"strings"
"unicode/utf8"
"github.com/evanw/esbuild/internal/ast"
"github.com/evanw/esbuild/internal/compat"
"github.com/evanw/esbuild/internal/config"
"github.com/evanw/esbuild/internal/css_ast"
"github.com/evanw/esbuild/internal/css_lexer"
"github.com/evanw/esbuild/internal/helpers"
"github.com/evanw/esbuild/internal/logger"
"github.com/evanw/esbuild/internal/sourcemap"
)
// quoteForURL is a sentinel value used in place of a real quote character.
// NOTE(review): its consumers are outside this excerpt — it presumably marks
// text being printed as an unquoted url(...) token; confirm against the
// quote-printing helpers before relying on this.
const quoteForURL byte = 0
// printer accumulates all mutable state while printing a CSS AST: the output
// byte buffer, extracted legal comments, metafile JSON fragments, and the
// source map chunk builder.
type printer struct {
	options       Options
	symbols       ast.SymbolMap
	importRecords []ast.ImportRecord
	css           []byte // the output buffer

	// Deduplicates legal comments when they are extracted out of the output;
	// allocated lazily the first time a legal comment is seen
	hasLegalComment        map[string]struct{}
	extractedLegalComments []string

	// JSON fragments for the metafile "imports" array (see
	// recordImportPathForMetafile)
	jsonMetadataImports []string

	builder sourcemap.ChunkBuilder

	// NOTE(review): used by code outside this excerpt — verify their meaning
	// against the rest of this file before documenting further
	oldLineStart int
	oldLineEnd   int
}
// Options configures a single call to Print.
type Options struct {
	// This will be present if the input file had a source map. In that case we
	// want to map all the way back to the original input file(s).
	InputSourceMap *sourcemap.SourceMap

	// If we're writing out a source map, this table of line start indices lets
	// us do binary search on to figure out what line a given AST node came from
	LineOffsetTables []sourcemap.LineOffsetTable

	// Local symbol renaming results go here
	LocalNames map[ast.Ref]string

	// When > 0, the printer inserts newlines to try to keep output lines
	// under this many bytes
	LineLimit int

	InputSourceIndex    uint32
	UnsupportedFeatures compat.CSSFeature

	// When true, optional whitespace and indentation are omitted
	MinifyWhitespace bool

	// When true, non-ASCII characters are escaped in the output
	ASCIIOnly bool

	// When not SourceMapNone, Print also generates a source map chunk
	SourceMap config.SourceMap

	// When true, source map mappings are recorded while printing
	AddSourceMappings bool

	// Controls whether legal comments are kept inline, extracted, or dropped
	LegalComments config.LegalComments

	// When true, JSON metafile fragments are collected for each import record
	NeedsMetafile bool
}
// PrintResult is everything produced by a single call to Print.
type PrintResult struct {
	CSS                    []byte   // the printed CSS output
	ExtractedLegalComments []string // legal comments pulled out of the output
	JSONMetadataImports    []string // metafile JSON fragments, one per import

	// This source map chunk just contains the VLQ-encoded offsets for the "CSS"
	// field above. It's not a full source map. The bundler will be joining many
	// source map chunks together to form the final source map.
	SourceMapChunk sourcemap.Chunk
}
// Print converts a CSS AST into output bytes. Depending on the options it
// also collects extracted legal comments, metafile import fragments, and a
// source map chunk for the printed output.
func Print(tree css_ast.AST, symbols ast.SymbolMap, options Options) PrintResult {
	pr := printer{
		options:       options,
		symbols:       symbols,
		importRecords: tree.ImportRecords,
		builder:       sourcemap.MakeChunkBuilder(options.InputSourceMap, options.LineOffsetTables, options.ASCIIOnly),
	}

	// Print every top-level rule at indentation level zero
	for _, rule := range tree.Rules {
		pr.printRule(rule, 0, false)
	}

	result := PrintResult{
		CSS:                    pr.css,
		ExtractedLegalComments: pr.extractedLegalComments,
		JSONMetadataImports:    pr.jsonMetadataImports,
	}

	// Generating the source map chunk is expensive, so only do it when a
	// source map was actually requested. Skipping this sped up end-to-end
	// parsing and printing of a large CSS file from 66ms to 52ms (around
	// 25% faster).
	if options.SourceMap != config.SourceMapNone {
		result.SourceMapChunk = pr.builder.GenerateChunk(pr.css)
	}
	return result
}
// recordImportPathForMetafile appends a JSON fragment describing one import
// record to the metafile data, but only when a metafile was requested.
// Records that are NOT flagged ShouldNotBeExternalInMetafile additionally
// get an "external": true field.
//
// NOTE(review): the spacing inside these JSON literals determines the
// metafile's indentation and may have been altered by whitespace collapsing
// in this copy — verify the literal bytes against upstream.
func (p *printer) recordImportPathForMetafile(importRecordIndex uint32) {
	if p.options.NeedsMetafile {
		record := p.importRecords[importRecordIndex]
		external := ""
		if (record.Flags & ast.ShouldNotBeExternalInMetafile) == 0 {
			external = ",\n \"external\": true"
		}
		p.jsonMetadataImports = append(p.jsonMetadataImports, fmt.Sprintf("\n {\n \"path\": %s,\n \"kind\": %s%s\n }",
			helpers.QuoteForJSON(record.Path.Text, p.options.ASCIIOnly),
			helpers.QuoteForJSON(record.Kind.StringForMetafile(), p.options.ASCIIOnly),
			external))
	}
}
// printRule prints a single (possibly nested) CSS rule at the given
// indentation level. "omitTrailingSemicolon" suppresses the trailing ";" on
// declarations (used for the last declaration of a minified block).
func (p *printer) printRule(rule css_ast.Rule, indent int32, omitTrailingSemicolon bool) {
	// Legal comments may be dropped entirely or extracted for later emission
	// depending on the configured legal comments mode
	if r, ok := rule.Data.(*css_ast.RComment); ok {
		switch p.options.LegalComments {
		case config.LegalCommentsNone:
			return
		case config.LegalCommentsEndOfFile,
			config.LegalCommentsLinkedWithComment,
			config.LegalCommentsExternalWithoutComment:
			// Don't record the same legal comment more than once per file
			if p.hasLegalComment == nil {
				p.hasLegalComment = make(map[string]struct{})
			} else if _, ok := p.hasLegalComment[r.Text]; ok {
				return
			}
			p.hasLegalComment[r.Text] = struct{}{}
			p.extractedLegalComments = append(p.extractedLegalComments, r.Text)
			return
		}
	}
	// When a line-length limit is active, wrap before starting this rule if needed
	if p.options.LineLimit > 0 {
		p.printNewlinePastLineLimit(indent)
	}
	if p.options.AddSourceMappings {
		shouldPrintMapping := true
		if indent == 0 || p.options.MinifyWhitespace {
			switch rule.Data.(type) {
			case *css_ast.RSelector, *css_ast.RQualified, *css_ast.RBadDeclaration:
				// These rules will begin with a potentially more accurate mapping. We
				// shouldn't print a mapping here if there's no indent in between this
				// mapping and the rule.
				shouldPrintMapping = false
			}
		}
		if shouldPrintMapping {
			p.builder.AddSourceMapping(rule.Loc, "", p.css)
		}
	}
	if !p.options.MinifyWhitespace {
		p.printIndent(indent)
	}
	// Dispatch on the concrete rule type
	switch r := rule.Data.(type) {
	case *css_ast.RAtCharset:
		// It's not valid to remove the space in between these two tokens
		p.print("@charset ")
		// It's not valid to print the string with single quotes
		p.printQuotedWithQuote(r.Encoding, '"', 0)
		p.print(";")
	case *css_ast.RAtImport:
		if p.options.MinifyWhitespace {
			p.print("@import")
		} else {
			p.print("@import ")
		}
		record := p.importRecords[r.ImportRecordIndex]
		var flags printQuotedFlags
		// Paths containing a unique key will be substituted later, so they
		// must not be split across lines by the line-length limit
		if record.Flags.Has(ast.ContainsUniqueKey) {
			flags |= printQuotedNoWrap
		}
		p.printQuoted(record.Path.Text, flags)
		p.recordImportPathForMetafile(r.ImportRecordIndex)
		// Print optional "layer(...)", "supports(...)", and media query conditions
		if conditions := r.ImportConditions; conditions != nil {
			space := !p.options.MinifyWhitespace
			if len(conditions.Layers) > 0 {
				if space {
					p.print(" ")
				}
				p.printTokens(conditions.Layers, printTokensOpts{})
				space = true
			}
			if len(conditions.Supports) > 0 {
				if space {
					p.print(" ")
				}
				p.printTokens(conditions.Supports, printTokensOpts{})
				space = true
			}
			if len(conditions.Queries) > 0 {
				if space {
					p.print(" ")
				}
				for i, query := range conditions.Queries {
					if i > 0 {
						if p.options.MinifyWhitespace {
							p.print(",")
						} else {
							p.print(", ")
						}
					}
					p.printMediaQuery(query, 0)
				}
			}
		}
		p.print(";")
	case *css_ast.RAtKeyframes:
		p.print("@")
		p.printIdent(r.AtToken, identNormal, mayNeedWhitespaceAfter)
		p.print(" ")
		p.printSymbol(r.Name.Loc, r.Name.Ref, identNormal, canDiscardWhitespaceAfter)
		if !p.options.MinifyWhitespace {
			p.print(" ")
		}
		if p.options.MinifyWhitespace {
			p.print("{")
		} else {
			p.print("{\n")
		}
		indent++
		// Print each "from/to/%" block inside the keyframes rule
		for _, block := range r.Blocks {
			if p.options.AddSourceMappings {
				p.builder.AddSourceMapping(block.Loc, "", p.css)
			}
			if !p.options.MinifyWhitespace {
				p.printIndent(indent)
			}
			for i, sel := range block.Selectors {
				if i > 0 {
					if p.options.MinifyWhitespace {
						p.print(",")
					} else {
						p.print(", ")
					}
				}
				p.print(sel)
			}
			if !p.options.MinifyWhitespace {
				p.print(" ")
			}
			p.printRuleBlock(block.Rules, indent, block.CloseBraceLoc)
			if !p.options.MinifyWhitespace {
				p.print("\n")
			}
		}
		indent--
		// A zero close brace location means it's unknown, so no mapping is added
		if p.options.AddSourceMappings && r.CloseBraceLoc.Start != 0 {
			p.builder.AddSourceMapping(r.CloseBraceLoc, "", p.css)
		}
		if !p.options.MinifyWhitespace {
			p.printIndent(indent)
		}
		p.print("}")
	case *css_ast.RKnownAt:
		p.print("@")
		whitespace := mayNeedWhitespaceAfter
		if len(r.Prelude) == 0 {
			whitespace = canDiscardWhitespaceAfter
		}
		p.printIdent(r.AtToken, identNormal, whitespace)
		if (!p.options.MinifyWhitespace && r.Rules != nil) || len(r.Prelude) > 0 {
			p.print(" ")
		}
		p.printTokens(r.Prelude, printTokensOpts{})
		// A nil rule list means this at-rule has no block (ends with ";")
		if r.Rules == nil {
			p.print(";")
		} else {
			if !p.options.MinifyWhitespace && len(r.Prelude) > 0 {
				p.print(" ")
			}
			p.printRuleBlock(r.Rules, indent, r.CloseBraceLoc)
		}
	case *css_ast.RUnknownAt:
		p.print("@")
		whitespace := mayNeedWhitespaceAfter
		if len(r.Prelude) == 0 {
			whitespace = canDiscardWhitespaceAfter
		}
		p.printIdent(r.AtToken, identNormal, whitespace)
		if (!p.options.MinifyWhitespace && len(r.Block) != 0) || len(r.Prelude) > 0 {
			p.print(" ")
		}
		p.printTokens(r.Prelude, printTokensOpts{})
		if !p.options.MinifyWhitespace && len(r.Block) != 0 && len(r.Prelude) > 0 {
			p.print(" ")
		}
		// Unknown at-rules keep their raw block tokens instead of parsed rules
		if len(r.Block) == 0 {
			p.print(";")
		} else {
			p.printTokens(r.Block, printTokensOpts{})
		}
	case *css_ast.RSelector:
		p.printComplexSelectors(r.Selectors, indent, layoutMultiLine)
		if !p.options.MinifyWhitespace {
			p.print(" ")
		}
		p.printRuleBlock(r.Rules, indent, r.CloseBraceLoc)
	case *css_ast.RQualified:
		hasWhitespaceAfter := p.printTokens(r.Prelude, printTokensOpts{})
		if !hasWhitespaceAfter && !p.options.MinifyWhitespace {
			p.print(" ")
		}
		p.printRuleBlock(r.Rules, indent, r.CloseBraceLoc)
	case *css_ast.RDeclaration:
		// "key: value" with an optional "!important" suffix
		p.printIdent(r.KeyText, identNormal, canDiscardWhitespaceAfter)
		p.print(":")
		hasWhitespaceAfter := p.printTokens(r.Value, printTokensOpts{
			indent:        indent,
			isDeclaration: true,
		})
		if r.Important {
			if !hasWhitespaceAfter && !p.options.MinifyWhitespace && len(r.Value) > 0 {
				p.print(" ")
			}
			p.print("!important")
		}
		if !omitTrailingSemicolon {
			p.print(";")
		}
	case *css_ast.RBadDeclaration:
		p.printTokens(r.Tokens, printTokensOpts{})
		if !omitTrailingSemicolon {
			p.print(";")
		}
	case *css_ast.RComment:
		p.printIndentedComment(indent, r.Text)
	case *css_ast.RAtLayer:
		p.print("@layer")
		for i, parts := range r.Names {
			if i == 0 {
				p.print(" ")
			} else if !p.options.MinifyWhitespace {
				p.print(", ")
			} else {
				p.print(",")
			}
			// Layer names are dot-separated paths (e.g. "a.b.c")
			p.print(strings.Join(parts, "."))
		}
		if r.Rules == nil {
			p.print(";")
		} else {
			if !p.options.MinifyWhitespace {
				p.print(" ")
			}
			p.printRuleBlock(r.Rules, indent, r.CloseBraceLoc)
		}
	case *css_ast.RAtMedia:
		p.print("@media")
		var flags mqFlags
		// When minifying, the space after "@media" is only added if the first
		// query actually needs it (signaled via "mqAfterIdentifier")
		if p.options.MinifyWhitespace {
			flags = mqAfterIdentifier
		} else {
			p.print(" ")
		}
		for i, query := range r.Queries {
			if i > 0 {
				if p.options.MinifyWhitespace {
					p.print(",")
				} else {
					p.print(", ")
				}
			}
			p.printMediaQuery(query, flags)
			flags = 0
		}
		if !p.options.MinifyWhitespace && len(r.Queries) > 0 {
			p.print(" ")
		}
		p.printRuleBlock(r.Rules, indent, r.CloseBraceLoc)
	default:
		panic("Internal error")
	}
	if !p.options.MinifyWhitespace {
		p.print("\n")
	}
}
// mqFlags adjusts how a media query is printed based on its context.
type mqFlags uint8

const (
	// The query is nested inside another query and must be parenthesized
	mqNeedsParens mqFlags = 1 << iota

	// The query directly follows an identifier (e.g. "@media" when minifying),
	// so a separating space may still be required
	mqAfterIdentifier
)
// printMediaQuery prints a single media query. The flags say whether the
// query must be parenthesized (nested context) and whether it immediately
// follows an identifier such as "@media" (which may force a leading space).
func (p *printer) printMediaQuery(query css_ast.MediaQuery, flags mqFlags) {
	// Arbitrary token runs are printed verbatim
	if q, ok := query.Data.(*css_ast.MQArbitraryTokens); ok {
		if (flags & mqAfterIdentifier) != 0 {
			p.print(" ")
		}
		p.printTokens(q.Tokens, printTokensOpts{})
		return
	}
	switch q := query.Data.(type) {
	case *css_ast.MQType:
		// e.g. "screen", "not print", "only screen and (...)"
		if (flags & mqAfterIdentifier) != 0 {
			p.print(" ")
		}
		if p.options.AddSourceMappings {
			p.builder.AddSourceMapping(query.Loc, "", p.css)
		}
		switch q.Op {
		case css_ast.MQTypeOpNot:
			p.print("not ")
		case css_ast.MQTypeOpOnly:
			p.print("only ")
		}
		p.printIdent(q.Type, identNormal, 0)
		if q.AndOrNull.Data != nil {
			p.print(" and ")
			p.printMediaQuery(q.AndOrNull, 0)
		}
	case *css_ast.MQNot:
		// e.g. "not (...)", parenthesized when nested
		if (flags & mqNeedsParens) != 0 {
			p.print("(")
		} else if (flags & mqAfterIdentifier) != 0 {
			p.print(" ")
		}
		if p.options.AddSourceMappings {
			p.builder.AddSourceMapping(query.Loc, "", p.css)
		}
		p.print("not ")
		p.printMediaQuery(q.Inner, mqNeedsParens)
		if (flags & mqNeedsParens) != 0 {
			p.print(")")
		}
	case *css_ast.MQBinary:
		// Terms joined by "and"/"or", parenthesized when nested
		if (flags & mqNeedsParens) != 0 {
			p.print("(")
		}
		for i, inner := range q.Terms {
			if i > 0 {
				if !p.options.MinifyWhitespace {
					p.print(" ")
				}
				switch q.Op {
				case css_ast.MQBinaryOpAnd:
					p.print("and ")
				case css_ast.MQBinaryOpOr:
					p.print("or ")
				}
			}
			p.printMediaQuery(inner, mqNeedsParens)
		}
		if (flags & mqNeedsParens) != 0 {
			p.print(")")
		}
	case *css_ast.MQPlainOrBoolean:
		// e.g. "(color)" or "(min-width: 100px)"
		p.print("(")
		if p.options.AddSourceMappings {
			p.builder.AddSourceMapping(query.Loc, "", p.css)
		}
		p.printIdent(q.Name, identNormal, 0)
		if q.ValueOrNil != nil {
			if p.options.MinifyWhitespace {
				p.print(":")
			} else {
				p.print(": ")
			}
			p.printTokens(q.ValueOrNil, printTokensOpts{})
		}
		p.print(")")
	case *css_ast.MQRange:
		// Range syntax, e.g. "(100px <= width < 200px)"
		space := " "
		if p.options.MinifyWhitespace {
			space = ""
		}
		p.print("(")
		if q.BeforeCmp != css_ast.MQCmpNone {
			p.printTokens(q.Before, printTokensOpts{})
			p.print(space)
			p.print(q.BeforeCmp.String())
			p.print(space)
		}
		if p.options.AddSourceMappings {
			p.builder.AddSourceMapping(q.NameLoc, "", p.css)
		}
		p.printIdent(q.Name, identNormal, 0)
		if q.AfterCmp != css_ast.MQCmpNone {
			p.print(space)
			p.print(q.AfterCmp.String())
			p.print(space)
			p.printTokens(q.After, printTokensOpts{})
		}
		p.print(")")
	default:
		panic("Internal error")
	}
}
// printIndentedComment prints a (possibly multi-line) comment, re-indenting
// every continuation line to the current indentation level unless minifying.
func (p *printer) printIndentedComment(indent int32, text string) {
	// Avoid generating a comment containing the character sequence "</style"
	if !p.options.UnsupportedFeatures.Has(compat.InlineStyle) {
		text = helpers.EscapeClosingTag(text, "/style")
	}

	// Print one line at a time, indenting each continuation line
	for {
		nl := strings.IndexByte(text, '\n')
		if nl < 0 {
			break
		}
		p.print(text[:nl+1])
		if !p.options.MinifyWhitespace {
			p.printIndent(indent)
		}
		text = text[nl+1:]
	}

	// Print whatever remains after the final newline (or the whole comment
	// if it was a single line)
	p.print(text)
}
// printRuleBlock prints "{", the given rules at one deeper indentation level,
// and then "}". When minifying, the last rule omits its trailing semicolon.
func (p *printer) printRuleBlock(rules []css_ast.Rule, indent int32, closeBraceLoc logger.Loc) {
	if p.options.MinifyWhitespace {
		p.print("{")
	} else {
		p.print("{\n")
	}
	for i, decl := range rules {
		omitTrailingSemicolon := p.options.MinifyWhitespace && i+1 == len(rules)
		p.printRule(decl, indent+1, omitTrailingSemicolon)
	}
	// Only add a mapping when the close brace location is present (non-zero)
	if p.options.AddSourceMappings && closeBraceLoc.Start != 0 {
		p.builder.AddSourceMapping(closeBraceLoc, "", p.css)
	}
	if !p.options.MinifyWhitespace {
		p.printIndent(indent)
	}
	p.print("}")
}
// selectorLayout controls how a comma-separated selector list is laid out
// when not minifying.
type selectorLayout uint8

const (
	layoutMultiLine  selectorLayout = iota // one selector per line
	layoutSingleLine                       // all selectors on one line
)
// printComplexSelectors prints a comma-separated list of complex selectors,
// using the requested layout when not minifying and wrapping at the line
// limit when minifying.
func (p *printer) printComplexSelectors(selectors []css_ast.ComplexSelector, indent int32, layout selectorLayout) {
	for i, complex := range selectors {
		if i > 0 {
			if p.options.MinifyWhitespace {
				p.print(",")
				if p.options.LineLimit > 0 {
					p.printNewlinePastLineLimit(indent)
				}
			} else if layout == layoutMultiLine {
				p.print(",\n")
				p.printIndent(indent)
			} else {
				p.print(", ")
			}
		}
		// A complex selector is a sequence of compound selectors; the first
		// one needs no leading combinator/space handling
		for j, compound := range complex.Selectors {
			p.printCompoundSelector(compound, j == 0, indent)
		}
	}
}
// printCompoundSelector prints a single compound selector: an optional
// combinator, an optional type selector, any "&" nesting selectors, and any
// subclass selectors (#id, .class, [attr], :pseudo, etc.).
func (p *printer) printCompoundSelector(sel css_ast.CompoundSelector, isFirst bool, indent int32) {
	if !isFirst && sel.Combinator.Byte == 0 {
		// A space is required in between compound selectors if there is no
		// combinator in the middle. It's fine to convert "a + b" into "a+b"
		// but not to convert "a b" into "ab".
		if p.options.LineLimit <= 0 || !p.printNewlinePastLineLimit(indent) {
			p.print(" ")
		}
	}
	if sel.Combinator.Byte != 0 {
		// Print the combinator character (e.g. ">", "+", "~"), with spaces
		// around it when not minifying
		if !isFirst && !p.options.MinifyWhitespace {
			p.print(" ")
		}
		if p.options.AddSourceMappings {
			p.builder.AddSourceMapping(sel.Combinator.Loc, "", p.css)
		}
		p.css = append(p.css, sel.Combinator.Byte)
		if (p.options.LineLimit <= 0 || !p.printNewlinePastLineLimit(indent)) && !p.options.MinifyWhitespace {
			p.print(" ")
		}
	}
	if sel.TypeSelector != nil {
		whitespace := mayNeedWhitespaceAfter
		if len(sel.SubclassSelectors) > 0 {
			// There is no chance of whitespace before a subclass selector or pseudo
			// class selector
			whitespace = canDiscardWhitespaceAfter
		}
		p.printNamespacedName(*sel.TypeSelector, whitespace)
	}
	// Print any "&" nesting selectors
	for _, loc := range sel.NestingSelectorLocs {
		if p.options.AddSourceMappings {
			p.builder.AddSourceMapping(loc, "", p.css)
		}
		p.print("&")
	}
	for i, ss := range sel.SubclassSelectors {
		whitespace := mayNeedWhitespaceAfter
		// There is no chance of whitespace between subclass selectors
		if i+1 < len(sel.SubclassSelectors) {
			whitespace = canDiscardWhitespaceAfter
		}
		if p.options.AddSourceMappings {
			p.builder.AddSourceMapping(ss.Range.Loc, "", p.css)
		}
		switch s := ss.Data.(type) {
		case *css_ast.SSHash:
			p.print("#")
			// This deliberately does not use identHash. From the specification:
			// "In <id-selector>, the <hash-token>'s value must be an identifier."
			p.printSymbol(s.Name.Loc, s.Name.Ref, identNormal, whitespace)
		case *css_ast.SSClass:
			p.print(".")
			p.printSymbol(s.Name.Loc, s.Name.Ref, identNormal, whitespace)
		case *css_ast.SSAttribute:
			p.print("[")
			p.printNamespacedName(s.NamespacedName, canDiscardWhitespaceAfter)
			if s.MatcherOp != "" {
				p.print(s.MatcherOp)
				printAsIdent := false
				// Print the value as an identifier if it's possible
				if css_lexer.WouldStartIdentifierWithoutEscapes(s.MatcherValue) {
					printAsIdent = true
					for _, c := range s.MatcherValue {
						if !css_lexer.IsNameContinue(c) {
							printAsIdent = false
							break
						}
					}
				}
				if printAsIdent {
					p.printIdent(s.MatcherValue, identNormal, canDiscardWhitespaceAfter)
				} else {
					p.printQuoted(s.MatcherValue, 0)
				}
			}
			// Print an optional matcher modifier such as "i" or "s"
			if s.MatcherModifier != 0 {
				p.print(" ")
				p.print(string(rune(s.MatcherModifier)))
			}
			p.print("]")
		case *css_ast.SSPseudoClass:
			p.printPseudoClassSelector(*s, whitespace)
		case *css_ast.SSPseudoClassWithSelectorList:
			// e.g. ":nth-child(2n of .foo)" or ":is(a, b)"
			p.print(":")
			p.print(s.Kind.String())
			p.print("(")
			if s.Index.A != "" || s.Index.B != "" {
				p.printNthIndex(s.Index)
				if len(s.Selectors) > 0 {
					// The space before "of" can be dropped when minifying only if
					// what follows doesn't begin with a type selector
					if p.options.MinifyWhitespace && s.Selectors[0].Selectors[0].TypeSelector == nil {
						p.print(" of")
					} else {
						p.print(" of ")
					}
				}
			}
			p.printComplexSelectors(s.Selectors, indent, layoutSingleLine)
			p.print(")")
		default:
			panic("Internal error")
		}
	}
}
// printNthIndex prints an "An+B" index expression (as used by ":nth-child"),
// shortening "1n" to "n" and "-1n" to "-n" and forcing an explicit sign on
// the constant term when it follows "n".
func (p *printer) printNthIndex(index css_ast.NthIndex) {
	if index.A == "" {
		// No "n" term: print only the constant part (if any)
		if index.B != "" {
			p.print(index.B)
		}
		return
	}

	// Print the coefficient, minifying "1" and "-1"
	switch index.A {
	case "1":
		// Omit the coefficient entirely
	case "-1":
		p.print("-")
	default:
		p.print(index.A)
	}
	p.print("n")

	if index.B != "" {
		// The constant needs an explicit sign after "n"
		if !strings.HasPrefix(index.B, "-") {
			p.print("+")
		}
		p.print(index.B)
	}
}
// printNamespacedName prints an optionally-namespaced name such as "ns|div",
// "*|div", or just "div". The "whitespace" argument is forwarded to the final
// identifier in case it ends in an escape sequence.
func (p *printer) printNamespacedName(nsName css_ast.NamespacedName, whitespace trailingWhitespace) {
	if prefix := nsName.NamespacePrefix; prefix != nil {
		if p.options.AddSourceMappings {
			p.builder.AddSourceMapping(prefix.Range.Loc, "", p.css)
		}
		// The prefix is either an identifier or the "*" wildcard
		switch prefix.Kind {
		case css_lexer.TIdent:
			p.printIdent(prefix.Text, identNormal, canDiscardWhitespaceAfter)
		case css_lexer.TDelimAsterisk:
			p.print("*")
		default:
			panic("Internal error")
		}
		p.print("|")
	}
	if p.options.AddSourceMappings {
		p.builder.AddSourceMapping(nsName.Name.Range.Loc, "", p.css)
	}
	// The name is an identifier, the "*" wildcard, or the "&" nesting selector
	switch nsName.Name.Kind {
	case css_lexer.TIdent:
		p.printIdent(nsName.Name.Text, identNormal, whitespace)
	case css_lexer.TDelimAsterisk:
		p.print("*")
	case css_lexer.TDelimAmpersand:
		p.print("&")
	default:
		panic("Internal error")
	}
}
// printPseudoClassSelector prints ":name", "::name", or ":name(args)".
func (p *printer) printPseudoClassSelector(pseudo css_ast.SSPseudoClass, whitespace trailingWhitespace) {
	// Pseudo-elements use a double colon
	if pseudo.IsElement {
		p.print("::")
	} else {
		p.print(":")
	}
	// This checks for "nil" so we can distinguish ":is()" from ":is"
	if pseudo.Args != nil {
		p.printIdent(pseudo.Name, identNormal, canDiscardWhitespaceAfter)
		p.print("(")
		p.printTokens(pseudo.Args, printTokensOpts{})
		p.print(")")
	} else {
		p.printIdent(pseudo.Name, identNormal, whitespace)
	}
}
// print appends the given text verbatim to the output buffer.
func (p *printer) print(text string) {
	p.css = append(p.css, text...)
}
// bestQuoteCharForString picks the cheapest way to quote "text": single
// quotes, double quotes, or (when "forURL" is true and it's strictly cheaper)
// no quotes at all as a raw URL token. The cost of each option is the number
// of characters that would need escaping, plus 2 for the quote delimiters.
func bestQuoteCharForString(text string, forURL bool) byte {
	urlCost, singleCost, doubleCost := 0, 2, 2

	for _, c := range text {
		switch c {
		case '\'':
			// A single quote must be escaped both unquoted and in single quotes
			urlCost++
			singleCost++
		case '"':
			// A double quote must be escaped both unquoted and in double quotes
			urlCost++
			doubleCost++
		case '(', ')', ' ', '\t':
			// These only need escaping in an unquoted URL token
			urlCost++
		case '\\', '\n', '\r', '\f':
			// These need escaping everywhere
			urlCost++
			singleCost++
			doubleCost++
		}
	}

	// Quotes can sometimes be omitted for URL tokens
	if forURL && urlCost < singleCost && urlCost < doubleCost {
		return quoteForURL
	}

	// Prefer double quotes to single quotes if there is no cost difference
	if singleCost < doubleCost {
		return '\''
	}
	return '"'
}
// printQuotedFlags holds option flags for printQuoted/printQuotedWithQuote.
type printQuotedFlags uint8

const (
	// Don't split the string across lines with escaped newlines even when a
	// line-length limit is active
	printQuotedNoWrap printQuotedFlags = 1 << iota
)
// printQuoted prints "text" as a quoted string using whichever quote
// character requires the fewest escapes.
func (p *printer) printQuoted(text string, flags printQuotedFlags) {
	p.printQuotedWithQuote(text, bestQuoteCharForString(text, false), flags)
}
// escapeKind describes how a single character should be escaped when printed.
type escapeKind uint8

const (
	escapeNone      escapeKind = iota // print the character as-is
	escapeBackslash                   // print "\" followed by the character
	escapeHex                         // print "\" followed by the hex code point
)
// printWithEscape prints the single character "c" using the given escape
// style. "remainingText" must be the not-yet-printed input starting at "c",
// which is used to decide whether a hex escape needs a trailing space so the
// next character isn't absorbed into it. "mayNeedWhitespaceAfter" should be
// true when "c" is the last character and significant whitespace may follow.
func (p *printer) printWithEscape(c rune, escape escapeKind, remainingText string, mayNeedWhitespaceAfter bool) {
	var temp [utf8.UTFMax]byte
	if escape == escapeBackslash && ((c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F')) {
		// Hexadecimal characters cannot use a plain backslash escape
		escape = escapeHex
	}
	switch escape {
	case escapeNone:
		width := utf8.EncodeRune(temp[:], c)
		p.css = append(p.css, temp[:width]...)
	case escapeBackslash:
		p.css = append(p.css, '\\')
		width := utf8.EncodeRune(temp[:], c)
		p.css = append(p.css, temp[:width]...)
	case escapeHex:
		text := fmt.Sprintf("\\%x", c)
		p.css = append(p.css, text...)
		// Make sure the next character is not interpreted as part of the escape sequence
		// (a hex escape shorter than the maximum 6 digits is terminated by the
		// next character, so a following hex digit or space must be guarded)
		if len(text) < 1+6 {
			if next := utf8.RuneLen(c); next < len(remainingText) {
				c = rune(remainingText[next])
				if c == ' ' || c == '\t' || (c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F') {
					p.css = append(p.css, ' ')
				}
			} else if mayNeedWhitespaceAfter {
				// If the last character is a hexadecimal escape, print a space afterwards
				// for the escape sequence to consume. That way we're sure it won't
				// accidentally consume a semantically significant space afterward.
				p.css = append(p.css, ' ')
			}
		}
	}
}
// printQuotedWithQuote prints "text" delimited by the given quote character,
// escaping characters as needed. Passing quoteForURL prints an unquoted URL
// token instead (with URL-specific escaping). When a line-length limit is
// active, long strings may be split with escaped newlines unless the
// printQuotedNoWrap flag is set.
//
// Note: This function is hot in profiles
func (p *printer) printQuotedWithQuote(text string, quote byte, flags printQuotedFlags) {
	if quote != quoteForURL {
		p.css = append(p.css, quote)
	}
	n := len(text)
	i := 0
	runStart := 0 // start of the current run of characters that need no escaping
	// Only compute the line length if necessary
	var startLineLength int
	wrapLongLines := false
	if p.options.LineLimit > 0 && quote != quoteForURL && (flags&printQuotedNoWrap) == 0 {
		startLineLength = p.currentLineLength()
		if startLineLength > p.options.LineLimit {
			startLineLength = p.options.LineLimit
		}
		wrapLongLines = true
	}
	for i < n {
		// Wrap long lines that are over the limit using escaped newlines
		if wrapLongLines && startLineLength+i >= p.options.LineLimit {
			// Flush the pending unescaped run before inserting the line break
			if runStart < i {
				p.css = append(p.css, text[runStart:i]...)
				runStart = i
			}
			p.css = append(p.css, "\\\n"...)
			startLineLength -= p.options.LineLimit
		}
		c, width := utf8.DecodeRuneInString(text[i:])
		escape := escapeNone
		switch c {
		case '\x00', '\r', '\n', '\f':
			// Use a hexadecimal escape for characters that would be invalid escapes
			escape = escapeHex
		case '\\', rune(quote):
			escape = escapeBackslash
		case '(', ')', ' ', '\t', '"', '\'':
			// These characters must be escaped in URL tokens
			if quote == quoteForURL {
				escape = escapeBackslash
			}
		case '/':
			// Avoid generating the sequence "</style" in CSS code
			if !p.options.UnsupportedFeatures.Has(compat.InlineStyle) && i >= 1 && text[i-1] == '<' && i+6 <= len(text) && strings.EqualFold(text[i+1:i+6], "style") {
				escape = escapeBackslash
			}
		default:
			// Escape non-ASCII characters in ASCII-only mode, and always escape
			// the byte-order mark
			if (p.options.ASCIIOnly && c >= 0x80) || c == '\uFEFF' {
				escape = escapeHex
			}
		}
		if escape != escapeNone {
			// Flush the pending unescaped run, then print the escaped character
			if runStart < i {
				p.css = append(p.css, text[runStart:i]...)
			}
			p.printWithEscape(c, escape, text[i:], false)
			runStart = i + width
		}
		i += width
	}
	// Flush any trailing unescaped run
	if runStart < n {
		p.css = append(p.css, text[runStart:]...)
	}
	if quote != quoteForURL {
		p.css = append(p.css, quote)
	}
}
// currentLineLength returns the length in bytes of the current (last) line of
// the output generated so far. The backwards scan for the previous newline is
// memoized via "p.oldLineStart"/"p.oldLineEnd" so repeated calls only scan
// the bytes appended since the previous call.
func (p *printer) currentLineLength() int {
	css := p.css
	n := len(css)
	stop := p.oldLineEnd
	// Update "oldLineStart" to the start of the current line
	for i := n; i > stop; i-- {
		if c := css[i-1]; c == '\r' || c == '\n' {
			p.oldLineStart = i
			break
		}
	}
	p.oldLineEnd = n
	return n - p.oldLineStart
}
// printNewlinePastLineLimit prints a newline (plus indentation when not
// minifying) if the current output line has reached the configured line
// limit. It returns true if a newline was printed.
func (p *printer) printNewlinePastLineLimit(indent int32) bool {
	if p.currentLineLength() < p.options.LineLimit {
		return false
	}
	p.print("\n")
	if !p.options.MinifyWhitespace {
		p.printIndent(indent)
	}
	return true
}
// identMode selects the escaping rules used when printing an identifier.
type identMode uint8

const (
	identNormal                     identMode = iota // an ordinary identifier
	identHash                                        // the name of a hash token (printed after "#")
	identDimensionUnit                               // the unit of a dimension token (printed after a number)
	identDimensionUnitAfterExponent                  // a unit following a number that contains "e"/"E"
)

// trailingWhitespace says whether semantically-significant whitespace may
// follow the printed text, which matters when the text ends in a hex escape
// that could otherwise consume that whitespace.
type trailingWhitespace uint8

const (
	mayNeedWhitespaceAfter trailingWhitespace = iota
	canDiscardWhitespaceAfter
)
// printIdent prints "text" as a CSS identifier, escaping characters as
// required by the given mode and ASCII-only setting. The "whitespace"
// argument says whether a trailing space must be protected from a final hex
// escape.
//
// Note: This function is hot in profiles
func (p *printer) printIdent(text string, mode identMode, whitespace trailingWhitespace) {
	n := len(text)
	// Special escape behavior for the first character
	initialEscape := escapeNone
	switch mode {
	case identNormal:
		if !css_lexer.WouldStartIdentifierWithoutEscapes(text) {
			initialEscape = escapeBackslash
		}
	case identDimensionUnit, identDimensionUnitAfterExponent:
		if !css_lexer.WouldStartIdentifierWithoutEscapes(text) {
			initialEscape = escapeBackslash
		} else if n > 0 {
			// A unit that could merge with the preceding number must be escaped
			if c := text[0]; c >= '0' && c <= '9' {
				// Unit: "2x"
				initialEscape = escapeHex
			} else if (c == 'e' || c == 'E') && mode != identDimensionUnitAfterExponent {
				if n >= 2 && text[1] >= '0' && text[1] <= '9' {
					// Unit: "e2x"
					initialEscape = escapeHex
				} else if n >= 3 && text[1] == '-' && text[2] >= '0' && text[2] <= '9' {
					// Unit: "e-2x"
					initialEscape = escapeHex
				}
			}
		}
	}
	// Fast path: the identifier does not need to be escaped. This fast path is
	// important for performance. For example, doing this sped up end-to-end
	// parsing and printing of a large CSS file from 84ms to 66ms (around 25%
	// faster).
	if initialEscape == escapeNone {
		for i := 0; i < n; i++ {
			if c := text[i]; c >= 0x80 || !css_lexer.IsNameContinue(rune(c)) {
				goto slowPath
			}
		}
		p.css = append(p.css, text...)
		return
	slowPath:
	}
	// Slow path: the identifier needs to be escaped
	for i, c := range text {
		escape := escapeNone
		if p.options.ASCIIOnly && c >= 0x80 {
			escape = escapeHex
		} else if c == '\r' || c == '\n' || c == '\f' || c == '\uFEFF' {
			// Use a hexadecimal escape for characters that would be invalid escapes
			escape = escapeHex
		} else {
			// Escape non-identifier characters
			if !css_lexer.IsNameContinue(c) {
				escape = escapeBackslash
			}
			// Special escape behavior for the first character
			if i == 0 && initialEscape != escapeNone {
				escape = initialEscape
			}
		}
		// If the last character is a hexadecimal escape, print a space afterwards
		// for the escape sequence to consume. That way we're sure it won't
		// accidentally consume a semantically significant space afterward.
		mayNeedWhitespaceAfter := whitespace == mayNeedWhitespaceAfter && escape != escapeNone && i+utf8.RuneLen(c) == n
		p.printWithEscape(c, escape, text[i:], mayNeedWhitespaceAfter)
	}
}
// printSymbol prints the (possibly locally-renamed) name of a symbol
// reference as an identifier, adding a source mapping that records the
// original name whenever the printed name differs from it.
func (p *printer) printSymbol(loc logger.Loc, ref ast.Ref, mode identMode, whitespace trailingWhitespace) {
	ref = ast.FollowSymbols(p.symbols, ref)
	originalName := p.symbols.Get(ref).OriginalName
	// Use a renamed local name if one was assigned, otherwise the original
	name, ok := p.options.LocalNames[ref]
	if !ok {
		name = originalName
	}
	if p.options.AddSourceMappings {
		// An empty original name in the mapping means "same as printed"
		if originalName == name {
			originalName = ""
		}
		p.builder.AddSourceMapping(loc, originalName, p.css)
	}
	p.printIdent(name, mode, whitespace)
}
// printIndent appends two spaces per indentation level, capping the total
// width at half of the configured line-length limit (when one is active).
func (p *printer) printIndent(indent int32) {
	levels := int(indent)
	if limit := p.options.LineLimit; limit > 0 && levels*2 >= limit {
		levels = limit / 2
	}
	for ; levels > 0; levels-- {
		p.css = append(p.css, ' ', ' ')
	}
}
// printTokensOpts holds options for printTokens.
type printTokensOpts struct {
	indent               int32 // current indentation level for multi-line output
	multiLineCommaPeriod uint8 // when > 0, start a new line after every this many commas
	isDeclaration        bool  // true when printing the value tokens of a declaration
}
// functionMultiLineCommaPeriod returns how many commas should separate each
// line when pretty-printing the arguments of certain well-known functions
// (gradients and matrix transforms), or 0 to keep the arguments on one line.
func functionMultiLineCommaPeriod(token css_ast.Token) uint8 {
	if token.Kind != css_lexer.TFunction {
		return 0
	}

	// Count the top-level commas among the function's arguments
	commas := 0
	for _, child := range *token.Children {
		if child.Kind == css_lexer.TComma {
			commas++
		}
	}

	switch strings.ToLower(token.Text) {
	case "linear-gradient", "radial-gradient", "conic-gradient",
		"repeating-linear-gradient", "repeating-radial-gradient", "repeating-conic-gradient":
		// Gradients with 3+ color stops get one stop per line
		if commas >= 2 {
			return 1
		}

	case "matrix":
		// A 2D matrix has 6 arguments: print 2 per line
		if commas == 5 {
			return 2
		}

	case "matrix3d":
		// A 3D matrix has 16 arguments: print 4 per line
		if commas == 15 {
			return 4
		}
	}
	return 0
}
func (p *printer) printTokens(tokens []css_ast.Token, opts printTokensOpts) bool {
hasWhitespaceAfter := len(tokens) > 0 && (tokens[0].Whitespace&css_ast.WhitespaceBefore) != 0
// Pretty-print long comma-separated declarations of 3 or more items
commaPeriod := int(opts.multiLineCommaPeriod)
if !p.options.MinifyWhitespace && opts.isDeclaration {
commaCount := 0
for _, t := range tokens {
if t.Kind == css_lexer.TComma {
commaCount++
if commaCount >= 2 {
commaPeriod = 1
break
}
}
if t.Kind == css_lexer.TFunction && functionMultiLineCommaPeriod(t) > 0 {
commaPeriod = 1
break
}
}
}
commaCount := 0
for i, t := range tokens {
if t.Kind == css_lexer.TComma {
commaCount++
}
if t.Kind == css_lexer.TWhitespace {
hasWhitespaceAfter = true
continue
}
if hasWhitespaceAfter {
if commaPeriod > 0 && (i == 0 || (tokens[i-1].Kind == css_lexer.TComma && commaCount%commaPeriod == 0)) {
p.print("\n")
p.printIndent(opts.indent + 1)
} else if p.options.LineLimit <= 0 || !p.printNewlinePastLineLimit(opts.indent+1) {
p.print(" ")
}
}
hasWhitespaceAfter = (t.Whitespace&css_ast.WhitespaceAfter) != 0 ||
(i+1 < len(tokens) && (tokens[i+1].Whitespace&css_ast.WhitespaceBefore) != 0)
whitespace := mayNeedWhitespaceAfter
if !hasWhitespaceAfter {
whitespace = canDiscardWhitespaceAfter
}
if p.options.AddSourceMappings {
p.builder.AddSourceMapping(t.Loc, "", p.css)
}
switch t.Kind {
case css_lexer.TIdent:
p.printIdent(t.Text, identNormal, whitespace)
case css_lexer.TSymbol:
ref := ast.Ref{SourceIndex: p.options.InputSourceIndex, InnerIndex: t.PayloadIndex}
p.printSymbol(t.Loc, ref, identNormal, whitespace)
case css_lexer.TFunction:
p.printIdent(t.Text, identNormal, whitespace)
p.print("(")
case css_lexer.TDimension:
value := t.DimensionValue()
p.print(value)
mode := identDimensionUnit
if strings.ContainsAny(value, "eE") {
mode = identDimensionUnitAfterExponent
}
p.printIdent(t.DimensionUnit(), mode, whitespace)
case css_lexer.TAtKeyword:
p.print("@")
p.printIdent(t.Text, identNormal, whitespace)
case css_lexer.THash:
p.print("#")
p.printIdent(t.Text, identHash, whitespace)
case css_lexer.TString:
p.printQuoted(t.Text, 0)
case css_lexer.TURL:
record := p.importRecords[t.PayloadIndex]
text := record.Path.Text
tryToAvoidQuote := true
var flags printQuotedFlags
if record.Flags.Has(ast.ContainsUniqueKey) {
flags |= printQuotedNoWrap
// If the caller will be substituting a path here later using string
// substitution, then we can't be sure that it will form a valid URL
// token when unquoted (e.g. it may contain spaces). So we need to
// quote the unique key here just in case. For more info see this
// issue: https://github.com/evanw/esbuild/issues/3410
tryToAvoidQuote = false
} else if p.options.LineLimit > 0 && p.currentLineLength()+len(text) >= p.options.LineLimit {
tryToAvoidQuote = false
}
p.print("url(")
p.printQuotedWithQuote(text, bestQuoteCharForString(text, tryToAvoidQuote), flags)
p.print(")")
p.recordImportPathForMetafile(t.PayloadIndex)
case css_lexer.TUnterminatedString:
// We must end this with a newline so that this string stays unterminated
p.print(t.Text)
p.print("\n")
if !p.options.MinifyWhitespace {
p.printIndent(opts.indent)
}
hasWhitespaceAfter = false
default:
p.print(t.Text)
}
if t.Children != nil {
childCommaPeriod := uint8(0)
if commaPeriod > 0 && opts.isDeclaration {
childCommaPeriod = functionMultiLineCommaPeriod(t)
}
if childCommaPeriod > 0 {
opts.indent++
if !p.options.MinifyWhitespace {
p.print("\n")
p.printIndent(opts.indent + 1)
}
}
p.printTokens(*t.Children, printTokensOpts{
indent: opts.indent,
multiLineCommaPeriod: childCommaPeriod,
})
if childCommaPeriod > 0 {
opts.indent--
}
switch t.Kind {
case css_lexer.TFunction:
p.print(")")
case css_lexer.TOpenParen:
p.print(")")
case css_lexer.TOpenBrace:
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | true |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/api_helpers/use_timer.go | internal/api_helpers/use_timer.go | package api_helpers
// This flag is set by the CLI to activate the timer. It's put here instead of
// by the timer to discourage code from checking this flag. Only the code that
// creates the root timer should check this flag. Other code should check that
// the timer is not null to detect if the timer is being used or not.
var UseTimer bool
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/js_ast/js_ast_test.go | internal/js_ast/js_ast_test.go | package js_ast
import "testing"
// assertEqual fails the test immediately if "a" and "b" are not equal.
func assertEqual(t *testing.T, a interface{}, b interface{}) {
	t.Helper() // report the caller's line number on failure, not this one
	if a != b {
		// Use "%v" instead of "%s" so non-string values (e.g. ints) are
		// formatted correctly rather than as "%!s(int=...)"
		t.Fatalf("%v != %v", a, b)
	}
}
func TestGenerateNonUniqueNameFromPath(t *testing.T) {
assertEqual(t, GenerateNonUniqueNameFromPath("<stdin>"), "stdin")
assertEqual(t, GenerateNonUniqueNameFromPath("foo/bar"), "bar")
assertEqual(t, GenerateNonUniqueNameFromPath("foo/bar.js"), "bar")
assertEqual(t, GenerateNonUniqueNameFromPath("foo/bar.min.js"), "bar_min")
assertEqual(t, GenerateNonUniqueNameFromPath("trailing//slashes//"), "slashes")
assertEqual(t, GenerateNonUniqueNameFromPath("path/with/spaces in name.js"), "spaces_in_name")
assertEqual(t, GenerateNonUniqueNameFromPath("path\\on\\windows.js"), "windows")
assertEqual(t, GenerateNonUniqueNameFromPath("node_modules/demo-pkg/index.js"), "demo_pkg")
assertEqual(t, GenerateNonUniqueNameFromPath("node_modules\\demo-pkg\\index.js"), "demo_pkg")
assertEqual(t, GenerateNonUniqueNameFromPath("123_invalid_identifier.js"), "invalid_identifier")
assertEqual(t, GenerateNonUniqueNameFromPath("emoji 🍕 name.js"), "emoji_name")
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/js_ast/js_ast_helpers.go | internal/js_ast/js_ast_helpers.go | package js_ast
import (
"math"
"strconv"
"strings"
"github.com/evanw/esbuild/internal/ast"
"github.com/evanw/esbuild/internal/compat"
"github.com/evanw/esbuild/internal/helpers"
"github.com/evanw/esbuild/internal/logger"
)
// HelperContext holds context needed by the AST helper functions in this file.
type HelperContext struct {
	// isUnbound is a callback that classifies a symbol reference as unbound
	isUnbound func(ast.Ref) bool
}
// MakeHelperContext constructs a HelperContext that uses the given callback
// to decide whether a symbol reference is unbound.
func MakeHelperContext(isUnbound func(ast.Ref) bool) HelperContext {
	return HelperContext{isUnbound: isUnbound}
}
// If this returns true, then calling this expression captures the target of
// the property access as "this" when calling the function in the property.
func IsPropertyAccess(expr Expr) bool {
	switch expr.Data.(type) {
	case *EDot:
		return true
	case *EIndex:
		return true
	default:
		return false
	}
}
// IsOptionalChain returns true if this expression is a property access,
// index, or call whose "OptionalChain" marker is set (i.e. it's part of a
// "?." optional chain).
func IsOptionalChain(value Expr) bool {
	switch e := value.Data.(type) {
	case *EDot:
		return e.OptionalChain != OptionalChainNone
	case *EIndex:
		return e.OptionalChain != OptionalChainNone
	case *ECall:
		return e.OptionalChain != OptionalChainNone
	}
	return false
}
// Assign constructs an assignment expression "a = b" located at "a".
func Assign(a Expr, b Expr) Expr {
	return Expr{Loc: a.Loc, Data: &EBinary{Op: BinOpAssign, Left: a, Right: b}}
}
// AssignStmt constructs an expression statement containing "a = b".
func AssignStmt(a Expr, b Expr) Stmt {
	return Stmt{Loc: a.Loc, Data: &SExpr{Value: Assign(a, b)}}
}
// Wraps the provided expression in the "!" prefix operator. The expression
// will potentially be simplified to avoid generating unnecessary extra "!"
// operators. For example, calling this with "!!x" will return "!x" instead
// of returning "!!!x".
func Not(expr Expr) Expr {
	if result, ok := MaybeSimplifyNot(expr); ok {
		return result
	}
	// No simplification was possible, so wrap in a literal "!" operator
	return Expr{Loc: expr.Loc, Data: &EUnary{Op: UnOpNot, Value: expr}}
}
// The given "expr" argument should be the operand of a "!" prefix operator
// (i.e. the "x" in "!x"). This returns a simplified expression for the
// whole operator (i.e. the "!x") if it can be simplified, or false if not.
// It's separate from "Not()" above to avoid allocation on failure in case
// that is undesired.
//
// This function intentionally avoids mutating the input AST so it can be
// called after the AST has been frozen (i.e. after parsing ends).
func MaybeSimplifyNot(expr Expr) (Expr, bool) {
	switch e := expr.Data.(type) {
	case *EAnnotation:
		// Look through annotation wrappers
		return MaybeSimplifyNot(e.Value)
	case *EInlinedEnum:
		// Look through inlined enum wrappers
		if value, ok := MaybeSimplifyNot(e.Value); ok {
			return value, true
		}
	case *ENull, *EUndefined:
		// "!null" and "!undefined" are always true
		return Expr{Loc: expr.Loc, Data: &EBoolean{Value: true}}, true
	case *EBoolean:
		return Expr{Loc: expr.Loc, Data: &EBoolean{Value: !e.Value}}, true
	case *ENumber:
		// "!n" is true only for 0, -0, and NaN
		return Expr{Loc: expr.Loc, Data: &EBoolean{Value: e.Value == 0 || math.IsNaN(e.Value)}}, true
	case *EBigInt:
		if equal, ok := CheckEqualityBigInt(e.Value, "0"); ok {
			return Expr{Loc: expr.Loc, Data: &EBoolean{Value: equal}}, true
		}
	case *EString:
		// "!s" is true only for the empty string
		return Expr{Loc: expr.Loc, Data: &EBoolean{Value: len(e.Value) == 0}}, true
	case *EFunction, *EArrow, *ERegExp:
		// Function and regexp literals are always truthy
		return Expr{Loc: expr.Loc, Data: &EBoolean{Value: false}}, true
	case *EUnary:
		// "!!!a" => "!a"
		if e.Op == UnOpNot && KnownPrimitiveType(e.Value.Data) == PrimitiveBoolean {
			return e.Value, true
		}
	case *EBinary:
		// Make sure that these transformations are all safe for special values.
		// For example, "!(a < b)" is not the same as "a >= b" if a and/or b are
		// NaN (or undefined, or null, or possibly other problem cases too).
		switch e.Op {
		case BinOpLooseEq:
			// "!(a == b)" => "a != b"
			return Expr{Loc: expr.Loc, Data: &EBinary{Op: BinOpLooseNe, Left: e.Left, Right: e.Right}}, true
		case BinOpLooseNe:
			// "!(a != b)" => "a == b"
			return Expr{Loc: expr.Loc, Data: &EBinary{Op: BinOpLooseEq, Left: e.Left, Right: e.Right}}, true
		case BinOpStrictEq:
			// "!(a === b)" => "a !== b"
			return Expr{Loc: expr.Loc, Data: &EBinary{Op: BinOpStrictNe, Left: e.Left, Right: e.Right}}, true
		case BinOpStrictNe:
			// "!(a !== b)" => "a === b"
			return Expr{Loc: expr.Loc, Data: &EBinary{Op: BinOpStrictEq, Left: e.Left, Right: e.Right}}, true
		case BinOpComma:
			// "!(a, b)" => "a, !b"
			return Expr{Loc: expr.Loc, Data: &EBinary{Op: BinOpComma, Left: e.Left, Right: Not(e.Right)}}, true
		}
	}
	return Expr{}, false
}
// MaybeSimplifyEqualityComparison attempts to replace an equality comparison
// against a primitive literal with something shorter, returning the
// replacement and true on success.
//
// This function intentionally avoids mutating the input AST so it can be
// called after the AST has been frozen (i.e. after parsing ends).
func MaybeSimplifyEqualityComparison(loc logger.Loc, e *EBinary, unsupportedFeatures compat.JSFeature) (Expr, bool) {
	value, primitive := e.Left, e.Right

	// Detect when the primitive comes first and flip the order of our checks
	if IsPrimitiveLiteral(value.Data) {
		value, primitive = primitive, value
	}

	// "!x === true" => "!x"
	// "!x === false" => "!!x"
	// "!x !== true" => "!!x"
	// "!x !== false" => "!x"
	if boolean, ok := primitive.Data.(*EBoolean); ok && KnownPrimitiveType(value.Data) == PrimitiveBoolean {
		if boolean.Value == (e.Op == BinOpLooseNe || e.Op == BinOpStrictNe) {
			return Not(value), true
		} else {
			return value, true
		}
	}

	// "typeof x != 'undefined'" => "typeof x < 'u'"
	// "typeof x == 'undefined'" => "typeof x > 'u'"
	if !unsupportedFeatures.Has(compat.TypeofExoticObjectIsObject) {
		// Only do this optimization if we know that the "typeof" operator won't
		// return something random. The only case of this happening was Internet
		// Explorer returning "unknown" for some objects, which messes with this
		// optimization. So we don't do this when targeting Internet Explorer.
		if typeof, ok := value.Data.(*EUnary); ok && typeof.Op == UnOpTypeof {
			if str, ok := primitive.Data.(*EString); ok && helpers.UTF16EqualsString(str.Value, "undefined") {
				// "flip" is true when the string literal was originally on the
				// left, which mirrors the comparison direction
				flip := value == e.Right
				op := BinOpLt
				if (e.Op == BinOpLooseEq || e.Op == BinOpStrictEq) != flip {
					op = BinOpGt
				}
				// This only replaces the Data pointer of the local Expr copy,
				// not the original AST node
				primitive.Data = &EString{Value: []uint16{'u'}}
				if flip {
					value, primitive = primitive, value
				}
				return Expr{Loc: loc, Data: &EBinary{Op: op, Left: value, Right: primitive}}, true
			}
		}
	}

	return Expr{}, false
}
// IsSymbolInstance reports whether this property access expression was
// marked as an instance of the "Symbol" type.
func IsSymbolInstance(data E) bool {
	if dot, ok := data.(*EDot); ok {
		return dot.IsSymbolInstance
	}
	if index, ok := data.(*EIndex); ok {
		return index.IsSymbolInstance
	}
	return false
}
// IsPrimitiveLiteral reports whether this expression is a primitive literal
// (null, undefined, string, boolean, number, or bigint), looking through
// annotation and inlined enum wrappers.
func IsPrimitiveLiteral(data E) bool {
	for {
		switch e := data.(type) {
		case *EAnnotation:
			data = e.Value.Data
		case *EInlinedEnum:
			data = e.Value.Data
		case *ENull, *EUndefined, *EString, *EBoolean, *ENumber, *EBigInt:
			return true
		default:
			return false
		}
	}
}
// PrimitiveType classifies the statically-known JavaScript primitive type of
// an expression, if any.
type PrimitiveType uint8

const (
	// PrimitiveUnknown means the type could not be determined statically (it
	// may not even be a primitive)
	PrimitiveUnknown PrimitiveType = iota

	// PrimitiveMixed means the value is definitely some kind of primitive,
	// but more than one primitive type is possible
	PrimitiveMixed

	PrimitiveNull
	PrimitiveUndefined
	PrimitiveBoolean
	PrimitiveNumber
	PrimitiveString
	PrimitiveBigInt
)
// This can be used when the returned type is either one or the other:
// it merges the known primitive types of two expressions into the most
// specific type covering both.
func MergedKnownPrimitiveTypes(a Expr, b Expr) PrimitiveType {
	x := KnownPrimitiveType(a.Data)
	y := KnownPrimitiveType(b.Data)
	switch {
	case x == PrimitiveUnknown || y == PrimitiveUnknown:
		return PrimitiveUnknown
	case x == y:
		return x
	default:
		return PrimitiveMixed // Definitely some kind of primitive
	}
}
// KnownPrimitiveType returns the statically-known primitive type of the
// given expression, or PrimitiveUnknown if it can't be determined.
//
// Note: This function does not say whether the expression is side-effect free
// or not. For example, the expression "++x" always returns a primitive.
func KnownPrimitiveType(expr E) PrimitiveType {
	switch e := expr.(type) {
	case *EAnnotation:
		return KnownPrimitiveType(e.Value.Data)

	case *EInlinedEnum:
		return KnownPrimitiveType(e.Value.Data)

	case *ENull:
		return PrimitiveNull

	case *EUndefined:
		return PrimitiveUndefined

	case *EBoolean:
		return PrimitiveBoolean

	case *ENumber:
		return PrimitiveNumber

	case *EString:
		return PrimitiveString

	case *EBigInt:
		return PrimitiveBigInt

	case *ETemplate:
		// Untagged template literals always produce strings; tagged template
		// literals can return anything, so fall through to "unknown"
		if e.TagOrNil.Data == nil {
			return PrimitiveString
		}

	case *EIf:
		// A conditional returns one branch or the other
		return MergedKnownPrimitiveTypes(e.Yes, e.No)

	case *EUnary:
		switch e.Op {
		case UnOpVoid:
			return PrimitiveUndefined

		case UnOpTypeof:
			return PrimitiveString

		case UnOpNot, UnOpDelete:
			return PrimitiveBoolean

		case UnOpPos:
			return PrimitiveNumber // Cannot be bigint because that throws an exception

		case UnOpNeg, UnOpCpl:
			value := KnownPrimitiveType(e.Value.Data)
			if value == PrimitiveBigInt {
				return PrimitiveBigInt
			}
			if value != PrimitiveUnknown && value != PrimitiveMixed {
				return PrimitiveNumber
			}
			return PrimitiveMixed // Can be number or bigint

		case UnOpPreDec, UnOpPreInc, UnOpPostDec, UnOpPostInc:
			return PrimitiveMixed // Can be number or bigint
		}

	case *EBinary:
		switch e.Op {
		case BinOpStrictEq, BinOpStrictNe, BinOpLooseEq, BinOpLooseNe,
			BinOpLt, BinOpGt, BinOpLe, BinOpGe,
			BinOpInstanceof, BinOpIn:
			return PrimitiveBoolean

		case BinOpLogicalOr, BinOpLogicalAnd:
			// The result is the value of one of the two operands
			return MergedKnownPrimitiveTypes(e.Left, e.Right)

		case BinOpNullishCoalescing:
			left := KnownPrimitiveType(e.Left.Data)
			right := KnownPrimitiveType(e.Right.Data)
			if left == PrimitiveNull || left == PrimitiveUndefined {
				// The left side is always nullish, so only the right side matters
				return right
			}
			if left != PrimitiveUnknown {
				if left != PrimitiveMixed {
					return left // Definitely not null or undefined
				}
				if right != PrimitiveUnknown {
					return PrimitiveMixed // Definitely some kind of primitive
				}
			}

		case BinOpAdd:
			left := KnownPrimitiveType(e.Left.Data)
			right := KnownPrimitiveType(e.Right.Data)
			if left == PrimitiveString || right == PrimitiveString {
				// Adding a string to anything produces a string
				return PrimitiveString
			}
			if left == PrimitiveBigInt && right == PrimitiveBigInt {
				return PrimitiveBigInt
			}
			if left != PrimitiveUnknown && left != PrimitiveMixed && left != PrimitiveBigInt &&
				right != PrimitiveUnknown && right != PrimitiveMixed && right != PrimitiveBigInt {
				return PrimitiveNumber
			}
			return PrimitiveMixed // Can be number or bigint or string (or an exception)

		case BinOpAddAssign:
			right := KnownPrimitiveType(e.Right.Data)
			if right == PrimitiveString {
				return PrimitiveString
			}
			return PrimitiveMixed // Can be number or bigint or string (or an exception)

		case
			BinOpSub, BinOpSubAssign,
			BinOpMul, BinOpMulAssign,
			BinOpDiv, BinOpDivAssign,
			BinOpRem, BinOpRemAssign,
			BinOpPow, BinOpPowAssign,
			BinOpBitwiseAnd, BinOpBitwiseAndAssign,
			BinOpBitwiseOr, BinOpBitwiseOrAssign,
			BinOpBitwiseXor, BinOpBitwiseXorAssign,
			BinOpShl, BinOpShlAssign,
			BinOpShr, BinOpShrAssign,
			BinOpUShr, BinOpUShrAssign:
			return PrimitiveMixed // Can be number or bigint (or an exception)

		case BinOpAssign, BinOpComma:
			// The result is the value of the right operand
			return KnownPrimitiveType(e.Right.Data)
		}
	}

	return PrimitiveUnknown
}
// CanChangeStrictToLoose reports whether "a === b" can be safely rewritten
// as "a == b": both sides must have the same single known primitive type.
func CanChangeStrictToLoose(a Expr, b Expr) bool {
	x := KnownPrimitiveType(a.Data)
	if x == PrimitiveUnknown || x == PrimitiveMixed {
		return false
	}
	return x == KnownPrimitiveType(b.Data)
}
// Returns true if the result of the "typeof" operator on this expression is
// statically determined and this expression has no side effects (i.e. can be
// removed without consequence).
func TypeofWithoutSideEffects(data E) (string, bool) {
	switch e := data.(type) {
	case *EAnnotation:
		// Only look through annotations that were marked as removable
		if e.Flags.Has(CanBeRemovedIfUnusedFlag) {
			return TypeofWithoutSideEffects(e.Value.Data)
		}

	case *EInlinedEnum:
		return TypeofWithoutSideEffects(e.Value.Data)

	case *ENull:
		// Note: "typeof null" is "object", not "null"
		return "object", true

	case *EUndefined:
		return "undefined", true

	case *EBoolean:
		return "boolean", true

	case *ENumber:
		return "number", true

	case *EBigInt:
		return "bigint", true

	case *EString:
		return "string", true

	case *EFunction, *EArrow:
		return "function", true
	}

	return "", false
}
// The goal of this function is to "rotate" the AST if it's possible to use the
// left-associative property of the operator to avoid unnecessary parentheses.
//
// When using this, make absolutely sure that the operator is actually
// associative. For example, the "+" operator is not associative for
// floating-point numbers.
//
// This function intentionally avoids mutating the input AST so it can be
// called after the AST has been frozen (i.e. after parsing ends).
func JoinWithLeftAssociativeOp(op OpCode, a Expr, b Expr) Expr {
	// "(a, b) op c" => "a, b op c"
	if comma, ok := a.Data.(*EBinary); ok && comma.Op == BinOpComma {
		// Don't mutate the original AST
		clone := *comma
		clone.Right = JoinWithLeftAssociativeOp(op, clone.Right, b)
		return Expr{Loc: a.Loc, Data: &clone}
	}

	// "a op (b op c)" => "(a op b) op c"
	// "a op (b op (c op d))" => "((a op b) op c) op d"
	for {
		if binary, ok := b.Data.(*EBinary); ok && binary.Op == op {
			// Fold the right operand's left subtree into "a" and keep
			// rotating until the right operand is no longer this operator
			a = JoinWithLeftAssociativeOp(op, a, binary.Left)
			b = binary.Right
		} else {
			break
		}
	}

	// "a op b" => "a op b"
	// "(a op b) op c" => "(a op b) op c"
	return Expr{Loc: a.Loc, Data: &EBinary{Op: op, Left: a, Right: b}}
}
// JoinWithComma combines two expressions with the comma operator. Either
// side may be a nil expression, in which case the other side is returned.
func JoinWithComma(a Expr, b Expr) Expr {
	switch {
	case a.Data == nil:
		return b
	case b.Data == nil:
		return a
	default:
		return Expr{Loc: a.Loc, Data: &EBinary{Op: BinOpComma, Left: a, Right: b}}
	}
}
// JoinAllWithComma folds all expressions into one comma expression. The
// result is a nil expression when the slice is empty.
func JoinAllWithComma(all []Expr) Expr {
	var joined Expr
	for _, next := range all {
		joined = JoinWithComma(joined, next)
	}
	return joined
}
// ConvertBindingToExpr converts a binding pattern (as used in declarations
// and destructuring) into the equivalent assignment-target expression. The
// optional "wrapIdentifier" callback, when non-nil, is used to construct the
// expression for each identifier in the pattern.
func ConvertBindingToExpr(binding Binding, wrapIdentifier func(logger.Loc, ast.Ref) Expr) Expr {
	loc := binding.Loc

	switch b := binding.Data.(type) {
	case *BMissing:
		// An elision such as the hole in "[, x]"
		return Expr{Loc: loc, Data: &EMissing{}}

	case *BIdentifier:
		if wrapIdentifier != nil {
			return wrapIdentifier(loc, b.Ref)
		}
		return Expr{Loc: loc, Data: &EIdentifier{Ref: b.Ref}}

	case *BArray:
		exprs := make([]Expr, len(b.Items))
		for i, item := range b.Items {
			expr := ConvertBindingToExpr(item.Binding, wrapIdentifier)
			if b.HasSpread && i+1 == len(b.Items) {
				// A rest element ("...x") is only valid as the last item
				expr = Expr{Loc: expr.Loc, Data: &ESpread{Value: expr}}
			} else if item.DefaultValueOrNil.Data != nil {
				// A default value such as "[x = 1]" becomes an assignment
				expr = Assign(expr, item.DefaultValueOrNil)
			}
			exprs[i] = expr
		}
		return Expr{Loc: loc, Data: &EArray{
			Items:        exprs,
			IsSingleLine: b.IsSingleLine,
		}}

	case *BObject:
		properties := make([]Property, len(b.Properties))
		for i, property := range b.Properties {
			value := ConvertBindingToExpr(property.Value, wrapIdentifier)
			kind := PropertyField
			if property.IsSpread {
				kind = PropertySpread
			}
			var flags PropertyFlags
			if property.IsComputed {
				flags |= PropertyIsComputed
			}
			properties[i] = Property{
				Kind:             kind,
				Flags:            flags,
				Key:              property.Key,
				ValueOrNil:       value,
				InitializerOrNil: property.DefaultValueOrNil,
			}
		}
		return Expr{Loc: loc, Data: &EObject{
			Properties:   properties,
			IsSingleLine: b.IsSingleLine,
		}}

	default:
		// All binding kinds are handled above
		panic("Internal error")
	}
}
// This will return a nil expression if the expression can be totally removed.
//
// SimplifyUnusedExpr assumes the result of "expr" is unused and removes as
// much of it as possible while preserving all side effects (getters,
// iteration, "ToString" coercions, exceptions, short-circuiting, etc.).
//
// This function intentionally avoids mutating the input AST so it can be
// called after the AST has been frozen (i.e. after parsing ends).
func (ctx HelperContext) SimplifyUnusedExpr(expr Expr, unsupportedFeatures compat.JSFeature) Expr {
	switch e := expr.Data.(type) {
	case *EAnnotation:
		// Drop annotated expressions that were marked as removable when unused
		if e.Flags.Has(CanBeRemovedIfUnusedFlag) {
			return Expr{}
		}

	case *EInlinedEnum:
		return ctx.SimplifyUnusedExpr(e.Value, unsupportedFeatures)

	case *ENull, *EUndefined, *EMissing, *EBoolean, *ENumber, *EBigInt,
		*EString, *EThis, *ERegExp, *EFunction, *EArrow, *EImportMeta:
		// These have no side effects and can always be removed
		return Expr{}

	case *EDot:
		// Only remove property accesses that were explicitly marked as
		// removable (accesses can otherwise trigger getters)
		if e.CanBeRemovedIfUnused {
			return Expr{}
		}

	case *EIdentifier:
		if e.MustKeepDueToWithStmt {
			break
		}
		// Only remove identifiers that are bound or marked as removable
		// (referencing an unbound identifier can throw)
		if e.CanBeRemovedIfUnused || !ctx.isUnbound(e.Ref) {
			return Expr{}
		}

	case *ETemplate:
		if e.TagOrNil.Data == nil {
			var comma Expr
			var templateLoc logger.Loc
			var template *ETemplate
			for _, part := range e.Parts {
				// If we know this value is some kind of primitive, then we know that
				// "ToString" has no side effects and can be avoided.
				if KnownPrimitiveType(part.Value.Data) != PrimitiveUnknown {
					if template != nil {
						comma = JoinWithComma(comma, Expr{Loc: templateLoc, Data: template})
						template = nil
					}
					comma = JoinWithComma(comma, ctx.SimplifyUnusedExpr(part.Value, unsupportedFeatures))
					continue
				}

				// Make sure "ToString" is still evaluated on the value. We can't use
				// string addition here because that may evaluate "ValueOf" instead.
				if template == nil {
					template = &ETemplate{}
					templateLoc = part.Value.Loc
				}
				template.Parts = append(template.Parts, TemplatePart{Value: part.Value})
			}
			if template != nil {
				comma = JoinWithComma(comma, Expr{Loc: templateLoc, Data: template})
			}
			return comma
		} else if e.CanBeUnwrappedIfUnused {
			// If the function call was annotated as being able to be removed if the
			// result is unused, then we can remove it and just keep the arguments.
			// Note that there are no implicit "ToString" operations for tagged
			// template literals.
			var comma Expr
			for _, part := range e.Parts {
				comma = JoinWithComma(comma, ctx.SimplifyUnusedExpr(part.Value, unsupportedFeatures))
			}
			return comma
		}

	case *EArray:
		// Arrays with "..." spread expressions can't be unwrapped because the
		// "..." triggers code evaluation via iterators. In that case, just trim
		// the other items instead and leave the array expression there.
		for _, spread := range e.Items {
			if _, ok := spread.Data.(*ESpread); ok {
				items := make([]Expr, 0, len(e.Items))
				for _, item := range e.Items {
					item = ctx.SimplifyUnusedExpr(item, unsupportedFeatures)
					if item.Data != nil {
						items = append(items, item)
					}
				}

				// Don't mutate the original AST
				clone := *e
				clone.Items = items
				return Expr{Loc: expr.Loc, Data: &clone}
			}
		}

		// Otherwise, the array can be completely removed. We only need to keep any
		// array items with side effects. Apply this simplification recursively.
		var result Expr
		for _, item := range e.Items {
			result = JoinWithComma(result, ctx.SimplifyUnusedExpr(item, unsupportedFeatures))
		}
		return result

	case *EObject:
		// Objects with "..." spread expressions can't be unwrapped because the
		// "..." triggers code evaluation via getters. In that case, just trim
		// the other items instead and leave the object expression there.
		for _, spread := range e.Properties {
			if spread.Kind == PropertySpread {
				properties := make([]Property, 0, len(e.Properties))
				for _, property := range e.Properties {
					// Spread properties must always be evaluated
					if property.Kind != PropertySpread {
						value := ctx.SimplifyUnusedExpr(property.ValueOrNil, unsupportedFeatures)
						if value.Data != nil {
							// Keep the value
							property.ValueOrNil = value
						} else if !property.Flags.Has(PropertyIsComputed) {
							// Skip this property if the key doesn't need to be computed
							continue
						} else {
							// Replace values without side effects with "0" because it's short
							property.ValueOrNil.Data = &ENumber{}
						}
					}
					properties = append(properties, property)
				}

				// Don't mutate the original AST
				clone := *e
				clone.Properties = properties
				return Expr{Loc: expr.Loc, Data: &clone}
			}
		}

		// Otherwise, the object can be completely removed. We only need to keep any
		// object properties with side effects. Apply this simplification recursively.
		var result Expr
		for _, property := range e.Properties {
			if property.Flags.Has(PropertyIsComputed) {
				// Make sure "ToString" is still evaluated on the key
				result = JoinWithComma(result, Expr{Loc: property.Key.Loc, Data: &EBinary{
					Op:    BinOpAdd,
					Left:  property.Key,
					Right: Expr{Loc: property.Key.Loc, Data: &EString{}},
				}})
			}
			result = JoinWithComma(result, ctx.SimplifyUnusedExpr(property.ValueOrNil, unsupportedFeatures))
		}
		return result

	case *EIf:
		yes := ctx.SimplifyUnusedExpr(e.Yes, unsupportedFeatures)
		no := ctx.SimplifyUnusedExpr(e.No, unsupportedFeatures)

		// "foo() ? 1 : 2" => "foo()"
		if yes.Data == nil && no.Data == nil {
			return ctx.SimplifyUnusedExpr(e.Test, unsupportedFeatures)
		}

		// "foo() ? 1 : bar()" => "foo() || bar()"
		if yes.Data == nil {
			return JoinWithLeftAssociativeOp(BinOpLogicalOr, e.Test, no)
		}

		// "foo() ? bar() : 2" => "foo() && bar()"
		if no.Data == nil {
			return JoinWithLeftAssociativeOp(BinOpLogicalAnd, e.Test, yes)
		}

		// Rebuild the conditional only if one of the branches was simplified
		if yes != e.Yes || no != e.No {
			return Expr{Loc: expr.Loc, Data: &EIf{Test: e.Test, Yes: yes, No: no}}
		}

	case *EUnary:
		switch e.Op {
		// These operators must not have any type conversions that can execute code
		// such as "toString" or "valueOf". They must also never throw any exceptions.
		case UnOpVoid, UnOpNot:
			return ctx.SimplifyUnusedExpr(e.Value, unsupportedFeatures)

		case UnOpNeg:
			if _, ok := e.Value.Data.(*EBigInt); ok {
				// Consider negated bigints to have no side effects
				return Expr{}
			}

		case UnOpTypeof:
			if _, ok := e.Value.Data.(*EIdentifier); ok && e.WasOriginallyTypeofIdentifier {
				// "typeof x" must not be transformed into if "x" since doing so could
				// cause an exception to be thrown. Instead we can just remove it since
				// "typeof x" is special-cased in the standard to never throw.
				return Expr{}
			}
			return ctx.SimplifyUnusedExpr(e.Value, unsupportedFeatures)
		}

	case *EBinary:
		left := e.Left
		right := e.Right

		switch e.Op {
		// These operators must not have any type conversions that can execute code
		// such as "toString" or "valueOf". They must also never throw any exceptions.
		case BinOpStrictEq, BinOpStrictNe, BinOpComma:
			return JoinWithComma(ctx.SimplifyUnusedExpr(left, unsupportedFeatures), ctx.SimplifyUnusedExpr(right, unsupportedFeatures))

		// We can simplify "==" and "!=" even though they can call "toString" and/or
		// "valueOf" if we can statically determine that the types of both sides are
		// primitives. In that case there won't be any chance for user-defined
		// "toString" and/or "valueOf" to be called.
		case BinOpLooseEq, BinOpLooseNe:
			if MergedKnownPrimitiveTypes(left, right) != PrimitiveUnknown {
				return JoinWithComma(ctx.SimplifyUnusedExpr(left, unsupportedFeatures), ctx.SimplifyUnusedExpr(right, unsupportedFeatures))
			}

		case BinOpLogicalAnd, BinOpLogicalOr, BinOpNullishCoalescing:
			// If this is a boolean logical operation and the result is unused, then
			// we know the left operand will only be used for its boolean value and
			// can be simplified under that assumption
			if e.Op != BinOpNullishCoalescing {
				left = ctx.SimplifyBooleanExpr(left)
			}

			// Preserve short-circuit behavior: the left expression is only unused if
			// the right expression can be completely removed. Otherwise, the left
			// expression is important for the branch.
			right = ctx.SimplifyUnusedExpr(right, unsupportedFeatures)
			if right.Data == nil {
				return ctx.SimplifyUnusedExpr(left, unsupportedFeatures)
			}

			// Try to take advantage of the optional chain operator to shorten code
			if !unsupportedFeatures.Has(compat.OptionalChain) {
				if binary, ok := left.Data.(*EBinary); ok {
					// "a != null && a.b()" => "a?.b()"
					// "a == null || a.b()" => "a?.b()"
					if (binary.Op == BinOpLooseNe && e.Op == BinOpLogicalAnd) || (binary.Op == BinOpLooseEq && e.Op == BinOpLogicalOr) {
						var test Expr
						if _, ok := binary.Right.Data.(*ENull); ok {
							test = binary.Left
						} else if _, ok := binary.Left.Data.(*ENull); ok {
							test = binary.Right
						}

						// Note: Technically unbound identifiers can refer to a getter on
						// the global object and that getter can have side effects that can
						// be observed if we run that getter once instead of twice. But this
						// seems like terrible coding practice and very unlikely to come up
						// in real software, so we deliberately ignore this possibility and
						// optimize for size instead of for this obscure edge case.
						//
						// If this is ever changed, then we must also pessimize the lowering
						// of "foo?.bar" to save the value of "foo" to ensure that it's only
						// evaluated once. Specifically "foo?.bar" would have to expand to:
						//
						//   var _a;
						//   (_a = foo) == null ? void 0 : _a.bar;
						//
						// instead of:
						//
						//   foo == null ? void 0 : foo.bar;
						//
						// Babel does the first one while TypeScript does the second one.
						// Since TypeScript doesn't handle this extreme edge case and
						// TypeScript is very widely used, I think it's fine for us to not
						// handle this edge case either.
						if id, ok := test.Data.(*EIdentifier); ok && !id.MustKeepDueToWithStmt && TryToInsertOptionalChain(test, right) {
							return right
						}
					}
				}
			}

		case BinOpAdd:
			if result, isStringAddition := simplifyUnusedStringAdditionChain(expr); isStringAddition {
				return result
			}
		}

		// Rebuild the binary expression only if an operand was simplified
		if left != e.Left || right != e.Right {
			return Expr{Loc: expr.Loc, Data: &EBinary{Op: e.Op, Left: left, Right: right}}
		}

	case *ECall:
		// A call that has been marked "__PURE__" can be removed if all arguments
		// can be removed. The annotation causes us to ignore the target.
		if e.CanBeUnwrappedIfUnused {
			var result Expr
			for _, arg := range e.Args {
				if _, ok := arg.Data.(*ESpread); ok {
					// Keep spread arguments by wrapping them in an array literal
					// ("arg" is a loop copy, so the original AST isn't mutated)
					arg.Data = &EArray{Items: []Expr{arg}, IsSingleLine: true}
				}
				result = JoinWithComma(result, ctx.SimplifyUnusedExpr(arg, unsupportedFeatures))
			}
			return result
		}

		// Attempt to shorten IIFEs
		if len(e.Args) == 0 {
			switch target := e.Target.Data.(type) {
			case *EFunction:
				if len(target.Fn.Args) != 0 {
					break
				}

				// Just delete "(function() {})()" completely
				if len(target.Fn.Body.Block.Stmts) == 0 {
					return Expr{}
				}

			case *EArrow:
				if len(target.Args) != 0 {
					break
				}

				// Just delete "(() => {})()" completely
				if len(target.Body.Block.Stmts) == 0 {
					return Expr{}
				}

				if len(target.Body.Block.Stmts) == 1 {
					switch s := target.Body.Block.Stmts[0].Data.(type) {
					case *SExpr:
						if !target.IsAsync {
							// Replace "(() => { foo() })()" with "foo()"
							return s.Value
						} else {
							// Replace "(async () => { foo() })()" with "(async () => foo())()"
							//
							// NOTE(review): "clone" still shares the "Stmts" backing
							// array with the original arrow, so this assignment also
							// rewrites the original statement — confirm this is intended
							// given the "don't mutate the frozen AST" convention above.
							clone := *target
							clone.Body.Block.Stmts[0].Data = &SReturn{ValueOrNil: s.Value}
							clone.PreferExpr = true
							return Expr{Loc: expr.Loc, Data: &ECall{Target: Expr{Loc: e.Target.Loc, Data: &clone}}}
						}

					case *SReturn:
						if !target.IsAsync {
							// Replace "(() => foo())()" with "foo()"
							return s.ValueOrNil
						}
					}
				}
			}
		}

	case *ENew:
		// A constructor call that has been marked "__PURE__" can be removed if all
		// arguments can be removed. The annotation causes us to ignore the target.
		if e.CanBeUnwrappedIfUnused {
			var result Expr
			for _, arg := range e.Args {
				if _, ok := arg.Data.(*ESpread); ok {
					// Same spread handling as the "ECall" case above
					arg.Data = &EArray{Items: []Expr{arg}, IsSingleLine: true}
				}
				result = JoinWithComma(result, ctx.SimplifyUnusedExpr(arg, unsupportedFeatures))
			}
			return result
		}
	}

	return expr
}
// simplifyUnusedStringAdditionChain simplifies an unused "+" chain that is
// known to produce a string by emptying out the string literals, since only
// the implicit "ToString" coercions need to be preserved. The second return
// value reports whether the chain is known to be a string addition.
//
// This function intentionally avoids mutating the input AST so it can be
// called after the AST has been frozen (i.e. after parsing ends).
func simplifyUnusedStringAdditionChain(expr Expr) (Expr, bool) {
	switch e := expr.Data.(type) {
	case *EString:
		// "'x' + y" => "'' + y"
		return Expr{Loc: expr.Loc, Data: &EString{}}, true

	case *EBinary:
		if e.Op == BinOpAdd {
			left, leftIsStringAddition := simplifyUnusedStringAdditionChain(e.Left)

			if right, rightIsString := e.Right.Data.(*EString); rightIsString {
				// "('' + x) + 'y'" => "'' + x"
				if leftIsStringAddition {
					return left, true
				}

				// "x + 'y'" => "x + ''"
				if !leftIsStringAddition && len(right.Value) > 0 {
					return Expr{Loc: expr.Loc, Data: &EBinary{
						Op:    BinOpAdd,
						Left:  left,
						Right: Expr{Loc: e.Right.Loc, Data: &EString{}},
					}}, true
				}
			}

			// Don't mutate the original AST (this only replaces the Data
			// pointer on the local "expr" copy)
			if left != e.Left {
				expr.Data = &EBinary{Op: BinOpAdd, Left: left, Right: e.Right}
			}
			return expr, leftIsStringAddition
		}
	}

	return expr, false
}
func ToInt32(f float64) int32 {
// The easy way
i := int32(f)
if float64(i) == f {
return i
}
// Special-case non-finite numbers (casting them is unspecified behavior in Go)
if math.IsNaN(f) || math.IsInf(f, 0) {
return 0
}
// The hard way
i = int32(uint32(math.Mod(math.Abs(f), 4294967296)))
if math.Signbit(f) {
return -i
}
return i
}
// ToUint32 implements JavaScript's "ToUint32" operation, which produces the
// same 32-bit pattern as "ToInt32" reinterpreted as unsigned.
func ToUint32(f float64) uint32 {
	return uint32(ToInt32(f))
}
// If this returns true, we know the result can't be NaN: the expression is
// guaranteed to evaluate to a 32-bit integer value.
func isInt32OrUint32(data E) bool {
	switch e := data.(type) {
	case *EBinary:
		// ">>>" is the only bitwise operator that can't return a bigint
		// (because it throws an exception on bigints instead)
		if e.Op == BinOpUShr {
			return true
		}
		if e.Op == BinOpLogicalOr || e.Op == BinOpLogicalAnd {
			return isInt32OrUint32(e.Left.Data) && isInt32OrUint32(e.Right.Data)
		}

	case *EIf:
		// Both branches must qualify
		return isInt32OrUint32(e.Yes.Data) && isInt32OrUint32(e.No.Data)
	}
	return false
}
// ToNumberWithoutSideEffects returns the result of JavaScript's "ToNumber"
// operation on a constant expression if it can be computed statically without
// side effects. The second return value reports success.
func ToNumberWithoutSideEffects(data E) (float64, bool) {
	switch e := data.(type) {
	case *EAnnotation:
		return ToNumberWithoutSideEffects(e.Value.Data)

	case *EInlinedEnum:
		return ToNumberWithoutSideEffects(e.Value.Data)

	case *ENull:
		// "+null" => "0"
		return 0, true

	case *EUndefined, *ERegExp:
		// "+undefined" => "NaN", and regexp objects also coerce to NaN
		return math.NaN(), true

	case *EArray:
		if len(e.Items) == 0 {
			// "+[]" => "0"
			return 0, true
		}

	case *EObject:
		if len(e.Properties) == 0 {
			// "+{}" => "NaN"
			return math.NaN(), true
		}

	case *EBoolean:
		if e.Value {
			return 1, true
		} else {
			return 0, true
		}

	case *ENumber:
		return e.Value, true

	case *EString:
		// "+''" => "0"
		if len(e.Value) == 0 {
			return 0, true
		}

		// "+'1'" => "1"
		if num, ok := StringToEquivalentNumberValue(e.Value); ok {
			return num, true
		}
	}

	return 0, false
}
// ToStringWithoutSideEffects returns the result of JavaScript's "ToString"
// operation on a constant expression if it can be computed statically without
// side effects. The second return value reports success.
func ToStringWithoutSideEffects(data E) (string, bool) {
	switch e := data.(type) {
	case *ENull:
		return "null", true

	case *EUndefined:
		return "undefined", true

	case *EBoolean:
		if e.Value {
			return "true", true
		} else {
			return "false", true
		}

	case *EBigInt:
		// Only do this if there is no radix (a leading "0" may indicate a
		// non-decimal prefix such as "0x" — presumably why it's skipped here)
		if len(e.Value) < 2 || e.Value[0] != '0' {
			return e.Value, true
		}

	case *ENumber:
		if str, ok := TryToStringOnNumberSafely(e.Value, 10); ok {
			return str, true
		}

	case *ERegExp:
		return e.Value, true

	case *EDot:
		// This is dumb but some JavaScript obfuscators use this to generate string literals
		if e.Name == "constructor" {
			switch e.Target.Data.(type) {
			case *EString:
				// "'x'.constructor" stringifies to the String constructor
				return "function String() { [native code] }", true

			case *ERegExp:
				// "/x/.constructor" stringifies to the RegExp constructor
				return "function RegExp() { [native code] }", true
			}
		}
	}

	return "", false
}
// extractNumericValue returns the numeric literal inside this expression,
// looking through annotation and inlined enum wrappers.
func extractNumericValue(data E) (float64, bool) {
	for {
		switch e := data.(type) {
		case *EAnnotation:
			data = e.Value.Data
		case *EInlinedEnum:
			data = e.Value.Data
		case *ENumber:
			return e.Value, true
		default:
			return 0, false
		}
	}
}
// extractNumericValues returns both numeric literal values when both sides
// are numeric literals.
func extractNumericValues(left Expr, right Expr) (float64, float64, bool) {
	a, okLeft := extractNumericValue(left.Data)
	if !okLeft {
		return 0, 0, false
	}
	b, okRight := extractNumericValue(right.Data)
	if !okRight {
		return 0, 0, false
	}
	return a, b, true
}
// extractStringValue returns the UTF-16 string literal inside this
// expression, looking through annotation and inlined enum wrappers.
func extractStringValue(data E) ([]uint16, bool) {
	for {
		switch e := data.(type) {
		case *EAnnotation:
			data = e.Value.Data
		case *EInlinedEnum:
			data = e.Value.Data
		case *EString:
			return e.Value, true
		default:
			return nil, false
		}
	}
}
// extractStringValues returns both string literal values when both sides
// are string literals.
func extractStringValues(left Expr, right Expr) ([]uint16, []uint16, bool) {
	a, okLeft := extractStringValue(left.Data)
	if !okLeft {
		return nil, nil, false
	}
	b, okRight := extractStringValue(right.Data)
	if !okRight {
		return nil, nil, false
	}
	return a, b, true
}
// stringCompareUCS2 lexicographically compares two UTF-16 code unit
// sequences. It returns a negative number when a < b, zero when they are
// equal, and a positive number when a > b.
func stringCompareUCS2(a []uint16, b []uint16) int {
	// Compare corresponding code units until one differs
	for i := 0; i < len(a) && i < len(b); i++ {
		if delta := int(a[i]) - int(b[i]); delta != 0 {
			return delta
		}
	}

	// All shared code units match, so the shorter sequence sorts first
	return len(a) - len(b)
}
// approximatePrintedIntCharCount estimates how many characters this integer
// value will take up when printed: one digit plus one more per power of ten
// in the magnitude, plus a leading minus sign for negative values.
func approximatePrintedIntCharCount(intValue float64) int {
	digits := 1 + int(math.Max(0, math.Floor(math.Log10(math.Abs(intValue)))))
	if intValue < 0 {
		digits++ // account for the "-" sign
	}
	return digits
}
func ShouldFoldBinaryOperatorWhenMinifying(binary *EBinary) bool {
switch binary.Op {
case
// Equality tests should always result in smaller code when folded
BinOpLooseEq,
BinOpLooseNe,
BinOpStrictEq,
BinOpStrictNe,
// Minification always folds right signed shift operations since they are
// unlikely to result in larger output. Note: ">>>" could result in
// bigger output such as "-1 >>> 0" becoming "4294967295".
BinOpShr,
// Minification always folds the following bitwise operations since they
// are unlikely to result in larger output.
BinOpBitwiseAnd,
BinOpBitwiseOr,
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | true |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/js_ast/js_ident.go | internal/js_ast/js_ident.go | package js_ast
import (
"strings"
"unicode"
"unicode/utf8"
)
// IsIdentifier reports whether the string is a valid JavaScript identifier:
// it must be non-empty, begin with an identifier start character, and
// continue with identifier continue characters.
func IsIdentifier(text string) bool {
	if text == "" {
		return false
	}
	for i, r := range text {
		if i == 0 {
			if !IsIdentifierStart(r) {
				return false
			}
			continue
		}
		if !IsIdentifierContinue(r) {
			return false
		}
	}
	return true
}
// IsIdentifierES5AndESNext is like IsIdentifier but uses the character
// tables that are valid in both ES5 and ESNext.
func IsIdentifierES5AndESNext(text string) bool {
	if text == "" {
		return false
	}
	for i, r := range text {
		if i == 0 {
			if !IsIdentifierStartES5AndESNext(r) {
				return false
			}
			continue
		}
		if !IsIdentifierContinueES5AndESNext(r) {
			return false
		}
	}
	return true
}
// ForceValidIdentifier turns arbitrary text into a valid identifier by
// replacing each invalid code point with "_". The prefix (e.g. "#" for
// private identifiers) is written through unchanged.
func ForceValidIdentifier(prefix string, text string) string {
	var sb strings.Builder
	if prefix != "" {
		sb.WriteString(prefix)
	}

	// The first code point must be an identifier start character. Note that
	// this decode happens even for empty input, producing a single "_".
	first, width := utf8.DecodeRuneInString(text)
	text = text[width:]
	if !IsIdentifierStart(first) {
		first = '_'
	}
	sb.WriteRune(first)

	// All remaining code points must be identifier continue characters
	for len(text) > 0 {
		c, w := utf8.DecodeRuneInString(text)
		text = text[w:]
		if !IsIdentifierContinue(c) {
			c = '_'
		}
		sb.WriteRune(c)
	}

	return sb.String()
}
// This does "IsIdentifier(UTF16ToString(text))" without any allocations
func IsIdentifierUTF16(text []uint16) bool {
	n := len(text)
	if n == 0 {
		return false
	}
	for i := 0; i < n; i++ {
		isStart := i == 0
		r1 := rune(text[i])
		// Combine a UTF-16 surrogate pair (high surrogate followed by a low
		// surrogate) into a single code point before classifying it
		if r1 >= 0xD800 && r1 <= 0xDBFF && i+1 < n {
			if r2 := rune(text[i+1]); r2 >= 0xDC00 && r2 <= 0xDFFF {
				// Standard surrogate decoding, rearranged so the bias constant
				// folds into one addition
				r1 = (r1 << 10) + r2 + (0x10000 - (0xD800 << 10) - 0xDC00)
				i++ // Skip the low surrogate
			}
		}
		if isStart {
			if !IsIdentifierStart(r1) {
				return false
			}
		} else {
			if !IsIdentifierContinue(r1) {
				return false
			}
		}
	}
	return true
}
// This does "IsIdentifierES5AndESNext(UTF16ToString(text))" without any allocations
func IsIdentifierES5AndESNextUTF16(text []uint16) bool {
	n := len(text)
	if n == 0 {
		return false
	}
	for i := 0; i < n; i++ {
		isStart := i == 0
		r1 := rune(text[i])
		// Combine a UTF-16 surrogate pair into a single code point before
		// classifying it (same decoding as IsIdentifierUTF16 above)
		if r1 >= 0xD800 && r1 <= 0xDBFF && i+1 < n {
			if r2 := rune(text[i+1]); r2 >= 0xDC00 && r2 <= 0xDFFF {
				r1 = (r1 << 10) + r2 + (0x10000 - (0xD800 << 10) - 0xDC00)
				i++ // Skip the low surrogate
			}
		}
		if isStart {
			if !IsIdentifierStartES5AndESNext(r1) {
				return false
			}
		} else {
			if !IsIdentifierContinueES5AndESNext(r1) {
				return false
			}
		}
	}
	return true
}
// IsIdentifierStart reports whether this code point can begin an identifier.
func IsIdentifierStart(codePoint rune) bool {
	// ASCII letters, "_", and "$" are the only valid ASCII start characters
	if (codePoint >= 'a' && codePoint <= 'z') ||
		(codePoint >= 'A' && codePoint <= 'Z') ||
		codePoint == '_' || codePoint == '$' {
		return true
	}
	if codePoint < 0x7F {
		return false
	}

	// Everything else is decided by the Unicode table
	return unicode.Is(idStartES5OrESNext, codePoint)
}
// IsIdentifierContinue reports whether this code point can appear in an
// identifier after the first character.
func IsIdentifierContinue(codePoint rune) bool {
	// ASCII letters, digits, "_", and "$" are the valid ASCII continuations
	if (codePoint >= 'a' && codePoint <= 'z') ||
		(codePoint >= 'A' && codePoint <= 'Z') ||
		(codePoint >= '0' && codePoint <= '9') ||
		codePoint == '_' || codePoint == '$' {
		return true
	}
	if codePoint < 0x7F {
		return false
	}

	// ZWNJ and ZWJ are allowed in identifiers
	if codePoint == 0x200C || codePoint == 0x200D {
		return true
	}

	// Everything else is decided by the Unicode table
	return unicode.Is(idContinueES5OrESNext, codePoint)
}
// IsIdentifierStartES5AndESNext is like IsIdentifierStart but only accepts
// code points that are valid start characters in both ES5 and ESNext.
func IsIdentifierStartES5AndESNext(codePoint rune) bool {
	// ASCII letters, "_", and "$" are the only valid ASCII start characters
	if (codePoint >= 'a' && codePoint <= 'z') ||
		(codePoint >= 'A' && codePoint <= 'Z') ||
		codePoint == '_' || codePoint == '$' {
		return true
	}
	if codePoint < 0x7F {
		return false
	}

	// Everything else is decided by the Unicode table
	return unicode.Is(idStartES5AndESNext, codePoint)
}
// IsIdentifierContinueES5AndESNext is like IsIdentifierContinue but only
// accepts code points that are valid continuations in both ES5 and ESNext.
func IsIdentifierContinueES5AndESNext(codePoint rune) bool {
	// ASCII letters, digits, "_", and "$" are the valid ASCII continuations
	if (codePoint >= 'a' && codePoint <= 'z') ||
		(codePoint >= 'A' && codePoint <= 'Z') ||
		(codePoint >= '0' && codePoint <= '9') ||
		codePoint == '_' || codePoint == '$' {
		return true
	}
	if codePoint < 0x7F {
		return false
	}

	// ZWNJ and ZWJ are allowed in identifiers
	if codePoint == 0x200C || codePoint == 0x200D {
		return true
	}

	// Everything else is decided by the Unicode table
	return unicode.Is(idContinueES5AndESNext, codePoint)
}
// IsWhitespace reports whether this code point counts as JavaScript
// whitespace. See the "White Space Code Points" table in the ECMAScript
// standard. Note that line terminators such as '\n' are NOT whitespace.
func IsWhitespace(codePoint rune) bool {
	switch codePoint {
	case '\t', // character tabulation
		'\v',     // line tabulation
		'\f',     // form feed
		' ',      // space
		'\u00A0', // no-break space

		// Unicode "Space_Separator" code points
		'\u1680', // ogham space mark
		'\u2000', // en quad
		'\u2001', // em quad
		'\u2002', // en space
		'\u2003', // em space
		'\u2004', // three-per-em space
		'\u2005', // four-per-em space
		'\u2006', // six-per-em space
		'\u2007', // figure space
		'\u2008', // punctuation space
		'\u2009', // thin space
		'\u200A', // hair space
		'\u202F', // narrow no-break space
		'\u205F', // medium mathematical space
		'\u3000', // ideographic space

		'\uFEFF': // zero width non-breaking space
		return true
	}
	return false
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/js_ast/unicode.go | internal/js_ast/unicode.go | // This file was automatically generated by gen-unicode-table.js. Do not edit.
package js_ast
import "unicode"
// idStartES5AndESNext lists the code points allowed at the start of an
// identifier under both the ES5 and ESNext definitions. It is consulted by
// IsIdentifierStartES5AndESNext for code points >= 0x7F (ASCII is handled
// inline there, even though the ASCII letter ranges also appear here).
// NOTE(review): generated by gen-unicode-table.js — regenerate rather than
// hand-editing this table.
var idStartES5AndESNext = &unicode.RangeTable{
	LatinOffset: 117,
	R16: []unicode.Range16{
		{Lo: 0x41, Hi: 0x5a, Stride: 1},
		{Lo: 0x61, Hi: 0x7a, Stride: 1},
		{Lo: 0xaa, Hi: 0xaa, Stride: 1},
		{Lo: 0xb5, Hi: 0xb5, Stride: 1},
		{Lo: 0xba, Hi: 0xba, Stride: 1},
		{Lo: 0xc0, Hi: 0xd6, Stride: 1},
		{Lo: 0xd8, Hi: 0xf6, Stride: 1},
		{Lo: 0xf8, Hi: 0x21f, Stride: 1},
		{Lo: 0x222, Hi: 0x233, Stride: 1},
		{Lo: 0x250, Hi: 0x2ad, Stride: 1},
		{Lo: 0x2b0, Hi: 0x2b8, Stride: 1},
		{Lo: 0x2bb, Hi: 0x2c1, Stride: 1},
		{Lo: 0x2d0, Hi: 0x2d1, Stride: 1},
		{Lo: 0x2e0, Hi: 0x2e4, Stride: 1},
		{Lo: 0x2ee, Hi: 0x2ee, Stride: 1},
		{Lo: 0x37a, Hi: 0x37a, Stride: 1},
		{Lo: 0x386, Hi: 0x386, Stride: 1},
		{Lo: 0x388, Hi: 0x38a, Stride: 1},
		{Lo: 0x38c, Hi: 0x38c, Stride: 1},
		{Lo: 0x38e, Hi: 0x3a1, Stride: 1},
		{Lo: 0x3a3, Hi: 0x3ce, Stride: 1},
		{Lo: 0x3d0, Hi: 0x3d7, Stride: 1},
		{Lo: 0x3da, Hi: 0x3f3, Stride: 1},
		{Lo: 0x400, Hi: 0x481, Stride: 1},
		{Lo: 0x48c, Hi: 0x4c4, Stride: 1},
		{Lo: 0x4c7, Hi: 0x4c8, Stride: 1},
		{Lo: 0x4cb, Hi: 0x4cc, Stride: 1},
		{Lo: 0x4d0, Hi: 0x4f5, Stride: 1},
		{Lo: 0x4f8, Hi: 0x4f9, Stride: 1},
		{Lo: 0x531, Hi: 0x556, Stride: 1},
		{Lo: 0x559, Hi: 0x559, Stride: 1},
		{Lo: 0x561, Hi: 0x587, Stride: 1},
		{Lo: 0x5d0, Hi: 0x5ea, Stride: 1},
		{Lo: 0x5f0, Hi: 0x5f2, Stride: 1},
		{Lo: 0x621, Hi: 0x63a, Stride: 1},
		{Lo: 0x640, Hi: 0x64a, Stride: 1},
		{Lo: 0x671, Hi: 0x6d3, Stride: 1},
		{Lo: 0x6d5, Hi: 0x6d5, Stride: 1},
		{Lo: 0x6e5, Hi: 0x6e6, Stride: 1},
		{Lo: 0x6fa, Hi: 0x6fc, Stride: 1},
		{Lo: 0x710, Hi: 0x710, Stride: 1},
		{Lo: 0x712, Hi: 0x72c, Stride: 1},
		{Lo: 0x780, Hi: 0x7a5, Stride: 1},
		{Lo: 0x905, Hi: 0x939, Stride: 1},
		{Lo: 0x93d, Hi: 0x93d, Stride: 1},
		{Lo: 0x950, Hi: 0x950, Stride: 1},
		{Lo: 0x958, Hi: 0x961, Stride: 1},
		{Lo: 0x985, Hi: 0x98c, Stride: 1},
		{Lo: 0x98f, Hi: 0x990, Stride: 1},
		{Lo: 0x993, Hi: 0x9a8, Stride: 1},
		{Lo: 0x9aa, Hi: 0x9b0, Stride: 1},
		{Lo: 0x9b2, Hi: 0x9b2, Stride: 1},
		{Lo: 0x9b6, Hi: 0x9b9, Stride: 1},
		{Lo: 0x9dc, Hi: 0x9dd, Stride: 1},
		{Lo: 0x9df, Hi: 0x9e1, Stride: 1},
		{Lo: 0x9f0, Hi: 0x9f1, Stride: 1},
		{Lo: 0xa05, Hi: 0xa0a, Stride: 1},
		{Lo: 0xa0f, Hi: 0xa10, Stride: 1},
		{Lo: 0xa13, Hi: 0xa28, Stride: 1},
		{Lo: 0xa2a, Hi: 0xa30, Stride: 1},
		{Lo: 0xa32, Hi: 0xa33, Stride: 1},
		{Lo: 0xa35, Hi: 0xa36, Stride: 1},
		{Lo: 0xa38, Hi: 0xa39, Stride: 1},
		{Lo: 0xa59, Hi: 0xa5c, Stride: 1},
		{Lo: 0xa5e, Hi: 0xa5e, Stride: 1},
		{Lo: 0xa72, Hi: 0xa74, Stride: 1},
		{Lo: 0xa85, Hi: 0xa8b, Stride: 1},
		{Lo: 0xa8d, Hi: 0xa8d, Stride: 1},
		{Lo: 0xa8f, Hi: 0xa91, Stride: 1},
		{Lo: 0xa93, Hi: 0xaa8, Stride: 1},
		{Lo: 0xaaa, Hi: 0xab0, Stride: 1},
		{Lo: 0xab2, Hi: 0xab3, Stride: 1},
		{Lo: 0xab5, Hi: 0xab9, Stride: 1},
		{Lo: 0xabd, Hi: 0xabd, Stride: 1},
		{Lo: 0xad0, Hi: 0xad0, Stride: 1},
		{Lo: 0xae0, Hi: 0xae0, Stride: 1},
		{Lo: 0xb05, Hi: 0xb0c, Stride: 1},
		{Lo: 0xb0f, Hi: 0xb10, Stride: 1},
		{Lo: 0xb13, Hi: 0xb28, Stride: 1},
		{Lo: 0xb2a, Hi: 0xb30, Stride: 1},
		{Lo: 0xb32, Hi: 0xb33, Stride: 1},
		{Lo: 0xb36, Hi: 0xb39, Stride: 1},
		{Lo: 0xb3d, Hi: 0xb3d, Stride: 1},
		{Lo: 0xb5c, Hi: 0xb5d, Stride: 1},
		{Lo: 0xb5f, Hi: 0xb61, Stride: 1},
		{Lo: 0xb85, Hi: 0xb8a, Stride: 1},
		{Lo: 0xb8e, Hi: 0xb90, Stride: 1},
		{Lo: 0xb92, Hi: 0xb95, Stride: 1},
		{Lo: 0xb99, Hi: 0xb9a, Stride: 1},
		{Lo: 0xb9c, Hi: 0xb9c, Stride: 1},
		{Lo: 0xb9e, Hi: 0xb9f, Stride: 1},
		{Lo: 0xba3, Hi: 0xba4, Stride: 1},
		{Lo: 0xba8, Hi: 0xbaa, Stride: 1},
		{Lo: 0xbae, Hi: 0xbb5, Stride: 1},
		{Lo: 0xbb7, Hi: 0xbb9, Stride: 1},
		{Lo: 0xc05, Hi: 0xc0c, Stride: 1},
		{Lo: 0xc0e, Hi: 0xc10, Stride: 1},
		{Lo: 0xc12, Hi: 0xc28, Stride: 1},
		{Lo: 0xc2a, Hi: 0xc33, Stride: 1},
		{Lo: 0xc35, Hi: 0xc39, Stride: 1},
		{Lo: 0xc60, Hi: 0xc61, Stride: 1},
		{Lo: 0xc85, Hi: 0xc8c, Stride: 1},
		{Lo: 0xc8e, Hi: 0xc90, Stride: 1},
		{Lo: 0xc92, Hi: 0xca8, Stride: 1},
		{Lo: 0xcaa, Hi: 0xcb3, Stride: 1},
		{Lo: 0xcb5, Hi: 0xcb9, Stride: 1},
		{Lo: 0xcde, Hi: 0xcde, Stride: 1},
		{Lo: 0xce0, Hi: 0xce1, Stride: 1},
		{Lo: 0xd05, Hi: 0xd0c, Stride: 1},
		{Lo: 0xd0e, Hi: 0xd10, Stride: 1},
		{Lo: 0xd12, Hi: 0xd28, Stride: 1},
		{Lo: 0xd2a, Hi: 0xd39, Stride: 1},
		{Lo: 0xd60, Hi: 0xd61, Stride: 1},
		{Lo: 0xd85, Hi: 0xd96, Stride: 1},
		{Lo: 0xd9a, Hi: 0xdb1, Stride: 1},
		{Lo: 0xdb3, Hi: 0xdbb, Stride: 1},
		{Lo: 0xdbd, Hi: 0xdbd, Stride: 1},
		{Lo: 0xdc0, Hi: 0xdc6, Stride: 1},
		{Lo: 0xe01, Hi: 0xe30, Stride: 1},
		{Lo: 0xe32, Hi: 0xe33, Stride: 1},
		{Lo: 0xe40, Hi: 0xe46, Stride: 1},
		{Lo: 0xe81, Hi: 0xe82, Stride: 1},
		{Lo: 0xe84, Hi: 0xe84, Stride: 1},
		{Lo: 0xe87, Hi: 0xe88, Stride: 1},
		{Lo: 0xe8a, Hi: 0xe8a, Stride: 1},
		{Lo: 0xe8d, Hi: 0xe8d, Stride: 1},
		{Lo: 0xe94, Hi: 0xe97, Stride: 1},
		{Lo: 0xe99, Hi: 0xe9f, Stride: 1},
		{Lo: 0xea1, Hi: 0xea3, Stride: 1},
		{Lo: 0xea5, Hi: 0xea5, Stride: 1},
		{Lo: 0xea7, Hi: 0xea7, Stride: 1},
		{Lo: 0xeaa, Hi: 0xeab, Stride: 1},
		{Lo: 0xead, Hi: 0xeb0, Stride: 1},
		{Lo: 0xeb2, Hi: 0xeb3, Stride: 1},
		{Lo: 0xebd, Hi: 0xebd, Stride: 1},
		{Lo: 0xec0, Hi: 0xec4, Stride: 1},
		{Lo: 0xec6, Hi: 0xec6, Stride: 1},
		{Lo: 0xedc, Hi: 0xedd, Stride: 1},
		{Lo: 0xf00, Hi: 0xf00, Stride: 1},
		{Lo: 0xf40, Hi: 0xf47, Stride: 1},
		{Lo: 0xf49, Hi: 0xf6a, Stride: 1},
		{Lo: 0xf88, Hi: 0xf8b, Stride: 1},
	},
	R32: []unicode.Range32{
		{Lo: 0x1000, Hi: 0x1021, Stride: 1},
		{Lo: 0x1023, Hi: 0x1027, Stride: 1},
		{Lo: 0x1029, Hi: 0x102a, Stride: 1},
		{Lo: 0x1050, Hi: 0x1055, Stride: 1},
		{Lo: 0x10a0, Hi: 0x10c5, Stride: 1},
		{Lo: 0x10d0, Hi: 0x10f6, Stride: 1},
		{Lo: 0x1100, Hi: 0x1159, Stride: 1},
		{Lo: 0x115f, Hi: 0x11a2, Stride: 1},
		{Lo: 0x11a8, Hi: 0x11f9, Stride: 1},
		{Lo: 0x1200, Hi: 0x1206, Stride: 1},
		{Lo: 0x1208, Hi: 0x1246, Stride: 1},
		{Lo: 0x1248, Hi: 0x1248, Stride: 1},
		{Lo: 0x124a, Hi: 0x124d, Stride: 1},
		{Lo: 0x1250, Hi: 0x1256, Stride: 1},
		{Lo: 0x1258, Hi: 0x1258, Stride: 1},
		{Lo: 0x125a, Hi: 0x125d, Stride: 1},
		{Lo: 0x1260, Hi: 0x1286, Stride: 1},
		{Lo: 0x1288, Hi: 0x1288, Stride: 1},
		{Lo: 0x128a, Hi: 0x128d, Stride: 1},
		{Lo: 0x1290, Hi: 0x12ae, Stride: 1},
		{Lo: 0x12b0, Hi: 0x12b0, Stride: 1},
		{Lo: 0x12b2, Hi: 0x12b5, Stride: 1},
		{Lo: 0x12b8, Hi: 0x12be, Stride: 1},
		{Lo: 0x12c0, Hi: 0x12c0, Stride: 1},
		{Lo: 0x12c2, Hi: 0x12c5, Stride: 1},
		{Lo: 0x12c8, Hi: 0x12ce, Stride: 1},
		{Lo: 0x12d0, Hi: 0x12d6, Stride: 1},
		{Lo: 0x12d8, Hi: 0x12ee, Stride: 1},
		{Lo: 0x12f0, Hi: 0x130e, Stride: 1},
		{Lo: 0x1310, Hi: 0x1310, Stride: 1},
		{Lo: 0x1312, Hi: 0x1315, Stride: 1},
		{Lo: 0x1318, Hi: 0x131e, Stride: 1},
		{Lo: 0x1320, Hi: 0x1346, Stride: 1},
		{Lo: 0x1348, Hi: 0x135a, Stride: 1},
		{Lo: 0x13a0, Hi: 0x13f4, Stride: 1},
		{Lo: 0x1401, Hi: 0x166c, Stride: 1},
		{Lo: 0x166f, Hi: 0x1676, Stride: 1},
		{Lo: 0x1681, Hi: 0x169a, Stride: 1},
		{Lo: 0x16a0, Hi: 0x16ea, Stride: 1},
		{Lo: 0x1780, Hi: 0x17b3, Stride: 1},
		{Lo: 0x1820, Hi: 0x1877, Stride: 1},
		{Lo: 0x1880, Hi: 0x18a8, Stride: 1},
		{Lo: 0x1e00, Hi: 0x1e9b, Stride: 1},
		{Lo: 0x1ea0, Hi: 0x1ef9, Stride: 1},
		{Lo: 0x1f00, Hi: 0x1f15, Stride: 1},
		{Lo: 0x1f18, Hi: 0x1f1d, Stride: 1},
		{Lo: 0x1f20, Hi: 0x1f45, Stride: 1},
		{Lo: 0x1f48, Hi: 0x1f4d, Stride: 1},
		{Lo: 0x1f50, Hi: 0x1f57, Stride: 1},
		{Lo: 0x1f59, Hi: 0x1f59, Stride: 1},
		{Lo: 0x1f5b, Hi: 0x1f5b, Stride: 1},
		{Lo: 0x1f5d, Hi: 0x1f5d, Stride: 1},
		{Lo: 0x1f5f, Hi: 0x1f7d, Stride: 1},
		{Lo: 0x1f80, Hi: 0x1fb4, Stride: 1},
		{Lo: 0x1fb6, Hi: 0x1fbc, Stride: 1},
		{Lo: 0x1fbe, Hi: 0x1fbe, Stride: 1},
		{Lo: 0x1fc2, Hi: 0x1fc4, Stride: 1},
		{Lo: 0x1fc6, Hi: 0x1fcc, Stride: 1},
		{Lo: 0x1fd0, Hi: 0x1fd3, Stride: 1},
		{Lo: 0x1fd6, Hi: 0x1fdb, Stride: 1},
		{Lo: 0x1fe0, Hi: 0x1fec, Stride: 1},
		{Lo: 0x1ff2, Hi: 0x1ff4, Stride: 1},
		{Lo: 0x1ff6, Hi: 0x1ffc, Stride: 1},
		{Lo: 0x207f, Hi: 0x207f, Stride: 1},
		{Lo: 0x2102, Hi: 0x2102, Stride: 1},
		{Lo: 0x2107, Hi: 0x2107, Stride: 1},
		{Lo: 0x210a, Hi: 0x2113, Stride: 1},
		{Lo: 0x2115, Hi: 0x2115, Stride: 1},
		{Lo: 0x2119, Hi: 0x211d, Stride: 1},
		{Lo: 0x2124, Hi: 0x2124, Stride: 1},
		{Lo: 0x2126, Hi: 0x2126, Stride: 1},
		{Lo: 0x2128, Hi: 0x2128, Stride: 1},
		{Lo: 0x212a, Hi: 0x212d, Stride: 1},
		{Lo: 0x212f, Hi: 0x2131, Stride: 1},
		{Lo: 0x2133, Hi: 0x2139, Stride: 1},
		{Lo: 0x3005, Hi: 0x3006, Stride: 1},
		{Lo: 0x3031, Hi: 0x3035, Stride: 1},
		{Lo: 0x3041, Hi: 0x3094, Stride: 1},
		{Lo: 0x309d, Hi: 0x309e, Stride: 1},
		{Lo: 0x30a1, Hi: 0x30fa, Stride: 1},
		{Lo: 0x30fc, Hi: 0x30fe, Stride: 1},
		{Lo: 0x3105, Hi: 0x312c, Stride: 1},
		{Lo: 0x3131, Hi: 0x318e, Stride: 1},
		{Lo: 0x31a0, Hi: 0x31b7, Stride: 1},
		{Lo: 0x3400, Hi: 0x4db5, Stride: 1},
		{Lo: 0x4e00, Hi: 0x9fa5, Stride: 1},
		{Lo: 0xa000, Hi: 0xa48c, Stride: 1},
		{Lo: 0xac00, Hi: 0xd7a3, Stride: 1},
		{Lo: 0xf900, Hi: 0xfa2d, Stride: 1},
		{Lo: 0xfb00, Hi: 0xfb06, Stride: 1},
		{Lo: 0xfb13, Hi: 0xfb17, Stride: 1},
		{Lo: 0xfb1d, Hi: 0xfb1d, Stride: 1},
		{Lo: 0xfb1f, Hi: 0xfb28, Stride: 1},
		{Lo: 0xfb2a, Hi: 0xfb36, Stride: 1},
		{Lo: 0xfb38, Hi: 0xfb3c, Stride: 1},
		{Lo: 0xfb3e, Hi: 0xfb3e, Stride: 1},
		{Lo: 0xfb40, Hi: 0xfb41, Stride: 1},
		{Lo: 0xfb43, Hi: 0xfb44, Stride: 1},
		{Lo: 0xfb46, Hi: 0xfbb1, Stride: 1},
		{Lo: 0xfbd3, Hi: 0xfd3d, Stride: 1},
		{Lo: 0xfd50, Hi: 0xfd8f, Stride: 1},
		{Lo: 0xfd92, Hi: 0xfdc7, Stride: 1},
		{Lo: 0xfdf0, Hi: 0xfdfb, Stride: 1},
		{Lo: 0xfe70, Hi: 0xfe72, Stride: 1},
		{Lo: 0xfe74, Hi: 0xfe74, Stride: 1},
		{Lo: 0xfe76, Hi: 0xfefc, Stride: 1},
		{Lo: 0xff21, Hi: 0xff3a, Stride: 1},
		{Lo: 0xff41, Hi: 0xff5a, Stride: 1},
		{Lo: 0xff66, Hi: 0xffbe, Stride: 1},
		{Lo: 0xffc2, Hi: 0xffc7, Stride: 1},
		{Lo: 0xffca, Hi: 0xffcf, Stride: 1},
		{Lo: 0xffd2, Hi: 0xffd7, Stride: 1},
		{Lo: 0xffda, Hi: 0xffdc, Stride: 1},
	},
}
// idContinueES5AndESNext lists the code points allowed after the first
// character of an identifier under both the ES5 and ESNext definitions. It
// is consulted by IsIdentifierContinueES5AndESNext for code points >= 0x7F
// (ASCII, ZWNJ, and ZWJ are handled inline there).
// NOTE(review): generated by gen-unicode-table.js — regenerate rather than
// hand-editing this table.
var idContinueES5AndESNext = &unicode.RangeTable{
	LatinOffset: 128,
	R16: []unicode.Range16{
		{Lo: 0x30, Hi: 0x39, Stride: 1},
		{Lo: 0x41, Hi: 0x5a, Stride: 1},
		{Lo: 0x5f, Hi: 0x5f, Stride: 1},
		{Lo: 0x61, Hi: 0x7a, Stride: 1},
		{Lo: 0xaa, Hi: 0xaa, Stride: 1},
		{Lo: 0xb5, Hi: 0xb5, Stride: 1},
		{Lo: 0xba, Hi: 0xba, Stride: 1},
		{Lo: 0xc0, Hi: 0xd6, Stride: 1},
		{Lo: 0xd8, Hi: 0xf6, Stride: 1},
		{Lo: 0xf8, Hi: 0x21f, Stride: 1},
		{Lo: 0x222, Hi: 0x233, Stride: 1},
		{Lo: 0x250, Hi: 0x2ad, Stride: 1},
		{Lo: 0x2b0, Hi: 0x2b8, Stride: 1},
		{Lo: 0x2bb, Hi: 0x2c1, Stride: 1},
		{Lo: 0x2d0, Hi: 0x2d1, Stride: 1},
		{Lo: 0x2e0, Hi: 0x2e4, Stride: 1},
		{Lo: 0x2ee, Hi: 0x2ee, Stride: 1},
		{Lo: 0x300, Hi: 0x34e, Stride: 1},
		{Lo: 0x360, Hi: 0x362, Stride: 1},
		{Lo: 0x37a, Hi: 0x37a, Stride: 1},
		{Lo: 0x386, Hi: 0x386, Stride: 1},
		{Lo: 0x388, Hi: 0x38a, Stride: 1},
		{Lo: 0x38c, Hi: 0x38c, Stride: 1},
		{Lo: 0x38e, Hi: 0x3a1, Stride: 1},
		{Lo: 0x3a3, Hi: 0x3ce, Stride: 1},
		{Lo: 0x3d0, Hi: 0x3d7, Stride: 1},
		{Lo: 0x3da, Hi: 0x3f3, Stride: 1},
		{Lo: 0x400, Hi: 0x481, Stride: 1},
		{Lo: 0x483, Hi: 0x486, Stride: 1},
		{Lo: 0x48c, Hi: 0x4c4, Stride: 1},
		{Lo: 0x4c7, Hi: 0x4c8, Stride: 1},
		{Lo: 0x4cb, Hi: 0x4cc, Stride: 1},
		{Lo: 0x4d0, Hi: 0x4f5, Stride: 1},
		{Lo: 0x4f8, Hi: 0x4f9, Stride: 1},
		{Lo: 0x531, Hi: 0x556, Stride: 1},
		{Lo: 0x559, Hi: 0x559, Stride: 1},
		{Lo: 0x561, Hi: 0x587, Stride: 1},
		{Lo: 0x591, Hi: 0x5a1, Stride: 1},
		{Lo: 0x5a3, Hi: 0x5b9, Stride: 1},
		{Lo: 0x5bb, Hi: 0x5bd, Stride: 1},
		{Lo: 0x5bf, Hi: 0x5bf, Stride: 1},
		{Lo: 0x5c1, Hi: 0x5c2, Stride: 1},
		{Lo: 0x5c4, Hi: 0x5c4, Stride: 1},
		{Lo: 0x5d0, Hi: 0x5ea, Stride: 1},
		{Lo: 0x5f0, Hi: 0x5f2, Stride: 1},
		{Lo: 0x621, Hi: 0x63a, Stride: 1},
		{Lo: 0x640, Hi: 0x655, Stride: 1},
		{Lo: 0x660, Hi: 0x669, Stride: 1},
		{Lo: 0x670, Hi: 0x6d3, Stride: 1},
		{Lo: 0x6d5, Hi: 0x6dc, Stride: 1},
		{Lo: 0x6df, Hi: 0x6e8, Stride: 1},
		{Lo: 0x6ea, Hi: 0x6ed, Stride: 1},
		{Lo: 0x6f0, Hi: 0x6fc, Stride: 1},
		{Lo: 0x710, Hi: 0x72c, Stride: 1},
		{Lo: 0x730, Hi: 0x74a, Stride: 1},
		{Lo: 0x780, Hi: 0x7b0, Stride: 1},
		{Lo: 0x901, Hi: 0x903, Stride: 1},
		{Lo: 0x905, Hi: 0x939, Stride: 1},
		{Lo: 0x93c, Hi: 0x94d, Stride: 1},
		{Lo: 0x950, Hi: 0x954, Stride: 1},
		{Lo: 0x958, Hi: 0x963, Stride: 1},
		{Lo: 0x966, Hi: 0x96f, Stride: 1},
		{Lo: 0x981, Hi: 0x983, Stride: 1},
		{Lo: 0x985, Hi: 0x98c, Stride: 1},
		{Lo: 0x98f, Hi: 0x990, Stride: 1},
		{Lo: 0x993, Hi: 0x9a8, Stride: 1},
		{Lo: 0x9aa, Hi: 0x9b0, Stride: 1},
		{Lo: 0x9b2, Hi: 0x9b2, Stride: 1},
		{Lo: 0x9b6, Hi: 0x9b9, Stride: 1},
		{Lo: 0x9bc, Hi: 0x9bc, Stride: 1},
		{Lo: 0x9be, Hi: 0x9c4, Stride: 1},
		{Lo: 0x9c7, Hi: 0x9c8, Stride: 1},
		{Lo: 0x9cb, Hi: 0x9cd, Stride: 1},
		{Lo: 0x9d7, Hi: 0x9d7, Stride: 1},
		{Lo: 0x9dc, Hi: 0x9dd, Stride: 1},
		{Lo: 0x9df, Hi: 0x9e3, Stride: 1},
		{Lo: 0x9e6, Hi: 0x9f1, Stride: 1},
		{Lo: 0xa02, Hi: 0xa02, Stride: 1},
		{Lo: 0xa05, Hi: 0xa0a, Stride: 1},
		{Lo: 0xa0f, Hi: 0xa10, Stride: 1},
		{Lo: 0xa13, Hi: 0xa28, Stride: 1},
		{Lo: 0xa2a, Hi: 0xa30, Stride: 1},
		{Lo: 0xa32, Hi: 0xa33, Stride: 1},
		{Lo: 0xa35, Hi: 0xa36, Stride: 1},
		{Lo: 0xa38, Hi: 0xa39, Stride: 1},
		{Lo: 0xa3c, Hi: 0xa3c, Stride: 1},
		{Lo: 0xa3e, Hi: 0xa42, Stride: 1},
		{Lo: 0xa47, Hi: 0xa48, Stride: 1},
		{Lo: 0xa4b, Hi: 0xa4d, Stride: 1},
		{Lo: 0xa59, Hi: 0xa5c, Stride: 1},
		{Lo: 0xa5e, Hi: 0xa5e, Stride: 1},
		{Lo: 0xa66, Hi: 0xa74, Stride: 1},
		{Lo: 0xa81, Hi: 0xa83, Stride: 1},
		{Lo: 0xa85, Hi: 0xa8b, Stride: 1},
		{Lo: 0xa8d, Hi: 0xa8d, Stride: 1},
		{Lo: 0xa8f, Hi: 0xa91, Stride: 1},
		{Lo: 0xa93, Hi: 0xaa8, Stride: 1},
		{Lo: 0xaaa, Hi: 0xab0, Stride: 1},
		{Lo: 0xab2, Hi: 0xab3, Stride: 1},
		{Lo: 0xab5, Hi: 0xab9, Stride: 1},
		{Lo: 0xabc, Hi: 0xac5, Stride: 1},
		{Lo: 0xac7, Hi: 0xac9, Stride: 1},
		{Lo: 0xacb, Hi: 0xacd, Stride: 1},
		{Lo: 0xad0, Hi: 0xad0, Stride: 1},
		{Lo: 0xae0, Hi: 0xae0, Stride: 1},
		{Lo: 0xae6, Hi: 0xaef, Stride: 1},
		{Lo: 0xb01, Hi: 0xb03, Stride: 1},
		{Lo: 0xb05, Hi: 0xb0c, Stride: 1},
		{Lo: 0xb0f, Hi: 0xb10, Stride: 1},
		{Lo: 0xb13, Hi: 0xb28, Stride: 1},
		{Lo: 0xb2a, Hi: 0xb30, Stride: 1},
		{Lo: 0xb32, Hi: 0xb33, Stride: 1},
		{Lo: 0xb36, Hi: 0xb39, Stride: 1},
		{Lo: 0xb3c, Hi: 0xb43, Stride: 1},
		{Lo: 0xb47, Hi: 0xb48, Stride: 1},
		{Lo: 0xb4b, Hi: 0xb4d, Stride: 1},
		{Lo: 0xb56, Hi: 0xb57, Stride: 1},
		{Lo: 0xb5c, Hi: 0xb5d, Stride: 1},
		{Lo: 0xb5f, Hi: 0xb61, Stride: 1},
		{Lo: 0xb66, Hi: 0xb6f, Stride: 1},
		{Lo: 0xb82, Hi: 0xb83, Stride: 1},
		{Lo: 0xb85, Hi: 0xb8a, Stride: 1},
		{Lo: 0xb8e, Hi: 0xb90, Stride: 1},
		{Lo: 0xb92, Hi: 0xb95, Stride: 1},
		{Lo: 0xb99, Hi: 0xb9a, Stride: 1},
		{Lo: 0xb9c, Hi: 0xb9c, Stride: 1},
		{Lo: 0xb9e, Hi: 0xb9f, Stride: 1},
		{Lo: 0xba3, Hi: 0xba4, Stride: 1},
		{Lo: 0xba8, Hi: 0xbaa, Stride: 1},
		{Lo: 0xbae, Hi: 0xbb5, Stride: 1},
		{Lo: 0xbb7, Hi: 0xbb9, Stride: 1},
		{Lo: 0xbbe, Hi: 0xbc2, Stride: 1},
		{Lo: 0xbc6, Hi: 0xbc8, Stride: 1},
		{Lo: 0xbca, Hi: 0xbcd, Stride: 1},
		{Lo: 0xbd7, Hi: 0xbd7, Stride: 1},
		{Lo: 0xbe7, Hi: 0xbef, Stride: 1},
		{Lo: 0xc01, Hi: 0xc03, Stride: 1},
		{Lo: 0xc05, Hi: 0xc0c, Stride: 1},
		{Lo: 0xc0e, Hi: 0xc10, Stride: 1},
		{Lo: 0xc12, Hi: 0xc28, Stride: 1},
		{Lo: 0xc2a, Hi: 0xc33, Stride: 1},
		{Lo: 0xc35, Hi: 0xc39, Stride: 1},
		{Lo: 0xc3e, Hi: 0xc44, Stride: 1},
		{Lo: 0xc46, Hi: 0xc48, Stride: 1},
		{Lo: 0xc4a, Hi: 0xc4d, Stride: 1},
		{Lo: 0xc55, Hi: 0xc56, Stride: 1},
		{Lo: 0xc60, Hi: 0xc61, Stride: 1},
		{Lo: 0xc66, Hi: 0xc6f, Stride: 1},
		{Lo: 0xc82, Hi: 0xc83, Stride: 1},
		{Lo: 0xc85, Hi: 0xc8c, Stride: 1},
		{Lo: 0xc8e, Hi: 0xc90, Stride: 1},
		{Lo: 0xc92, Hi: 0xca8, Stride: 1},
		{Lo: 0xcaa, Hi: 0xcb3, Stride: 1},
		{Lo: 0xcb5, Hi: 0xcb9, Stride: 1},
		{Lo: 0xcbe, Hi: 0xcc4, Stride: 1},
		{Lo: 0xcc6, Hi: 0xcc8, Stride: 1},
		{Lo: 0xcca, Hi: 0xccd, Stride: 1},
		{Lo: 0xcd5, Hi: 0xcd6, Stride: 1},
		{Lo: 0xcde, Hi: 0xcde, Stride: 1},
		{Lo: 0xce0, Hi: 0xce1, Stride: 1},
		{Lo: 0xce6, Hi: 0xcef, Stride: 1},
		{Lo: 0xd02, Hi: 0xd03, Stride: 1},
		{Lo: 0xd05, Hi: 0xd0c, Stride: 1},
		{Lo: 0xd0e, Hi: 0xd10, Stride: 1},
		{Lo: 0xd12, Hi: 0xd28, Stride: 1},
		{Lo: 0xd2a, Hi: 0xd39, Stride: 1},
		{Lo: 0xd3e, Hi: 0xd43, Stride: 1},
		{Lo: 0xd46, Hi: 0xd48, Stride: 1},
		{Lo: 0xd4a, Hi: 0xd4d, Stride: 1},
		{Lo: 0xd57, Hi: 0xd57, Stride: 1},
		{Lo: 0xd60, Hi: 0xd61, Stride: 1},
		{Lo: 0xd66, Hi: 0xd6f, Stride: 1},
		{Lo: 0xd82, Hi: 0xd83, Stride: 1},
		{Lo: 0xd85, Hi: 0xd96, Stride: 1},
		{Lo: 0xd9a, Hi: 0xdb1, Stride: 1},
		{Lo: 0xdb3, Hi: 0xdbb, Stride: 1},
		{Lo: 0xdbd, Hi: 0xdbd, Stride: 1},
		{Lo: 0xdc0, Hi: 0xdc6, Stride: 1},
		{Lo: 0xdca, Hi: 0xdca, Stride: 1},
		{Lo: 0xdcf, Hi: 0xdd4, Stride: 1},
		{Lo: 0xdd6, Hi: 0xdd6, Stride: 1},
		{Lo: 0xdd8, Hi: 0xddf, Stride: 1},
		{Lo: 0xdf2, Hi: 0xdf3, Stride: 1},
		{Lo: 0xe01, Hi: 0xe3a, Stride: 1},
		{Lo: 0xe40, Hi: 0xe4e, Stride: 1},
		{Lo: 0xe50, Hi: 0xe59, Stride: 1},
		{Lo: 0xe81, Hi: 0xe82, Stride: 1},
		{Lo: 0xe84, Hi: 0xe84, Stride: 1},
		{Lo: 0xe87, Hi: 0xe88, Stride: 1},
		{Lo: 0xe8a, Hi: 0xe8a, Stride: 1},
		{Lo: 0xe8d, Hi: 0xe8d, Stride: 1},
		{Lo: 0xe94, Hi: 0xe97, Stride: 1},
		{Lo: 0xe99, Hi: 0xe9f, Stride: 1},
		{Lo: 0xea1, Hi: 0xea3, Stride: 1},
		{Lo: 0xea5, Hi: 0xea5, Stride: 1},
		{Lo: 0xea7, Hi: 0xea7, Stride: 1},
		{Lo: 0xeaa, Hi: 0xeab, Stride: 1},
		{Lo: 0xead, Hi: 0xeb9, Stride: 1},
		{Lo: 0xebb, Hi: 0xebd, Stride: 1},
		{Lo: 0xec0, Hi: 0xec4, Stride: 1},
		{Lo: 0xec6, Hi: 0xec6, Stride: 1},
		{Lo: 0xec8, Hi: 0xecd, Stride: 1},
		{Lo: 0xed0, Hi: 0xed9, Stride: 1},
		{Lo: 0xedc, Hi: 0xedd, Stride: 1},
		{Lo: 0xf00, Hi: 0xf00, Stride: 1},
		{Lo: 0xf18, Hi: 0xf19, Stride: 1},
		{Lo: 0xf20, Hi: 0xf29, Stride: 1},
		{Lo: 0xf35, Hi: 0xf35, Stride: 1},
		{Lo: 0xf37, Hi: 0xf37, Stride: 1},
		{Lo: 0xf39, Hi: 0xf39, Stride: 1},
		{Lo: 0xf3e, Hi: 0xf47, Stride: 1},
		{Lo: 0xf49, Hi: 0xf6a, Stride: 1},
		{Lo: 0xf71, Hi: 0xf84, Stride: 1},
		{Lo: 0xf86, Hi: 0xf8b, Stride: 1},
		{Lo: 0xf90, Hi: 0xf97, Stride: 1},
		{Lo: 0xf99, Hi: 0xfbc, Stride: 1},
		{Lo: 0xfc6, Hi: 0xfc6, Stride: 1},
	},
	R32: []unicode.Range32{
		{Lo: 0x1000, Hi: 0x1021, Stride: 1},
		{Lo: 0x1023, Hi: 0x1027, Stride: 1},
		{Lo: 0x1029, Hi: 0x102a, Stride: 1},
		{Lo: 0x102c, Hi: 0x1032, Stride: 1},
		{Lo: 0x1036, Hi: 0x1039, Stride: 1},
		{Lo: 0x1040, Hi: 0x1049, Stride: 1},
		{Lo: 0x1050, Hi: 0x1059, Stride: 1},
		{Lo: 0x10a0, Hi: 0x10c5, Stride: 1},
		{Lo: 0x10d0, Hi: 0x10f6, Stride: 1},
		{Lo: 0x1100, Hi: 0x1159, Stride: 1},
		{Lo: 0x115f, Hi: 0x11a2, Stride: 1},
		{Lo: 0x11a8, Hi: 0x11f9, Stride: 1},
		{Lo: 0x1200, Hi: 0x1206, Stride: 1},
		{Lo: 0x1208, Hi: 0x1246, Stride: 1},
		{Lo: 0x1248, Hi: 0x1248, Stride: 1},
		{Lo: 0x124a, Hi: 0x124d, Stride: 1},
		{Lo: 0x1250, Hi: 0x1256, Stride: 1},
		{Lo: 0x1258, Hi: 0x1258, Stride: 1},
		{Lo: 0x125a, Hi: 0x125d, Stride: 1},
		{Lo: 0x1260, Hi: 0x1286, Stride: 1},
		{Lo: 0x1288, Hi: 0x1288, Stride: 1},
		{Lo: 0x128a, Hi: 0x128d, Stride: 1},
		{Lo: 0x1290, Hi: 0x12ae, Stride: 1},
		{Lo: 0x12b0, Hi: 0x12b0, Stride: 1},
		{Lo: 0x12b2, Hi: 0x12b5, Stride: 1},
		{Lo: 0x12b8, Hi: 0x12be, Stride: 1},
		{Lo: 0x12c0, Hi: 0x12c0, Stride: 1},
		{Lo: 0x12c2, Hi: 0x12c5, Stride: 1},
		{Lo: 0x12c8, Hi: 0x12ce, Stride: 1},
		{Lo: 0x12d0, Hi: 0x12d6, Stride: 1},
		{Lo: 0x12d8, Hi: 0x12ee, Stride: 1},
		{Lo: 0x12f0, Hi: 0x130e, Stride: 1},
		{Lo: 0x1310, Hi: 0x1310, Stride: 1},
		{Lo: 0x1312, Hi: 0x1315, Stride: 1},
		{Lo: 0x1318, Hi: 0x131e, Stride: 1},
		{Lo: 0x1320, Hi: 0x1346, Stride: 1},
		{Lo: 0x1348, Hi: 0x135a, Stride: 1},
		{Lo: 0x1369, Hi: 0x1371, Stride: 1},
		{Lo: 0x13a0, Hi: 0x13f4, Stride: 1},
		{Lo: 0x1401, Hi: 0x166c, Stride: 1},
		{Lo: 0x166f, Hi: 0x1676, Stride: 1},
		{Lo: 0x1681, Hi: 0x169a, Stride: 1},
		{Lo: 0x16a0, Hi: 0x16ea, Stride: 1},
		{Lo: 0x1780, Hi: 0x17d3, Stride: 1},
		{Lo: 0x17e0, Hi: 0x17e9, Stride: 1},
		{Lo: 0x1810, Hi: 0x1819, Stride: 1},
		{Lo: 0x1820, Hi: 0x1877, Stride: 1},
		{Lo: 0x1880, Hi: 0x18a9, Stride: 1},
		{Lo: 0x1e00, Hi: 0x1e9b, Stride: 1},
		{Lo: 0x1ea0, Hi: 0x1ef9, Stride: 1},
		{Lo: 0x1f00, Hi: 0x1f15, Stride: 1},
		{Lo: 0x1f18, Hi: 0x1f1d, Stride: 1},
		{Lo: 0x1f20, Hi: 0x1f45, Stride: 1},
		{Lo: 0x1f48, Hi: 0x1f4d, Stride: 1},
		{Lo: 0x1f50, Hi: 0x1f57, Stride: 1},
		{Lo: 0x1f59, Hi: 0x1f59, Stride: 1},
		{Lo: 0x1f5b, Hi: 0x1f5b, Stride: 1},
		{Lo: 0x1f5d, Hi: 0x1f5d, Stride: 1},
		{Lo: 0x1f5f, Hi: 0x1f7d, Stride: 1},
		{Lo: 0x1f80, Hi: 0x1fb4, Stride: 1},
		{Lo: 0x1fb6, Hi: 0x1fbc, Stride: 1},
		{Lo: 0x1fbe, Hi: 0x1fbe, Stride: 1},
		{Lo: 0x1fc2, Hi: 0x1fc4, Stride: 1},
		{Lo: 0x1fc6, Hi: 0x1fcc, Stride: 1},
		{Lo: 0x1fd0, Hi: 0x1fd3, Stride: 1},
		{Lo: 0x1fd6, Hi: 0x1fdb, Stride: 1},
		{Lo: 0x1fe0, Hi: 0x1fec, Stride: 1},
		{Lo: 0x1ff2, Hi: 0x1ff4, Stride: 1},
		{Lo: 0x1ff6, Hi: 0x1ffc, Stride: 1},
		{Lo: 0x203f, Hi: 0x2040, Stride: 1},
		{Lo: 0x207f, Hi: 0x207f, Stride: 1},
		{Lo: 0x20d0, Hi: 0x20dc, Stride: 1},
		{Lo: 0x20e1, Hi: 0x20e1, Stride: 1},
		{Lo: 0x2102, Hi: 0x2102, Stride: 1},
		{Lo: 0x2107, Hi: 0x2107, Stride: 1},
		{Lo: 0x210a, Hi: 0x2113, Stride: 1},
		{Lo: 0x2115, Hi: 0x2115, Stride: 1},
		{Lo: 0x2119, Hi: 0x211d, Stride: 1},
		{Lo: 0x2124, Hi: 0x2124, Stride: 1},
		{Lo: 0x2126, Hi: 0x2126, Stride: 1},
		{Lo: 0x2128, Hi: 0x2128, Stride: 1},
		{Lo: 0x212a, Hi: 0x212d, Stride: 1},
		{Lo: 0x212f, Hi: 0x2131, Stride: 1},
		{Lo: 0x2133, Hi: 0x2139, Stride: 1},
		{Lo: 0x3005, Hi: 0x3006, Stride: 1},
		{Lo: 0x302a, Hi: 0x302f, Stride: 1},
		{Lo: 0x3031, Hi: 0x3035, Stride: 1},
		{Lo: 0x3041, Hi: 0x3094, Stride: 1},
		{Lo: 0x3099, Hi: 0x309a, Stride: 1},
		{Lo: 0x309d, Hi: 0x309e, Stride: 1},
		{Lo: 0x30a1, Hi: 0x30fa, Stride: 1},
		{Lo: 0x30fc, Hi: 0x30fe, Stride: 1},
		{Lo: 0x3105, Hi: 0x312c, Stride: 1},
		{Lo: 0x3131, Hi: 0x318e, Stride: 1},
		{Lo: 0x31a0, Hi: 0x31b7, Stride: 1},
		{Lo: 0x3400, Hi: 0x4db5, Stride: 1},
		{Lo: 0x4e00, Hi: 0x9fa5, Stride: 1},
		{Lo: 0xa000, Hi: 0xa48c, Stride: 1},
		{Lo: 0xac00, Hi: 0xd7a3, Stride: 1},
		{Lo: 0xf900, Hi: 0xfa2d, Stride: 1},
		{Lo: 0xfb00, Hi: 0xfb06, Stride: 1},
		{Lo: 0xfb13, Hi: 0xfb17, Stride: 1},
		{Lo: 0xfb1d, Hi: 0xfb28, Stride: 1},
		{Lo: 0xfb2a, Hi: 0xfb36, Stride: 1},
		{Lo: 0xfb38, Hi: 0xfb3c, Stride: 1},
		{Lo: 0xfb3e, Hi: 0xfb3e, Stride: 1},
		{Lo: 0xfb40, Hi: 0xfb41, Stride: 1},
		{Lo: 0xfb43, Hi: 0xfb44, Stride: 1},
		{Lo: 0xfb46, Hi: 0xfbb1, Stride: 1},
		{Lo: 0xfbd3, Hi: 0xfd3d, Stride: 1},
		{Lo: 0xfd50, Hi: 0xfd8f, Stride: 1},
		{Lo: 0xfd92, Hi: 0xfdc7, Stride: 1},
		{Lo: 0xfdf0, Hi: 0xfdfb, Stride: 1},
		{Lo: 0xfe20, Hi: 0xfe23, Stride: 1},
		{Lo: 0xfe33, Hi: 0xfe34, Stride: 1},
		{Lo: 0xfe4d, Hi: 0xfe4f, Stride: 1},
		{Lo: 0xfe70, Hi: 0xfe72, Stride: 1},
		{Lo: 0xfe74, Hi: 0xfe74, Stride: 1},
		{Lo: 0xfe76, Hi: 0xfefc, Stride: 1},
		{Lo: 0xff10, Hi: 0xff19, Stride: 1},
		{Lo: 0xff21, Hi: 0xff3a, Stride: 1},
		{Lo: 0xff3f, Hi: 0xff3f, Stride: 1},
		{Lo: 0xff41, Hi: 0xff5a, Stride: 1},
		{Lo: 0xff66, Hi: 0xffbe, Stride: 1},
		{Lo: 0xffc2, Hi: 0xffc7, Stride: 1},
		{Lo: 0xffca, Hi: 0xffcf, Stride: 1},
		{Lo: 0xffd2, Hi: 0xffd7, Stride: 1},
		{Lo: 0xffda, Hi: 0xffdc, Stride: 1},
	},
}
var idStartES5OrESNext = &unicode.RangeTable{
LatinOffset: 117,
R16: []unicode.Range16{
{Lo: 0x41, Hi: 0x5a, Stride: 1},
{Lo: 0x61, Hi: 0x7a, Stride: 1},
{Lo: 0xaa, Hi: 0xaa, Stride: 1},
{Lo: 0xb5, Hi: 0xb5, Stride: 1},
{Lo: 0xba, Hi: 0xba, Stride: 1},
{Lo: 0xc0, Hi: 0xd6, Stride: 1},
{Lo: 0xd8, Hi: 0xf6, Stride: 1},
{Lo: 0xf8, Hi: 0x2c1, Stride: 1},
{Lo: 0x2c6, Hi: 0x2d1, Stride: 1},
{Lo: 0x2e0, Hi: 0x2e4, Stride: 1},
{Lo: 0x2ec, Hi: 0x2ec, Stride: 1},
{Lo: 0x2ee, Hi: 0x2ee, Stride: 1},
{Lo: 0x370, Hi: 0x374, Stride: 1},
{Lo: 0x376, Hi: 0x377, Stride: 1},
{Lo: 0x37a, Hi: 0x37d, Stride: 1},
{Lo: 0x37f, Hi: 0x37f, Stride: 1},
{Lo: 0x386, Hi: 0x386, Stride: 1},
{Lo: 0x388, Hi: 0x38a, Stride: 1},
{Lo: 0x38c, Hi: 0x38c, Stride: 1},
{Lo: 0x38e, Hi: 0x3a1, Stride: 1},
{Lo: 0x3a3, Hi: 0x3f5, Stride: 1},
{Lo: 0x3f7, Hi: 0x481, Stride: 1},
{Lo: 0x48a, Hi: 0x52f, Stride: 1},
{Lo: 0x531, Hi: 0x556, Stride: 1},
{Lo: 0x559, Hi: 0x559, Stride: 1},
{Lo: 0x560, Hi: 0x588, Stride: 1},
{Lo: 0x5d0, Hi: 0x5ea, Stride: 1},
{Lo: 0x5ef, Hi: 0x5f2, Stride: 1},
{Lo: 0x620, Hi: 0x64a, Stride: 1},
{Lo: 0x66e, Hi: 0x66f, Stride: 1},
{Lo: 0x671, Hi: 0x6d3, Stride: 1},
{Lo: 0x6d5, Hi: 0x6d5, Stride: 1},
{Lo: 0x6e5, Hi: 0x6e6, Stride: 1},
{Lo: 0x6ee, Hi: 0x6ef, Stride: 1},
{Lo: 0x6fa, Hi: 0x6fc, Stride: 1},
{Lo: 0x6ff, Hi: 0x6ff, Stride: 1},
{Lo: 0x710, Hi: 0x710, Stride: 1},
{Lo: 0x712, Hi: 0x72f, Stride: 1},
{Lo: 0x74d, Hi: 0x7a5, Stride: 1},
{Lo: 0x7b1, Hi: 0x7b1, Stride: 1},
{Lo: 0x7ca, Hi: 0x7ea, Stride: 1},
{Lo: 0x7f4, Hi: 0x7f5, Stride: 1},
{Lo: 0x7fa, Hi: 0x7fa, Stride: 1},
{Lo: 0x800, Hi: 0x815, Stride: 1},
{Lo: 0x81a, Hi: 0x81a, Stride: 1},
{Lo: 0x824, Hi: 0x824, Stride: 1},
{Lo: 0x828, Hi: 0x828, Stride: 1},
{Lo: 0x840, Hi: 0x858, Stride: 1},
{Lo: 0x860, Hi: 0x86a, Stride: 1},
{Lo: 0x870, Hi: 0x887, Stride: 1},
{Lo: 0x889, Hi: 0x88e, Stride: 1},
{Lo: 0x8a0, Hi: 0x8c9, Stride: 1},
{Lo: 0x904, Hi: 0x939, Stride: 1},
{Lo: 0x93d, Hi: 0x93d, Stride: 1},
{Lo: 0x950, Hi: 0x950, Stride: 1},
{Lo: 0x958, Hi: 0x961, Stride: 1},
{Lo: 0x971, Hi: 0x980, Stride: 1},
{Lo: 0x985, Hi: 0x98c, Stride: 1},
{Lo: 0x98f, Hi: 0x990, Stride: 1},
{Lo: 0x993, Hi: 0x9a8, Stride: 1},
{Lo: 0x9aa, Hi: 0x9b0, Stride: 1},
{Lo: 0x9b2, Hi: 0x9b2, Stride: 1},
{Lo: 0x9b6, Hi: 0x9b9, Stride: 1},
{Lo: 0x9bd, Hi: 0x9bd, Stride: 1},
{Lo: 0x9ce, Hi: 0x9ce, Stride: 1},
{Lo: 0x9dc, Hi: 0x9dd, Stride: 1},
{Lo: 0x9df, Hi: 0x9e1, Stride: 1},
{Lo: 0x9f0, Hi: 0x9f1, Stride: 1},
{Lo: 0x9fc, Hi: 0x9fc, Stride: 1},
{Lo: 0xa05, Hi: 0xa0a, Stride: 1},
{Lo: 0xa0f, Hi: 0xa10, Stride: 1},
{Lo: 0xa13, Hi: 0xa28, Stride: 1},
{Lo: 0xa2a, Hi: 0xa30, Stride: 1},
{Lo: 0xa32, Hi: 0xa33, Stride: 1},
{Lo: 0xa35, Hi: 0xa36, Stride: 1},
{Lo: 0xa38, Hi: 0xa39, Stride: 1},
{Lo: 0xa59, Hi: 0xa5c, Stride: 1},
{Lo: 0xa5e, Hi: 0xa5e, Stride: 1},
{Lo: 0xa72, Hi: 0xa74, Stride: 1},
{Lo: 0xa85, Hi: 0xa8d, Stride: 1},
{Lo: 0xa8f, Hi: 0xa91, Stride: 1},
{Lo: 0xa93, Hi: 0xaa8, Stride: 1},
{Lo: 0xaaa, Hi: 0xab0, Stride: 1},
{Lo: 0xab2, Hi: 0xab3, Stride: 1},
{Lo: 0xab5, Hi: 0xab9, Stride: 1},
{Lo: 0xabd, Hi: 0xabd, Stride: 1},
{Lo: 0xad0, Hi: 0xad0, Stride: 1},
{Lo: 0xae0, Hi: 0xae1, Stride: 1},
{Lo: 0xaf9, Hi: 0xaf9, Stride: 1},
{Lo: 0xb05, Hi: 0xb0c, Stride: 1},
{Lo: 0xb0f, Hi: 0xb10, Stride: 1},
{Lo: 0xb13, Hi: 0xb28, Stride: 1},
{Lo: 0xb2a, Hi: 0xb30, Stride: 1},
{Lo: 0xb32, Hi: 0xb33, Stride: 1},
{Lo: 0xb35, Hi: 0xb39, Stride: 1},
{Lo: 0xb3d, Hi: 0xb3d, Stride: 1},
{Lo: 0xb5c, Hi: 0xb5d, Stride: 1},
{Lo: 0xb5f, Hi: 0xb61, Stride: 1},
{Lo: 0xb71, Hi: 0xb71, Stride: 1},
{Lo: 0xb83, Hi: 0xb83, Stride: 1},
{Lo: 0xb85, Hi: 0xb8a, Stride: 1},
{Lo: 0xb8e, Hi: 0xb90, Stride: 1},
{Lo: 0xb92, Hi: 0xb95, Stride: 1},
{Lo: 0xb99, Hi: 0xb9a, Stride: 1},
{Lo: 0xb9c, Hi: 0xb9c, Stride: 1},
{Lo: 0xb9e, Hi: 0xb9f, Stride: 1},
{Lo: 0xba3, Hi: 0xba4, Stride: 1},
{Lo: 0xba8, Hi: 0xbaa, Stride: 1},
{Lo: 0xbae, Hi: 0xbb9, Stride: 1},
{Lo: 0xbd0, Hi: 0xbd0, Stride: 1},
{Lo: 0xc05, Hi: 0xc0c, Stride: 1},
{Lo: 0xc0e, Hi: 0xc10, Stride: 1},
{Lo: 0xc12, Hi: 0xc28, Stride: 1},
{Lo: 0xc2a, Hi: 0xc39, Stride: 1},
{Lo: 0xc3d, Hi: 0xc3d, Stride: 1},
{Lo: 0xc58, Hi: 0xc5a, Stride: 1},
{Lo: 0xc5d, Hi: 0xc5d, Stride: 1},
{Lo: 0xc60, Hi: 0xc61, Stride: 1},
{Lo: 0xc80, Hi: 0xc80, Stride: 1},
{Lo: 0xc85, Hi: 0xc8c, Stride: 1},
{Lo: 0xc8e, Hi: 0xc90, Stride: 1},
{Lo: 0xc92, Hi: 0xca8, Stride: 1},
{Lo: 0xcaa, Hi: 0xcb3, Stride: 1},
{Lo: 0xcb5, Hi: 0xcb9, Stride: 1},
{Lo: 0xcbd, Hi: 0xcbd, Stride: 1},
{Lo: 0xcdd, Hi: 0xcde, Stride: 1},
{Lo: 0xce0, Hi: 0xce1, Stride: 1},
{Lo: 0xcf1, Hi: 0xcf2, Stride: 1},
{Lo: 0xd04, Hi: 0xd0c, Stride: 1},
{Lo: 0xd0e, Hi: 0xd10, Stride: 1},
{Lo: 0xd12, Hi: 0xd3a, Stride: 1},
{Lo: 0xd3d, Hi: 0xd3d, Stride: 1},
{Lo: 0xd4e, Hi: 0xd4e, Stride: 1},
{Lo: 0xd54, Hi: 0xd56, Stride: 1},
{Lo: 0xd5f, Hi: 0xd61, Stride: 1},
{Lo: 0xd7a, Hi: 0xd7f, Stride: 1},
{Lo: 0xd85, Hi: 0xd96, Stride: 1},
{Lo: 0xd9a, Hi: 0xdb1, Stride: 1},
{Lo: 0xdb3, Hi: 0xdbb, Stride: 1},
{Lo: 0xdbd, Hi: 0xdbd, Stride: 1},
{Lo: 0xdc0, Hi: 0xdc6, Stride: 1},
{Lo: 0xe01, Hi: 0xe30, Stride: 1},
{Lo: 0xe32, Hi: 0xe33, Stride: 1},
{Lo: 0xe40, Hi: 0xe46, Stride: 1},
{Lo: 0xe81, Hi: 0xe82, Stride: 1},
{Lo: 0xe84, Hi: 0xe84, Stride: 1},
{Lo: 0xe86, Hi: 0xe8a, Stride: 1},
{Lo: 0xe8c, Hi: 0xea3, Stride: 1},
{Lo: 0xea5, Hi: 0xea5, Stride: 1},
{Lo: 0xea7, Hi: 0xeb0, Stride: 1},
{Lo: 0xeb2, Hi: 0xeb3, Stride: 1},
{Lo: 0xebd, Hi: 0xebd, Stride: 1},
{Lo: 0xec0, Hi: 0xec4, Stride: 1},
{Lo: 0xec6, Hi: 0xec6, Stride: 1},
{Lo: 0xedc, Hi: 0xedf, Stride: 1},
{Lo: 0xf00, Hi: 0xf00, Stride: 1},
{Lo: 0xf40, Hi: 0xf47, Stride: 1},
{Lo: 0xf49, Hi: 0xf6c, Stride: 1},
{Lo: 0xf88, Hi: 0xf8c, Stride: 1},
},
R32: []unicode.Range32{
{Lo: 0x1000, Hi: 0x102a, Stride: 1},
{Lo: 0x103f, Hi: 0x103f, Stride: 1},
{Lo: 0x1050, Hi: 0x1055, Stride: 1},
{Lo: 0x105a, Hi: 0x105d, Stride: 1},
{Lo: 0x1061, Hi: 0x1061, Stride: 1},
{Lo: 0x1065, Hi: 0x1066, Stride: 1},
{Lo: 0x106e, Hi: 0x1070, Stride: 1},
{Lo: 0x1075, Hi: 0x1081, Stride: 1},
{Lo: 0x108e, Hi: 0x108e, Stride: 1},
{Lo: 0x10a0, Hi: 0x10c5, Stride: 1},
{Lo: 0x10c7, Hi: 0x10c7, Stride: 1},
{Lo: 0x10cd, Hi: 0x10cd, Stride: 1},
{Lo: 0x10d0, Hi: 0x10fa, Stride: 1},
{Lo: 0x10fc, Hi: 0x1248, Stride: 1},
{Lo: 0x124a, Hi: 0x124d, Stride: 1},
{Lo: 0x1250, Hi: 0x1256, Stride: 1},
{Lo: 0x1258, Hi: 0x1258, Stride: 1},
{Lo: 0x125a, Hi: 0x125d, Stride: 1},
{Lo: 0x1260, Hi: 0x1288, Stride: 1},
{Lo: 0x128a, Hi: 0x128d, Stride: 1},
{Lo: 0x1290, Hi: 0x12b0, Stride: 1},
{Lo: 0x12b2, Hi: 0x12b5, Stride: 1},
{Lo: 0x12b8, Hi: 0x12be, Stride: 1},
{Lo: 0x12c0, Hi: 0x12c0, Stride: 1},
{Lo: 0x12c2, Hi: 0x12c5, Stride: 1},
{Lo: 0x12c8, Hi: 0x12d6, Stride: 1},
{Lo: 0x12d8, Hi: 0x1310, Stride: 1},
{Lo: 0x1312, Hi: 0x1315, Stride: 1},
{Lo: 0x1318, Hi: 0x135a, Stride: 1},
{Lo: 0x1380, Hi: 0x138f, Stride: 1},
{Lo: 0x13a0, Hi: 0x13f5, Stride: 1},
{Lo: 0x13f8, Hi: 0x13fd, Stride: 1},
{Lo: 0x1401, Hi: 0x166c, Stride: 1},
{Lo: 0x166f, Hi: 0x167f, Stride: 1},
{Lo: 0x1681, Hi: 0x169a, Stride: 1},
{Lo: 0x16a0, Hi: 0x16ea, Stride: 1},
{Lo: 0x16ee, Hi: 0x16f8, Stride: 1},
{Lo: 0x1700, Hi: 0x1711, Stride: 1},
{Lo: 0x171f, Hi: 0x1731, Stride: 1},
{Lo: 0x1740, Hi: 0x1751, Stride: 1},
{Lo: 0x1760, Hi: 0x176c, Stride: 1},
{Lo: 0x176e, Hi: 0x1770, Stride: 1},
{Lo: 0x1780, Hi: 0x17b3, Stride: 1},
{Lo: 0x17d7, Hi: 0x17d7, Stride: 1},
{Lo: 0x17dc, Hi: 0x17dc, Stride: 1},
{Lo: 0x1820, Hi: 0x1878, Stride: 1},
{Lo: 0x1880, Hi: 0x18a8, Stride: 1},
{Lo: 0x18aa, Hi: 0x18aa, Stride: 1},
{Lo: 0x18b0, Hi: 0x18f5, Stride: 1},
{Lo: 0x1900, Hi: 0x191e, Stride: 1},
{Lo: 0x1950, Hi: 0x196d, Stride: 1},
{Lo: 0x1970, Hi: 0x1974, Stride: 1},
{Lo: 0x1980, Hi: 0x19ab, Stride: 1},
{Lo: 0x19b0, Hi: 0x19c9, Stride: 1},
{Lo: 0x1a00, Hi: 0x1a16, Stride: 1},
{Lo: 0x1a20, Hi: 0x1a54, Stride: 1},
{Lo: 0x1aa7, Hi: 0x1aa7, Stride: 1},
{Lo: 0x1b05, Hi: 0x1b33, Stride: 1},
{Lo: 0x1b45, Hi: 0x1b4c, Stride: 1},
{Lo: 0x1b83, Hi: 0x1ba0, Stride: 1},
{Lo: 0x1bae, Hi: 0x1baf, Stride: 1},
{Lo: 0x1bba, Hi: 0x1be5, Stride: 1},
{Lo: 0x1c00, Hi: 0x1c23, Stride: 1},
{Lo: 0x1c4d, Hi: 0x1c4f, Stride: 1},
{Lo: 0x1c5a, Hi: 0x1c7d, Stride: 1},
{Lo: 0x1c80, Hi: 0x1c88, Stride: 1},
{Lo: 0x1c90, Hi: 0x1cba, Stride: 1},
{Lo: 0x1cbd, Hi: 0x1cbf, Stride: 1},
{Lo: 0x1ce9, Hi: 0x1cec, Stride: 1},
{Lo: 0x1cee, Hi: 0x1cf3, Stride: 1},
{Lo: 0x1cf5, Hi: 0x1cf6, Stride: 1},
{Lo: 0x1cfa, Hi: 0x1cfa, Stride: 1},
{Lo: 0x1d00, Hi: 0x1dbf, Stride: 1},
{Lo: 0x1e00, Hi: 0x1f15, Stride: 1},
{Lo: 0x1f18, Hi: 0x1f1d, Stride: 1},
{Lo: 0x1f20, Hi: 0x1f45, Stride: 1},
{Lo: 0x1f48, Hi: 0x1f4d, Stride: 1},
{Lo: 0x1f50, Hi: 0x1f57, Stride: 1},
{Lo: 0x1f59, Hi: 0x1f59, Stride: 1},
{Lo: 0x1f5b, Hi: 0x1f5b, Stride: 1},
{Lo: 0x1f5d, Hi: 0x1f5d, Stride: 1},
{Lo: 0x1f5f, Hi: 0x1f7d, Stride: 1},
{Lo: 0x1f80, Hi: 0x1fb4, Stride: 1},
{Lo: 0x1fb6, Hi: 0x1fbc, Stride: 1},
{Lo: 0x1fbe, Hi: 0x1fbe, Stride: 1},
{Lo: 0x1fc2, Hi: 0x1fc4, Stride: 1},
{Lo: 0x1fc6, Hi: 0x1fcc, Stride: 1},
{Lo: 0x1fd0, Hi: 0x1fd3, Stride: 1},
{Lo: 0x1fd6, Hi: 0x1fdb, Stride: 1},
{Lo: 0x1fe0, Hi: 0x1fec, Stride: 1},
{Lo: 0x1ff2, Hi: 0x1ff4, Stride: 1},
{Lo: 0x1ff6, Hi: 0x1ffc, Stride: 1},
{Lo: 0x2071, Hi: 0x2071, Stride: 1},
{Lo: 0x207f, Hi: 0x207f, Stride: 1},
{Lo: 0x2090, Hi: 0x209c, Stride: 1},
{Lo: 0x2102, Hi: 0x2102, Stride: 1},
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | true |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/js_ast/js_ast.go | internal/js_ast/js_ast.go | package js_ast
import (
"strconv"
"github.com/evanw/esbuild/internal/ast"
"github.com/evanw/esbuild/internal/logger"
)
// Every module (i.e. file) is parsed into a separate AST data structure. For
// efficiency, the parser also resolves all scopes and binds all symbols in the
// tree.
//
// Identifiers in the tree are referenced by a Ref, which is a pointer into the
// symbol table for the file. The symbol table is stored as a top-level field
// in the AST so it can be accessed without traversing the tree. For example,
// a renaming pass can iterate over the symbol table without touching the tree.
//
// Parse trees are intended to be immutable. That makes it easy to build an
// incremental compiler with a "watch" mode that can avoid re-parsing files
// that have already been parsed. Any passes that operate on an AST after it
// has been parsed should create a copy of the mutated parts of the tree
// instead of mutating the original tree.
// L is an operator precedence level. Higher values bind more tightly.
type L uint8

// Operator precedence levels, from loosest to tightest binding. See:
// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/Operator_Precedence
const (
	LLowest L = iota
	LComma
	LSpread
	LYield
	LAssign
	LConditional
	LNullishCoalescing
	LLogicalOr
	LLogicalAnd
	LBitwiseOr
	LBitwiseXor
	LBitwiseAnd
	LEquals
	LCompare
	LShift
	LAdd
	LMultiply
	LExponentiation
	LPrefix
	LPostfix
	LNew
	LCall
	LMember
)

// OpCode identifies a unary or binary JavaScript operator. The numeric
// ordering of these codes is meaningful: the classifier methods below use
// simple range comparisons instead of exhaustive switches, so new codes
// must be inserted in the correct group.
type OpCode uint8

// IsPrefix reports whether this is a prefix unary operator. That includes
// the prefix update operators ("--x" and "++x") but not the postfix ones.
func (op OpCode) IsPrefix() bool {
	if op >= UnOpPostDec {
		return false
	}
	return true
}

// UnaryAssignTarget classifies how a unary operator writes to its operand:
// the four update operators ("++"/"--", prefix and postfix) mutate it, and
// every other unary operator leaves it untouched.
func (op OpCode) UnaryAssignTarget() AssignTarget {
	switch op {
	case UnOpPreDec, UnOpPreInc, UnOpPostDec, UnOpPostInc:
		return AssignTargetUpdate
	default:
		return AssignTargetNone
	}
}

// IsLeftAssociative reports whether this binary operator groups to the
// left. That covers the whole "left-associative" block of codes except
// "**", which is the one right-associative operator declared there.
func (op OpCode) IsLeftAssociative() bool {
	if op < BinOpAdd || op >= BinOpComma {
		return false
	}
	return op != BinOpPow
}

// IsRightAssociative reports whether this binary operator groups to the
// right: every assignment operator plus exponentiation ("**").
func (op OpCode) IsRightAssociative() bool {
	if op == BinOpPow {
		return true
	}
	return op >= BinOpAssign
}

// BinaryAssignTarget classifies how a binary operator writes to its left
// operand: "=" replaces it outright, the compound assignment operators
// ("+=", "&&=", ...) update it, and all other operators don't write at all.
func (op OpCode) BinaryAssignTarget() AssignTarget {
	switch {
	case op == BinOpAssign:
		return AssignTargetReplace
	case op > BinOpAssign:
		return AssignTargetUpdate
	default:
		return AssignTargetNone
	}
}

// IsShortCircuit reports whether this operator may skip evaluating its
// right operand: "&&", "||", "??", and their compound-assignment forms.
func (op OpCode) IsShortCircuit() bool {
	switch op {
	case BinOpLogicalOr, BinOpLogicalOrAssign,
		BinOpLogicalAnd, BinOpLogicalAndAssign,
		BinOpNullishCoalescing, BinOpNullishCoalescingAssign:
		return true
	default:
		return false
	}
}

// AssignTarget describes how an operator writes to its target, if at all.
type AssignTarget uint8

const (
	AssignTargetNone    AssignTarget = iota
	AssignTargetReplace // "a = b"
	AssignTargetUpdate  // "a += b"
)

// The operator codes themselves. If you add a new token, remember to add
// it to "OpTable" too, in exactly the same position.
const (
	// Prefix
	UnOpPos OpCode = iota
	UnOpNeg
	UnOpCpl
	UnOpNot
	UnOpVoid
	UnOpTypeof
	UnOpDelete

	// Prefix update
	UnOpPreDec
	UnOpPreInc

	// Postfix update
	UnOpPostDec
	UnOpPostInc

	// Left-associative
	BinOpAdd
	BinOpSub
	BinOpMul
	BinOpDiv
	BinOpRem
	BinOpPow
	BinOpLt
	BinOpLe
	BinOpGt
	BinOpGe
	BinOpIn
	BinOpInstanceof
	BinOpShl
	BinOpShr
	BinOpUShr
	BinOpLooseEq
	BinOpLooseNe
	BinOpStrictEq
	BinOpStrictNe
	BinOpNullishCoalescing
	BinOpLogicalOr
	BinOpLogicalAnd
	BinOpBitwiseOr
	BinOpBitwiseAnd
	BinOpBitwiseXor

	// Non-associative
	BinOpComma

	// Right-associative
	BinOpAssign
	BinOpAddAssign
	BinOpSubAssign
	BinOpMulAssign
	BinOpDivAssign
	BinOpRemAssign
	BinOpPowAssign
	BinOpShlAssign
	BinOpShrAssign
	BinOpUShrAssign
	BinOpBitwiseOrAssign
	BinOpBitwiseAndAssign
	BinOpBitwiseXorAssign
	BinOpNullishCoalescingAssign
	BinOpLogicalOrAssign
	BinOpLogicalAndAssign
)

// OpTableEntry is one row of "OpTable": an operator's source text, its
// precedence level, and whether it's spelled as a keyword (e.g. "typeof").
type OpTableEntry struct {
	Text      string
	Level     L
	IsKeyword bool
}
// OpTable maps an OpCode (used directly as the slice index) to its source
// text, precedence level, and keyword-ness. The entries must stay in
// exactly the same order as the OpCode constants above.
var OpTable = []OpTableEntry{
	// Prefix
	{"+", LPrefix, false},
	{"-", LPrefix, false},
	{"~", LPrefix, false},
	{"!", LPrefix, false},
	{"void", LPrefix, true},
	{"typeof", LPrefix, true},
	{"delete", LPrefix, true},

	// Prefix update
	{"--", LPrefix, false},
	{"++", LPrefix, false},

	// Postfix update
	{"--", LPostfix, false},
	{"++", LPostfix, false},

	// Left-associative
	{"+", LAdd, false},
	{"-", LAdd, false},
	{"*", LMultiply, false},
	{"/", LMultiply, false},
	{"%", LMultiply, false},
	{"**", LExponentiation, false}, // Right-associative
	{"<", LCompare, false},
	{"<=", LCompare, false},
	{">", LCompare, false},
	{">=", LCompare, false},
	{"in", LCompare, true},
	{"instanceof", LCompare, true},
	{"<<", LShift, false},
	{">>", LShift, false},
	{">>>", LShift, false},
	{"==", LEquals, false},
	{"!=", LEquals, false},
	{"===", LEquals, false},
	{"!==", LEquals, false},
	{"??", LNullishCoalescing, false},
	{"||", LLogicalOr, false},
	{"&&", LLogicalAnd, false},
	{"|", LBitwiseOr, false},
	{"&", LBitwiseAnd, false},
	{"^", LBitwiseXor, false},

	// Non-associative
	{",", LComma, false},

	// Right-associative
	{"=", LAssign, false},
	{"+=", LAssign, false},
	{"-=", LAssign, false},
	{"*=", LAssign, false},
	{"/=", LAssign, false},
	{"%=", LAssign, false},
	{"**=", LAssign, false},
	{"<<=", LAssign, false},
	{">>=", LAssign, false},
	{">>>=", LAssign, false},
	{"|=", LAssign, false},
	{"&=", LAssign, false},
	{"^=", LAssign, false},
	{"??=", LAssign, false},
	{"||=", LAssign, false},
	{"&&=", LAssign, false},
}
// Decorator is a single "@decorator" annotation attached to a class, a
// class member, or (for TypeScript) a parameter.
type Decorator struct {
	Value            Expr       // The expression following the "@"
	AtLoc            logger.Loc // The source location of the "@" token
	OmitNewlineAfter bool       // Printing hint — presumably keeps the decorated item on the same line; confirm against the printer
}
// PropertyKind classifies one entry in a class body or object literal.
type PropertyKind uint8

const (
	PropertyField PropertyKind = iota
	PropertyMethod
	PropertyGetter
	PropertySetter
	PropertyAutoAccessor
	PropertySpread
	PropertyDeclareOrAbstract
	PropertyClassStaticBlock
)

// IsMethodDefinition returns true if and only if this property matches the
// "MethodDefinition" grammar from the specification, i.e. one of:
//
//	foo() {}
//	*foo() {}
//	async foo() {}
//	async *foo() {}
//	get foo() {}
//	set foo(_) {}
//
// When this returns true, the "ValueOrNil" field of the property is always
// an "EFunction" expression and it is always printed as a method.
func (kind PropertyKind) IsMethodDefinition() bool {
	switch kind {
	case PropertyMethod, PropertyGetter, PropertySetter:
		return true
	}
	return false
}
// ClassStaticBlock is a "static { ... }" initializer block in a class body.
type ClassStaticBlock struct {
	Block SBlock     // The statements inside the block
	Loc   logger.Loc // The source location of this static block
}
// PropertyFlags is a bit set of boolean attributes of a Property.
type PropertyFlags uint8

const (
	PropertyIsComputed PropertyFlags = 1 << iota
	PropertyIsStatic
	PropertyWasShorthand
	PropertyPreferQuotedKey
)

// Has reports whether the given flag bit is set.
func (flags PropertyFlags) Has(flag PropertyFlags) bool {
	return flags&flag != 0
}
// Property is one entry in a class body, object literal, or JSX attribute
// list. Which fields are meaningful depends on "Kind".
type Property struct {
	ClassStaticBlock *ClassStaticBlock // Presumably non-nil only when Kind is PropertyClassStaticBlock — confirm

	Key Expr

	// This is omitted for class fields
	ValueOrNil Expr

	// This is used when parsing a pattern that uses default values:
	//
	//   [a = 1] = [];
	//   ({a = 1} = {});
	//
	// It's also used for class fields:
	//
	//   class Foo { a = 1 }
	//
	InitializerOrNil Expr

	Decorators []Decorator

	Loc             logger.Loc
	CloseBracketLoc logger.Loc // NOTE(review): presumably only set for computed "[key]" properties — confirm

	Kind  PropertyKind
	Flags PropertyFlags
}
// PropertyBinding is one property in an object destructuring pattern
// ("BObject"), e.g. the "a: b = 1" in "{a: b = 1} = x".
type PropertyBinding struct {
	Key               Expr
	Value             Binding
	DefaultValueOrNil Expr // The "= value" default, if present
	Loc               logger.Loc
	CloseBracketLoc   logger.Loc // NOTE(review): presumably only set when IsComputed — confirm
	IsComputed        bool       // True for a computed "[key]"
	IsSpread          bool       // True for a "...rest" element
	PreferQuotedKey   bool
}

// Arg is one parameter in a function, method, or arrow declaration.
type Arg struct {
	Binding      Binding
	DefaultOrNil Expr // The "= value" default, if present
	Decorators   []Decorator

	// "constructor(public x: boolean) {}"
	IsTypeScriptCtorField bool
}
// Fn is a function definition, shared by function declarations, function
// expressions, and class methods.
type Fn struct {
	Name         *ast.LocRef // Nil for anonymous functions
	Args         []Arg
	Body         FnBody
	ArgumentsRef ast.Ref // The symbol for this function's "arguments" object
	OpenParenLoc logger.Loc
	IsAsync      bool
	IsGenerator  bool
	HasRestArg   bool
	HasIfScope   bool

	// See: https://github.com/rollup/rollup/pull/5024
	HasNoSideEffectsComment bool

	// This is true if the function is a method
	IsUniqueFormalParameters bool
}

// FnBody is the braced statement block that forms a function's body.
type FnBody struct {
	Block SBlock
	Loc   logger.Loc
}
type Class struct {
Decorators []Decorator
Name *ast.LocRef
ExtendsOrNil Expr
Properties []Property
ClassKeyword logger.Range
BodyLoc logger.Loc
CloseBraceLoc logger.Loc
// If true, JavaScript decorators (i.e. not TypeScript experimental
// decorators) should be lowered. This is the case either if JavaScript
// decorators are not supported in the configured target environment, or
// if "useDefineForClassFields" is set to false and this class has
// decorators on it. Note that this flag is not necessarily set to true if
// "useDefineForClassFields" is false and a class has an "accessor" even
// though the accessor feature comes from the decorator specification.
ShouldLowerStandardDecorators bool
// If true, property field initializers cannot be assumed to have no side
// effects. For example:
//
// class Foo {
// static set foo(x) { importantSideEffect(x) }
// }
// class Bar extends Foo {
// foo = 1
// }
//
// This happens in TypeScript when "useDefineForClassFields" is disabled
// because TypeScript (and esbuild) transforms the above class into this:
//
// class Foo {
// static set foo(x) { importantSideEffect(x); }
// }
// class Bar extends Foo {
// }
// Bar.foo = 1;
//
UseDefineForClassFields bool
}
// ArrayBinding is one element of an array destructuring pattern ("BArray").
type ArrayBinding struct {
	Binding           Binding
	DefaultValueOrNil Expr // The "= value" default, if present
	Loc               logger.Loc
}

// Binding is one node of a binding pattern (the left-hand side of a
// declaration or destructuring assignment). "Data" holds the variant.
type Binding struct {
	Data B
	Loc  logger.Loc
}

// This interface is never called. Its purpose is to encode a variant type in
// Go's type system.
type B interface{ isBinding() }

func (*BMissing) isBinding()    {}
func (*BIdentifier) isBinding() {}
func (*BArray) isBinding()      {}
func (*BObject) isBinding()     {}

// BMissing is a hole (an elided element) in an array pattern, e.g. "[, x]".
type BMissing struct{}

// BIdentifier binds a single name to a symbol.
type BIdentifier struct{ Ref ast.Ref }

// BArray is an array destructuring pattern such as "[a, b = 1, ...c]".
type BArray struct {
	Items           []ArrayBinding
	CloseBracketLoc logger.Loc
	HasSpread       bool // True when the last item is a "...rest" element
	IsSingleLine    bool // Printing hint: the source pattern was on one line
}

// BObject is an object destructuring pattern such as "{a, b: c, ...d}".
type BObject struct {
	Properties    []PropertyBinding
	CloseBraceLoc logger.Loc
	IsSingleLine  bool // Printing hint: the source pattern was on one line
}
type Expr struct {
Data E
Loc logger.Loc
}
// This interface is never called. Its purpose is to encode a variant type in
// Go's type system.
type E interface{ isExpr() }
func (*EArray) isExpr() {}
func (*EUnary) isExpr() {}
func (*EBinary) isExpr() {}
func (*EBoolean) isExpr() {}
func (*ESuper) isExpr() {}
func (*ENull) isExpr() {}
func (*EUndefined) isExpr() {}
func (*EThis) isExpr() {}
func (*ENew) isExpr() {}
func (*ENewTarget) isExpr() {}
func (*EImportMeta) isExpr() {}
func (*ECall) isExpr() {}
func (*EDot) isExpr() {}
func (*EIndex) isExpr() {}
func (*EArrow) isExpr() {}
func (*EFunction) isExpr() {}
func (*EClass) isExpr() {}
func (*EIdentifier) isExpr() {}
func (*EImportIdentifier) isExpr() {}
func (*EPrivateIdentifier) isExpr() {}
func (*ENameOfSymbol) isExpr() {}
func (*EJSXElement) isExpr() {}
func (*EJSXText) isExpr() {}
func (*EMissing) isExpr() {}
func (*ENumber) isExpr() {}
func (*EBigInt) isExpr() {}
func (*EObject) isExpr() {}
func (*ESpread) isExpr() {}
func (*EString) isExpr() {}
func (*ETemplate) isExpr() {}
func (*ERegExp) isExpr() {}
func (*EInlinedEnum) isExpr() {}
func (*EAnnotation) isExpr() {}
func (*EAwait) isExpr() {}
func (*EYield) isExpr() {}
func (*EIf) isExpr() {}
func (*ERequireString) isExpr() {}
func (*ERequireResolveString) isExpr() {}
func (*EImportString) isExpr() {}
func (*EImportCall) isExpr() {}
type EArray struct {
Items []Expr
CommaAfterSpread logger.Loc
CloseBracketLoc logger.Loc
IsSingleLine bool
IsParenthesized bool
}
type EUnary struct {
Value Expr
Op OpCode
// The expression "typeof (0, x)" must not become "typeof x" if "x"
// is unbound because that could suppress a ReferenceError from "x".
//
// Also if we know a typeof operator was originally an identifier, then
// we know that this typeof operator always has no side effects (even if
// we consider the identifier by itself to have a side effect).
//
// Note that there *is* actually a case where "typeof x" can throw an error:
// when "x" is being referenced inside of its TDZ (temporal dead zone). TDZ
// checks are not yet handled correctly by esbuild, so this possibility is
// currently ignored.
WasOriginallyTypeofIdentifier bool
// Similarly the expression "delete (0, x)" must not become "delete x"
// because that syntax is invalid in strict mode. We also need to make sure
// we don't accidentally change the return value:
//
// Returns false:
// "var a; delete (a)"
// "var a = Object.freeze({b: 1}); delete (a.b)"
// "var a = Object.freeze({b: 1}); delete (a?.b)"
// "var a = Object.freeze({b: 1}); delete (a['b'])"
// "var a = Object.freeze({b: 1}); delete (a?.['b'])"
//
// Returns true:
// "var a; delete (0, a)"
// "var a = Object.freeze({b: 1}); delete (true && a.b)"
// "var a = Object.freeze({b: 1}); delete (false || a?.b)"
// "var a = Object.freeze({b: 1}); delete (null ?? a?.['b'])"
// "var a = Object.freeze({b: 1}); delete (true ? a['b'] : a['b'])"
//
WasOriginallyDeleteOfIdentifierOrPropertyAccess bool
}
// EBinary is a binary operator expression: "Left Op Right".
type EBinary struct {
	Left  Expr
	Right Expr
	Op    OpCode
}

// EBoolean is a "true" or "false" literal.
type EBoolean struct{ Value bool }

// EMissing is a hole in an array literal, e.g. "[, x]".
type EMissing struct{}

// ESuper is the "super" keyword.
type ESuper struct{}

// ENull is the "null" literal.
type ENull struct{}

// EUndefined is the "undefined" value.
type EUndefined struct{}

// EThis is the "this" keyword.
type EThis struct{}

// ENewTarget is a "new.target" meta-property expression.
type ENewTarget struct {
	Range logger.Range
}

// EImportMeta is an "import.meta" meta-property expression.
type EImportMeta struct {
	RangeLen int32
}

// These help reduce unnecessary memory allocations
var BMissingShared = &BMissing{}
var EMissingShared = &EMissing{}
var ENullShared = &ENull{}
var ESuperShared = &ESuper{}
var EThisShared = &EThis{}
var EUndefinedShared = &EUndefined{}
var SDebuggerShared = &SDebugger{}
var SEmptyShared = &SEmpty{}
var STypeScriptShared = &STypeScript{}
var STypeScriptSharedWasDeclareClass = &STypeScript{WasDeclareClass: true}
type ENew struct {
Target Expr
Args []Expr
CloseParenLoc logger.Loc
IsMultiLine bool
// True if there is a comment containing "@__PURE__" or "#__PURE__" preceding
// this call expression. See the comment inside ECall for more details.
CanBeUnwrappedIfUnused bool
}
// CallKind distinguishes special forms of call expressions.
type CallKind uint8

const (
	NormalCall CallKind = iota
	DirectEval                        // A direct call to "eval"
	TargetWasOriginallyPropertyAccess // The target was originally a property access, e.g. "a.b()"
)

// OptionalChain tracks where an expression sits within a "?." chain.
type OptionalChain uint8

const (
	// "a.b"
	OptionalChainNone OptionalChain = iota

	// "a?.b"
	OptionalChainStart

	// "a?.b.c" => ".c" is OptionalChainContinue
	// "(a?.b).c" => ".c" is OptionalChainNone
	OptionalChainContinue
)
type ECall struct {
Target Expr
Args []Expr
CloseParenLoc logger.Loc
OptionalChain OptionalChain
Kind CallKind
IsMultiLine bool
// True if there is a comment containing "@__PURE__" or "#__PURE__" preceding
// this call expression. This is an annotation used for tree shaking, and
// means that the call can be removed if it's unused. It does not mean the
// call is pure (e.g. it may still return something different if called twice).
//
// Note that the arguments are not considered to be part of the call. If the
// call itself is removed due to this annotation, the arguments must remain
// if they have side effects.
CanBeUnwrappedIfUnused bool
}
// HasSameFlagsAs reports whether two call expressions agree on all of
// their non-expression flag fields: the optional-chain status, the call
// kind, and the "can be unwrapped if unused" annotation.
func (a *ECall) HasSameFlagsAs(b *ECall) bool {
	if a.OptionalChain != b.OptionalChain {
		return false
	}
	if a.Kind != b.Kind {
		return false
	}
	return a.CanBeUnwrappedIfUnused == b.CanBeUnwrappedIfUnused
}
type EDot struct {
Target Expr
Name string
NameLoc logger.Loc
OptionalChain OptionalChain
// If true, this property access is known to be free of side-effects. That
// means it can be removed if the resulting value isn't used.
CanBeRemovedIfUnused bool
// If true, this property access is a function that, when called, can be
// unwrapped if the resulting value is unused. Unwrapping means discarding
// the call target but keeping any arguments with side effects.
CallCanBeUnwrappedIfUnused bool
// Symbol values are known to not have side effects when used as property
// names in class declarations and object literals.
IsSymbolInstance bool
}
// HasSameFlagsAs reports whether two "a.b" property accesses agree on all
// of their non-expression flag fields.
func (a *EDot) HasSameFlagsAs(b *EDot) bool {
	switch {
	case a.OptionalChain != b.OptionalChain,
		a.CanBeRemovedIfUnused != b.CanBeRemovedIfUnused,
		a.CallCanBeUnwrappedIfUnused != b.CallCanBeUnwrappedIfUnused,
		a.IsSymbolInstance != b.IsSymbolInstance:
		return false
	}
	return true
}
type EIndex struct {
Target Expr
Index Expr
CloseBracketLoc logger.Loc
OptionalChain OptionalChain
// If true, this property access is known to be free of side-effects. That
// means it can be removed if the resulting value isn't used.
CanBeRemovedIfUnused bool
// If true, this property access is a function that, when called, can be
// unwrapped if the resulting value is unused. Unwrapping means discarding
// the call target but keeping any arguments with side effects.
CallCanBeUnwrappedIfUnused bool
// Symbol values are known to not have side effects when used as property
// names in class declarations and object literals.
IsSymbolInstance bool
}
// HasSameFlagsAs reports whether two "a[b]" property accesses agree on all
// of their non-expression flag fields.
func (a *EIndex) HasSameFlagsAs(b *EIndex) bool {
	if a.OptionalChain != b.OptionalChain ||
		a.CanBeRemovedIfUnused != b.CanBeRemovedIfUnused ||
		a.CallCanBeUnwrappedIfUnused != b.CallCanBeUnwrappedIfUnused {
		return false
	}
	return a.IsSymbolInstance == b.IsSymbolInstance
}
type EArrow struct {
Args []Arg
Body FnBody
IsAsync bool
HasRestArg bool
PreferExpr bool // Use shorthand if true and "Body" is a single return statement
// V8 uses parentheses as an optimization hint: https://v8.dev/blog/preparser#pife
IsParenthesized bool
// See: https://github.com/rollup/rollup/pull/5024
HasNoSideEffectsComment bool
}
type EFunction struct {
Fn Fn
// V8 uses parentheses as an optimization hint: https://v8.dev/blog/preparser#pife
IsParenthesized bool
}
type EClass struct{ Class Class }
type EIdentifier struct {
Ref ast.Ref
// If we're inside a "with" statement, this identifier may be a property
// access. In that case it would be incorrect to remove this identifier since
// the property access may be a getter or setter with side effects.
MustKeepDueToWithStmt bool
// If true, this identifier is known to not have a side effect (i.e. to not
// throw an exception) when referenced. If false, this identifier may or may
// not have side effects when referenced. This is used to allow the removal
// of known globals such as "Object" if they aren't used.
CanBeRemovedIfUnused bool
// If true, this identifier represents a function that, when called, can be
// unwrapped if the resulting value is unused. Unwrapping means discarding
// the call target but keeping any arguments with side effects.
CallCanBeUnwrappedIfUnused bool
}
// This is similar to an EIdentifier but it represents a reference to an ES6
// import item.
//
// Depending on how the code is linked, the file containing this EImportIdentifier
// may or may not be in the same module group as the file it was imported from.
//
// If it's the same module group than we can just merge the import item symbol
// with the corresponding symbol that was imported, effectively renaming them
// to be the same thing and statically binding them together.
//
// But if it's a different module group, then the import must be dynamically
// evaluated using a property access off the corresponding namespace symbol,
// which represents the result of a require() call.
//
// It's stored as a separate type so it's not easy to confuse with a plain
// identifier. For example, it'd be bad if code trying to convert "{x: x}" into
// "{x}" shorthand syntax wasn't aware that the "x" in this case is actually
// "{x: importedNamespace.x}". This separate type forces code to opt-in to
// doing this instead of opt-out.
type EImportIdentifier struct {
Ref ast.Ref
PreferQuotedKey bool
// If true, this was originally an identifier expression such as "foo". If
// false, this could potentially have been a member access expression such
// as "ns.foo" off of an imported namespace object.
WasOriginallyIdentifier bool
}
// This is similar to EIdentifier but it represents class-private fields and
// methods. It can be used where computed properties can be used, such as
// EIndex and Property.
type EPrivateIdentifier struct {
Ref ast.Ref
}
// This represents an internal property name that can be mangled. The symbol
// referenced by this expression should be a "SymbolMangledProp" symbol.
type ENameOfSymbol struct {
Ref ast.Ref
HasPropertyKeyComment bool // If true, a preceding comment contains "@__KEY__"
}
type EJSXElement struct {
TagOrNil Expr
Properties []Property
// Note: This array may contain nil entries. Be careful about nil entries
// when iterating over this array.
//
// Each nil entry corresponds to the "JSXChildExpression_opt" part of the
// grammar (https://facebook.github.io/jsx/#prod-JSXChild):
//
// JSXChild :
// JSXText
// JSXElement
// JSXFragment
// { JSXChildExpression_opt }
//
// This is the "{}" part in "<a>{}</a>". We allow this because some people
// put comments there and then expect to be able to process them from
// esbuild's output. These absent AST nodes are completely omitted when
// JSX is transformed to JS. They are only present when JSX preservation is
// enabled.
NullableChildren []Expr
CloseLoc logger.Loc
IsTagSingleLine bool
}
// The JSX specification doesn't say how JSX text is supposed to be interpreted
// so our "preserve" JSX transform should reproduce the original source code
// verbatim. One reason why this matters is because there is no canonical way
// to interpret JSX text (Babel and TypeScript differ in what newlines mean).
// Another reason is that some people want to do custom things such as this:
// https://github.com/evanw/esbuild/issues/3605
type EJSXText struct {
Raw string
}
type ENumber struct{ Value float64 }
type EBigInt struct{ Value string }
type EObject struct {
Properties []Property
CommaAfterSpread logger.Loc
CloseBraceLoc logger.Loc
IsSingleLine bool
IsParenthesized bool
}
type ESpread struct{ Value Expr }
// This is used for both strings and no-substitution template literals to reduce
// the number of cases that need to be checked for string optimization code
type EString struct {
Value []uint16
LegacyOctalLoc logger.Loc
PreferTemplate bool
HasPropertyKeyComment bool // If true, a preceding comment contains "@__KEY__"
ContainsUniqueKey bool // If true, this string must not be wrapped
}
type TemplatePart struct {
Value Expr
TailRaw string // Only use when "TagOrNil" is not nil
TailCooked []uint16 // Only use when "TagOrNil" is nil
TailLoc logger.Loc
}
type ETemplate struct {
TagOrNil Expr
HeadRaw string // Only use when "TagOrNil" is not nil
HeadCooked []uint16 // Only use when "TagOrNil" is nil
Parts []TemplatePart
HeadLoc logger.Loc
LegacyOctalLoc logger.Loc
// True if this is a tagged template literal with a comment that indicates
// this function call can be removed if the result is unused. Note that the
// arguments are not considered to be part of the call. If the call itself
// is removed due to this annotation, the arguments must remain if they have
// side effects (including the string conversions).
CanBeUnwrappedIfUnused bool
// If the tag is present, it is expected to be a function and is called. If
// the tag is a syntactic property access, then the value for "this" in the
// function call is the object whose property was accessed (e.g. in "a.b``"
// the value for "this" in "a.b" is "a"). We need to ensure that if "a``"
// ever becomes "b.c``" later on due to optimizations, it is written as
// "(0, b.c)``" to avoid a behavior change.
TagWasOriginallyPropertyAccess bool
}
type ERegExp struct{ Value string }
type EInlinedEnum struct {
Value Expr
Comment string
}
// AnnotationFlags is a bit set of inline annotations that "EAnnotation"
// attaches to an expression.
type AnnotationFlags uint8

const (
	// This is sort of like an IIFE with a "/* @__PURE__ */" comment except
	// it's an inline annotation on an expression itself without the nested
	// scope. Sometimes we can't easily introduce a new scope (e.g. if the
	// expression uses "await").
	CanBeRemovedIfUnusedFlag AnnotationFlags = 1 << iota
)

// Has reports whether the given flag bit is set.
func (flags AnnotationFlags) Has(flag AnnotationFlags) bool {
	return flags&flag != 0
}
type EAnnotation struct {
Value Expr
Flags AnnotationFlags
}
// EAwait is an "await Value" expression.
type EAwait struct {
	Value Expr
}

// EYield is a "yield" or "yield*" expression. ValueOrNil may be nil
// because "yield" with no operand is valid.
type EYield struct {
	ValueOrNil Expr
	IsStar     bool // True for "yield*"
}

// EIf is a conditional ("ternary") expression: "Test ? Yes : No".
type EIf struct {
	Test Expr
	Yes  Expr
	No   Expr
}
type ERequireString struct {
ImportRecordIndex uint32
CloseParenLoc logger.Loc
}
type ERequireResolveString struct {
ImportRecordIndex uint32
CloseParenLoc logger.Loc
}
type EImportString struct {
ImportRecordIndex uint32
CloseParenLoc logger.Loc
}
type EImportCall struct {
Expr Expr
OptionsOrNil Expr
CloseParenLoc logger.Loc
Phase ast.ImportPhase
}
type Stmt struct {
Data S
Loc logger.Loc
}
// This interface is never called. Its purpose is to encode a variant type in
// Go's type system.
type S interface{ isStmt() }
func (*SBlock) isStmt() {}
func (*SComment) isStmt() {}
func (*SDebugger) isStmt() {}
func (*SDirective) isStmt() {}
func (*SEmpty) isStmt() {}
func (*STypeScript) isStmt() {}
func (*SExportClause) isStmt() {}
func (*SExportFrom) isStmt() {}
func (*SExportDefault) isStmt() {}
func (*SExportStar) isStmt() {}
func (*SExportEquals) isStmt() {}
func (*SLazyExport) isStmt() {}
func (*SExpr) isStmt() {}
func (*SEnum) isStmt() {}
func (*SNamespace) isStmt() {}
func (*SFunction) isStmt() {}
func (*SClass) isStmt() {}
func (*SLabel) isStmt() {}
func (*SIf) isStmt() {}
func (*SFor) isStmt() {}
func (*SForIn) isStmt() {}
func (*SForOf) isStmt() {}
func (*SDoWhile) isStmt() {}
func (*SWhile) isStmt() {}
func (*SWith) isStmt() {}
func (*STry) isStmt() {}
func (*SSwitch) isStmt() {}
func (*SImport) isStmt() {}
func (*SReturn) isStmt() {}
func (*SThrow) isStmt() {}
func (*SLocal) isStmt() {}
func (*SBreak) isStmt() {}
func (*SContinue) isStmt() {}
type SBlock struct {
Stmts []Stmt
CloseBraceLoc logger.Loc
}
type SEmpty struct{}
// This is a stand-in for a TypeScript type declaration
type STypeScript struct {
WasDeclareClass bool
}
type SComment struct {
Text string
IsLegalComment bool
}
type SDebugger struct{}
type SDirective struct {
Value []uint16
LegacyOctalLoc logger.Loc
}
type SExportClause struct {
Items []ClauseItem
IsSingleLine bool
}
type SExportFrom struct {
Items []ClauseItem
NamespaceRef ast.Ref
ImportRecordIndex uint32
IsSingleLine bool
}
type SExportDefault struct {
Value Stmt // May be a SExpr or SFunction or SClass
DefaultName ast.LocRef
}
type ExportStarAlias struct {
// Although this alias name starts off as being the same as the statement's
// namespace symbol, it may diverge if the namespace symbol name is minified.
// The original alias name is preserved here to avoid this scenario.
OriginalName string
Loc logger.Loc
}
type SExportStar struct {
Alias *ExportStarAlias
NamespaceRef ast.Ref
ImportRecordIndex uint32
}
// This is an "export = value;" statement in TypeScript
type SExportEquals struct {
Value Expr
}
// The decision of whether to export an expression using "module.exports" or
// "export default" is deferred until linking using this statement kind
type SLazyExport struct {
Value Expr
}
type SExpr struct {
Value Expr
// This is set to true for automatically-generated expressions that are part
// of class syntax lowering. A single class declaration may end up with many
// generated expressions after it (e.g. class field initializations, a call
// to keep the original value of the "name" property). When this happens we
// can't tell that the class is side-effect free anymore because all of these
// methods mutate the class. We use this annotation for that instead.
IsFromClassOrFnThatCanBeRemovedIfUnused bool
}
type EnumValue struct {
ValueOrNil Expr
Name []uint16
Ref ast.Ref
Loc logger.Loc
}
type SEnum struct {
Values []EnumValue
Name ast.LocRef
Arg ast.Ref
IsExport bool
}
type SNamespace struct {
Stmts []Stmt
Name ast.LocRef
Arg ast.Ref
IsExport bool
}
type SFunction struct {
Fn Fn
IsExport bool
}
type SClass struct {
Class Class
IsExport bool
}
type SLabel struct {
Stmt Stmt
Name ast.LocRef
IsSingleLineStmt bool
}
type SIf struct {
Test Expr
Yes Stmt
NoOrNil Stmt
IsSingleLineYes bool
IsSingleLineNo bool
}
type SFor struct {
InitOrNil Stmt // May be a SConst, SLet, SVar, or SExpr
TestOrNil Expr
UpdateOrNil Expr
Body Stmt
IsSingleLineBody bool
IsLoweredForAwait bool
}
type SForIn struct {
Init Stmt // May be a SConst, SLet, SVar, or SExpr
Value Expr
Body Stmt
IsSingleLineBody bool
}
type SForOf struct {
Init Stmt // May be a SConst, SLet, SVar, or SExpr
Value Expr
Body Stmt
Await logger.Range
IsSingleLineBody bool
}
type SDoWhile struct {
Body Stmt
Test Expr
}
type SWhile struct {
Test Expr
Body Stmt
IsSingleLineBody bool
}
type SWith struct {
Value Expr
Body Stmt
BodyLoc logger.Loc
IsSingleLineBody bool
}
type Catch struct {
BindingOrNil Binding
Block SBlock
Loc logger.Loc
BlockLoc logger.Loc
}
type Finally struct {
Block SBlock
Loc logger.Loc
}
type STry struct {
Catch *Catch
Finally *Finally
Block SBlock
BlockLoc logger.Loc
}
type Case struct {
ValueOrNil Expr // If this is nil, this is "default" instead of "case"
Body []Stmt
Loc logger.Loc
}
type SSwitch struct {
Test Expr
Cases []Case
BodyLoc logger.Loc
CloseBraceLoc logger.Loc
}
// This object represents all of these types of import statements:
//
// import 'path'
// import {item1, item2} from 'path'
// import * as ns from 'path'
// import defaultItem, {item1, item2} from 'path'
// import defaultItem, * as ns from 'path'
//
// Many parts are optional and can be combined in different ways. The only
// restriction is that you cannot have both a clause and a star namespace.
type SImport struct {
DefaultName *ast.LocRef
Items *[]ClauseItem
StarNameLoc *logger.Loc
// If this is a star import: This is a Ref for the namespace symbol. The Loc
// for the symbol is StarLoc.
//
// Otherwise: This is an auto-generated Ref for the namespace representing
// the imported file. In this case StarLoc is nil. The NamespaceRef is used
// when converting this module to a CommonJS module.
NamespaceRef ast.Ref
ImportRecordIndex uint32
IsSingleLine bool
}
type SReturn struct {
ValueOrNil Expr
}
type SThrow struct {
Value Expr
}
// LocalKind is the declaration keyword of a local variable statement:
// "var", "let", "const", "using", or "await using".
type LocalKind uint8

const (
	LocalVar LocalKind = iota
	LocalLet
	LocalConst
	LocalUsing
	LocalAwaitUsing
)

// IsUsing reports whether this is a "using" or "await using" declaration.
// It relies on those two kinds being declared last in the enumeration.
func (kind LocalKind) IsUsing() bool {
	switch kind {
	case LocalUsing, LocalAwaitUsing:
		return true
	}
	return false
}
type SLocal struct {
Decls []Decl
Kind LocalKind
IsExport bool
// The TypeScript compiler doesn't generate code for "import foo = bar"
// statements where the import is never used.
WasTSImportEquals bool
}
type SBreak struct {
Label *ast.LocRef
}
type SContinue struct {
Label *ast.LocRef
}
// ClauseItem is one entry in an import or export clause, e.g. the
// "foo as bar" in "import {foo as bar} from 'path'".
type ClauseItem struct {
	Alias string // The name as seen by the other module

	// This is the original name of the symbol stored in "Name". It's needed for
	// "SExportClause" statements such as this:
	//
	//   export {foo as bar} from 'path'
	//
	// In this case both "foo" and "bar" are aliases because it's a re-export.
	// We need to preserve both aliases in case the symbol is renamed. In this
	// example, "foo" is "OriginalName" and "bar" is "Alias".
	OriginalName string

	AliasLoc logger.Loc
	Name     ast.LocRef // The local symbol this clause item binds
}

// Decl is one "binding = value" declarator within a variable statement.
// ValueOrNil is nil when the declaration has no initializer.
type Decl struct {
	Binding    Binding
	ValueOrNil Expr
}
// ScopeKind classifies one lexical scope in the scope tree.
type ScopeKind uint8

const (
	ScopeBlock ScopeKind = iota
	ScopeWith
	ScopeLabel
	ScopeClassName
	ScopeClassBody
	ScopeCatchBinding

	// The scopes below stop hoisted variables from extending into parent scopes
	ScopeEntry // This is a module, TypeScript enum, or TypeScript namespace
	ScopeFunctionArgs
	ScopeFunctionBody
	ScopeClassStaticInit
)

// StopsHoisting reports whether hoisted "var" declarations stop at this
// scope instead of continuing into its parent. This relies on all such
// scope kinds being declared at or after "ScopeEntry".
func (kind ScopeKind) StopsHoisting() bool {
	if kind < ScopeEntry {
		return false
	}
	return true
}
// ScopeMember is one name declared in a scope: the symbol reference paired
// with the source location of its declaration.
type ScopeMember struct {
	Ref ast.Ref
	Loc logger.Loc
}
type Scope struct {
// This will be non-nil if this is a TypeScript "namespace" or "enum"
TSNamespace *TSNamespaceScope
Parent *Scope
Children []*Scope
Members map[string]ScopeMember
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | true |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/css_ast/css_ast.go | internal/css_ast/css_ast.go | package css_ast
import (
"strconv"
"strings"
"github.com/evanw/esbuild/internal/ast"
"github.com/evanw/esbuild/internal/css_lexer"
"github.com/evanw/esbuild/internal/helpers"
"github.com/evanw/esbuild/internal/logger"
)
// CSS syntax comes in two layers: a minimal syntax that generally accepts
// anything that looks vaguely like CSS, and a large set of built-in rules
// (the things browsers actually interpret). That way CSS parsers can read
// unknown rules and skip over them without having to stop due to errors.
//
// This AST format is mostly just the minimal syntax. It parses unknown rules
// into a tree with enough information that it can write them back out again.
// There are some additional layers of syntax including selectors and @-rules
// which allow for better pretty-printing and minification.
//
// Most of the AST just references ranges of the original file by keeping the
// original "Token" values around from the lexer. This is a memory-efficient
// representation that helps provide good parsing and printing performance.
// AST is the parsed representation of one CSS file.
type AST struct {
	Symbols              []ast.Symbol       // The symbol table for this file
	CharFreq             *ast.CharFreq      // Character frequency data — presumably used to pick minified names; confirm against the renamer
	ImportRecords        []ast.ImportRecord // One entry per import/url reference; Token.PayloadIndex points in here for URL tokens
	Rules                []Rule             // The top-level rules of the stylesheet
	SourceMapComment     logger.Span
	ApproximateLineCount int32

	// CSS modules support: names declared locally in this file, and the
	// scopes mapping source names to symbols.
	LocalSymbols []ast.LocRef
	LocalScope   map[string]ast.LocRef
	GlobalScope  map[string]ast.LocRef
	Composes     map[ast.Ref]*Composes

	// These contain all layer names in the file. It can be used to replace the
	// layer-related side effects of importing this file. They are split into two
	// groups (those before and after "@import" rules) so that the linker can put
	// them in the right places.
	LayersPreImport  [][]string
	LayersPostImport [][]string
}

// Composes records the targets of "composes" declarations for one local
// class name (CSS modules).
type Composes struct {
	// Note that each of these can be either local or global. Local examples:
	//
	//   .foo { composes: bar }
	//   .bar { color: red }
	//
	// Global examples:
	//
	//   .foo { composes: bar from global }
	//   .foo :global { composes: bar }
	//   .foo { :global { composes: bar } }
	//   :global .bar { color: red }
	//
	Names []ast.LocRef

	// Each of these is local in another file. For example:
	//
	//   .foo { composes: bar from "bar.css" }
	//   .foo { composes: bar from url(bar.css) }
	//
	ImportedNames []ImportedComposesName

	// This tracks what CSS properties each class uses so that we can warn when
	// "composes" is used incorrectly to compose two classes from separate files
	// that declare the same CSS properties.
	Properties map[string]logger.Loc
}

// ImportedComposesName is a "composes: name from <file>" target that lives
// in another file, referenced through an import record.
type ImportedComposesName struct {
	Alias             string
	AliasLoc          logger.Loc
	ImportRecordIndex uint32
}
// We create a lot of tokens, so make sure this layout is memory-efficient.
// The layout here isn't optimal because it biases for convenience (e.g.
// "string" could be shorter) but at least the ordering of fields was
// deliberately chosen to minimize size.
type Token struct {
// Contains the child tokens for component values that are simple blocks.
// These are either "(", "{", "[", or function tokens. The closing token is
// implicit and is not stored.
Children *[]Token // 8 bytes
// This is the raw contents of the token most of the time. However, it
// contains the decoded string contents for "TString" tokens.
Text string // 16 bytes
// The source location at the start of the token
Loc logger.Loc // 4 bytes
// URL tokens have an associated import record at the top-level of the AST.
// This index points to that import record.
//
// Symbol tokens have an associated symbol. This index is the "InnerIndex"
// of the "Ref" for this symbol. The "SourceIndex" for the "Ref" is just
// the source index of the file for this AST.
PayloadIndex uint32 // 4 bytes
// The division between the number and the unit for "TDimension" tokens.
UnitOffset uint16 // 2 bytes
// This will never be "TWhitespace" because whitespace isn't stored as a
// token directly. Instead it is stored in "HasWhitespaceAfter" on the
// previous token. This is to make it easier to pattern-match against
// tokens when handling CSS rules, since whitespace almost always doesn't
// matter. That way you can pattern match against e.g. "rgb(r, g, b)" and
// not have to handle all possible combinations of embedded whitespace
// tokens.
//
// There is one exception to this: when in verbatim whitespace mode and
// the token list is non-empty and is only whitespace tokens. In that case
// a single whitespace token is emitted. This is because otherwise there
// would be no tokens to attach the whitespace before/after flags to.
Kind css_lexer.T // 1 byte
// These flags indicate the presence of a "TWhitespace" token before or after
// this token. There should be whitespace printed between two tokens if either
// token indicates that there should be whitespace. Note that whitespace may
// be altered by processing in certain situations (e.g. minification).
Whitespace WhitespaceFlags // 1 byte
}
type WhitespaceFlags uint8
const (
WhitespaceBefore WhitespaceFlags = 1 << iota
WhitespaceAfter
)
// This is necessary when comparing tokens between two different files
type CrossFileEqualityCheck struct {
ImportRecordsA []ast.ImportRecord
ImportRecordsB []ast.ImportRecord
Symbols ast.SymbolMap
SourceIndexA uint32
SourceIndexB uint32
}
// RefsAreEquivalent reports whether two symbol references should be treated
// as the same symbol for equality purposes. References are first followed
// through any symbol links, and two distinct global CSS symbols still count
// as equivalent when they print with the same original name.
func (check *CrossFileEqualityCheck) RefsAreEquivalent(a ast.Ref, b ast.Ref) bool {
	// Identical references are trivially equivalent
	if a == b {
		return true
	}

	// Without symbol information there is nothing more to compare
	if check == nil || check.Symbols.SymbolsForSource == nil {
		return false
	}

	// Resolve both references through any symbol links before comparing
	a = ast.FollowSymbols(check.Symbols, a)
	b = ast.FollowSymbols(check.Symbols, b)
	if a == b {
		return true
	}

	// Two global CSS names that render identically are interchangeable
	sa := check.Symbols.Get(a)
	sb := check.Symbols.Get(b)
	return sa.Kind == ast.SymbolGlobalCSS && sb.Kind == ast.SymbolGlobalCSS && sa.OriginalName == sb.OriginalName
}
// Equal reports whether two tokens (including their children, recursively)
// are equal. Pass a non-nil "check" when the two tokens come from different
// files so that URL import records and symbol references can be compared by
// content instead of by index.
func (a Token) Equal(b Token, check *CrossFileEqualityCheck) bool {
	if a.Kind == b.Kind && a.Text == b.Text && a.Whitespace == b.Whitespace {
		// URLs should be compared based on the text of the associated import record
		// (which is what will actually be printed) instead of the original text
		if a.Kind == css_lexer.TURL {
			if check == nil {
				// If both tokens are in the same file, just compare the index
				if a.PayloadIndex != b.PayloadIndex {
					return false
				}
			} else {
				// If the tokens come from separate files, compare the import records
				// themselves instead of comparing the indices. This can happen when
				// the linker runs a "DuplicateRuleRemover" during bundling. This
				// doesn't compare the source indices because at this point during
				// linking, paths inside the bundle (e.g. due to the "copy" loader)
				// should have already been converted into text (e.g. the "unique key"
				// string).
				if check.ImportRecordsA[a.PayloadIndex].Path.Text !=
					check.ImportRecordsB[b.PayloadIndex].Path.Text {
					return false
				}
			}
		}

		// Symbols should be compared based on the symbol reference instead of the
		// original text
		if a.Kind == css_lexer.TSymbol {
			if check == nil {
				// If both tokens are in the same file, just compare the index
				if a.PayloadIndex != b.PayloadIndex {
					return false
				}
			} else {
				// If the tokens come from separate files, compare the symbols themselves
				refA := ast.Ref{SourceIndex: check.SourceIndexA, InnerIndex: a.PayloadIndex}
				refB := ast.Ref{SourceIndex: check.SourceIndexB, InnerIndex: b.PayloadIndex}
				if !check.RefsAreEquivalent(refA, refB) {
					return false
				}
			}
		}

		// Compare children recursively; both must be present or both absent
		if a.Children == nil && b.Children == nil {
			return true
		}

		if a.Children != nil && b.Children != nil && TokensEqual(*a.Children, *b.Children, check) {
			return true
		}
	}

	return false
}
// TokensEqual reports whether two token slices are element-wise equal.
// The optional "check" is used when the slices come from different files.
func TokensEqual(a []Token, b []Token, check *CrossFileEqualityCheck) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if !a[i].Equal(b[i], check) {
			return false
		}
	}
	return true
}
// HashTokens folds a token slice (recursively including children) into the
// given hash. The text of URL tokens is skipped because the printed output
// comes from the associated import record, not from the original text.
func HashTokens(hash uint32, tokens []Token) uint32 {
	hash = helpers.HashCombine(hash, uint32(len(tokens)))
	for i := range tokens {
		t := &tokens[i]
		hash = helpers.HashCombine(hash, uint32(t.Kind))
		if t.Kind != css_lexer.TURL {
			hash = helpers.HashCombineString(hash, t.Text)
		}
		if t.Children != nil {
			hash = HashTokens(hash, *t.Children)
		}
	}
	return hash
}
// EqualIgnoringWhitespace is like Equal except that the whitespace flags are
// not compared and payload indices are compared directly (same-file only).
func (a Token) EqualIgnoringWhitespace(b Token) bool {
	if a.Kind != b.Kind || a.Text != b.Text || a.PayloadIndex != b.PayloadIndex {
		return false
	}
	switch {
	case a.Children == nil && b.Children == nil:
		return true
	case a.Children != nil && b.Children != nil:
		return TokensEqualIgnoringWhitespace(*a.Children, *b.Children)
	}
	return false
}
// TokensEqualIgnoringWhitespace reports element-wise equality of two token
// slices while ignoring whitespace flags on each token.
func TokensEqualIgnoringWhitespace(a []Token, b []Token) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if !a[i].EqualIgnoringWhitespace(b[i]) {
			return false
		}
	}
	return true
}
// TokensAreCommaSeparated returns true if the tokens form a comma-separated
// list: an odd number of tokens with a "," token at every odd index. An
// empty slice returns false.
func TokensAreCommaSeparated(tokens []Token) bool {
	n := len(tokens)
	if n&1 == 0 {
		return false
	}
	for i := 1; i < n; i += 2 {
		if tokens[i].Kind != css_lexer.TComma {
			return false
		}
	}
	return true
}
type PercentageFlags uint8
const (
AllowPercentageBelow0 PercentageFlags = 1 << iota
AllowPercentageAbove100
AllowAnyPercentage = AllowPercentageBelow0 | AllowPercentageAbove100
)
// NumberOrFractionForPercentage converts this token into a float64. Plain
// number tokens are returned as-is. Percentage tokens are converted to a
// fraction of "percentReferenceRange" (e.g. "50%" with a range of 1 yields
// 0.5) and are clamped to [0, percentReferenceRange] unless the flags allow
// values below 0% or above 100%. Returns false for any other token kind or
// when the text fails to parse.
func (t Token) NumberOrFractionForPercentage(percentReferenceRange float64, flags PercentageFlags) (float64, bool) {
	switch t.Kind {
	case css_lexer.TNumber:
		if f, err := strconv.ParseFloat(t.Text, 64); err == nil {
			return f, true
		}

	case css_lexer.TPercentage:
		if f, err := strconv.ParseFloat(t.PercentageValue(), 64); err == nil {
			// Clamp negative percentages to zero unless allowed by the flags
			if (flags&AllowPercentageBelow0) == 0 && f < 0 {
				return 0, true
			}
			// Clamp percentages over 100% to the full range unless allowed
			if (flags&AllowPercentageAbove100) == 0 && f > 100 {
				return percentReferenceRange, true
			}
			return f / 100 * percentReferenceRange, true
		}
	}

	return 0, false
}
// ClampedFractionForPercentage converts a percentage token into a fraction
// clamped to [0, 1]. The second result is false for non-percentage tokens
// or when the percentage text fails to parse.
func (t Token) ClampedFractionForPercentage() (float64, bool) {
	if t.Kind != css_lexer.TPercentage {
		return 0, false
	}
	f, err := strconv.ParseFloat(t.PercentageValue(), 64)
	if err != nil {
		return 0, false
	}
	switch {
	case f < 0:
		return 0, true
	case f > 100:
		return 1, true
	}
	return f / 100, true
}
// https://drafts.csswg.org/css-values-3/#lengths
// For zero lengths the unit identifier is optional
// (i.e. can be syntactically represented as the <number> 0).
func (t *Token) TurnLengthIntoNumberIfZero() bool {
	if t.Kind != css_lexer.TDimension || t.DimensionValue() != "0" {
		return false
	}
	t.Kind = css_lexer.TNumber
	t.Text = "0"
	return true
}

// TurnLengthOrPercentageIntoNumberIfZero additionally rewrites the token
// "0%" into the plain number "0" (on top of the zero-length handling above).
func (t *Token) TurnLengthOrPercentageIntoNumberIfZero() bool {
	if t.Kind == css_lexer.TPercentage && t.PercentageValue() == "0" {
		t.Kind = css_lexer.TNumber
		t.Text = "0"
		return true
	}
	return t.TurnLengthIntoNumberIfZero()
}
// PercentageValue returns the numeric part of a percentage token, i.e. the
// text with the trailing "%" removed. Only valid for TPercentage tokens.
func (t Token) PercentageValue() string {
	return t.Text[:len(t.Text)-1]
}

// DimensionValue returns the numeric part of a dimension token (everything
// before the unit). Only valid for TDimension tokens.
func (t Token) DimensionValue() string {
	return t.Text[:t.UnitOffset]
}

// DimensionUnit returns the unit part of a dimension token (everything from
// the unit offset onward). Only valid for TDimension tokens.
func (t Token) DimensionUnit() string {
	return t.Text[t.UnitOffset:]
}

// DimensionUnitIsSafeLength returns true when the dimension's unit is a
// length unit that can be assumed to be universally supported.
func (t Token) DimensionUnitIsSafeLength() bool {
	switch strings.ToLower(t.DimensionUnit()) {
	// These units can be reasonably expected to be supported everywhere.
	// Information used: https://developer.mozilla.org/en-US/docs/Web/CSS/length
	case "cm", "em", "in", "mm", "pc", "pt", "px":
		return true
	}
	return false
}
// IsZero returns true for the exact number token "0".
func (t Token) IsZero() bool {
	return t.Text == "0" && t.Kind == css_lexer.TNumber
}

// IsOne returns true for the exact number token "1".
func (t Token) IsOne() bool {
	return t.Text == "1" && t.Kind == css_lexer.TNumber
}

// IsAngle returns true for dimension tokens whose unit is an angle unit.
func (t Token) IsAngle() bool {
	if t.Kind != css_lexer.TDimension {
		return false
	}
	switch strings.ToLower(t.DimensionUnit()) {
	case "deg", "grad", "rad", "turn":
		return true
	}
	return false
}
// CloneTokensWithoutImportRecords deep-copies a token slice. Child token
// slices are cloned recursively so that mutating the clone doesn't affect
// the original tokens. Import record indices are copied verbatim.
func CloneTokensWithoutImportRecords(tokensIn []Token) (tokensOut []Token) {
	for i := range tokensIn {
		t := tokensIn[i]
		if t.Children != nil {
			children := CloneTokensWithoutImportRecords(*t.Children)
			t.Children = &children
		}
		tokensOut = append(tokensOut, t)
	}
	return tokensOut
}
// CloneTokensWithImportRecords deep-copies "tokensIn" onto "tokensOut"
// while also copying each URL token's import record from "importRecordsIn"
// onto "importRecordsOut" and rewriting the token's PayloadIndex to point
// at the copied record. Source locations are cleared because the clones are
// intended for use in a different file. Returns the extended output slices.
func CloneTokensWithImportRecords(
	tokensIn []Token, importRecordsIn []ast.ImportRecord,
	tokensOut []Token, importRecordsOut []ast.ImportRecord,
) ([]Token, []ast.ImportRecord) {
	// Preallocate the output array if we can
	if tokensOut == nil {
		tokensOut = make([]Token, 0, len(tokensIn))
	}

	for _, t := range tokensIn {
		// Clear the source mapping if this token is being used in another file
		t.Loc.Start = 0

		// If this is a URL token, also clone the import record
		if t.Kind == css_lexer.TURL {
			importRecordIndex := uint32(len(importRecordsOut))
			importRecordsOut = append(importRecordsOut, importRecordsIn[t.PayloadIndex])
			t.PayloadIndex = importRecordIndex
		}

		// Also search for URL tokens in this token's children
		if t.Children != nil {
			var children []Token
			children, importRecordsOut = CloneTokensWithImportRecords(*t.Children, importRecordsIn, children, importRecordsOut)
			t.Children = &children
		}

		tokensOut = append(tokensOut, t)
	}

	return tokensOut, importRecordsOut
}
func CloneMediaQueriesWithImportRecords(
queriesIn []MediaQuery, importRecordsIn []ast.ImportRecord,
queriesOut []MediaQuery, importRecordsOut []ast.ImportRecord,
) ([]MediaQuery, []ast.ImportRecord) {
// Preallocate the output array if we can
if queriesOut == nil {
queriesOut = make([]MediaQuery, 0, len(queriesIn))
}
// Recursively clone each query
for _, query := range queriesIn {
query.Data, importRecordsOut = query.Data.CloneWithImportRecords(importRecordsIn, importRecordsOut)
queriesOut = append(queriesOut, query)
}
return queriesOut, importRecordsOut
}
type Rule struct {
Data R
Loc logger.Loc
}
type R interface {
Equal(rule R, check *CrossFileEqualityCheck) bool
Hash() (uint32, bool)
}
// RulesEqual reports whether two rule slices are element-wise equal.
// The optional "check" is used when the rules come from different files.
func RulesEqual(a []Rule, b []Rule, check *CrossFileEqualityCheck) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if !a[i].Data.Equal(b[i].Data, check) {
			return false
		}
	}
	return true
}
// HashRules folds the hashes of a rule slice into the given hash. Rules
// that decline to provide a hash contribute a constant zero instead.
func HashRules(hash uint32, rules []Rule) uint32 {
	hash = helpers.HashCombine(hash, uint32(len(rules)))
	for _, child := range rules {
		childHash, ok := child.Data.Hash()
		if !ok {
			childHash = 0
		}
		hash = helpers.HashCombine(hash, childHash)
	}
	return hash
}
type RAtCharset struct {
Encoding string
}
func (a *RAtCharset) Equal(rule R, check *CrossFileEqualityCheck) bool {
b, ok := rule.(*RAtCharset)
return ok && a.Encoding == b.Encoding
}
func (r *RAtCharset) Hash() (uint32, bool) {
hash := uint32(1)
hash = helpers.HashCombineString(hash, r.Encoding)
return hash, true
}
type ImportConditions struct {
// The syntax for "@import" has been extended with optional conditions that
// behave as if the imported file was wrapped in a "@layer", "@supports",
// and/or "@media" rule. The possible syntax combinations are as follows:
//
// @import url(...);
// @import url(...) layer;
// @import url(...) layer(layer-name);
// @import url(...) layer(layer-name) supports(supports-condition);
// @import url(...) layer(layer-name) supports(supports-condition) list-of-media-queries;
// @import url(...) layer(layer-name) list-of-media-queries;
// @import url(...) supports(supports-condition);
// @import url(...) supports(supports-condition) list-of-media-queries;
// @import url(...) list-of-media-queries;
//
// From: https://developer.mozilla.org/en-US/docs/Web/CSS/@import#syntax
Queries []MediaQuery
// These two fields will only ever have zero or one tokens. However, they are
// implemented as arrays for convenience because most of esbuild's helper
// functions that operate on tokens take arrays instead of individual tokens.
Layers []Token
Supports []Token
}
func (c *ImportConditions) CloneWithImportRecords(importRecordsIn []ast.ImportRecord, importRecordsOut []ast.ImportRecord) (ImportConditions, []ast.ImportRecord) {
result := ImportConditions{}
result.Layers, importRecordsOut = CloneTokensWithImportRecords(c.Layers, importRecordsIn, nil, importRecordsOut)
result.Supports, importRecordsOut = CloneTokensWithImportRecords(c.Supports, importRecordsIn, nil, importRecordsOut)
result.Queries, importRecordsOut = CloneMediaQueriesWithImportRecords(c.Queries, importRecordsIn, nil, importRecordsOut)
return result, importRecordsOut
}
type RAtImport struct {
ImportConditions *ImportConditions
ImportRecordIndex uint32
}
func (*RAtImport) Equal(rule R, check *CrossFileEqualityCheck) bool {
return false
}
func (r *RAtImport) Hash() (uint32, bool) {
return 0, false
}
type RAtKeyframes struct {
AtToken string
Name ast.LocRef
Blocks []KeyframeBlock
CloseBraceLoc logger.Loc
}
type KeyframeBlock struct {
Selectors []string
Rules []Rule
Loc logger.Loc
CloseBraceLoc logger.Loc
}
func (a *RAtKeyframes) Equal(rule R, check *CrossFileEqualityCheck) bool {
if b, ok := rule.(*RAtKeyframes); ok && strings.EqualFold(a.AtToken, b.AtToken) && check.RefsAreEquivalent(a.Name.Ref, b.Name.Ref) && len(a.Blocks) == len(b.Blocks) {
for i, ai := range a.Blocks {
bi := b.Blocks[i]
if len(ai.Selectors) != len(bi.Selectors) {
return false
}
for j, aj := range ai.Selectors {
if aj != bi.Selectors[j] {
return false
}
}
if !RulesEqual(ai.Rules, bi.Rules, check) {
return false
}
}
return true
}
return false
}
func (r *RAtKeyframes) Hash() (uint32, bool) {
hash := uint32(2)
hash = helpers.HashCombineString(hash, r.AtToken)
hash = helpers.HashCombine(hash, uint32(len(r.Blocks)))
for _, block := range r.Blocks {
hash = helpers.HashCombine(hash, uint32(len(block.Selectors)))
for _, sel := range block.Selectors {
hash = helpers.HashCombineString(hash, sel)
}
hash = HashRules(hash, block.Rules)
}
return hash, true
}
type RKnownAt struct {
AtToken string
Prelude []Token
Rules []Rule
CloseBraceLoc logger.Loc
}
func (a *RKnownAt) Equal(rule R, check *CrossFileEqualityCheck) bool {
b, ok := rule.(*RKnownAt)
return ok && strings.EqualFold(a.AtToken, b.AtToken) && TokensEqual(a.Prelude, b.Prelude, check) && RulesEqual(a.Rules, b.Rules, check)
}
func (r *RKnownAt) Hash() (uint32, bool) {
hash := uint32(3)
hash = helpers.HashCombineString(hash, r.AtToken)
hash = HashTokens(hash, r.Prelude)
hash = HashRules(hash, r.Rules)
return hash, true
}
type RUnknownAt struct {
AtToken string
Prelude []Token
Block []Token
}
func (a *RUnknownAt) Equal(rule R, check *CrossFileEqualityCheck) bool {
b, ok := rule.(*RUnknownAt)
return ok && strings.EqualFold(a.AtToken, b.AtToken) && TokensEqual(a.Prelude, b.Prelude, check) && TokensEqual(a.Block, b.Block, check)
}
func (r *RUnknownAt) Hash() (uint32, bool) {
hash := uint32(4)
hash = helpers.HashCombineString(hash, r.AtToken)
hash = HashTokens(hash, r.Prelude)
hash = HashTokens(hash, r.Block)
return hash, true
}
type RSelector struct {
Selectors []ComplexSelector
Rules []Rule
CloseBraceLoc logger.Loc
}
func (a *RSelector) Equal(rule R, check *CrossFileEqualityCheck) bool {
b, ok := rule.(*RSelector)
return ok && ComplexSelectorsEqual(a.Selectors, b.Selectors, check) && RulesEqual(a.Rules, b.Rules, check)
}
func (r *RSelector) Hash() (uint32, bool) {
hash := uint32(5)
hash = helpers.HashCombine(hash, uint32(len(r.Selectors)))
hash = HashComplexSelectors(hash, r.Selectors)
hash = HashRules(hash, r.Rules)
return hash, true
}
type RQualified struct {
Prelude []Token
Rules []Rule
CloseBraceLoc logger.Loc
}
func (a *RQualified) Equal(rule R, check *CrossFileEqualityCheck) bool {
b, ok := rule.(*RQualified)
return ok && TokensEqual(a.Prelude, b.Prelude, check) && RulesEqual(a.Rules, b.Rules, check)
}
func (r *RQualified) Hash() (uint32, bool) {
hash := uint32(6)
hash = HashTokens(hash, r.Prelude)
hash = HashRules(hash, r.Rules)
return hash, true
}
type RDeclaration struct {
KeyText string
Value []Token
KeyRange logger.Range
Key D // Compare using this instead of "Key" for speed
Important bool
}
func (a *RDeclaration) Equal(rule R, check *CrossFileEqualityCheck) bool {
b, ok := rule.(*RDeclaration)
return ok && a.KeyText == b.KeyText && TokensEqual(a.Value, b.Value, check) && a.Important == b.Important
}
func (r *RDeclaration) Hash() (uint32, bool) {
var hash uint32
if r.Key == DUnknown {
if r.Important {
hash = uint32(7)
} else {
hash = uint32(8)
}
hash = helpers.HashCombineString(hash, r.KeyText)
} else {
if r.Important {
hash = uint32(9)
} else {
hash = uint32(10)
}
hash = helpers.HashCombine(hash, uint32(r.Key))
}
hash = HashTokens(hash, r.Value)
return hash, true
}
type RBadDeclaration struct {
Tokens []Token
}
func (a *RBadDeclaration) Equal(rule R, check *CrossFileEqualityCheck) bool {
b, ok := rule.(*RBadDeclaration)
return ok && TokensEqual(a.Tokens, b.Tokens, check)
}
func (r *RBadDeclaration) Hash() (uint32, bool) {
hash := uint32(7)
hash = HashTokens(hash, r.Tokens)
return hash, true
}
type RComment struct {
Text string
}
func (a *RComment) Equal(rule R, check *CrossFileEqualityCheck) bool {
b, ok := rule.(*RComment)
return ok && a.Text == b.Text
}
func (r *RComment) Hash() (uint32, bool) {
hash := uint32(8)
hash = helpers.HashCombineString(hash, r.Text)
return hash, true
}
type RAtLayer struct {
Names [][]string
Rules []Rule
CloseBraceLoc logger.Loc
}
// Equal reports whether two "@layer" rules are equivalent: the same layer
// name paths in the same order, and equal child rules.
func (a *RAtLayer) Equal(rule R, check *CrossFileEqualityCheck) bool {
	if b, ok := rule.(*RAtLayer); ok && len(a.Names) == len(b.Names) && len(a.Rules) == len(b.Rules) {
		for i, ai := range a.Names {
			bi := b.Names[i]
			if len(ai) != len(bi) {
				return false
			}
			for j, aj := range ai {
				if aj != bi[j] {
					return false
				}
			}
		}
		if !RulesEqual(a.Rules, b.Rules, check) {
			return false
		}
		// Bug fix: the original fell through to "return false" here, so two
		// identical "@layer" rules were never reported as equal (which would
		// defeat duplicate-rule removal even though Hash() is implemented).
		// All names and all child rules matched, so the rules are equal.
		return true
	}
	return false
}
// Hash folds the layer name paths and child rules into a hash. The leading
// constant distinguishes this rule kind from the other rule types.
func (r *RAtLayer) Hash() (uint32, bool) {
	hash := helpers.HashCombine(uint32(9), uint32(len(r.Names)))
	for _, parts := range r.Names {
		hash = helpers.HashCombine(hash, uint32(len(parts)))
		for _, part := range parts {
			hash = helpers.HashCombineString(hash, part)
		}
	}
	return HashRules(hash, r.Rules), true
}
type RAtMedia struct {
Queries []MediaQuery
Rules []Rule
CloseBraceLoc logger.Loc
}
func (a *RAtMedia) Equal(rule R, check *CrossFileEqualityCheck) bool {
b, ok := rule.(*RAtMedia)
return ok && MediaQueriesEqual(a.Queries, b.Queries, check) && RulesEqual(a.Rules, b.Rules, check)
}
func (r *RAtMedia) Hash() (uint32, bool) {
hash := uint32(10)
hash = HashMediaQueries(hash, r.Queries)
hash = HashRules(hash, r.Rules)
return hash, true
}
type MediaQuery struct {
Loc logger.Loc
Data MQ
}
type MQ interface {
Equal(query MQ, check *CrossFileEqualityCheck) bool
EqualIgnoringWhitespace(query MQ) bool
Hash() uint32
CloneWithImportRecords(importRecordsIn []ast.ImportRecord, importRecordsOut []ast.ImportRecord) (MQ, []ast.ImportRecord)
}
func MediaQueriesEqual(a []MediaQuery, b []MediaQuery, check *CrossFileEqualityCheck) bool {
if len(a) != len(b) {
return false
}
for i, ai := range a {
if !ai.Data.Equal(b[i].Data, check) {
return false
}
}
return true
}
func MediaQueriesEqualIgnoringWhitespace(a []MediaQuery, b []MediaQuery) bool {
if len(a) != len(b) {
return false
}
for i, ai := range a {
if !ai.Data.EqualIgnoringWhitespace(b[i].Data) {
return false
}
}
return true
}
func HashMediaQueries(hash uint32, queries []MediaQuery) uint32 {
hash = helpers.HashCombine(hash, uint32(len(queries)))
for _, q := range queries {
hash = helpers.HashCombine(hash, q.Data.Hash())
}
return hash
}
type MQTypeOp uint8
const (
MQTypeOpNone MQTypeOp = iota
MQTypeOpNot
MQTypeOpOnly
)
type MQType struct {
Op MQTypeOp
Type string
AndOrNull MediaQuery
}
func (q *MQType) Equal(query MQ, check *CrossFileEqualityCheck) bool {
if p, ok := query.(*MQType); ok && q.Op == p.Op && q.Type == p.Type {
return (q.AndOrNull.Data == nil && p.AndOrNull.Data == nil) ||
(q.AndOrNull.Data != nil && p.AndOrNull.Data != nil && q.AndOrNull.Data.Equal(p.AndOrNull.Data, check))
}
return false
}
func (q *MQType) EqualIgnoringWhitespace(query MQ) bool {
if p, ok := query.(*MQType); ok && q.Op == p.Op && q.Type == p.Type {
return (q.AndOrNull.Data == nil && p.AndOrNull.Data == nil) ||
(q.AndOrNull.Data != nil && p.AndOrNull.Data != nil && q.AndOrNull.Data.EqualIgnoringWhitespace(p.AndOrNull.Data))
}
return false
}
func (q *MQType) Hash() uint32 {
hash := uint32(0)
hash = helpers.HashCombine(hash, uint32(q.Op))
hash = helpers.HashCombineString(hash, q.Type)
if q.AndOrNull.Data != nil {
hash = helpers.HashCombine(hash, q.AndOrNull.Data.Hash())
}
return hash
}
func (q *MQType) CloneWithImportRecords(importRecordsIn []ast.ImportRecord, importRecordsOut []ast.ImportRecord) (MQ, []ast.ImportRecord) {
var andOrNull MQ
if q.AndOrNull.Data != nil {
andOrNull, importRecordsOut = q.AndOrNull.Data.CloneWithImportRecords(importRecordsIn, importRecordsOut)
}
return &MQType{Op: q.Op, Type: q.Type, AndOrNull: MediaQuery{Data: andOrNull}}, importRecordsOut
}
type MQNot struct {
Inner MediaQuery
}
func (q *MQNot) Equal(query MQ, check *CrossFileEqualityCheck) bool {
p, ok := query.(*MQNot)
return ok && q.Inner.Data.Equal(p.Inner.Data, check)
}
func (q *MQNot) EqualIgnoringWhitespace(query MQ) bool {
p, ok := query.(*MQNot)
return ok && q.Inner.Data.EqualIgnoringWhitespace(p.Inner.Data)
}
func (q *MQNot) Hash() uint32 {
hash := uint32(1)
hash = helpers.HashCombine(hash, q.Inner.Data.Hash())
return hash
}
func (q *MQNot) CloneWithImportRecords(importRecordsIn []ast.ImportRecord, importRecordsOut []ast.ImportRecord) (MQ, []ast.ImportRecord) {
inner, importRecordsOut := q.Inner.Data.CloneWithImportRecords(importRecordsIn, importRecordsOut)
return &MQNot{Inner: MediaQuery{Data: inner}}, importRecordsOut
}
type MQBinaryOp uint8
const (
MQBinaryOpAnd MQBinaryOp = iota
MQBinaryOpOr
)
type MQBinary struct {
Op MQBinaryOp
Terms []MediaQuery
}
func (q *MQBinary) Equal(query MQ, check *CrossFileEqualityCheck) bool {
p, ok := query.(*MQBinary)
return ok && q.Op == p.Op && MediaQueriesEqual(q.Terms, p.Terms, check)
}
func (q *MQBinary) EqualIgnoringWhitespace(query MQ) bool {
p, ok := query.(*MQBinary)
return ok && q.Op == p.Op && MediaQueriesEqualIgnoringWhitespace(q.Terms, p.Terms)
}
func (q *MQBinary) Hash() uint32 {
hash := uint32(2)
hash = helpers.HashCombine(hash, uint32(q.Op))
hash = HashMediaQueries(hash, q.Terms)
return hash
}
func (q *MQBinary) CloneWithImportRecords(importRecordsIn []ast.ImportRecord, importRecordsOut []ast.ImportRecord) (MQ, []ast.ImportRecord) {
terms := make([]MediaQuery, 0, len(q.Terms))
for _, term := range q.Terms {
var clone MQ
clone, importRecordsOut = term.Data.CloneWithImportRecords(importRecordsIn, importRecordsOut)
terms = append(terms, MediaQuery{Data: clone})
}
return &MQBinary{Op: q.Op, Terms: terms}, importRecordsOut
}
type MQArbitraryTokens struct {
Tokens []Token
}
func (q *MQArbitraryTokens) Equal(query MQ, check *CrossFileEqualityCheck) bool {
p, ok := query.(*MQArbitraryTokens)
return ok && TokensEqual(q.Tokens, p.Tokens, check)
}
func (q *MQArbitraryTokens) EqualIgnoringWhitespace(query MQ) bool {
p, ok := query.(*MQArbitraryTokens)
return ok && TokensEqualIgnoringWhitespace(q.Tokens, p.Tokens)
}
func (q *MQArbitraryTokens) Hash() uint32 {
hash := uint32(3)
hash = HashTokens(hash, q.Tokens)
return hash
}
func (q *MQArbitraryTokens) CloneWithImportRecords(importRecordsIn []ast.ImportRecord, importRecordsOut []ast.ImportRecord) (MQ, []ast.ImportRecord) {
tokens, importRecordsOut := CloneTokensWithImportRecords(q.Tokens, importRecordsIn, nil, importRecordsOut)
return &MQArbitraryTokens{Tokens: tokens}, importRecordsOut
}
type MQPlainOrBoolean struct {
Name string
ValueOrNil []Token
}
func (q *MQPlainOrBoolean) Equal(query MQ, check *CrossFileEqualityCheck) bool {
p, ok := query.(*MQPlainOrBoolean)
return ok && q.Name == p.Name && TokensEqual(q.ValueOrNil, p.ValueOrNil, check)
}
func (q *MQPlainOrBoolean) EqualIgnoringWhitespace(query MQ) bool {
p, ok := query.(*MQPlainOrBoolean)
return ok && q.Name == p.Name && TokensEqualIgnoringWhitespace(q.ValueOrNil, p.ValueOrNil)
}
func (q *MQPlainOrBoolean) Hash() uint32 {
hash := uint32(4)
hash = helpers.HashCombineString(hash, q.Name)
hash = HashTokens(hash, q.ValueOrNil)
return hash
}
func (q *MQPlainOrBoolean) CloneWithImportRecords(importRecordsIn []ast.ImportRecord, importRecordsOut []ast.ImportRecord) (MQ, []ast.ImportRecord) {
var valueOrNil []Token
if q.ValueOrNil != nil {
valueOrNil, importRecordsOut = CloneTokensWithImportRecords(q.ValueOrNil, importRecordsIn, nil, importRecordsOut)
}
return &MQPlainOrBoolean{Name: q.Name, ValueOrNil: valueOrNil}, importRecordsOut
}
type MQRange struct {
Before []Token
Name string
After []Token
NameLoc logger.Loc
BeforeCmp MQCmp
AfterCmp MQCmp
}
func (q *MQRange) Equal(query MQ, check *CrossFileEqualityCheck) bool {
p, ok := query.(*MQRange)
return ok && q.BeforeCmp == p.BeforeCmp && q.AfterCmp == p.AfterCmp && q.Name == p.Name &&
TokensEqual(q.Before, p.Before, check) && TokensEqual(q.After, p.After, check)
}
func (q *MQRange) EqualIgnoringWhitespace(query MQ) bool {
p, ok := query.(*MQRange)
return ok && q.BeforeCmp == p.BeforeCmp && q.AfterCmp == p.AfterCmp && q.Name == p.Name &&
TokensEqualIgnoringWhitespace(q.Before, p.Before) && TokensEqualIgnoringWhitespace(q.After, p.After)
}
func (q *MQRange) Hash() uint32 {
hash := uint32(5)
hash = HashTokens(hash, q.Before)
hash = helpers.HashCombine(hash, uint32(q.BeforeCmp))
hash = helpers.HashCombineString(hash, q.Name)
hash = helpers.HashCombine(hash, uint32(q.AfterCmp))
hash = HashTokens(hash, q.After)
return hash
}
func (q *MQRange) CloneWithImportRecords(importRecordsIn []ast.ImportRecord, importRecordsOut []ast.ImportRecord) (MQ, []ast.ImportRecord) {
before, importRecordsOut := CloneTokensWithImportRecords(q.Before, importRecordsIn, nil, importRecordsOut)
after, importRecordsOut := CloneTokensWithImportRecords(q.After, importRecordsIn, nil, importRecordsOut)
return &MQRange{
Before: before,
BeforeCmp: q.BeforeCmp,
Name: q.Name,
AfterCmp: q.AfterCmp,
After: after,
}, importRecordsOut
}
// MQCmp is a comparison operator used by media query range syntax, e.g.
// the "<" in "(400px < width)".
type MQCmp uint8

const (
	MQCmpNone MQCmp = iota
	MQCmpEq
	MQCmpLt
	MQCmpLe
	MQCmpGt
	MQCmpGe
)

// String returns the printed form of the operator. Both MQCmpNone and
// MQCmpEq print as "=".
func (cmp MQCmp) String() string {
	switch cmp {
	case MQCmpLt:
		return "<"
	case MQCmpLe:
		return "<="
	case MQCmpGt:
		return ">"
	case MQCmpGe:
		return ">="
	default:
		return "="
	}
}

// Dir returns -1 for less-than operators, 1 for greater-than operators,
// and 0 otherwise.
func (cmp MQCmp) Dir() int {
	if cmp == MQCmpLt || cmp == MQCmpLe {
		return -1
	}
	if cmp == MQCmpGt || cmp == MQCmpGe {
		return 1
	}
	return 0
}

// Flip returns the logical negation of the operator ("<" becomes ">=").
// Operators without a direction are returned unchanged.
func (cmp MQCmp) Flip() MQCmp {
	switch cmp {
	case MQCmpGe:
		return MQCmpLt
	case MQCmpGt:
		return MQCmpLe
	case MQCmpLe:
		return MQCmpGt
	case MQCmpLt:
		return MQCmpGe
	}
	return cmp
}

// Reverse swaps the direction of the operator while keeping inclusivity
// ("<" becomes ">", "<=" becomes ">="), as if the two operands had been
// swapped. Operators without a direction are returned unchanged.
func (cmp MQCmp) Reverse() MQCmp {
	switch cmp {
	case MQCmpGe:
		return MQCmpLe
	case MQCmpGt:
		return MQCmpLt
	case MQCmpLe:
		return MQCmpGe
	case MQCmpLt:
		return MQCmpGt
	}
	return cmp
}
type ComplexSelector struct {
Selectors []CompoundSelector
}
// ComplexSelectorsEqual reports element-wise equality of two complex
// selector slices. "check" is used for tokens from different files.
func ComplexSelectorsEqual(a []ComplexSelector, b []ComplexSelector, check *CrossFileEqualityCheck) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if !a[i].Equal(b[i], check) {
			return false
		}
	}
	return true
}
// HashComplexSelectors folds a slice of complex selectors into the given
// hash: for each compound selector it hashes the type selector name (or a
// constant zero when absent), each subclass selector's hash, and the
// combinator byte.
func HashComplexSelectors(hash uint32, selectors []ComplexSelector) uint32 {
	for _, complex := range selectors {
		hash = helpers.HashCombine(hash, uint32(len(complex.Selectors)))
		for _, sel := range complex.Selectors {
			if sel.TypeSelector != nil {
				hash = helpers.HashCombineString(hash, sel.TypeSelector.Name.Text)
			} else {
				// Placeholder so "a b" and "b" don't hash identically
				hash = helpers.HashCombine(hash, 0)
			}
			hash = helpers.HashCombine(hash, uint32(len(sel.SubclassSelectors)))
			for _, ss := range sel.SubclassSelectors {
				hash = helpers.HashCombine(hash, ss.Data.Hash())
			}
			hash = helpers.HashCombine(hash, uint32(sel.Combinator.Byte))
		}
	}
	return hash
}
// Clone returns a deep copy of this complex selector by cloning each of
// its compound selectors.
func (s ComplexSelector) Clone() ComplexSelector {
	selectors := make([]CompoundSelector, len(s.Selectors))
	for i := range s.Selectors {
		selectors[i] = s.Selectors[i].Clone()
	}
	return ComplexSelector{Selectors: selectors}
}
func (sel ComplexSelector) ContainsNestingCombinator() bool {
for _, inner := range sel.Selectors {
if len(inner.NestingSelectorLocs) > 0 {
return true
}
for _, ss := range inner.SubclassSelectors {
if pseudo, ok := ss.Data.(*SSPseudoClassWithSelectorList); ok {
for _, nested := range pseudo.Selectors {
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | true |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/css_ast/css_decl_table.go | internal/css_ast/css_decl_table.go | package css_ast
import (
"strings"
"sync"
"github.com/evanw/esbuild/internal/helpers"
)
type D uint16
const (
DUnknown D = iota
DAlignContent
DAlignItems
DAlignSelf
DAlignmentBaseline
DAll
DAnimation
DAnimationDelay
DAnimationDirection
DAnimationDuration
DAnimationFillMode
DAnimationIterationCount
DAnimationName
DAnimationPlayState
DAnimationTimingFunction
DAppearance
DBackdropFilter
DBackfaceVisibility
DBackground
DBackgroundAttachment
DBackgroundClip
DBackgroundColor
DBackgroundImage
DBackgroundOrigin
DBackgroundPosition
DBackgroundPositionX
DBackgroundPositionY
DBackgroundRepeat
DBackgroundSize
DBaselineShift
DBlockSize
DBorder
DBorderBlockEnd
DBorderBlockEndColor
DBorderBlockEndStyle
DBorderBlockEndWidth
DBorderBlockStart
DBorderBlockStartColor
DBorderBlockStartStyle
DBorderBlockStartWidth
DBorderBottom
DBorderBottomColor
DBorderBottomLeftRadius
DBorderBottomRightRadius
DBorderBottomStyle
DBorderBottomWidth
DBorderCollapse
DBorderColor
DBorderImage
DBorderImageOutset
DBorderImageRepeat
DBorderImageSlice
DBorderImageSource
DBorderImageWidth
DBorderInlineEnd
DBorderInlineEndColor
DBorderInlineEndStyle
DBorderInlineEndWidth
DBorderInlineStart
DBorderInlineStartColor
DBorderInlineStartStyle
DBorderInlineStartWidth
DBorderLeft
DBorderLeftColor
DBorderLeftStyle
DBorderLeftWidth
DBorderRadius
DBorderRight
DBorderRightColor
DBorderRightStyle
DBorderRightWidth
DBorderSpacing
DBorderStyle
DBorderTop
DBorderTopColor
DBorderTopLeftRadius
DBorderTopRightRadius
DBorderTopStyle
DBorderTopWidth
DBorderWidth
DBottom
DBoxDecorationBreak
DBoxShadow
DBoxSizing
DBreakAfter
DBreakBefore
DBreakInside
DCaptionSide
DCaretColor
DClear
DClip
DClipPath
DClipRule
DColor
DColorInterpolation
DColorInterpolationFilters
DColumnCount
DColumnFill
DColumnGap
DColumnRule
DColumnRuleColor
DColumnRuleStyle
DColumnRuleWidth
DColumnSpan
DColumnWidth
DColumns
DComposes
DContainer
DContainerName
DContainerType
DContent
DCounterIncrement
DCounterReset
DCssFloat
DCssText
DCursor
DDirection
DDisplay
DDominantBaseline
DEmptyCells
DFill
DFillOpacity
DFillRule
DFilter
DFlex
DFlexBasis
DFlexDirection
DFlexFlow
DFlexGrow
DFlexShrink
DFlexWrap
DFloat
DFloodColor
DFloodOpacity
DFont
DFontFamily
DFontFeatureSettings
DFontKerning
DFontSize
DFontSizeAdjust
DFontStretch
DFontStyle
DFontSynthesis
DFontVariant
DFontVariantCaps
DFontVariantEastAsian
DFontVariantLigatures
DFontVariantNumeric
DFontVariantPosition
DFontWeight
DGap
DGlyphOrientationVertical
DGrid
DGridArea
DGridAutoColumns
DGridAutoFlow
DGridAutoRows
DGridColumn
DGridColumnEnd
DGridColumnGap
DGridColumnStart
DGridGap
DGridRow
DGridRowEnd
DGridRowGap
DGridRowStart
DGridTemplate
DGridTemplateAreas
DGridTemplateColumns
DGridTemplateRows
DHeight
DHyphens
DImageOrientation
DImageRendering
DInitialLetter
DInlineSize
DInset
DJustifyContent
DJustifyItems
DJustifySelf
DLeft
DLetterSpacing
DLightingColor
DLineBreak
DLineHeight
DListStyle
DListStyleImage
DListStylePosition
DListStyleType
DMargin
DMarginBlockEnd
DMarginBlockStart
DMarginBottom
DMarginInlineEnd
DMarginInlineStart
DMarginLeft
DMarginRight
DMarginTop
DMarker
DMarkerEnd
DMarkerMid
DMarkerStart
DMask
DMaskComposite
DMaskImage
DMaskOrigin
DMaskPosition
DMaskRepeat
DMaskSize
DMaskType
DMaxBlockSize
DMaxHeight
DMaxInlineSize
DMaxWidth
DMinBlockSize
DMinHeight
DMinInlineSize
DMinWidth
DObjectFit
DObjectPosition
DOpacity
DOrder
DOrphans
DOutline
DOutlineColor
DOutlineOffset
DOutlineStyle
DOutlineWidth
DOverflow
DOverflowAnchor
DOverflowWrap
DOverflowX
DOverflowY
DOverscrollBehavior
DOverscrollBehaviorBlock
DOverscrollBehaviorInline
DOverscrollBehaviorX
DOverscrollBehaviorY
DPadding
DPaddingBlockEnd
DPaddingBlockStart
DPaddingBottom
DPaddingInlineEnd
DPaddingInlineStart
DPaddingLeft
DPaddingRight
DPaddingTop
DPageBreakAfter
DPageBreakBefore
DPageBreakInside
DPaintOrder
DPerspective
DPerspectiveOrigin
DPlaceContent
DPlaceItems
DPlaceSelf
DPointerEvents
DPosition
DPrintColorAdjust
DQuotes
DResize
DRight
DRotate
DRowGap
DRubyAlign
DRubyPosition
DScale
DScrollBehavior
DShapeRendering
DStopColor
DStopOpacity
DStroke
DStrokeDasharray
DStrokeDashoffset
DStrokeLinecap
DStrokeLinejoin
DStrokeMiterlimit
DStrokeOpacity
DStrokeWidth
DTabSize
DTableLayout
DTextAlign
DTextAlignLast
DTextAnchor
DTextCombineUpright
DTextDecoration
DTextDecorationColor
DTextDecorationLine
DTextDecorationSkip
DTextDecorationStyle
DTextEmphasis
DTextEmphasisColor
DTextEmphasisPosition
DTextEmphasisStyle
DTextIndent
DTextJustify
DTextOrientation
DTextOverflow
DTextRendering
DTextShadow
DTextSizeAdjust
DTextTransform
DTextUnderlinePosition
DTop
DTouchAction
DTransform
DTransformBox
DTransformOrigin
DTransformStyle
DTransition
DTransitionDelay
DTransitionDuration
DTransitionProperty
DTransitionTimingFunction
DTranslate
DUnicodeBidi
DUserSelect
DVerticalAlign
DVisibility
DWhiteSpace
DWidows
DWidth
DWillChange
DWordBreak
DWordSpacing
DWordWrap
DWritingMode
DZIndex
DZoom
)
var KnownDeclarations = map[string]D{
"align-content": DAlignContent,
"align-items": DAlignItems,
"align-self": DAlignSelf,
"alignment-baseline": DAlignmentBaseline,
"all": DAll,
"animation": DAnimation,
"animation-delay": DAnimationDelay,
"animation-direction": DAnimationDirection,
"animation-duration": DAnimationDuration,
"animation-fill-mode": DAnimationFillMode,
"animation-iteration-count": DAnimationIterationCount,
"animation-name": DAnimationName,
"animation-play-state": DAnimationPlayState,
"animation-timing-function": DAnimationTimingFunction,
"appearance": DAppearance,
"backdrop-filter": DBackdropFilter,
"backface-visibility": DBackfaceVisibility,
"background": DBackground,
"background-attachment": DBackgroundAttachment,
"background-clip": DBackgroundClip,
"background-color": DBackgroundColor,
"background-image": DBackgroundImage,
"background-origin": DBackgroundOrigin,
"background-position": DBackgroundPosition,
"background-position-x": DBackgroundPositionX,
"background-position-y": DBackgroundPositionY,
"background-repeat": DBackgroundRepeat,
"background-size": DBackgroundSize,
"baseline-shift": DBaselineShift,
"block-size": DBlockSize,
"border": DBorder,
"border-block-end": DBorderBlockEnd,
"border-block-end-color": DBorderBlockEndColor,
"border-block-end-style": DBorderBlockEndStyle,
"border-block-end-width": DBorderBlockEndWidth,
"border-block-start": DBorderBlockStart,
"border-block-start-color": DBorderBlockStartColor,
"border-block-start-style": DBorderBlockStartStyle,
"border-block-start-width": DBorderBlockStartWidth,
"border-bottom": DBorderBottom,
"border-bottom-color": DBorderBottomColor,
"border-bottom-left-radius": DBorderBottomLeftRadius,
"border-bottom-right-radius": DBorderBottomRightRadius,
"border-bottom-style": DBorderBottomStyle,
"border-bottom-width": DBorderBottomWidth,
"border-collapse": DBorderCollapse,
"border-color": DBorderColor,
"border-image": DBorderImage,
"border-image-outset": DBorderImageOutset,
"border-image-repeat": DBorderImageRepeat,
"border-image-slice": DBorderImageSlice,
"border-image-source": DBorderImageSource,
"border-image-width": DBorderImageWidth,
"border-inline-end": DBorderInlineEnd,
"border-inline-end-color": DBorderInlineEndColor,
"border-inline-end-style": DBorderInlineEndStyle,
"border-inline-end-width": DBorderInlineEndWidth,
"border-inline-start": DBorderInlineStart,
"border-inline-start-color": DBorderInlineStartColor,
"border-inline-start-style": DBorderInlineStartStyle,
"border-inline-start-width": DBorderInlineStartWidth,
"border-left": DBorderLeft,
"border-left-color": DBorderLeftColor,
"border-left-style": DBorderLeftStyle,
"border-left-width": DBorderLeftWidth,
"border-radius": DBorderRadius,
"border-right": DBorderRight,
"border-right-color": DBorderRightColor,
"border-right-style": DBorderRightStyle,
"border-right-width": DBorderRightWidth,
"border-spacing": DBorderSpacing,
"border-style": DBorderStyle,
"border-top": DBorderTop,
"border-top-color": DBorderTopColor,
"border-top-left-radius": DBorderTopLeftRadius,
"border-top-right-radius": DBorderTopRightRadius,
"border-top-style": DBorderTopStyle,
"border-top-width": DBorderTopWidth,
"border-width": DBorderWidth,
"bottom": DBottom,
"box-decoration-break": DBoxDecorationBreak,
"box-shadow": DBoxShadow,
"box-sizing": DBoxSizing,
"break-after": DBreakAfter,
"break-before": DBreakBefore,
"break-inside": DBreakInside,
"caption-side": DCaptionSide,
"caret-color": DCaretColor,
"clear": DClear,
"clip": DClip,
"clip-path": DClipPath,
"clip-rule": DClipRule,
"color": DColor,
"color-interpolation": DColorInterpolation,
"color-interpolation-filters": DColorInterpolationFilters,
"column-count": DColumnCount,
"column-fill": DColumnFill,
"column-gap": DColumnGap,
"column-rule": DColumnRule,
"column-rule-color": DColumnRuleColor,
"column-rule-style": DColumnRuleStyle,
"column-rule-width": DColumnRuleWidth,
"column-span": DColumnSpan,
"column-width": DColumnWidth,
"columns": DColumns,
"composes": DComposes,
"container": DContainer,
"container-name": DContainerName,
"container-type": DContainerType,
"content": DContent,
"counter-increment": DCounterIncrement,
"counter-reset": DCounterReset,
"css-float": DCssFloat,
"css-text": DCssText,
"cursor": DCursor,
"direction": DDirection,
"display": DDisplay,
"dominant-baseline": DDominantBaseline,
"empty-cells": DEmptyCells,
"fill": DFill,
"fill-opacity": DFillOpacity,
"fill-rule": DFillRule,
"filter": DFilter,
"flex": DFlex,
"flex-basis": DFlexBasis,
"flex-direction": DFlexDirection,
"flex-flow": DFlexFlow,
"flex-grow": DFlexGrow,
"flex-shrink": DFlexShrink,
"flex-wrap": DFlexWrap,
"float": DFloat,
"flood-color": DFloodColor,
"flood-opacity": DFloodOpacity,
"font": DFont,
"font-family": DFontFamily,
"font-feature-settings": DFontFeatureSettings,
"font-kerning": DFontKerning,
"font-size": DFontSize,
"font-size-adjust": DFontSizeAdjust,
"font-stretch": DFontStretch,
"font-style": DFontStyle,
"font-synthesis": DFontSynthesis,
"font-variant": DFontVariant,
"font-variant-caps": DFontVariantCaps,
"font-variant-east-asian": DFontVariantEastAsian,
"font-variant-ligatures": DFontVariantLigatures,
"font-variant-numeric": DFontVariantNumeric,
"font-variant-position": DFontVariantPosition,
"font-weight": DFontWeight,
"gap": DGap,
"glyph-orientation-vertical": DGlyphOrientationVertical,
"grid": DGrid,
"grid-area": DGridArea,
"grid-auto-columns": DGridAutoColumns,
"grid-auto-flow": DGridAutoFlow,
"grid-auto-rows": DGridAutoRows,
"grid-column": DGridColumn,
"grid-column-end": DGridColumnEnd,
"grid-column-gap": DGridColumnGap,
"grid-column-start": DGridColumnStart,
"grid-gap": DGridGap,
"grid-row": DGridRow,
"grid-row-end": DGridRowEnd,
"grid-row-gap": DGridRowGap,
"grid-row-start": DGridRowStart,
"grid-template": DGridTemplate,
"grid-template-areas": DGridTemplateAreas,
"grid-template-columns": DGridTemplateColumns,
"grid-template-rows": DGridTemplateRows,
"height": DHeight,
"hyphens": DHyphens,
"image-orientation": DImageOrientation,
"image-rendering": DImageRendering,
"initial-letter": DInitialLetter,
"inline-size": DInlineSize,
"inset": DInset,
"justify-content": DJustifyContent,
"justify-items": DJustifyItems,
"justify-self": DJustifySelf,
"left": DLeft,
"letter-spacing": DLetterSpacing,
"lighting-color": DLightingColor,
"line-break": DLineBreak,
"line-height": DLineHeight,
"list-style": DListStyle,
"list-style-image": DListStyleImage,
"list-style-position": DListStylePosition,
"list-style-type": DListStyleType,
"margin": DMargin,
"margin-block-end": DMarginBlockEnd,
"margin-block-start": DMarginBlockStart,
"margin-bottom": DMarginBottom,
"margin-inline-end": DMarginInlineEnd,
"margin-inline-start": DMarginInlineStart,
"margin-left": DMarginLeft,
"margin-right": DMarginRight,
"margin-top": DMarginTop,
"marker": DMarker,
"marker-end": DMarkerEnd,
"marker-mid": DMarkerMid,
"marker-start": DMarkerStart,
"mask": DMask,
"mask-composite": DMaskComposite,
"mask-image": DMaskImage,
"mask-origin": DMaskOrigin,
"mask-position": DMaskPosition,
"mask-repeat": DMaskRepeat,
"mask-size": DMaskSize,
"mask-type": DMaskType,
"max-block-size": DMaxBlockSize,
"max-height": DMaxHeight,
"max-inline-size": DMaxInlineSize,
"max-width": DMaxWidth,
"min-block-size": DMinBlockSize,
"min-height": DMinHeight,
"min-inline-size": DMinInlineSize,
"min-width": DMinWidth,
"object-fit": DObjectFit,
"object-position": DObjectPosition,
"opacity": DOpacity,
"order": DOrder,
"orphans": DOrphans,
"outline": DOutline,
"outline-color": DOutlineColor,
"outline-offset": DOutlineOffset,
"outline-style": DOutlineStyle,
"outline-width": DOutlineWidth,
"overflow": DOverflow,
"overflow-anchor": DOverflowAnchor,
"overflow-wrap": DOverflowWrap,
"overflow-x": DOverflowX,
"overflow-y": DOverflowY,
"overscroll-behavior": DOverscrollBehavior,
"overscroll-behavior-block": DOverscrollBehaviorBlock,
"overscroll-behavior-inline": DOverscrollBehaviorInline,
"overscroll-behavior-x": DOverscrollBehaviorX,
"overscroll-behavior-y": DOverscrollBehaviorY,
"padding": DPadding,
"padding-block-end": DPaddingBlockEnd,
"padding-block-start": DPaddingBlockStart,
"padding-bottom": DPaddingBottom,
"padding-inline-end": DPaddingInlineEnd,
"padding-inline-start": DPaddingInlineStart,
"padding-left": DPaddingLeft,
"padding-right": DPaddingRight,
"padding-top": DPaddingTop,
"page-break-after": DPageBreakAfter,
"page-break-before": DPageBreakBefore,
"page-break-inside": DPageBreakInside,
"paint-order": DPaintOrder,
"perspective": DPerspective,
"perspective-origin": DPerspectiveOrigin,
"place-content": DPlaceContent,
"place-items": DPlaceItems,
"place-self": DPlaceSelf,
"pointer-events": DPointerEvents,
"position": DPosition,
"print-color-adjust": DPrintColorAdjust,
"quotes": DQuotes,
"resize": DResize,
"right": DRight,
"rotate": DRotate,
"row-gap": DRowGap,
"ruby-align": DRubyAlign,
"ruby-position": DRubyPosition,
"scale": DScale,
"scroll-behavior": DScrollBehavior,
"shape-rendering": DShapeRendering,
"stop-color": DStopColor,
"stop-opacity": DStopOpacity,
"stroke": DStroke,
"stroke-dasharray": DStrokeDasharray,
"stroke-dashoffset": DStrokeDashoffset,
"stroke-linecap": DStrokeLinecap,
"stroke-linejoin": DStrokeLinejoin,
"stroke-miterlimit": DStrokeMiterlimit,
"stroke-opacity": DStrokeOpacity,
"stroke-width": DStrokeWidth,
"tab-size": DTabSize,
"table-layout": DTableLayout,
"text-align": DTextAlign,
"text-align-last": DTextAlignLast,
"text-anchor": DTextAnchor,
"text-combine-upright": DTextCombineUpright,
"text-decoration": DTextDecoration,
"text-decoration-color": DTextDecorationColor,
"text-decoration-line": DTextDecorationLine,
"text-decoration-skip": DTextDecorationSkip,
"text-decoration-style": DTextDecorationStyle,
"text-emphasis": DTextEmphasis,
"text-emphasis-color": DTextEmphasisColor,
"text-emphasis-position": DTextEmphasisPosition,
"text-emphasis-style": DTextEmphasisStyle,
"text-indent": DTextIndent,
"text-justify": DTextJustify,
"text-orientation": DTextOrientation,
"text-overflow": DTextOverflow,
"text-rendering": DTextRendering,
"text-shadow": DTextShadow,
"text-size-adjust": DTextSizeAdjust,
"text-transform": DTextTransform,
"text-underline-position": DTextUnderlinePosition,
"top": DTop,
"touch-action": DTouchAction,
"transform": DTransform,
"transform-box": DTransformBox,
"transform-origin": DTransformOrigin,
"transform-style": DTransformStyle,
"transition": DTransition,
"transition-delay": DTransitionDelay,
"transition-duration": DTransitionDuration,
"transition-property": DTransitionProperty,
"transition-timing-function": DTransitionTimingFunction,
"translate": DTranslate,
"unicode-bidi": DUnicodeBidi,
"user-select": DUserSelect,
"vertical-align": DVerticalAlign,
"visibility": DVisibility,
"white-space": DWhiteSpace,
"widows": DWidows,
"width": DWidth,
"will-change": DWillChange,
"word-break": DWordBreak,
"word-spacing": DWordSpacing,
"word-wrap": DWordWrap,
"writing-mode": DWritingMode,
"z-index": DZIndex,
"zoom": DZoom,
}
// The shared typo detector is built lazily on first use (see
// MaybeCorrectDeclarationTypo) and is guarded by typoDetectorMutex.
var typoDetector *helpers.TypoDetector
var typoDetectorMutex sync.Mutex
// MaybeCorrectDeclarationTypo suggests a known CSS property name that
// "text" may be a misspelling of. The boolean result reports whether a
// suggestion was found. CSS custom properties (names starting with
// "--") are user-defined and are never corrected.
func MaybeCorrectDeclarationTypo(text string) (string, bool) {
	// Custom properties must not be "corrected" to standard property names
	if strings.HasPrefix(text, "--") {
		return "", false
	}

	typoDetectorMutex.Lock()
	defer typoDetectorMutex.Unlock()

	// Build the detector on first use so callers that never hit this
	// path don't pay for its construction
	if typoDetector == nil {
		names := make([]string, 0, len(KnownDeclarations))
		for name := range KnownDeclarations {
			names = append(names, name)
		}
		detector := helpers.MakeTypoDetector(names)
		typoDetector = &detector
	}
	return typoDetector.MaybeCorrectTypo(text)
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/config/globals.go | internal/config/globals.go | package config
import (
"math"
"sync"
"github.com/evanw/esbuild/internal/ast"
"github.com/evanw/esbuild/internal/helpers"
"github.com/evanw/esbuild/internal/js_ast"
)
var processedGlobalsMutex sync.Mutex
var processedGlobals *ProcessedDefines
// If something is in this list, then a direct identifier expression or property
// access chain matching this will be assumed to have no side effects and will
// be removed.
//
// This also means code is allowed to be reordered past things in this list. For
// example, if "console.log" is in this list, permitting reordering allows for
// "if (a) console.log(b); else console.log(c)" to be reordered and transformed
// into "console.log(a ? b : c)". Notice that "a" and "console.log" are in a
// different order, which can only happen if evaluating the "console.log"
// property access can be assumed to not change the value of "a".
//
// Note that membership in this list says nothing about whether calling any of
// these functions has any side effects. It only says something about
// referencing these function without calling them.
var knownGlobals = [][]string{
// Array: Static methods
// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array#static_methods
{"Array", "from"},
{"Array", "fromAsync"},
{"Array", "isArray"},
{"Array", "of"},
// RegExp: Static methods
// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/RegExp#static_methods
{"RegExp", "escape"},
// Map: Static methods
// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Map#static_methods
{"Map", "groupBy"},
// Object: Static methods
// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Object#static_methods
{"Object", "assign"},
{"Object", "create"},
{"Object", "defineProperties"},
{"Object", "defineProperty"},
{"Object", "entries"},
{"Object", "freeze"},
{"Object", "fromEntries"},
{"Object", "getOwnPropertyDescriptor"},
{"Object", "getOwnPropertyDescriptors"},
{"Object", "getOwnPropertyNames"},
{"Object", "getOwnPropertySymbols"},
{"Object", "getPrototypeOf"},
{"Object", "is"},
{"Object", "isExtensible"},
{"Object", "isFrozen"},
{"Object", "isSealed"},
{"Object", "keys"},
{"Object", "preventExtensions"},
{"Object", "seal"},
{"Object", "setPrototypeOf"},
{"Object", "values"},
// Object: Instance methods
// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Object#instance_methods
{"Object", "prototype", "__defineGetter__"},
{"Object", "prototype", "__defineSetter__"},
{"Object", "prototype", "__lookupGetter__"},
{"Object", "prototype", "__lookupSetter__"},
{"Object", "prototype", "hasOwnProperty"},
{"Object", "prototype", "isPrototypeOf"},
{"Object", "prototype", "propertyIsEnumerable"},
{"Object", "prototype", "toLocaleString"},
{"Object", "prototype", "toString"},
{"Object", "prototype", "unwatch"},
{"Object", "prototype", "valueOf"},
{"Object", "prototype", "watch"},
// Symbol: Static properties
// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Symbol#static_properties
{"Symbol", "asyncDispose"},
{"Symbol", "asyncIterator"},
{"Symbol", "dispose"},
{"Symbol", "hasInstance"},
{"Symbol", "isConcatSpreadable"},
{"Symbol", "iterator"},
{"Symbol", "match"},
{"Symbol", "matchAll"},
{"Symbol", "replace"},
{"Symbol", "search"},
{"Symbol", "species"},
{"Symbol", "split"},
{"Symbol", "toPrimitive"},
{"Symbol", "toStringTag"},
{"Symbol", "unscopables"},
// Math: Static properties
// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math#static_properties
{"Math", "E"},
{"Math", "LN10"},
{"Math", "LN2"},
{"Math", "LOG10E"},
{"Math", "LOG2E"},
{"Math", "PI"},
{"Math", "SQRT1_2"},
{"Math", "SQRT2"},
// Math: Static methods
// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math#static_methods
{"Math", "abs"},
{"Math", "acos"},
{"Math", "acosh"},
{"Math", "asin"},
{"Math", "asinh"},
{"Math", "atan"},
{"Math", "atan2"},
{"Math", "atanh"},
{"Math", "cbrt"},
{"Math", "ceil"},
{"Math", "clz32"},
{"Math", "cos"},
{"Math", "cosh"},
{"Math", "exp"},
{"Math", "expm1"},
{"Math", "floor"},
{"Math", "fround"},
{"Math", "hypot"},
{"Math", "imul"},
{"Math", "log"},
{"Math", "log10"},
{"Math", "log1p"},
{"Math", "log2"},
{"Math", "max"},
{"Math", "min"},
{"Math", "pow"},
{"Math", "random"},
{"Math", "round"},
{"Math", "sign"},
{"Math", "sin"},
{"Math", "sinh"},
{"Math", "sqrt"},
{"Math", "tan"},
{"Math", "tanh"},
{"Math", "trunc"},
// Reflect: Static methods
// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Reflect#static_methods
{"Reflect", "apply"},
{"Reflect", "construct"},
{"Reflect", "defineProperty"},
{"Reflect", "deleteProperty"},
{"Reflect", "get"},
{"Reflect", "getOwnPropertyDescriptor"},
{"Reflect", "getPrototypeOf"},
{"Reflect", "has"},
{"Reflect", "isExtensible"},
{"Reflect", "ownKeys"},
{"Reflect", "preventExtensions"},
{"Reflect", "set"},
{"Reflect", "setPrototypeOf"},
// JSON: Static methods
// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/JSON#static_methods
{"JSON", "parse"},
{"JSON", "stringify"},
// TypedArray: Static methods
// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray#static_methods
{"BigInt64Array", "from"},
{"BigInt64Array", "of"},
{"BigUint64Array", "from"},
{"BigUint64Array", "of"},
{"Float16Array", "from"},
{"Float16Array", "of"},
{"Float32Array", "from"},
{"Float32Array", "of"},
{"Float64Array", "from"},
{"Float64Array", "of"},
{"Int16Array", "from"},
{"Int16Array", "of"},
{"Int32Array", "from"},
{"Int32Array", "of"},
{"Int8Array", "from"},
{"Int8Array", "of"},
{"Uint16Array", "from"},
{"Uint16Array", "of"},
{"Uint32Array", "from"},
{"Uint32Array", "of"},
{"Uint8Array", "from"},
{"Uint8Array", "fromBase64"},
{"Uint8Array", "fromHex"},
{"Uint8Array", "of"},
{"Uint8ClampedArray", "from"},
{"Uint8ClampedArray", "of"},
// Other globals present in both the browser and node. This should include at
// least the following properties:
// https://tc39.es/ecma262/multipage/global-object.html#sec-constructor-properties-of-the-global-object
//
// Exceptions:
// - Don't include "eval" because it has special behavior
// - Don't include "NaN", "Infinity", and "undefined" because esbuild treats
// these as automatically-inlined constants instead of identifiers
{"AbortController"},
{"AbortSignal"},
{"AggregateError"},
{"Array"},
{"ArrayBuffer"},
{"Atomics"},
{"BigInt"},
{"BigInt64Array"},
{"BigUint64Array"},
{"Boolean"},
{"DataView"},
{"Date"},
{"Error"},
{"EvalError"},
{"Event"},
{"EventTarget"},
{"FinalizationRegistry"},
{"Float16Array"},
{"Float32Array"},
{"Float64Array"},
{"Function"},
{"Int16Array"},
{"Int32Array"},
{"Int8Array"},
{"Intl"},
{"Iterator"},
{"JSON"},
{"Map"},
{"Math"},
{"MessageChannel"},
{"MessageEvent"},
{"MessagePort"},
{"Number"},
{"Object"},
{"Promise"},
{"Proxy"},
{"RangeError"},
{"ReferenceError"},
{"Reflect"},
{"RegExp"},
{"Set"},
{"SharedArrayBuffer"},
{"String"},
{"Symbol"},
{"SyntaxError"},
{"TextDecoder"},
{"TextEncoder"},
{"TypeError"},
{"URIError"},
{"URL"},
{"URLSearchParams"},
{"Uint16Array"},
{"Uint32Array"},
{"Uint8Array"},
{"Uint8ClampedArray"},
{"WeakMap"},
{"WeakRef"},
{"WeakSet"},
{"WebAssembly"},
{"clearInterval"},
{"clearTimeout"},
{"console"},
{"decodeURI"},
{"decodeURIComponent"},
{"encodeURI"},
{"encodeURIComponent"},
{"escape"},
{"globalThis"},
{"isFinite"},
{"isNaN"},
{"parseFloat"},
{"parseInt"},
{"queueMicrotask"},
{"setInterval"},
{"setTimeout"},
{"unescape"},
// Console method references are assumed to have no side effects
// https://developer.mozilla.org/en-US/docs/Web/API/console
{"console", "assert"},
{"console", "clear"},
{"console", "count"},
{"console", "countReset"},
{"console", "debug"},
{"console", "dir"},
{"console", "dirxml"},
{"console", "error"},
{"console", "group"},
{"console", "groupCollapsed"},
{"console", "groupEnd"},
{"console", "info"},
{"console", "log"},
{"console", "table"},
{"console", "time"},
{"console", "timeEnd"},
{"console", "timeLog"},
{"console", "trace"},
{"console", "warn"},
// CSSOM APIs
{"CSSAnimation"},
{"CSSFontFaceRule"},
{"CSSImportRule"},
{"CSSKeyframeRule"},
{"CSSKeyframesRule"},
{"CSSMediaRule"},
{"CSSNamespaceRule"},
{"CSSPageRule"},
{"CSSRule"},
{"CSSRuleList"},
{"CSSStyleDeclaration"},
{"CSSStyleRule"},
{"CSSStyleSheet"},
{"CSSSupportsRule"},
{"CSSTransition"},
// SVG DOM
{"SVGAElement"},
{"SVGAngle"},
{"SVGAnimateElement"},
{"SVGAnimateMotionElement"},
{"SVGAnimateTransformElement"},
{"SVGAnimatedAngle"},
{"SVGAnimatedBoolean"},
{"SVGAnimatedEnumeration"},
{"SVGAnimatedInteger"},
{"SVGAnimatedLength"},
{"SVGAnimatedLengthList"},
{"SVGAnimatedNumber"},
{"SVGAnimatedNumberList"},
{"SVGAnimatedPreserveAspectRatio"},
{"SVGAnimatedRect"},
{"SVGAnimatedString"},
{"SVGAnimatedTransformList"},
{"SVGAnimationElement"},
{"SVGCircleElement"},
{"SVGClipPathElement"},
{"SVGComponentTransferFunctionElement"},
{"SVGDefsElement"},
{"SVGDescElement"},
{"SVGElement"},
{"SVGEllipseElement"},
{"SVGFEBlendElement"},
{"SVGFEColorMatrixElement"},
{"SVGFEComponentTransferElement"},
{"SVGFECompositeElement"},
{"SVGFEConvolveMatrixElement"},
{"SVGFEDiffuseLightingElement"},
{"SVGFEDisplacementMapElement"},
{"SVGFEDistantLightElement"},
{"SVGFEDropShadowElement"},
{"SVGFEFloodElement"},
{"SVGFEFuncAElement"},
{"SVGFEFuncBElement"},
{"SVGFEFuncGElement"},
{"SVGFEFuncRElement"},
{"SVGFEGaussianBlurElement"},
{"SVGFEImageElement"},
{"SVGFEMergeElement"},
{"SVGFEMergeNodeElement"},
{"SVGFEMorphologyElement"},
{"SVGFEOffsetElement"},
{"SVGFEPointLightElement"},
{"SVGFESpecularLightingElement"},
{"SVGFESpotLightElement"},
{"SVGFETileElement"},
{"SVGFETurbulenceElement"},
{"SVGFilterElement"},
{"SVGForeignObjectElement"},
{"SVGGElement"},
{"SVGGeometryElement"},
{"SVGGradientElement"},
{"SVGGraphicsElement"},
{"SVGImageElement"},
{"SVGLength"},
{"SVGLengthList"},
{"SVGLineElement"},
{"SVGLinearGradientElement"},
{"SVGMPathElement"},
{"SVGMarkerElement"},
{"SVGMaskElement"},
{"SVGMatrix"},
{"SVGMetadataElement"},
{"SVGNumber"},
{"SVGNumberList"},
{"SVGPathElement"},
{"SVGPatternElement"},
{"SVGPoint"},
{"SVGPointList"},
{"SVGPolygonElement"},
{"SVGPolylineElement"},
{"SVGPreserveAspectRatio"},
{"SVGRadialGradientElement"},
{"SVGRect"},
{"SVGRectElement"},
{"SVGSVGElement"},
{"SVGScriptElement"},
{"SVGSetElement"},
{"SVGStopElement"},
{"SVGStringList"},
{"SVGStyleElement"},
{"SVGSwitchElement"},
{"SVGSymbolElement"},
{"SVGTSpanElement"},
{"SVGTextContentElement"},
{"SVGTextElement"},
{"SVGTextPathElement"},
{"SVGTextPositioningElement"},
{"SVGTitleElement"},
{"SVGTransform"},
{"SVGTransformList"},
{"SVGUnitTypes"},
{"SVGUseElement"},
{"SVGViewElement"},
// Other browser APIs
//
// This list contains all globals present in modern versions of Chrome, Safari,
// and Firefox except for the following properties, since they have a side effect
// of triggering layout (https://gist.github.com/paulirish/5d52fb081b3570c81e3a):
//
// - scrollX
// - scrollY
// - innerWidth
// - innerHeight
// - pageXOffset
// - pageYOffset
//
// The following globals have also been removed since they sometimes throw an
// exception when accessed, which is a side effect (for more information see
// https://stackoverflow.com/a/33047477):
//
// - localStorage
// - sessionStorage
//
{"AnalyserNode"},
{"Animation"},
{"AnimationEffect"},
{"AnimationEvent"},
{"AnimationPlaybackEvent"},
{"AnimationTimeline"},
{"Attr"},
{"Audio"},
{"AudioBuffer"},
{"AudioBufferSourceNode"},
{"AudioDestinationNode"},
{"AudioListener"},
{"AudioNode"},
{"AudioParam"},
{"AudioProcessingEvent"},
{"AudioScheduledSourceNode"},
{"BarProp"},
{"BeforeUnloadEvent"},
{"BiquadFilterNode"},
{"Blob"},
{"BlobEvent"},
{"ByteLengthQueuingStrategy"},
{"CDATASection"},
{"CSS"},
{"CanvasGradient"},
{"CanvasPattern"},
{"CanvasRenderingContext2D"},
{"ChannelMergerNode"},
{"ChannelSplitterNode"},
{"CharacterData"},
{"ClipboardEvent"},
{"CloseEvent"},
{"Comment"},
{"CompositionEvent"},
{"ConvolverNode"},
{"CountQueuingStrategy"},
{"Crypto"},
{"CustomElementRegistry"},
{"CustomEvent"},
{"DOMException"},
{"DOMImplementation"},
{"DOMMatrix"},
{"DOMMatrixReadOnly"},
{"DOMParser"},
{"DOMPoint"},
{"DOMPointReadOnly"},
{"DOMQuad"},
{"DOMRect"},
{"DOMRectList"},
{"DOMRectReadOnly"},
{"DOMStringList"},
{"DOMStringMap"},
{"DOMTokenList"},
{"DataTransfer"},
{"DataTransferItem"},
{"DataTransferItemList"},
{"DelayNode"},
{"Document"},
{"DocumentFragment"},
{"DocumentTimeline"},
{"DocumentType"},
{"DragEvent"},
{"DynamicsCompressorNode"},
{"Element"},
{"ErrorEvent"},
{"EventSource"},
{"File"},
{"FileList"},
{"FileReader"},
{"FocusEvent"},
{"FontFace"},
{"FormData"},
{"GainNode"},
{"Gamepad"},
{"GamepadButton"},
{"GamepadEvent"},
{"Geolocation"},
{"GeolocationPositionError"},
{"HTMLAllCollection"},
{"HTMLAnchorElement"},
{"HTMLAreaElement"},
{"HTMLAudioElement"},
{"HTMLBRElement"},
{"HTMLBaseElement"},
{"HTMLBodyElement"},
{"HTMLButtonElement"},
{"HTMLCanvasElement"},
{"HTMLCollection"},
{"HTMLDListElement"},
{"HTMLDataElement"},
{"HTMLDataListElement"},
{"HTMLDetailsElement"},
{"HTMLDirectoryElement"},
{"HTMLDivElement"},
{"HTMLDocument"},
{"HTMLElement"},
{"HTMLEmbedElement"},
{"HTMLFieldSetElement"},
{"HTMLFontElement"},
{"HTMLFormControlsCollection"},
{"HTMLFormElement"},
{"HTMLFrameElement"},
{"HTMLFrameSetElement"},
{"HTMLHRElement"},
{"HTMLHeadElement"},
{"HTMLHeadingElement"},
{"HTMLHtmlElement"},
{"HTMLIFrameElement"},
{"HTMLImageElement"},
{"HTMLInputElement"},
{"HTMLLIElement"},
{"HTMLLabelElement"},
{"HTMLLegendElement"},
{"HTMLLinkElement"},
{"HTMLMapElement"},
{"HTMLMarqueeElement"},
{"HTMLMediaElement"},
{"HTMLMenuElement"},
{"HTMLMetaElement"},
{"HTMLMeterElement"},
{"HTMLModElement"},
{"HTMLOListElement"},
{"HTMLObjectElement"},
{"HTMLOptGroupElement"},
{"HTMLOptionElement"},
{"HTMLOptionsCollection"},
{"HTMLOutputElement"},
{"HTMLParagraphElement"},
{"HTMLParamElement"},
{"HTMLPictureElement"},
{"HTMLPreElement"},
{"HTMLProgressElement"},
{"HTMLQuoteElement"},
{"HTMLScriptElement"},
{"HTMLSelectElement"},
{"HTMLSlotElement"},
{"HTMLSourceElement"},
{"HTMLSpanElement"},
{"HTMLStyleElement"},
{"HTMLTableCaptionElement"},
{"HTMLTableCellElement"},
{"HTMLTableColElement"},
{"HTMLTableElement"},
{"HTMLTableRowElement"},
{"HTMLTableSectionElement"},
{"HTMLTemplateElement"},
{"HTMLTextAreaElement"},
{"HTMLTimeElement"},
{"HTMLTitleElement"},
{"HTMLTrackElement"},
{"HTMLUListElement"},
{"HTMLUnknownElement"},
{"HTMLVideoElement"},
{"HashChangeEvent"},
{"Headers"},
{"History"},
{"IDBCursor"},
{"IDBCursorWithValue"},
{"IDBDatabase"},
{"IDBFactory"},
{"IDBIndex"},
{"IDBKeyRange"},
{"IDBObjectStore"},
{"IDBOpenDBRequest"},
{"IDBRequest"},
{"IDBTransaction"},
{"IDBVersionChangeEvent"},
{"Image"},
{"ImageData"},
{"InputEvent"},
{"IntersectionObserver"},
{"IntersectionObserverEntry"},
{"KeyboardEvent"},
{"KeyframeEffect"},
{"Location"},
{"MediaCapabilities"},
{"MediaElementAudioSourceNode"},
{"MediaEncryptedEvent"},
{"MediaError"},
{"MediaList"},
{"MediaQueryList"},
{"MediaQueryListEvent"},
{"MediaRecorder"},
{"MediaSource"},
{"MediaStream"},
{"MediaStreamAudioDestinationNode"},
{"MediaStreamAudioSourceNode"},
{"MediaStreamTrack"},
{"MediaStreamTrackEvent"},
{"MimeType"},
{"MimeTypeArray"},
{"MouseEvent"},
{"MutationEvent"},
{"MutationObserver"},
{"MutationRecord"},
{"NamedNodeMap"},
{"Navigator"},
{"Node"},
{"NodeFilter"},
{"NodeIterator"},
{"NodeList"},
{"Notification"},
{"OfflineAudioCompletionEvent"},
{"Option"},
{"OscillatorNode"},
{"PageTransitionEvent"},
{"Path2D"},
{"Performance"},
{"PerformanceEntry"},
{"PerformanceMark"},
{"PerformanceMeasure"},
{"PerformanceNavigation"},
{"PerformanceObserver"},
{"PerformanceObserverEntryList"},
{"PerformanceResourceTiming"},
{"PerformanceTiming"},
{"PeriodicWave"},
{"Plugin"},
{"PluginArray"},
{"PointerEvent"},
{"PopStateEvent"},
{"ProcessingInstruction"},
{"ProgressEvent"},
{"PromiseRejectionEvent"},
{"RTCCertificate"},
{"RTCDTMFSender"},
{"RTCDTMFToneChangeEvent"},
{"RTCDataChannel"},
{"RTCDataChannelEvent"},
{"RTCIceCandidate"},
{"RTCPeerConnection"},
{"RTCPeerConnectionIceEvent"},
{"RTCRtpReceiver"},
{"RTCRtpSender"},
{"RTCRtpTransceiver"},
{"RTCSessionDescription"},
{"RTCStatsReport"},
{"RTCTrackEvent"},
{"RadioNodeList"},
{"Range"},
{"ReadableStream"},
{"Request"},
{"ResizeObserver"},
{"ResizeObserverEntry"},
{"Response"},
{"Screen"},
{"ScriptProcessorNode"},
{"SecurityPolicyViolationEvent"},
{"Selection"},
{"ShadowRoot"},
{"SourceBuffer"},
{"SourceBufferList"},
{"SpeechSynthesisEvent"},
{"SpeechSynthesisUtterance"},
{"StaticRange"},
{"Storage"},
{"StorageEvent"},
{"StyleSheet"},
{"StyleSheetList"},
{"Text"},
{"TextMetrics"},
{"TextTrack"},
{"TextTrackCue"},
{"TextTrackCueList"},
{"TextTrackList"},
{"TimeRanges"},
{"TrackEvent"},
{"TransitionEvent"},
{"TreeWalker"},
{"UIEvent"},
{"VTTCue"},
{"ValidityState"},
{"VisualViewport"},
{"WaveShaperNode"},
{"WebGLActiveInfo"},
{"WebGLBuffer"},
{"WebGLContextEvent"},
{"WebGLFramebuffer"},
{"WebGLProgram"},
{"WebGLQuery"},
{"WebGLRenderbuffer"},
{"WebGLRenderingContext"},
{"WebGLSampler"},
{"WebGLShader"},
{"WebGLShaderPrecisionFormat"},
{"WebGLSync"},
{"WebGLTexture"},
{"WebGLUniformLocation"},
{"WebKitCSSMatrix"},
{"WebSocket"},
{"WheelEvent"},
{"Window"},
{"Worker"},
{"XMLDocument"},
{"XMLHttpRequest"},
{"XMLHttpRequestEventTarget"},
{"XMLHttpRequestUpload"},
{"XMLSerializer"},
{"XPathEvaluator"},
{"XPathExpression"},
{"XPathResult"},
{"XSLTProcessor"},
{"alert"},
{"atob"},
{"blur"},
{"btoa"},
{"cancelAnimationFrame"},
{"captureEvents"},
{"close"},
{"closed"},
{"confirm"},
{"customElements"},
{"devicePixelRatio"},
{"document"},
{"event"},
{"fetch"},
{"find"},
{"focus"},
{"frameElement"},
{"frames"},
{"getComputedStyle"},
{"getSelection"},
{"history"},
{"indexedDB"},
{"isSecureContext"},
{"length"},
{"location"},
{"locationbar"},
{"matchMedia"},
{"menubar"},
{"moveBy"},
{"moveTo"},
{"name"},
{"navigator"},
{"onabort"},
{"onafterprint"},
{"onanimationend"},
{"onanimationiteration"},
{"onanimationstart"},
{"onbeforeprint"},
{"onbeforeunload"},
{"onblur"},
{"oncanplay"},
{"oncanplaythrough"},
{"onchange"},
{"onclick"},
{"oncontextmenu"},
{"oncuechange"},
{"ondblclick"},
{"ondrag"},
{"ondragend"},
{"ondragenter"},
{"ondragleave"},
{"ondragover"},
{"ondragstart"},
{"ondrop"},
{"ondurationchange"},
{"onemptied"},
{"onended"},
{"onerror"},
{"onfocus"},
{"ongotpointercapture"},
{"onhashchange"},
{"oninput"},
{"oninvalid"},
{"onkeydown"},
{"onkeypress"},
{"onkeyup"},
{"onlanguagechange"},
{"onload"},
{"onloadeddata"},
{"onloadedmetadata"},
{"onloadstart"},
{"onlostpointercapture"},
{"onmessage"},
{"onmousedown"},
{"onmouseenter"},
{"onmouseleave"},
{"onmousemove"},
{"onmouseout"},
{"onmouseover"},
{"onmouseup"},
{"onoffline"},
{"ononline"},
{"onpagehide"},
{"onpageshow"},
{"onpause"},
{"onplay"},
{"onplaying"},
{"onpointercancel"},
{"onpointerdown"},
{"onpointerenter"},
{"onpointerleave"},
{"onpointermove"},
{"onpointerout"},
{"onpointerover"},
{"onpointerup"},
{"onpopstate"},
{"onprogress"},
{"onratechange"},
{"onrejectionhandled"},
{"onreset"},
{"onresize"},
{"onscroll"},
{"onseeked"},
{"onseeking"},
{"onselect"},
{"onstalled"},
{"onstorage"},
{"onsubmit"},
{"onsuspend"},
{"ontimeupdate"},
{"ontoggle"},
{"ontransitioncancel"},
{"ontransitionend"},
{"ontransitionrun"},
{"ontransitionstart"},
{"onunhandledrejection"},
{"onunload"},
{"onvolumechange"},
{"onwaiting"},
{"onwebkitanimationend"},
{"onwebkitanimationiteration"},
{"onwebkitanimationstart"},
{"onwebkittransitionend"},
{"onwheel"},
{"open"},
{"opener"},
{"origin"},
{"outerHeight"},
{"outerWidth"},
{"parent"},
{"performance"},
{"personalbar"},
{"postMessage"},
{"print"},
{"prompt"},
{"releaseEvents"},
{"requestAnimationFrame"},
{"resizeBy"},
{"resizeTo"},
{"screen"},
{"screenLeft"},
{"screenTop"},
{"screenX"},
{"screenY"},
{"scroll"},
{"scrollBy"},
{"scrollTo"},
{"scrollbars"},
{"self"},
{"speechSynthesis"},
{"status"},
{"statusbar"},
{"stop"},
{"toolbar"},
{"top"},
{"webkitURL"},
{"window"},
}
// We currently only support compile-time replacement with certain expressions:
//
// - Primitive literals
// - Identifiers
// - "Entity names" which are identifiers followed by property accesses
//
// We don't support arbitrary expressions because arbitrary expressions may
// require the full AST. For example, there could be "import()" or "require()"
// expressions that need an import record. We also need to re-generate some
// nodes such as identifiers within the injected context so that they can
// bind to symbols in that context. Other expressions such as "this" may
// also be contextual.
type DefineExpr struct {
	// A primitive literal to substitute in place of the matched expression
	Constant js_ast.E
	// An identifier or dotted "entity name" path (e.g. ["process", "env"])
	Parts []string
	// Index into the list of injected defines
	// NOTE(review): presumably only one of these three fields is populated
	// per expression — confirm against the parser's usage
	InjectedDefineIndex ast.Index32
}

// DefineData describes one substitution entry: either a user-specified
// "--define" or a built-in known global.
type DefineData struct {
	// The dot-separated key being matched (e.g. ["process", "env", "NODE_ENV"])
	KeyParts []string
	// The replacement value, if any (nil for pure side-effect annotations)
	DefineExpr *DefineExpr
	Flags      DefineFlags
}

// DefineFlags is a bit set of extra facts about a define entry.
type DefineFlags uint8

const (
	// True if accessing this value is known to not have any side effects. For
	// example, a bare reference to "Object.create" can be removed because it
	// does not have any observable side effects.
	CanBeRemovedIfUnused DefineFlags = 1 << iota

	// True if a call to this value is known to not have any side effects. For
	// example, a bare call to "Object()" can be removed because it does not
	// have any observable side effects.
	CallCanBeUnwrappedIfUnused

	// If true, the user has indicated that every direct calls to a property on
	// this object and all of that call's arguments are to be removed from the
	// output, even when the arguments have side effects. This is used to
	// implement the "--drop:console" flag.
	MethodCallsMustBeReplacedWithUndefined

	// Symbol values are known to not have side effects when used as property
	// names in class declarations and object literals.
	IsSymbolInstance
)
// Has reports whether the given flag bit is set in this flag set.
func (flags DefineFlags) Has(flag DefineFlags) bool {
	return flags&flag != 0
}
// mergeDefineData combines a previously-registered define with a newer entry
// for the same key. The newer entry wins for every field, except that flags
// from the older entry are preserved (flags only ever accumulate).
//
// The second parameter was renamed from "new" to avoid shadowing Go's
// predeclared "new" identifier inside this function.
func mergeDefineData(old DefineData, override DefineData) DefineData {
	override.Flags |= old.Flags
	return override
}
// ProcessedDefines is the result of flattening a list of DefineData entries
// into fast lookup tables for the parser (see ProcessDefines).
type ProcessedDefines struct {
	// Defines for bare identifiers, keyed by the identifier name
	IdentifierDefines map[string]DefineData
	// Defines for property accesses, keyed by the LAST part of the dotted
	// path (e.g. "c" for "a.b.c"); multiple paths can share the same tail
	DotDefines map[string][]DefineData
}
// This transformation is expensive, so we only want to do it once. Make sure
// to only call processDefines() once per compilation. Unfortunately Golang
// doesn't have an efficient way to copy a map and the overhead of copying
// all of the properties into a new map once for every new parser noticeably
// slows down our benchmarks.
func ProcessDefines(userDefines []DefineData) ProcessedDefines {
	// Optimization: reuse known globals if there are no user-specified defines.
	// "processedGlobals" is a package-level cache guarded by
	// "processedGlobalsMutex" (both declared elsewhere in this package).
	hasUserDefines := len(userDefines) != 0
	if !hasUserDefines {
		processedGlobalsMutex.Lock()
		if processedGlobals != nil {
			defer processedGlobalsMutex.Unlock()
			return *processedGlobals
		}
		processedGlobalsMutex.Unlock()
	}
	result := ProcessedDefines{
		IdentifierDefines: make(map[string]DefineData),
		DotDefines:        make(map[string][]DefineData),
	}
	// Mark these property accesses as free of side effects. That means they can
	// be removed if their result is unused. We can't just remove all unused
	// property accesses since property accesses can have side effects. For
	// example, the property access "a.b.c" has the side effect of throwing an
	// exception if "a.b" is undefined.
	for _, parts := range knownGlobals {
		tail := parts[len(parts)-1]
		if len(parts) == 1 {
			// Bare identifiers like "Object" go in the identifier table
			result.IdentifierDefines[tail] = DefineData{Flags: CanBeRemovedIfUnused}
		} else {
			flags := CanBeRemovedIfUnused
			// All properties on the "Symbol" global are currently symbol instances
			// (i.e. "typeof Symbol.iterator === 'symbol'"). This is used to avoid
			// treating properties with these names as having side effects.
			if parts[0] == "Symbol" {
				flags |= IsSymbolInstance
			}
			// Dotted paths are keyed by their final component
			result.DotDefines[tail] = append(result.DotDefines[tail], DefineData{KeyParts: parts, Flags: flags})
		}
	}
	// Swap in certain literal values because those can be constant folded
	result.IdentifierDefines["undefined"] = DefineData{
		DefineExpr: &DefineExpr{Constant: js_ast.EUndefinedShared},
	}
	result.IdentifierDefines["NaN"] = DefineData{
		DefineExpr: &DefineExpr{Constant: &js_ast.ENumber{Value: math.NaN()}},
	}
	result.IdentifierDefines["Infinity"] = DefineData{
		DefineExpr: &DefineExpr{Constant: &js_ast.ENumber{Value: math.Inf(1)}},
	}
	// Then copy the user-specified defines in afterwards, which will overwrite
	// any known globals above.
	for _, data := range userDefines {
		// Identifier defines are special-cased
		if len(data.KeyParts) == 1 {
			name := data.KeyParts[0]
			result.IdentifierDefines[name] = mergeDefineData(result.IdentifierDefines[name], data)
			continue
		}
		tail := data.KeyParts[len(data.KeyParts)-1]
		dotDefines := result.DotDefines[tail]
		found := false
		// Try to merge with existing dot defines first
		for i, define := range dotDefines {
			if helpers.StringArraysEqual(data.KeyParts, define.KeyParts) {
				dotDefines[i] = mergeDefineData(dotDefines[i], data)
				found = true
				break
			}
		}
		if !found {
			dotDefines = append(dotDefines, data)
		}
		result.DotDefines[tail] = dotDefines
	}
	// Potentially cache the result for next time. Another goroutine may have
	// filled the cache while we were working; keep the first result if so.
	if !hasUserDefines {
		processedGlobalsMutex.Lock()
		defer processedGlobalsMutex.Unlock()
		if processedGlobals == nil {
			processedGlobals = &result
		}
	}
	return result
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/config/config.go | internal/config/config.go | package config
import (
"fmt"
"regexp"
"strings"
"sync"
"sync/atomic"
"github.com/evanw/esbuild/internal/ast"
"github.com/evanw/esbuild/internal/compat"
"github.com/evanw/esbuild/internal/css_ast"
"github.com/evanw/esbuild/internal/js_ast"
"github.com/evanw/esbuild/internal/logger"
)
// JSXOptions holds esbuild's resolved JSX transform settings.
type JSXOptions struct {
	Factory          DefineExpr // The "React.createElement"-style factory expression
	Fragment         DefineExpr // The "React.Fragment"-style fragment expression
	Parse            bool       // Whether to parse JSX syntax at all
	Preserve         bool       // Pass JSX through to the output unchanged
	AutomaticRuntime bool       // Use the React 17+ automatic JSX runtime
	ImportSource     string     // Module to import the automatic runtime from
	Development      bool       // Use the development variant of the runtime
	SideEffects      bool       // Whether JSX expressions may have side effects
}

// TSJSX mirrors the possible values of the "jsx" field in "tsconfig.json".
type TSJSX uint8

const (
	TSJSXNone TSJSX = iota
	TSJSXPreserve
	TSJSXReactNative
	TSJSXReact
	TSJSXReactJSX
	TSJSXReactJSXDev
)

// TSOptions holds esbuild's resolved TypeScript parsing settings.
type TSOptions struct {
	Config TSConfig
	Parse  bool
	// Disambiguates "<" in ".mts"/".cts" files where type assertions are invalid
	NoAmbiguousLessThan bool
}

// TSConfigJSX holds JSX-related fields parsed from "tsconfig.json".
type TSConfigJSX struct {
	// If not empty, these should override the default values
	JSXFactory         []string // Default if empty: "React.createElement"
	JSXFragmentFactory []string // Default if empty: "React.Fragment"
	JSXImportSource    *string  // Default if empty: "react"
	JSX                TSJSX
}
// ApplyExtendedConfig merges JSX settings from a base "tsconfig.json" file
// (the target of an "extends" clause) into this derived config. Only fields
// that the base file actually specified are copied over.
func (derived *TSConfigJSX) ApplyExtendedConfig(base TSConfigJSX) {
	if factory := base.JSXFactory; factory != nil {
		derived.JSXFactory = factory
	}
	if fragment := base.JSXFragmentFactory; fragment != nil {
		derived.JSXFragmentFactory = fragment
	}
	if source := base.JSXImportSource; source != nil {
		derived.JSXImportSource = source
	}
	if jsx := base.JSX; jsx != TSJSXNone {
		derived.JSX = jsx
	}
}
// ApplyTo transfers the JSX settings from a parsed "tsconfig.json" file onto
// esbuild's own JSX options. Several cases deliberately leave fields alone;
// see the inline comments for the reasoning behind each one.
func (tsConfig *TSConfigJSX) ApplyTo(jsxOptions *JSXOptions) {
	switch tsConfig.JSX {
	case TSJSXPreserve, TSJSXReactNative:
		// Deliberately don't set "Preserve = true" here. Some tools from Vercel
		// apparently automatically set "jsx": "preserve" in "tsconfig.json" and
		// people are then confused when esbuild preserves their JSX. Ignoring this
		// value means you now have to explicitly pass "--jsx=preserve" to esbuild
		// to get this behavior.

	case TSJSXReact:
		jsxOptions.AutomaticRuntime = false
		jsxOptions.Development = false

	case TSJSXReactJSX:
		jsxOptions.AutomaticRuntime = true
		// Deliberately don't set "Development = false" here. People want to be
		// able to have "react-jsx" in their "tsconfig.json" file and then swap
		// that to "react-jsxdev" by passing "--jsx-dev" to esbuild.

	case TSJSXReactJSXDev:
		jsxOptions.AutomaticRuntime = true
		jsxOptions.Development = true
	}

	// Factory/fragment/import-source only override when explicitly present
	if len(tsConfig.JSXFactory) > 0 {
		jsxOptions.Factory = DefineExpr{Parts: tsConfig.JSXFactory}
	}
	if len(tsConfig.JSXFragmentFactory) > 0 {
		jsxOptions.Fragment = DefineExpr{Parts: tsConfig.JSXFragmentFactory}
	}
	if tsConfig.JSXImportSource != nil {
		jsxOptions.ImportSource = *tsConfig.JSXImportSource
	}
}
// Note: This can currently only contain primitive values. It's compared
// for equality using a structural equality comparison by the JS parser.
//
// These fields correspond to like-named "compilerOptions" settings in
// "tsconfig.json".
type TSConfig struct {
	ExperimentalDecorators  MaybeBool
	ImportsNotUsedAsValues  TSImportsNotUsedAsValues
	PreserveValueImports    MaybeBool
	Target                  TSTarget
	UseDefineForClassFields MaybeBool
	VerbatimModuleSyntax    MaybeBool
}
// ApplyExtendedConfig merges compiler settings from a base "tsconfig.json"
// file (the target of an "extends" clause) into this derived config. Fields
// left at their unspecified/zero value in the base file are not copied.
func (derived *TSConfig) ApplyExtendedConfig(base TSConfig) {
	if v := base.ExperimentalDecorators; v != Unspecified {
		derived.ExperimentalDecorators = v
	}
	if v := base.ImportsNotUsedAsValues; v != TSImportsNotUsedAsValues_None {
		derived.ImportsNotUsedAsValues = v
	}
	if v := base.PreserveValueImports; v != Unspecified {
		derived.PreserveValueImports = v
	}
	if v := base.Target; v != TSTargetUnspecified {
		derived.Target = v
	}
	if v := base.UseDefineForClassFields; v != Unspecified {
		derived.UseDefineForClassFields = v
	}
	if v := base.VerbatimModuleSyntax; v != Unspecified {
		derived.VerbatimModuleSyntax = v
	}
}
// UnusedImportFlags collapses the three overlapping "tsconfig.json" settings
// ("verbatimModuleSyntax", "preserveValueImports", "importsNotUsedAsValues")
// into the two behavioral flags esbuild actually uses.
func (cfg *TSConfig) UnusedImportFlags() TSUnusedImportFlags {
	// "verbatimModuleSyntax" implies both behaviors at once
	if cfg.VerbatimModuleSyntax == True {
		return TSUnusedImport_KeepStmt | TSUnusedImport_KeepValues
	}
	var flags TSUnusedImportFlags
	if cfg.PreserveValueImports == True {
		flags |= TSUnusedImport_KeepValues
	}
	switch cfg.ImportsNotUsedAsValues {
	case TSImportsNotUsedAsValues_Preserve, TSImportsNotUsedAsValues_Error:
		flags |= TSUnusedImport_KeepStmt
	}
	return flags
}
// Platform selects the default resolution and output conventions.
type Platform uint8

const (
	PlatformBrowser Platform = iota
	PlatformNode
	PlatformNeutral
)

// SourceMap selects how (and whether) source maps are emitted.
type SourceMap uint8

const (
	SourceMapNone SourceMap = iota
	SourceMapInline
	SourceMapLinkedWithComment
	SourceMapExternalWithoutComment
	SourceMapInlineAndExternal
)

// LegalComments selects where legal comments (e.g. license headers) go.
type LegalComments uint8

const (
	LegalCommentsInline LegalComments = iota
	LegalCommentsNone
	LegalCommentsEndOfFile
	LegalCommentsLinkedWithComment
	LegalCommentsExternalWithoutComment
)

// HasExternalFile reports whether this mode writes legal comments to a
// separate output file.
func (lc LegalComments) HasExternalFile() bool {
	return lc == LegalCommentsLinkedWithComment || lc == LegalCommentsExternalWithoutComment
}

// Loader identifies how the contents of a file are interpreted.
type Loader uint8

const (
	LoaderNone Loader = iota
	LoaderBase64
	LoaderBinary
	LoaderCopy
	LoaderCSS
	LoaderDataURL
	LoaderDefault
	LoaderEmpty
	LoaderFile
	LoaderGlobalCSS
	LoaderJS
	LoaderJSON
	LoaderWithTypeJSON // Has a "with { type: 'json' }" attribute
	LoaderJSX
	LoaderLocalCSS
	LoaderText
	LoaderTS
	LoaderTSNoAmbiguousLessThan // Used with ".mts" and ".cts"
	LoaderTSX
)

// LoaderToString is indexed by Loader and must be kept in sync with the
// const block above (note the intentional duplicate "json" and "ts" entries
// for LoaderWithTypeJSON and LoaderTSNoAmbiguousLessThan).
var LoaderToString = []string{
	"none",
	"base64",
	"binary",
	"copy",
	"css",
	"dataurl",
	"default",
	"empty",
	"file",
	"global-css",
	"js",
	"json",
	"json",
	"jsx",
	"local-css",
	"text",
	"ts",
	"ts",
	"tsx",
}
// IsTypeScript reports whether this loader parses TypeScript syntax.
func (loader Loader) IsTypeScript() bool {
	return loader == LoaderTS || loader == LoaderTSNoAmbiguousLessThan || loader == LoaderTSX
}
// IsCSS reports whether this loader parses CSS syntax.
func (loader Loader) IsCSS() bool {
	return loader == LoaderCSS || loader == LoaderGlobalCSS || loader == LoaderLocalCSS
}
// CanHaveSourceMap reports whether output produced by this loader can
// meaningfully carry a source map.
func (loader Loader) CanHaveSourceMap() bool {
	switch loader {
	case LoaderJS, LoaderJSX:
		return true
	case LoaderTS, LoaderTSNoAmbiguousLessThan, LoaderTSX:
		return true
	case LoaderCSS, LoaderGlobalCSS, LoaderLocalCSS:
		return true
	case LoaderJSON, LoaderWithTypeJSON, LoaderText:
		return true
	default:
		return false
	}
}
// LoaderFromFileExtension maps a file's base name to a loader. The longest
// matching extension wins: given registrations for both ".css" and
// ".module.css", the name "x.module.css" matches ".module.css" first.
// Returns LoaderNone when nothing matches.
func LoaderFromFileExtension(extensionToLoader map[string]Loader, base string) Loader {
	dot := strings.IndexByte(base, '.')
	if dot == -1 {
		// No extension at all: an extensionless loader may be registered
		// under the empty string
		if loader, ok := extensionToLoader[""]; ok {
			return loader
		}
		return LoaderNone
	}

	// Try progressively shorter suffixes starting at each "." from the left,
	// which checks longer compound extensions before shorter ones
	for dot != -1 {
		if loader, ok := extensionToLoader[base[dot:]]; ok {
			return loader
		}
		base = base[dot+1:]
		dot = strings.IndexByte(base, '.')
	}
	return LoaderNone
}
// Format selects the module format of the generated output.
type Format uint8

const (
	// This is used when not bundling. It means to preserve whatever form the
	// import or export was originally in. ES6 syntax stays ES6 syntax and
	// CommonJS syntax stays CommonJS syntax.
	FormatPreserve Format = iota

	// IIFE stands for immediately-invoked function expression. That looks like
	// this:
	//
	//   (() => {
	//     ... bundled code ...
	//   })();
	//
	// If the optional GlobalName is configured, then we'll write out this:
	//
	//   let globalName = (() => {
	//     ... bundled code ...
	//     return exports;
	//   })();
	//
	FormatIIFE

	// The CommonJS format looks like this:
	//
	//   ... bundled code ...
	//   module.exports = exports;
	//
	FormatCommonJS

	// The ES module format looks like this:
	//
	//   ... bundled code ...
	//   export {...};
	//
	FormatESModule
)
// KeepESMImportExportSyntax reports whether this output format passes ES
// module "import"/"export" syntax through unchanged.
func (f Format) KeepESMImportExportSyntax() bool {
	switch f {
	case FormatPreserve, FormatESModule:
		return true
	}
	return false
}
// String returns the command-line name of this format, or "" for
// FormatPreserve (which has no user-facing name).
func (f Format) String() string {
	// Indexed by Format; unset slots default to ""
	names := [...]string{
		FormatIIFE:     "iife",
		FormatCommonJS: "cjs",
		FormatESModule: "esm",
	}
	if int(f) < len(names) {
		return names[f]
	}
	return ""
}
// StdinInfo describes input piped to esbuild via stdin.
type StdinInfo struct {
	Contents      string
	SourceFile    string // The display name to use for this input
	AbsResolveDir string // Directory used to resolve relative imports
	Loader        Loader
}

// WildcardPattern matches strings of the form "<Prefix>*<Suffix>".
type WildcardPattern struct {
	Prefix string
	Suffix string
}

// ExternalMatchers holds both exact-match and wildcard external specifiers.
type ExternalMatchers struct {
	Exact    map[string]bool
	Patterns []WildcardPattern
}

// HasMatchers reports whether any matcher (exact or wildcard) is configured.
func (matchers ExternalMatchers) HasMatchers() bool {
	return len(matchers.Exact) > 0 || len(matchers.Patterns) > 0
}

// ExternalSettings splits external matchers by when they are applied:
// before or after path resolution.
type ExternalSettings struct {
	PreResolve  ExternalMatchers
	PostResolve ExternalMatchers
}

type APICall uint8

const (
	BuildCall APICall = iota
	TransformCall
)

type Mode uint8

const (
	ModePassThrough Mode = iota
	ModeConvertFormat
	ModeBundle
)

// MaybeBool is a tri-state boolean whose zero value means "not specified".
type MaybeBool uint8

const (
	Unspecified MaybeBool = iota
	True
	False
)

// CancelFlag is a thread-safe one-way cancellation flag. The zero value is
// ready to use and means "not cancelled".
type CancelFlag struct {
	uint32
}

// Cancel marks the flag as cancelled. Safe to call from any goroutine.
func (flag *CancelFlag) Cancel() {
	atomic.StoreUint32(&flag.uint32, 1)
}

// This checks for nil in one place so we don't have to do that everywhere
func (flag *CancelFlag) DidCancel() bool {
	return flag != nil && atomic.LoadUint32(&flag.uint32) != 0
}
// Options is the fully-resolved build configuration that the rest of the
// compiler consumes. It is assembled from the public API options plus
// "tsconfig.json" and target-environment information.
type Options struct {
	ModuleTypeData js_ast.ModuleTypeData
	Defines        *ProcessedDefines
	TSAlwaysStrict *TSAlwaysStrict
	MangleProps    *regexp.Regexp
	ReserveProps   *regexp.Regexp
	CancelFlag     *CancelFlag

	// When mangling property names, call this function with a callback and do
	// the property name mangling inside the callback. The callback takes an
	// argument which is the mangle cache map to mutate. These callbacks are
	// serialized so mutating the map does not require extra synchronization.
	//
	// This is a callback for determinism reasons. We may be building multiple
	// entry points in parallel that are supposed to share a single cache. We
	// don't want the order that each entry point mangles properties in to cause
	// the output to change, so we serialize the property mangling over all entry
	// points in entry point order. However, we still want to link everything in
	// parallel so only property mangling is serialized, which is implemented by
	// this function blocking until the previous entry point's property mangling
	// has finished.
	ExclusiveMangleCacheUpdate func(cb func(
		mangleCache map[string]interface{},
		cssUsedLocalNames map[string]bool,
	))

	// This is the original information that was used to generate the
	// unsupported feature sets above. It's used for error messages.
	OriginalTargetEnv string

	DropLabels      []string
	ExtensionOrder  []string
	MainFields      []string
	Conditions      []string
	AbsNodePaths    []string // The "NODE_PATH" variable from Node.js
	ExternalSettings ExternalSettings
	ExternalPackages bool
	PackageAliases   map[string]string

	// Output path configuration
	AbsOutputFile      string
	AbsOutputDir       string
	AbsOutputBase      string
	OutputExtensionJS  string
	OutputExtensionCSS string
	GlobalName         []string
	TSConfigPath       string
	TSConfigRaw        string
	ExtensionToLoader  map[string]Loader

	PublicPath      string
	InjectPaths     []string
	InjectedDefines []InjectedDefine
	InjectedFiles   []InjectedFile

	// Banner/footer text prepended/appended to output files by type
	JSBanner  string
	JSFooter  string
	CSSBanner string
	CSSFooter string

	// Templates controlling output file naming
	EntryPathTemplate []PathTemplate
	ChunkPathTemplate []PathTemplate
	AssetPathTemplate []PathTemplate

	Plugins    []Plugin
	SourceRoot string
	Stdin      *StdinInfo
	JSX        JSXOptions
	LineLimit  int

	CSSPrefixData map[css_ast.D]compat.CSSPrefix

	// Feature sets derived from the configured target environment
	UnsupportedJSFeatures  compat.JSFeature
	UnsupportedCSSFeatures compat.CSSFeature

	UnsupportedJSFeatureOverrides      compat.JSFeature
	UnsupportedJSFeatureOverridesMask  compat.JSFeature
	UnsupportedCSSFeatureOverrides     compat.CSSFeature
	UnsupportedCSSFeatureOverridesMask compat.CSSFeature

	TS   TSOptions
	Mode Mode

	PreserveSymlinks  bool
	MinifyWhitespace  bool
	MinifyIdentifiers bool
	MinifySyntax      bool
	ProfilerNames     bool
	CodeSplitting     bool
	WatchMode         bool
	AllowOverwrite    bool
	LegalComments     LegalComments

	LogPathStyle       logger.PathStyle
	CodePathStyle      logger.PathStyle
	MetafilePathStyle  logger.PathStyle
	SourcemapPathStyle logger.PathStyle

	// If true, make sure to generate a single file that can be written to stdout
	WriteToStdout bool

	OmitRuntimeForTests    bool
	OmitJSXRuntimeForTests bool
	ASCIIOnly              bool
	KeepNames              bool
	IgnoreDCEAnnotations   bool
	TreeShaking            bool
	DropDebugger           bool
	MangleQuoted           bool
	Platform               Platform
	OutputFormat           Format
	NeedsMetafile          bool
	SourceMap              SourceMap
	ExcludeSourcesContent  bool
}
// TSImportsNotUsedAsValues mirrors the deprecated "importsNotUsedAsValues"
// setting in "tsconfig.json".
type TSImportsNotUsedAsValues uint8

const (
	TSImportsNotUsedAsValues_None TSImportsNotUsedAsValues = iota
	TSImportsNotUsedAsValues_Remove
	TSImportsNotUsedAsValues_Preserve
	TSImportsNotUsedAsValues_Error
)

// These flags represent the following separate "tsconfig.json" settings:
//
// - importsNotUsedAsValues
// - preserveValueImports
// - verbatimModuleSyntax
//
// TypeScript prefers for people to use "verbatimModuleSyntax" and has
// deprecated the other two settings, but we must still support them.
// All settings are combined into these two behavioral flags for us.
type TSUnusedImportFlags uint8

// With !TSUnusedImport_KeepStmt && !TSUnusedImport_KeepValues:
//
//	"import 'foo'"                      => "import 'foo'"
//	"import * as unused from 'foo'"     => ""
//	"import { unused } from 'foo'"      => ""
//	"import { type unused } from 'foo'" => ""
//
// With TSUnusedImport_KeepStmt && !TSUnusedImport_KeepValues:
//
//	"import 'foo'"                      => "import 'foo'"
//	"import * as unused from 'foo'"     => "import 'foo'"
//	"import { unused } from 'foo'"      => "import 'foo'"
//	"import { type unused } from 'foo'" => "import 'foo'"
//
// With !TSUnusedImport_KeepStmt && TSUnusedImport_KeepValues:
//
//	"import 'foo'"                      => "import 'foo'"
//	"import * as unused from 'foo'"     => "import * as unused from 'foo'"
//	"import { unused } from 'foo'"      => "import { unused } from 'foo'"
//	"import { type unused } from 'foo'" => ""
//
// With TSUnusedImport_KeepStmt && TSUnusedImport_KeepValues:
//
//	"import 'foo'"                      => "import 'foo'"
//	"import * as unused from 'foo'"     => "import * as unused from 'foo'"
//	"import { unused } from 'foo'"      => "import { unused } from 'foo'"
//	"import { type unused } from 'foo'" => "import {} from 'foo'"
const (
	TSUnusedImport_KeepStmt TSUnusedImportFlags = 1 << iota // "importsNotUsedAsValues" != "remove"
	TSUnusedImport_KeepValues                               // "preserveValueImports" == true
)

// TSTarget tracks whether the "target" in "tsconfig.json" is below ES2022,
// which changes the default for "useDefineForClassFields".
type TSTarget uint8

const (
	TSTargetUnspecified     TSTarget = iota
	TSTargetBelowES2022     // "useDefineForClassFields" defaults to false
	TSTargetAtOrAboveES2022 // "useDefineForClassFields" defaults to true
)

// TSAlwaysStrict records the "alwaysStrict" (or related) setting along with
// where it came from for use in error messages.
type TSAlwaysStrict struct {
	// This information is only used for error messages
	Name   string
	Source logger.Source
	Range  logger.Range

	// This information can affect code transformation
	Value bool
}
// PathPlaceholder identifies a substitution token in an output path template
// (e.g. "[dir]", "[name]", "[hash]", "[ext]").
type PathPlaceholder uint8

const (
	NoPlaceholder PathPlaceholder = iota

	// The relative path from the original parent directory to the configured
	// "outbase" directory, or to the lowest common ancestor directory
	DirPlaceholder

	// The original name of the file, or the manual chunk name, or the name of
	// the type of output file ("entry" or "chunk" or "asset")
	NamePlaceholder

	// A hash of the contents of this file, and the contents and output paths of
	// all dependencies (except for their hash placeholders)
	HashPlaceholder

	// The original extension of the file, or the name of the output file
	// (e.g. "css", "svg", "png")
	ExtPlaceholder
)

// PathTemplate is one segment of an output path: literal text followed by an
// optional placeholder.
type PathTemplate struct {
	Data        string
	Placeholder PathPlaceholder
}

// PathPlaceholders holds the substitution values for each placeholder kind.
// A nil field means "no substitution available for that placeholder".
type PathPlaceholders struct {
	Dir  *string
	Name *string
	Hash *string
	Ext  *string
}

// Get returns the substitution value for the given placeholder kind, or nil
// if there is none (including for NoPlaceholder).
func (placeholders PathPlaceholders) Get(placeholder PathPlaceholder) *string {
	switch placeholder {
	case DirPlaceholder:
		return placeholders.Dir
	case NamePlaceholder:
		return placeholders.Name
	case HashPlaceholder:
		return placeholders.Hash
	case ExtPlaceholder:
		return placeholders.Ext
	}
	return nil
}
// TemplateToString renders a parsed path template back into its textual
// form, spelling each placeholder as "[dir]", "[name]", "[hash]", or "[ext]".
func TemplateToString(template []PathTemplate) string {
	// Fast path: a single literal part needs no builder allocation
	if len(template) == 1 && template[0].Placeholder == NoPlaceholder {
		return template[0].Data
	}

	var sb strings.Builder
	for _, part := range template {
		token := ""
		switch part.Placeholder {
		case DirPlaceholder:
			token = "[dir]"
		case NamePlaceholder:
			token = "[name]"
		case HashPlaceholder:
			token = "[hash]"
		case ExtPlaceholder:
			token = "[ext]"
		}
		sb.WriteString(part.Data)
		sb.WriteString(token)
	}
	return sb.String()
}
// HasPlaceholder reports whether any part of the template uses the given
// placeholder kind.
func HasPlaceholder(template []PathTemplate, placeholder PathPlaceholder) bool {
	for i := range template {
		if template[i].Placeholder == placeholder {
			return true
		}
	}
	return false
}
// SubstituteTemplate fills in the provided placeholder values and merges
// adjacent literal parts, returning a new (possibly shorter) template.
// Parts whose placeholder has no substitution value are left untouched.
func SubstituteTemplate(template []PathTemplate, placeholders PathPlaceholders) []PathTemplate {
	// Don't allocate if no substitution is possible and the template is already minimal
	shouldSubstitute := false
	for i, part := range template {
		if placeholders.Get(part.Placeholder) != nil || (part.Placeholder == NoPlaceholder && i+1 < len(template)) {
			shouldSubstitute = true
			break
		}
	}
	if !shouldSubstitute {
		return template
	}

	// Otherwise, substitute and merge as appropriate
	result := make([]PathTemplate, 0, len(template))
	for _, part := range template {
		// Replace this part's placeholder with its literal value, if known
		if sub := placeholders.Get(part.Placeholder); sub != nil {
			part.Data += *sub
			part.Placeholder = NoPlaceholder
		}

		// If the previous part ended in a literal, fold this part into it so
		// consecutive literals collapse into one
		if last := len(result) - 1; last >= 0 && result[last].Placeholder == NoPlaceholder {
			last := &result[last]
			last.Data += part.Data
			last.Placeholder = part.Placeholder
		} else {
			result = append(result, part)
		}
	}
	return result
}
// ShouldCallRuntimeRequire reports whether generated code must call the
// runtime's "__require" shim instead of a native "require".
func ShouldCallRuntimeRequire(mode Mode, outputFormat Format) bool {
	// Only bundled, non-CommonJS output needs the shim
	if mode != ModeBundle {
		return false
	}
	return outputFormat != FormatCommonJS
}
// InjectedDefine is a non-literal "--define" value that gets turned into a
// synthetic injected file.
type InjectedDefine struct {
	Data   js_ast.E
	Name   string
	Source logger.Source
}

// InjectedFile describes a file added to the build via "--inject".
type InjectedFile struct {
	Exports    []InjectableExport
	DefineName string // For injected files generated when you "--define" a non-literal
	Source     logger.Source
	IsCopyLoader bool // If you set the loader to "copy" (see https://github.com/evanw/esbuild/issues/3041)
}

// InjectableExport is one named export made available by an injected file.
type InjectableExport struct {
	Alias string
	Loc   logger.Loc
}

// Package-level cache of compiled plugin filter regexps, guarded by
// filterMutex (see compileFilter below).
var filterMutex sync.Mutex
var filterCache map[string]*regexp.Regexp
// compileFilter compiles a plugin filter string to a regular expression,
// caching results in the package-level "filterCache" map. It returns nil if
// the filter is empty or is not a valid Go regular expression.
func compileFilter(filter string) (result *regexp.Regexp) {
	if filter == "" {
		// Must provide a filter
		return nil
	}
	ok := false

	// Cache hit?
	(func() {
		filterMutex.Lock()
		defer filterMutex.Unlock()
		if filterCache != nil {
			result, ok = filterCache[filter]
		}
	})()
	if ok {
		return
	}

	// Cache miss
	// NOTE(review): two goroutines can both miss and compile the same filter;
	// this looks harmless since the later store just overwrites an equivalent
	// value, but confirm that's intentional
	result, err := regexp.Compile(filter)
	if err != nil {
		return nil
	}

	// Cache for next time
	filterMutex.Lock()
	defer filterMutex.Unlock()
	if filterCache == nil {
		filterCache = make(map[string]*regexp.Regexp)
	}
	filterCache[filter] = result
	return
}
// CompileFilterForPlugin compiles a plugin-supplied filter string, returning
// a descriptive error (tagged with the plugin name and hook kind) when the
// filter is missing or is not a valid Go regular expression.
func CompileFilterForPlugin(pluginName string, kind string, filter string) (*regexp.Regexp, error) {
	if filter == "" {
		return nil, fmt.Errorf("[%s] %q is missing a filter", pluginName, kind)
	}

	if re := compileFilter(filter); re != nil {
		return re, nil
	}
	return nil, fmt.Errorf("[%s] %q filter is not a valid Go regular expression: %q", pluginName, kind, filter)
}
// PluginAppliesToPath reports whether a plugin hook's namespace and filter
// both match the given path. An empty namespace matches every namespace.
func PluginAppliesToPath(path logger.Path, filter *regexp.Regexp, namespace string) bool {
	if namespace != "" && path.Namespace != namespace {
		return false
	}
	return filter.MatchString(path.Text)
}
////////////////////////////////////////////////////////////////////////////////
// Plugin API

// Plugin is the internal representation of a user plugin: a name plus the
// callbacks registered for each hook.
type Plugin struct {
	Name      string
	OnStart   []OnStart
	OnResolve []OnResolve
	OnLoad    []OnLoad
}

// OnStart is a callback invoked when a build starts.
type OnStart struct {
	Callback func() OnStartResult
	Name     string
}

type OnStartResult struct {
	ThrownError error
	Msgs        []logger.Msg
}

// OnResolve is a path-resolution callback, gated by a namespace and a
// compiled filter regexp.
type OnResolve struct {
	Filter    *regexp.Regexp
	Callback  func(OnResolveArgs) OnResolveResult
	Name      string
	Namespace string
}

type OnResolveArgs struct {
	Path       string
	ResolveDir string
	PluginData interface{}
	Importer   logger.Path
	Kind       ast.ImportKind
	With       logger.ImportAttributes
}

type OnResolveResult struct {
	PluginName string

	Msgs        []logger.Msg
	ThrownError error

	// Files/directories to watch for changes in watch mode
	AbsWatchFiles []string
	AbsWatchDirs  []string

	PluginData       interface{}
	Path             logger.Path
	External         bool
	IsSideEffectFree bool
}

// OnLoad is a content-loading callback, gated by a namespace and a compiled
// filter regexp.
type OnLoad struct {
	Filter    *regexp.Regexp
	Callback  func(OnLoadArgs) OnLoadResult
	Name      string
	Namespace string
}

type OnLoadArgs struct {
	PluginData interface{}
	Path       logger.Path
}

type OnLoadResult struct {
	PluginName string

	Contents      *string
	AbsResolveDir string
	PluginData    interface{}

	Msgs        []logger.Msg
	ThrownError error

	// Files/directories to watch for changes in watch mode
	AbsWatchFiles []string
	AbsWatchDirs  []string

	Loader Loader
}
// PrettyPrintTargetEnvironment describes the configured target environment
// for use in error messages, e.g. "the configured target environment
// (es2017 + 2 overrides)". The override count is the number of bits set in
// the supplied feature-override mask.
func PrettyPrintTargetEnvironment(originalTargetEnv string, unsupportedJSFeatureOverridesMask compat.JSFeature) (where string) {
	where = "the configured target environment"
	if originalTargetEnv == "" {
		return
	}

	suffix := ""
	if mask := unsupportedJSFeatureOverridesMask; mask != 0 {
		// Count the set bits with Kernighan's trick: each iteration clears
		// the lowest set bit
		count := 0
		for mask != 0 {
			mask &= mask - 1
			count++
		}
		plural := "s"
		if count == 1 {
			plural = ""
		}
		suffix = fmt.Sprintf(" + %d override%s", count, plural)
	}

	where = fmt.Sprintf("%s (%s%s)", where, originalTargetEnv, suffix)
	return
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/ast/ast.go | internal/ast/ast.go | package ast
// This file contains data structures that are used with the AST packages for
// both JavaScript and CSS. This helps the bundler treat both AST formats in
// a somewhat format-agnostic manner.
import (
"sort"
"strings"
"github.com/evanw/esbuild/internal/helpers"
"github.com/evanw/esbuild/internal/logger"
)
// ImportKind classifies how a dependency was referenced in the source code.
type ImportKind uint8

const (
	// An entry point provided by the user
	ImportEntryPoint ImportKind = iota

	// An ES6 import or re-export statement
	ImportStmt

	// A call to "require()"
	ImportRequire

	// An "import()" expression with a string argument
	ImportDynamic

	// A call to "require.resolve()"
	ImportRequireResolve

	// A CSS "@import" rule
	ImportAt

	// A CSS "composes" declaration
	ImportComposesFrom

	// A CSS "url(...)" token
	ImportURL
)
// StringForMetafile returns the identifier used for this import kind in the
// JSON metafile. It panics on an unknown kind, which indicates a bug.
func (kind ImportKind) StringForMetafile() string {
	switch kind {
	case ImportEntryPoint:
		return "entry-point"
	case ImportStmt:
		return "import-statement"
	case ImportRequire:
		return "require-call"
	case ImportDynamic:
		return "dynamic-import"
	case ImportRequireResolve:
		return "require-resolve"
	case ImportAt:
		return "import-rule"
	case ImportComposesFrom:
		return "composes-from"
	case ImportURL:
		return "url-token"
	}
	panic("Internal error")
}
// IsFromCSS reports whether this import originated from CSS syntax.
func (kind ImportKind) IsFromCSS() bool {
	return kind == ImportAt || kind == ImportComposesFrom || kind == ImportURL
}
// MustResolveToCSS reports whether this import is only valid when it
// resolves to another CSS file.
func (kind ImportKind) MustResolveToCSS() bool {
	return kind == ImportAt || kind == ImportComposesFrom
}
// ImportPhase distinguishes which evaluation phase an import statement
// requests (per the TC39 proposals linked below).
type ImportPhase uint8

const (
	EvaluationPhase ImportPhase = iota

	// See: https://github.com/tc39/proposal-defer-import-eval
	DeferPhase

	// See: https://github.com/tc39/proposal-source-phase-imports
	SourcePhase
)
// ImportRecordFlags is a bit set of extra facts about an import record.
type ImportRecordFlags uint16

const (
	// Sometimes the parser creates an import record and decides it isn't needed.
	// For example, TypeScript code may have import statements that later turn
	// out to be type-only imports after analyzing the whole file.
	IsUnused ImportRecordFlags = 1 << iota

	// If this is true, the import contains syntax like "* as ns". This is used
	// to determine whether modules that have no exports need to be wrapped in a
	// CommonJS wrapper or not.
	ContainsImportStar

	// If this is true, the import contains an import for the alias "default",
	// either via the "import x from" or "import {default as x} from" syntax.
	ContainsDefaultAlias

	// If this is true, the import contains an import for the alias "__esModule",
	// via the "import {__esModule} from" syntax.
	ContainsESModuleAlias

	// If true, this "export * from 'path'" statement is evaluated at run-time by
	// calling the "__reExport()" helper function
	CallsRunTimeReExportFn

	// Tell the printer to wrap this call to "require()" in "__toESM(...)"
	WrapWithToESM

	// Tell the printer to wrap this ESM exports object in "__toCJS(...)"
	WrapWithToCJS

	// Tell the printer to use the runtime "__require()" instead of "require()"
	CallRuntimeRequire

	// True for the following cases:
	//
	//   try { require('x') } catch { handle }
	//   try { await import('x') } catch { handle }
	//   try { require.resolve('x') } catch { handle }
	//   import('x').catch(handle)
	//   import('x').then(_, handle)
	//
	// In these cases we shouldn't generate an error if the path could not be
	// resolved.
	HandlesImportErrors

	// If true, this was originally written as a bare "import 'file'" statement
	WasOriginallyBareImport

	// If true, this import can be removed if it's unused
	IsExternalWithoutSideEffects

	// If true, "assert { type: 'json' }" was present
	AssertTypeJSON

	// If true, do not generate "external": true in the metafile
	ShouldNotBeExternalInMetafile

	// CSS "@import" of an empty file should be removed
	WasLoadedWithEmptyLoader

	// Unique keys are randomly-generated strings that are used to replace paths
	// in the source code after it's printed. These must not ever be split apart.
	ContainsUniqueKey
)
func (flags ImportRecordFlags) Has(flag ImportRecordFlags) bool {
return (flags & flag) != 0
}
type ImportRecord struct {
AssertOrWith *ImportAssertOrWith
GlobPattern *GlobPattern
Path logger.Path
Range logger.Range
// If the "HandlesImportErrors" flag is present, then this is the location
// of the error handler. This is used for error reporting.
ErrorHandlerLoc logger.Loc
// The resolved source index for an internal import (within the bundle) or
// invalid for an external import (not included in the bundle)
SourceIndex Index32
// Files imported via the "copy" loader use this instead of "SourceIndex"
// because they are sort of like external imports, and are not bundled.
CopySourceIndex Index32
Flags ImportRecordFlags
Phase ImportPhase
Kind ImportKind
}
// AssertOrWithKeyword distinguishes the legacy "assert" keyword from the
// standard "with" keyword on import attribute clauses.
type AssertOrWithKeyword uint8

const (
	AssertKeyword AssertOrWithKeyword = iota
	WithKeyword
)

// String returns the keyword as it appears in JavaScript source code.
func (kw AssertOrWithKeyword) String() string {
	switch kw {
	case AssertKeyword:
		return "assert"
	default:
		return "with"
	}
}
// ImportAssertOrWith records an import assertion ("assert {...}") or import
// attribute ("with {...}") clause on an import, along with the locations of
// the relevant tokens for diagnostics and printing.
type ImportAssertOrWith struct {
	Entries []AssertOrWithEntry

	// Location of the "assert" or "with" keyword itself
	KeywordLoc logger.Loc

	// Brace locations. NOTE(review): the exact meaning of "inner" vs "outer"
	// braces is not visible in this file — confirm with the parser/printer.
	InnerOpenBraceLoc  logger.Loc
	InnerCloseBraceLoc logger.Loc
	OuterOpenBraceLoc  logger.Loc
	OuterCloseBraceLoc logger.Loc

	// Whether this clause used "assert" or "with"
	Keyword AssertOrWithKeyword
}

// AssertOrWithEntry is a single "key: value" pair inside an import assertion
// or import attribute clause.
type AssertOrWithEntry struct {
	Key   []uint16 // An identifier or a string
	Value []uint16 // Always a string

	KeyLoc   logger.Loc
	ValueLoc logger.Loc

	// If true, the key should be printed as a quoted string rather than as
	// an identifier
	PreferQuotedKey bool
}
// FindAssertOrWithEntry returns the entry whose key equals "name", or nil if
// there is no such entry. The returned pointer refers to a copy of the
// entry, so mutating it does not affect the original slice.
func FindAssertOrWithEntry(assertions []AssertOrWithEntry, name string) *AssertOrWithEntry {
	for i := range assertions {
		if helpers.UTF16EqualsString(assertions[i].Key, name) {
			entry := assertions[i]
			return &entry
		}
	}
	return nil
}
// GlobPattern describes an import whose path contains a glob pattern.
type GlobPattern struct {
	// The parsed pattern, as alternating fixed and wildcard parts
	Parts []helpers.GlobPart

	// NOTE(review): presumably the alias that matches are exported under —
	// confirm with the code that consumes glob import records.
	ExportAlias string

	// The kind of import this pattern came from
	Kind ImportKind
}
// Index32 stores an optional 32-bit index where the zero value means "no
// index". The bits are stored inverted so that a default-initialized value
// is invalid. This is a better alternative to storing the index as a
// pointer since it has the same properties but takes up less space and
// avoids an extra pointer traversal.
type Index32 struct {
	flippedBits uint32
}

// MakeIndex32 wraps an index value into a valid Index32.
func MakeIndex32(index uint32) Index32 {
	return Index32{flippedBits: ^index}
}

// IsValid reports whether an actual index is present.
func (i Index32) IsValid() bool {
	return i != (Index32{})
}

// GetIndex unwraps the stored index. Only meaningful when IsValid() is true.
func (i Index32) GetIndex() uint32 {
	return ^i.flippedBits
}
// SymbolKind classifies a symbol. The kind affects scope hoisting, symbol
// merging behavior, and how (or whether) the symbol may be renamed.
type SymbolKind uint8

const (
	// An unbound symbol is one that isn't declared in the file it's referenced
	// in. For example, using "window" without declaring it will be unbound.
	SymbolUnbound SymbolKind = iota

	// This has special merging behavior. You're allowed to re-declare these
	// symbols more than once in the same scope. These symbols are also hoisted
	// out of the scope they are declared in to the closest containing function
	// or module scope. These are the symbols with this kind:
	//
	//   - Function arguments
	//   - Function statements
	//   - Variables declared using "var"
	//
	SymbolHoisted
	SymbolHoistedFunction

	// There's a weird special case where catch variables declared using a simple
	// identifier (i.e. not a binding pattern) block hoisted variables instead of
	// becoming an error:
	//
	//   var e = 0;
	//   try { throw 1 } catch (e) {
	//     print(e) // 1
	//     var e = 2
	//     print(e) // 2
	//   }
	//   print(e) // 0 (since the hoisting stops at the catch block boundary)
	//
	// However, other forms are still a syntax error:
	//
	//   try {} catch (e) { let e }
	//   try {} catch ({e}) { var e }
	//
	// This symbol is for handling this weird special case.
	SymbolCatchIdentifier

	// Generator and async functions are not hoisted, but still have special
	// properties such as being able to overwrite previous functions with the
	// same name
	SymbolGeneratorOrAsyncFunction

	// This is the special "arguments" variable inside functions
	SymbolArguments

	// Classes can merge with TypeScript namespaces.
	SymbolClass

	// Class names are not allowed to be referenced by computed property keys
	SymbolClassInComputedPropertyKey

	// A class-private identifier (i.e. "#foo").
	//
	// NOTE: "IsPrivate" assumes that these private kinds are contiguous,
	// starting at "SymbolPrivateField" and ending at
	// "SymbolPrivateStaticGetSetPair". Keep them together.
	SymbolPrivateField
	SymbolPrivateMethod
	SymbolPrivateGet
	SymbolPrivateSet
	SymbolPrivateGetSetPair
	SymbolPrivateStaticField
	SymbolPrivateStaticMethod
	SymbolPrivateStaticGet
	SymbolPrivateStaticSet
	SymbolPrivateStaticGetSetPair

	// Labels are in their own namespace
	SymbolLabel

	// TypeScript enums can merge with TypeScript namespaces and other TypeScript
	// enums.
	SymbolTSEnum

	// TypeScript namespaces can merge with classes, functions, TypeScript enums,
	// and other TypeScript namespaces.
	SymbolTSNamespace

	// In TypeScript, imports are allowed to silently collide with symbols within
	// the module. Presumably this is because the imports may be type-only.
	SymbolImport

	// Assigning to a "const" symbol will throw a TypeError at runtime
	SymbolConst

	// Injected symbols can be overridden by provided defines
	SymbolInjected

	// Properties can optionally be renamed to shorter names
	SymbolMangledProp

	// CSS identifiers that are never renamed
	SymbolGlobalCSS

	// CSS identifiers that are renamed to be unique to the file they are in
	SymbolLocalCSS

	// This annotates all other symbols that don't have special behavior
	SymbolOther
)
// IsPrivate returns true for class-private names such as "#foo". This relies
// on all private symbol kinds being declared contiguously in the "SymbolKind"
// constant block, from "SymbolPrivateField" to "SymbolPrivateStaticGetSetPair".
func (kind SymbolKind) IsPrivate() bool {
	return kind >= SymbolPrivateField && kind <= SymbolPrivateStaticGetSetPair
}

// IsHoisted returns true for symbols that hoist to the enclosing function or
// module scope ("var" variables, function arguments, function statements).
func (kind SymbolKind) IsHoisted() bool {
	switch kind {
	case SymbolHoisted, SymbolHoistedFunction:
		return true
	}
	return false
}

// IsHoistedOrFunction returns true for hoisted symbols and also for generator
// and async function symbols (which are not themselves hoisted).
func (kind SymbolKind) IsHoistedOrFunction() bool {
	switch kind {
	case SymbolHoisted, SymbolHoistedFunction, SymbolGeneratorOrAsyncFunction:
		return true
	}
	return false
}

// IsFunction returns true for function statement symbols, including
// generator and async functions.
func (kind SymbolKind) IsFunction() bool {
	switch kind {
	case SymbolHoistedFunction, SymbolGeneratorOrAsyncFunction:
		return true
	}
	return false
}

// IsUnboundOrInjected returns true for symbols that weren't declared in the
// file itself: unbound references and injected symbols.
func (kind SymbolKind) IsUnboundOrInjected() bool {
	switch kind {
	case SymbolUnbound, SymbolInjected:
		return true
	}
	return false
}
// InvalidRef is the sentinel for "no symbol". Note that this is NOT the zero
// value of "Ref", so a default-initialized "Ref" is a valid-looking ref.
var InvalidRef Ref = Ref{^uint32(0), ^uint32(0)}

// Files are parsed in parallel for speed. We want to allow each parser to
// generate symbol IDs that won't conflict with each other. We also want to be
// able to quickly merge symbol tables from all files into one giant symbol
// table.
//
// We can accomplish both goals by giving each symbol ID two parts: a source
// index that is unique to the parser goroutine, and an inner index that
// increments as the parser generates new symbol IDs. Then a symbol map can
// be an array of arrays indexed first by source index, then by inner index.
// The maps can be merged quickly by creating a single outer array containing
// all inner arrays from all parsed files.
type Ref struct {
	SourceIndex uint32
	InnerIndex  uint32
}

// LocRef pairs a symbol reference with a location in the source file.
type LocRef struct {
	Loc logger.Loc
	Ref Ref
}

// ImportItemStatus controls how automatically-generated import items are
// treated (see the comment on "Symbol.ImportItemStatus" below).
type ImportItemStatus uint8

const (
	ImportItemNone ImportItemStatus = iota

	// The linker doesn't report import/export mismatch errors
	ImportItemGenerated

	// The printer will replace this import with "undefined"
	ImportItemMissing
)

// SymbolFlags is a bit set of boolean facts about a symbol. Flags are used
// instead of separate bool fields to keep "Symbol" small (see the comment on
// the "Flags" field below).
type SymbolFlags uint16

const (
	// Certain symbols must not be renamed or minified. For example, the
	// "arguments" variable is declared by the runtime for every function.
	// Renaming can also break any identifier used inside a "with" statement.
	MustNotBeRenamed SymbolFlags = 1 << iota

	// In React's version of JSX, lower-case names are strings while upper-case
	// names are identifiers. If we are preserving JSX syntax (i.e. not
	// transforming it), then we need to be careful to name the identifiers
	// something with a capital letter so further JSX processing doesn't treat
	// them as strings instead.
	MustStartWithCapitalLetterForJSX

	// If true, this symbol is the target of a "__name" helper function call.
	// This call is special because it deliberately doesn't count as a use
	// of the symbol (otherwise keeping names would disable tree shaking)
	// so "UseCountEstimate" is not incremented. This flag helps us know to
	// avoid optimizing this symbol when "UseCountEstimate" is 1 in this case.
	DidKeepName

	// Sometimes we lower private symbols even if they are supported. For example,
	// consider the following TypeScript code:
	//
	//   class Foo {
	//     #foo = 123
	//     bar = this.#foo
	//   }
	//
	// If "useDefineForClassFields: false" is set in "tsconfig.json", then "bar"
	// must use assignment semantics instead of define semantics. We can compile
	// that to this code:
	//
	//   class Foo {
	//     constructor() {
	//       this.#foo = 123;
	//       this.bar = this.#foo;
	//     }
	//     #foo;
	//   }
	//
	// However, we can't do the same for static fields:
	//
	//   class Foo {
	//     static #foo = 123
	//     static bar = this.#foo
	//   }
	//
	// Compiling these static fields to something like this would be invalid:
	//
	//   class Foo {
	//     static #foo;
	//   }
	//   Foo.#foo = 123;
	//   Foo.bar = Foo.#foo;
	//
	// Thus "#foo" must be lowered even though it's supported. Another case is
	// when we're converting top-level class declarations to class expressions
	// to avoid the TDZ and the class shadowing symbol is referenced within the
	// class body:
	//
	//   class Foo {
	//     static #foo = Foo
	//   }
	//
	// This cannot be converted into something like this:
	//
	//   var Foo = class {
	//     static #foo;
	//   };
	//   Foo.#foo = Foo;
	//
	PrivateSymbolMustBeLowered

	// This is used to remove the all but the last function re-declaration if a
	// function is re-declared multiple times like this:
	//
	//   function foo() { console.log(1) }
	//   function foo() { console.log(2) }
	//
	RemoveOverwrittenFunctionDeclaration

	// This flag is to avoid warning about this symbol more than once. It only
	// applies to the "module" and "exports" unbound symbols.
	DidWarnAboutCommonJSInESM

	// If this is present, the symbol could potentially be overwritten. This means
	// it's not safe to make assumptions about this symbol from the initializer.
	CouldPotentiallyBeMutated

	// This flags all symbols that were exported from the module using the ES6
	// "export" keyword, either directly on the declaration or using "export {}".
	WasExported

	// This means the symbol is a normal function that has no body statements.
	IsEmptyFunction

	// This means the symbol is a normal function that takes a single argument
	// and returns that argument.
	IsIdentityFunction

	// If true, calls to this symbol can be unwrapped (i.e. removed except for
	// argument side effects) if the result is unused.
	CallCanBeUnwrappedIfUnused
)

// Has returns true if the given flag bit is set.
func (flags SymbolFlags) Has(flag SymbolFlags) bool {
	return (flags & flag) != 0
}

// Note: the order of values in this struct matters to reduce struct size.
type Symbol struct {
	// This is used for symbols that represent items in the import clause of an
	// ES6 import statement. These should always be referenced by EImportIdentifier
	// instead of an EIdentifier. When this is present, the expression should
	// be printed as a property access off the namespace instead of as a bare
	// identifier.
	//
	// For correctness, this must be stored on the symbol instead of indirectly
	// associated with the Ref for the symbol somehow. In ES6 "flat bundling"
	// mode, re-exported symbols are collapsed using MergeSymbols() and renamed
	// symbols from other files that end up at this symbol must be able to tell
	// if it has a namespace alias.
	NamespaceAlias *NamespaceAlias

	// This is the name that came from the parser. Printed names may be renamed
	// during minification or to avoid name collisions. Do not use the original
	// name during printing.
	OriginalName string

	// Used by the parser for single pass parsing. Symbols that have been merged
	// form a linked-list where the last link is the symbol to use. This link is
	// an invalid ref if it's the last link. If this isn't invalid, you need to
	// FollowSymbols to get the real one.
	Link Ref

	// An estimate of the number of uses of this symbol. This is used to detect
	// whether a symbol is used or not. For example, TypeScript imports that are
	// unused must be removed because they are probably type-only imports. This
	// is an estimate and may not be completely accurate due to oversights in the
	// code. But it should always be non-zero when the symbol is used.
	UseCountEstimate uint32

	// This is for generating cross-chunk imports and exports for code splitting.
	ChunkIndex Index32

	// This is used for minification. Symbols that are declared in sibling scopes
	// can share a name. A good heuristic (from Google Closure Compiler) is to
	// assign names to symbols from sibling scopes in declaration order. That way
	// local variable names are reused in each global function like this, which
	// improves gzip compression:
	//
	//   function x(a, b) { ... }
	//   function y(a, b, c) { ... }
	//
	// The parser fills this in for symbols inside nested scopes. There are three
	// slot namespaces: regular symbols, label symbols, and private symbols.
	NestedScopeSlot Index32

	// Boolean values should all be flags instead to save space
	Flags SymbolFlags

	// What sort of symbol this is (see "SymbolKind")
	Kind SymbolKind

	// We automatically generate import items for property accesses off of
	// namespace imports. This lets us remove the expensive namespace imports
	// while bundling in many cases, replacing them with a cheap import item
	// instead:
	//
	//   import * as ns from 'path'
	//   ns.foo()
	//
	// That can often be replaced by this, which avoids needing the namespace:
	//
	//   import {foo} from 'path'
	//   foo()
	//
	// However, if the import is actually missing then we don't want to report a
	// compile-time error like we do for real import items. This status lets us
	// avoid this. We also need to be able to replace such import items with
	// undefined, which this status is also used for.
	ImportItemStatus ImportItemStatus
}
// You should call "MergeSymbols" instead of calling this directly. This folds
// the bookkeeping for "oldSymbol" into "newSymbol" once the two symbols have
// been linked together.
func (newSymbol *Symbol) MergeContentsWith(oldSymbol *Symbol) {
	// Uses of either symbol now count as uses of the merged symbol
	newSymbol.UseCountEstimate += oldSymbol.UseCountEstimate

	// If only the old symbol was pinned to its original name, the merged
	// symbol inherits that name and becomes pinned as well
	if !newSymbol.Flags.Has(MustNotBeRenamed) && oldSymbol.Flags.Has(MustNotBeRenamed) {
		newSymbol.OriginalName = oldSymbol.OriginalName
		newSymbol.Flags |= MustNotBeRenamed
	}

	// The JSX capitalization requirement is inherited from either symbol
	newSymbol.Flags |= oldSymbol.Flags & MustStartWithCapitalLetterForJSX
}
// SlotNamespace identifies which group of minified-name slots a symbol's
// name is assigned from (see the comment on "Symbol.NestedScopeSlot").
type SlotNamespace uint8

const (
	SlotDefault SlotNamespace = iota
	SlotLabel
	SlotPrivateName
	SlotMangledProp

	// Pseudo-namespace for symbols that must keep their original name
	SlotMustNotBeRenamed
)
// SlotNamespace returns the minified-name slot namespace for this symbol.
// Unbound symbols and symbols that must not be renamed map to the special
// "SlotMustNotBeRenamed" namespace.
func (s *Symbol) SlotNamespace() SlotNamespace {
	switch {
	case s.Kind == SymbolUnbound || s.Flags.Has(MustNotBeRenamed):
		return SlotMustNotBeRenamed
	case s.Kind.IsPrivate():
		return SlotPrivateName
	case s.Kind == SymbolLabel:
		return SlotLabel
	case s.Kind == SymbolMangledProp:
		return SlotMangledProp
	default:
		return SlotDefault
	}
}
// SlotCounts holds one slot count per slot namespace (see "SlotNamespace").
type SlotCounts [4]uint32

// UnionMax raises each count in "a" to at least the corresponding count
// in "b" (i.e. an element-wise maximum, computed in place).
func (a *SlotCounts) UnionMax(b SlotCounts) {
	for i, count := range b {
		if count > a[i] {
			a[i] = count
		}
	}
}
// NamespaceAlias describes how to print a symbol as a property access off a
// namespace (see the comment on "Symbol.NamespaceAlias").
type NamespaceAlias struct {
	// The property name to access on the namespace
	Alias string

	// The symbol for the namespace object itself
	NamespaceRef Ref
}

// SymbolMap maps a "Ref" to its "Symbol" across all source files.
type SymbolMap struct {
	// This could be represented as a "map[Ref]Symbol" but a two-level array was
	// more efficient in profiles. This appears to be because it doesn't involve
	// a hash. This representation also makes it trivial to quickly merge symbol
	// maps from multiple files together. Each file only generates symbols in a
	// single inner array, so you can join the maps together by just make a
	// single outer array containing all of the inner arrays. See the comment on
	// "Ref" for more detail.
	SymbolsForSource [][]Symbol
}
// NewSymbolMap allocates a symbol map with one (initially nil) inner slice
// per source file.
func NewSymbolMap(sourceCount int) SymbolMap {
	return SymbolMap{SymbolsForSource: make([][]Symbol, sourceCount)}
}

// Get returns a pointer to the symbol identified by "ref".
func (sm SymbolMap) Get(ref Ref) *Symbol {
	inner := sm.SymbolsForSource[ref.SourceIndex]
	return &inner[ref.InnerIndex]
}
// Returns the canonical ref that represents the ref for the provided symbol.
// This may not be the provided ref if the symbol has been merged with another
// symbol.
//
// This also performs path compression: after following a chain of links, each
// symbol along the chain is pointed directly at the canonical ref so that
// future lookups are shorter.
func FollowSymbols(symbols SymbolMap, ref Ref) Ref {
	symbol := symbols.Get(ref)
	if symbol.Link == InvalidRef {
		// This ref is already canonical
		return ref
	}
	link := FollowSymbols(symbols, symbol.Link)
	// Only write if needed to avoid concurrent map update hazards
	if symbol.Link != link {
		symbol.Link = link
	}
	return link
}
// Use this before calling "FollowSymbols" from separate threads to avoid
// concurrent map update hazards. In Go, mutating a map is not threadsafe
// but reading from a map is. Calling "FollowAllSymbols" first ensures that
// all mutation is done up front.
func FollowAllSymbols(symbols SymbolMap) {
	for sourceIndex, inner := range symbols.SymbolsForSource {
		for symbolIndex := range inner {
			FollowSymbols(symbols, Ref{
				SourceIndex: uint32(sourceIndex),
				InnerIndex:  uint32(symbolIndex),
			})
		}
	}
}
// Makes "old" point to "new" by joining the linked lists for the two symbols
// together. That way "FollowSymbols" on both "old" and "new" will result in
// the same ref.
func MergeSymbols(symbols SymbolMap, old Ref, new Ref) Ref {
	// Merging a symbol with itself is a no-op
	if old == new {
		return new
	}
	oldSymbol := symbols.Get(old)
	if oldSymbol.Link != InvalidRef {
		// "old" was already merged with something else: merge its target
		// instead, and compress the path while we're here
		oldSymbol.Link = MergeSymbols(symbols, oldSymbol.Link, new)
		return oldSymbol.Link
	}
	newSymbol := symbols.Get(new)
	if newSymbol.Link != InvalidRef {
		// "new" was already merged with something else: merge into its target
		newSymbol.Link = MergeSymbols(symbols, old, newSymbol.Link)
		return newSymbol.Link
	}
	// Both refs are canonical: link "old" to "new" and fold the old symbol's
	// bookkeeping (use counts, rename restrictions) into the new symbol
	oldSymbol.Link = new
	newSymbol.MergeContentsWith(oldSymbol)
	return new
}
// CharFreq is a histogram of identifier character frequencies, used during
// minification to give the most common characters the shortest names. The
// 64 buckets are "a-z", "A-Z", "0-9", "_", and "$" in that order, matching
// "DefaultNameMinifierJS".
type CharFreq [64]int32

// Scan adds "delta" to the bucket of every eligible identifier character in
// "text". Characters outside the 64-character alphabet are ignored. A
// negative delta subtracts counts.
func (freq *CharFreq) Scan(text string, delta int32) {
	if delta == 0 {
		return
	}

	// This matches the order in "DefaultNameMinifier"
	for _, c := range []byte(text) {
		switch {
		case c >= 'a' && c <= 'z':
			freq[c-'a'] += delta
		case c >= 'A' && c <= 'Z':
			freq[c-('A'-26)] += delta
		case c >= '0' && c <= '9':
			freq[c+(52-'0')] += delta
		case c == '_':
			freq[62] += delta
		case c == '$':
			freq[63] += delta
		}
	}
}

// Include adds every count from "other" into this histogram.
func (freq *CharFreq) Include(other *CharFreq) {
	for i, n := range other {
		freq[i] += n
	}
}
// NameMinifier is an alphabet for generating minified names: "head" holds
// the characters allowed as the first character of a name and "tail" holds
// the characters allowed for all remaining characters.
type NameMinifier struct {
	head string
	tail string
}

// The JS alphabet: letters, digits (tail only), "_", and "$"
var DefaultNameMinifierJS = NameMinifier{
	head: "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_$",
	tail: "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_$",
}

// The CSS alphabet: the same as the JS alphabet but without "$"
var DefaultNameMinifierCSS = NameMinifier{
	head: "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_",
	tail: "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_",
}

// charAndCount pairs a single character with its observed frequency.
// "index" is the character's position in the original alphabet and acts as
// a deterministic tie-breaker when counts are equal.
type charAndCount struct {
	char  string
	count int32
	index byte
}

// This type is just so we can use Go's native sort function
type charAndCountArray []charAndCount

func (a charAndCountArray) Len() int          { return len(a) }
func (a charAndCountArray) Swap(i int, j int) { a[i], a[j] = a[j], a[i] }

// Less orders by descending count, then by ascending original index
func (a charAndCountArray) Less(i int, j int) bool {
	ai := a[i]
	aj := a[j]
	return ai.count > aj.count || (ai.count == aj.count && ai.index < aj.index)
}
// ShuffleByCharFreq returns a copy of this minifier with its alphabets
// reordered so that the most frequent characters (according to "freq") come
// first. That way the shortest minified names use the most common characters.
func (source NameMinifier) ShuffleByCharFreq(freq CharFreq) NameMinifier {
	// Sort the histogram in descending order by count. Ties are broken by the
	// original alphabet order (see "charAndCountArray.Less"), which keeps the
	// result deterministic.
	//
	// Note: the scratch array is sized to the alphabet instead of being
	// hard-coded to 64 because the alphabet may be shorter than the histogram
	// (the CSS alphabet has 63 characters, not 64). The previous hard-coded
	// size left a stray zero-valued entry in the sort, which was harmless
	// (its "char" was the empty string) but unnecessary.
	array := make(charAndCountArray, len(source.tail))
	for i := 0; i < len(source.tail); i++ {
		array[i] = charAndCount{
			char:  source.tail[i : i+1],
			index: byte(i),
			count: freq[i],
		}
	}
	sort.Sort(array)

	// Compute the identifier start and identifier continue sequences
	minifier := NameMinifier{}
	for _, item := range array {
		if item.char < "0" || item.char > "9" {
			minifier.head += item.char
		}
		minifier.tail += item.char
	}
	return minifier
}
// NumberToMinifiedName converts a non-negative ordinal into a minified name.
// The first character is drawn from the "head" alphabet (so the result is a
// valid identifier start) and any remaining characters come from the "tail"
// alphabet. Distinct ordinals produce distinct names ("a", "b", ..., "aa", ...).
func (minifier NameMinifier) NumberToMinifiedName(i int) string {
	headLen := len(minifier.head)
	tailLen := len(minifier.tail)

	var name strings.Builder
	name.WriteByte(minifier.head[i%headLen])

	for i /= headLen; i > 0; i /= tailLen {
		i--
		name.WriteByte(minifier.tail[i%tailLen])
	}
	return name.String()
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/bundler/bundler.go | internal/bundler/bundler.go | package bundler
// The bundler is the core of the "build" and "transform" API calls. Each
// operation has two phases. The first phase scans the module graph, and is
// represented by the "ScanBundle" function. The second phase generates the
// output files from the module graph, and is implemented by the "Compile"
// function.
import (
"bytes"
"encoding/base32"
"encoding/base64"
"fmt"
"math/rand"
"net/http"
"net/url"
"sort"
"strings"
"sync"
"syscall"
"time"
"unicode"
"unicode/utf8"
"github.com/evanw/esbuild/internal/ast"
"github.com/evanw/esbuild/internal/cache"
"github.com/evanw/esbuild/internal/compat"
"github.com/evanw/esbuild/internal/config"
"github.com/evanw/esbuild/internal/css_parser"
"github.com/evanw/esbuild/internal/fs"
"github.com/evanw/esbuild/internal/graph"
"github.com/evanw/esbuild/internal/helpers"
"github.com/evanw/esbuild/internal/js_ast"
"github.com/evanw/esbuild/internal/js_lexer"
"github.com/evanw/esbuild/internal/js_parser"
"github.com/evanw/esbuild/internal/logger"
"github.com/evanw/esbuild/internal/resolver"
"github.com/evanw/esbuild/internal/runtime"
"github.com/evanw/esbuild/internal/sourcemap"
"github.com/evanw/esbuild/internal/xxhash"
)
// scannerFile is one file discovered during the scan phase of a build.
type scannerFile struct {
	// If "AbsMetadataFile" is present, this will be filled out with information
	// about this file in JSON format. This is a partial JSON file that will be
	// fully assembled later.
	jsonMetadataChunk string

	// Opaque data passed through from the plugin that loaded this file, if any
	pluginData interface{}

	inputFile graph.InputFile
}

// This is data related to source maps. It's computed in parallel with linking
// and must be ready by the time printing happens. This is beneficial because
// it is somewhat expensive to produce.
type DataForSourceMap struct {
	// This data is for the printer. It maps from byte offsets in the file (which
	// are stored at every AST node) to UTF-16 column offsets (required by source
	// maps).
	LineOffsetTables []sourcemap.LineOffsetTable

	// This contains the quoted contents of the original source file. It's what
	// needs to be embedded in the "sourcesContent" array in the final source
	// map. Quoting is precomputed because it's somewhat expensive.
	QuotedContents [][]byte
}

// Bundle is the result of the scan phase: the full set of parsed input files
// plus the options and entry points needed by the compile phase.
type Bundle struct {
	// The unique key prefix is a random string that is unique to every bundling
	// operation. It is used as a prefix for the unique keys assigned to every
	// chunk during linking. These unique keys are used to identify each chunk
	// before the final output paths have been computed.
	uniqueKeyPrefix string

	fs          fs.FS
	res         *resolver.Resolver
	files       []scannerFile
	entryPoints []graph.EntryPoint
	options     config.Options
}

// parseArgs is the set of arguments for a single "parseFile" call. The
// outcome is delivered on the "results" channel (and, for injected files,
// on the "inject" channel as well).
type parseArgs struct {
	fs              fs.FS
	log             logger.Log
	res             *resolver.Resolver
	caches          *cache.CacheSet
	prettyPaths     logger.PrettyPaths
	importSource    *logger.Source
	importWith      *ast.ImportAssertOrWith
	sideEffects     graph.SideEffects
	pluginData      interface{}
	results         chan parseResult
	inject          chan config.InjectedFile
	uniqueKeyPrefix string
	keyPath         logger.Path
	options         config.Options
	importPathRange logger.Range
	sourceIndex     uint32
	skipResolve     bool
}

// parseResult is the outcome of a single "parseFile" call. A zero-valued
// result (with "ok" false) indicates that loading or parsing failed.
type parseResult struct {
	resolveResults     []*resolver.ResolveResult
	globResolveResults map[uint32]globResolveResult
	file               scannerFile
	tlaCheck           tlaCheck
	ok                 bool
}

// globResolveResult holds the resolved targets for a glob-style import,
// keyed by the matched import path.
type globResolveResult struct {
	resolveResults map[string]resolver.ResolveResult
	absPath        string
	prettyPaths    logger.PrettyPaths
	exportAlias    string
}

// tlaCheck links a file to the import that pulled it in. NOTE(review):
// "tla" presumably stands for "top-level await" propagation through the
// import graph — confirm against the linker code that consumes this.
type tlaCheck struct {
	parent            ast.Index32
	depth             uint32
	importRecordIndex uint32
}
func parseFile(args parseArgs) {
pathForIdentifierName := args.keyPath.Text
// Identifier name generation may use the name of the parent folder if the
// file name starts with "index". However, this is problematic when the
// parent folder includes the parent directory of what the developer
// considers to be the root of the source tree. If that happens, strip the
// parent folder to avoid including it in the generated name.
if relative, ok := args.fs.Rel(args.options.AbsOutputBase, pathForIdentifierName); ok {
for {
next := strings.TrimPrefix(strings.TrimPrefix(relative, "../"), "..\\")
if relative == next {
break
}
relative = next
}
pathForIdentifierName = relative
}
source := logger.Source{
Index: args.sourceIndex,
KeyPath: args.keyPath,
PrettyPaths: args.prettyPaths,
IdentifierName: js_ast.GenerateNonUniqueNameFromPath(pathForIdentifierName),
}
var loader config.Loader
var absResolveDir string
var pluginName string
var pluginData interface{}
if stdin := args.options.Stdin; stdin != nil {
// Special-case stdin
source.Contents = stdin.Contents
loader = stdin.Loader
if loader == config.LoaderNone {
loader = config.LoaderJS
}
absResolveDir = args.options.Stdin.AbsResolveDir
} else {
result, ok := runOnLoadPlugins(
args.options.Plugins,
args.fs,
&args.caches.FSCache,
args.log,
&source,
args.importSource,
args.importPathRange,
args.pluginData,
args.options.WatchMode,
args.options.LogPathStyle,
)
if !ok {
if args.inject != nil {
args.inject <- config.InjectedFile{
Source: source,
}
}
args.results <- parseResult{}
return
}
loader = result.loader
absResolveDir = result.absResolveDir
pluginName = result.pluginName
pluginData = result.pluginData
}
_, base, ext := logger.PlatformIndependentPathDirBaseExt(source.KeyPath.Text)
// The special "default" loader determines the loader from the file path
if loader == config.LoaderDefault {
loader = config.LoaderFromFileExtension(args.options.ExtensionToLoader, base+ext)
}
// Reject unsupported import attributes when the loader isn't "copy" (since
// "copy" is kind of like "external"). But only do this if this file was not
// loaded by a plugin. Plugins are allowed to assign whatever semantics they
// want to import attributes.
if loader != config.LoaderCopy && pluginName == "" {
for _, attr := range source.KeyPath.ImportAttributes.DecodeIntoArray() {
var errorText string
var errorRange js_lexer.KeyOrValue
// We only currently handle "type: json" and "type: bytes"
if attr.Key != "type" {
errorText = fmt.Sprintf("Importing with the %q attribute is not supported", attr.Key)
errorRange = js_lexer.KeyRange
} else if attr.Value == "json" {
loader = config.LoaderWithTypeJSON
continue
} else if attr.Value == "bytes" {
loader = config.LoaderBinary
continue
} else {
errorText = fmt.Sprintf("Importing with a type attribute of %q is not supported", attr.Value)
errorRange = js_lexer.ValueRange
}
// Everything else is an error
r := args.importPathRange
if args.importWith != nil {
r = js_lexer.RangeOfImportAssertOrWith(*args.importSource, *ast.FindAssertOrWithEntry(args.importWith.Entries, attr.Key), errorRange)
}
tracker := logger.MakeLineColumnTracker(args.importSource)
args.log.AddError(&tracker, r, errorText)
if args.inject != nil {
args.inject <- config.InjectedFile{
Source: source,
}
}
args.results <- parseResult{}
return
}
}
if loader == config.LoaderEmpty {
source.Contents = ""
}
result := parseResult{
file: scannerFile{
inputFile: graph.InputFile{
Source: source,
Loader: loader,
SideEffects: args.sideEffects,
},
pluginData: pluginData,
},
}
defer func() {
r := recover()
if r != nil {
args.log.AddErrorWithNotes(nil, logger.Range{},
fmt.Sprintf("panic: %v (while parsing %q)", r, source.PrettyPaths.Select(args.options.LogPathStyle)),
[]logger.MsgData{{Text: helpers.PrettyPrintedStack()}})
args.results <- result
}
}()
switch loader {
case config.LoaderJS, config.LoaderEmpty:
ast, ok := args.caches.JSCache.Parse(args.log, source, js_parser.OptionsFromConfig(&args.options))
if len(ast.Parts) <= 1 { // Ignore the implicitly-generated namespace export part
result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_EmptyAST
}
result.file.inputFile.Repr = &graph.JSRepr{AST: ast}
result.ok = ok
case config.LoaderJSX:
args.options.JSX.Parse = true
ast, ok := args.caches.JSCache.Parse(args.log, source, js_parser.OptionsFromConfig(&args.options))
if len(ast.Parts) <= 1 { // Ignore the implicitly-generated namespace export part
result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_EmptyAST
}
result.file.inputFile.Repr = &graph.JSRepr{AST: ast}
result.ok = ok
case config.LoaderTS, config.LoaderTSNoAmbiguousLessThan:
args.options.TS.Parse = true
args.options.TS.NoAmbiguousLessThan = loader == config.LoaderTSNoAmbiguousLessThan
ast, ok := args.caches.JSCache.Parse(args.log, source, js_parser.OptionsFromConfig(&args.options))
if len(ast.Parts) <= 1 { // Ignore the implicitly-generated namespace export part
result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_EmptyAST
}
result.file.inputFile.Repr = &graph.JSRepr{AST: ast}
result.ok = ok
case config.LoaderTSX:
args.options.TS.Parse = true
args.options.JSX.Parse = true
ast, ok := args.caches.JSCache.Parse(args.log, source, js_parser.OptionsFromConfig(&args.options))
if len(ast.Parts) <= 1 { // Ignore the implicitly-generated namespace export part
result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_EmptyAST
}
result.file.inputFile.Repr = &graph.JSRepr{AST: ast}
result.ok = ok
case config.LoaderCSS, config.LoaderGlobalCSS, config.LoaderLocalCSS:
ast := args.caches.CSSCache.Parse(args.log, source, css_parser.OptionsFromConfig(loader, &args.options))
result.file.inputFile.Repr = &graph.CSSRepr{AST: ast}
result.ok = true
case config.LoaderJSON, config.LoaderWithTypeJSON:
expr, ok := args.caches.JSONCache.Parse(args.log, source, js_parser.JSONOptions{
UnsupportedJSFeatures: args.options.UnsupportedJSFeatures,
})
ast := js_parser.LazyExportAST(args.log, source, js_parser.OptionsFromConfig(&args.options), expr, nil)
if loader == config.LoaderWithTypeJSON {
// The exports kind defaults to "none", in which case the linker picks
// either ESM or CommonJS depending on the situation. Dynamic imports
// causes the linker to pick CommonJS which uses "require()" and then
// converts the return value to ESM, which adds extra properties that
// aren't supposed to be there when "{ with: { type: 'json' } }" is
// present. So if there's an import attribute, we force the type to
// be ESM to avoid this.
ast.ExportsKind = js_ast.ExportsESM
}
if pluginName != "" {
result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_PureData_FromPlugin
} else {
result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_PureData
}
result.file.inputFile.Repr = &graph.JSRepr{AST: ast}
result.ok = ok
case config.LoaderText:
source.Contents = strings.TrimPrefix(source.Contents, "\xEF\xBB\xBF") // Strip any UTF-8 BOM from the text
encoded := base64.StdEncoding.EncodeToString([]byte(source.Contents))
expr := js_ast.Expr{Data: &js_ast.EString{Value: helpers.StringToUTF16(source.Contents)}}
ast := js_parser.LazyExportAST(args.log, source, js_parser.OptionsFromConfig(&args.options), expr, nil)
ast.URLForCSS = "data:text/plain;base64," + encoded
if pluginName != "" {
result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_PureData_FromPlugin
} else {
result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_PureData
}
result.file.inputFile.Repr = &graph.JSRepr{AST: ast}
result.ok = true
case config.LoaderBase64:
mimeType := guessMimeType(ext, source.Contents)
encoded := base64.StdEncoding.EncodeToString([]byte(source.Contents))
expr := js_ast.Expr{Data: &js_ast.EString{Value: helpers.StringToUTF16(encoded)}}
ast := js_parser.LazyExportAST(args.log, source, js_parser.OptionsFromConfig(&args.options), expr, nil)
ast.URLForCSS = "data:" + mimeType + ";base64," + encoded
if pluginName != "" {
result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_PureData_FromPlugin
} else {
result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_PureData
}
result.file.inputFile.Repr = &graph.JSRepr{AST: ast}
result.ok = true
case config.LoaderBinary:
encoded := base64.StdEncoding.EncodeToString([]byte(source.Contents))
expr := js_ast.Expr{Data: &js_ast.EString{Value: helpers.StringToUTF16(encoded)}}
var helper *js_parser.HelperCall
if args.options.UnsupportedJSFeatures.Has(compat.FromBase64) {
if args.options.Platform == config.PlatformNode {
helper = &js_parser.HelperCall{Runtime: "__toBinaryNode"}
} else {
helper = &js_parser.HelperCall{Runtime: "__toBinary"}
}
} else {
helper = &js_parser.HelperCall{Global: []string{"Uint8Array", "fromBase64"}}
}
ast := js_parser.LazyExportAST(args.log, source, js_parser.OptionsFromConfig(&args.options), expr, helper)
ast.URLForCSS = "data:application/octet-stream;base64," + encoded
if pluginName != "" {
result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_PureData_FromPlugin
} else {
result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_PureData
}
result.file.inputFile.Repr = &graph.JSRepr{AST: ast}
result.ok = true
case config.LoaderDataURL:
mimeType := guessMimeType(ext, source.Contents)
url := helpers.EncodeStringAsShortestDataURL(mimeType, source.Contents)
expr := js_ast.Expr{Data: &js_ast.EString{Value: helpers.StringToUTF16(url)}}
ast := js_parser.LazyExportAST(args.log, source, js_parser.OptionsFromConfig(&args.options), expr, nil)
ast.URLForCSS = url
if pluginName != "" {
result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_PureData_FromPlugin
} else {
result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_PureData
}
result.file.inputFile.Repr = &graph.JSRepr{AST: ast}
result.ok = true
case config.LoaderFile:
uniqueKey := fmt.Sprintf("%sA%08d", args.uniqueKeyPrefix, args.sourceIndex)
uniqueKeyPath := uniqueKey + source.KeyPath.IgnoredSuffix
expr := js_ast.Expr{Data: &js_ast.EString{
Value: helpers.StringToUTF16(uniqueKeyPath),
ContainsUniqueKey: true,
}}
ast := js_parser.LazyExportAST(args.log, source, js_parser.OptionsFromConfig(&args.options), expr, nil)
ast.URLForCSS = uniqueKeyPath
if pluginName != "" {
result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_PureData_FromPlugin
} else {
result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_PureData
}
result.file.inputFile.Repr = &graph.JSRepr{AST: ast}
result.ok = true
// Mark that this file is from the "file" loader
result.file.inputFile.UniqueKeyForAdditionalFile = uniqueKey
case config.LoaderCopy:
uniqueKey := fmt.Sprintf("%sA%08d", args.uniqueKeyPrefix, args.sourceIndex)
uniqueKeyPath := uniqueKey + source.KeyPath.IgnoredSuffix
result.file.inputFile.Repr = &graph.CopyRepr{
URLForCode: uniqueKeyPath,
}
result.ok = true
// Mark that this file is from the "copy" loader
result.file.inputFile.UniqueKeyForAdditionalFile = uniqueKey
default:
var message string
if source.KeyPath.Namespace == "file" && ext != "" {
message = fmt.Sprintf("No loader is configured for %q files: %s", ext, source.PrettyPaths.Select(args.options.LogPathStyle))
} else {
message = fmt.Sprintf("Do not know how to load path: %s", source.PrettyPaths.Select(args.options.LogPathStyle))
}
tracker := logger.MakeLineColumnTracker(args.importSource)
args.log.AddError(&tracker, args.importPathRange, message)
}
// Only continue now if parsing was successful
if result.ok {
// Run the resolver on the parse thread so it's not run on the main thread.
// That way the main thread isn't blocked if the resolver takes a while.
if recordsPtr := result.file.inputFile.Repr.ImportRecords(); args.options.Mode == config.ModeBundle && !args.skipResolve && recordsPtr != nil {
// Clone the import records because they will be mutated later
records := append([]ast.ImportRecord{}, *recordsPtr...)
*recordsPtr = records
result.resolveResults = make([]*resolver.ResolveResult, len(records))
if len(records) > 0 {
type cacheEntry struct {
resolveResult *resolver.ResolveResult
debug resolver.DebugMeta
didLogError bool
}
type cacheKey struct {
kind ast.ImportKind
path string
attrs logger.ImportAttributes
}
resolverCache := make(map[cacheKey]cacheEntry)
tracker := logger.MakeLineColumnTracker(&source)
for importRecordIndex := range records {
// Don't try to resolve imports that are already resolved
record := &records[importRecordIndex]
if record.SourceIndex.IsValid() {
continue
}
// Encode the import attributes
var attrs logger.ImportAttributes
if record.AssertOrWith != nil && record.AssertOrWith.Keyword == ast.WithKeyword {
data := make(map[string]string, len(record.AssertOrWith.Entries))
for _, entry := range record.AssertOrWith.Entries {
data[helpers.UTF16ToString(entry.Key)] = helpers.UTF16ToString(entry.Value)
}
attrs = logger.EncodeImportAttributes(data)
}
// Special-case glob pattern imports
if record.GlobPattern != nil {
prettyPath := helpers.GlobPatternToString(record.GlobPattern.Parts)
phase := ""
switch record.Phase {
case ast.DeferPhase:
phase = ".defer"
case ast.SourcePhase:
phase = ".source"
}
switch record.GlobPattern.Kind {
case ast.ImportRequire:
prettyPath = fmt.Sprintf("require%s(%q)", phase, prettyPath)
case ast.ImportDynamic:
prettyPath = fmt.Sprintf("import%s(%q)", phase, prettyPath)
}
if results, msg := args.res.ResolveGlob(absResolveDir, record.GlobPattern.Parts, record.GlobPattern.Kind, prettyPath); results != nil {
if msg != nil {
args.log.AddID(msg.ID, msg.Kind, &tracker, record.Range, msg.Data.Text)
}
if result.globResolveResults == nil {
result.globResolveResults = make(map[uint32]globResolveResult)
}
allAreExternal := true
for key, result := range results {
if !result.PathPair.IsExternal {
allAreExternal = false
}
result.PathPair.Primary.ImportAttributes = attrs
if result.PathPair.HasSecondary() {
result.PathPair.Secondary.ImportAttributes = attrs
}
results[key] = result
}
result.globResolveResults[uint32(importRecordIndex)] = globResolveResult{
resolveResults: results,
absPath: args.fs.Join(absResolveDir, "(glob)"),
prettyPaths: logger.PrettyPaths{
Abs: fmt.Sprintf("%s in %s", prettyPath, result.file.inputFile.Source.PrettyPaths.Abs),
Rel: fmt.Sprintf("%s in %s", prettyPath, result.file.inputFile.Source.PrettyPaths.Rel),
},
exportAlias: record.GlobPattern.ExportAlias,
}
// Forbid bundling of imports with explicit phases
if record.Phase != ast.EvaluationPhase {
reportExplicitPhaseImport(args.log, &tracker, record.Range,
record.Phase, allAreExternal, args.options.OutputFormat)
}
} else {
args.log.AddError(&tracker, record.Range, fmt.Sprintf("Could not resolve %s", prettyPath))
}
continue
}
// Ignore records that the parser has discarded. This is used to remove
// type-only imports in TypeScript files.
if record.Flags.Has(ast.IsUnused) {
continue
}
// Cache the path in case it's imported multiple times in this file
cacheKey := cacheKey{
kind: record.Kind,
path: record.Path.Text,
attrs: attrs,
}
entry, ok := resolverCache[cacheKey]
if ok {
result.resolveResults[importRecordIndex] = entry.resolveResult
} else {
// Run the resolver and log an error if the path couldn't be resolved
resolveResult, didLogError, debug := RunOnResolvePlugins(
args.options.Plugins,
args.res,
args.log,
args.fs,
&args.caches.FSCache,
&source,
record.Range,
source.KeyPath,
record.Path.Text,
attrs,
record.Kind,
absResolveDir,
pluginData,
args.options.LogPathStyle,
)
if resolveResult != nil {
resolveResult.PathPair.Primary.ImportAttributes = attrs
if resolveResult.PathPair.HasSecondary() {
resolveResult.PathPair.Secondary.ImportAttributes = attrs
}
}
entry = cacheEntry{
resolveResult: resolveResult,
debug: debug,
didLogError: didLogError,
}
resolverCache[cacheKey] = entry
// All "require.resolve()" imports should be external because we don't
// want to waste effort traversing into them
if record.Kind == ast.ImportRequireResolve {
if resolveResult != nil && resolveResult.PathPair.IsExternal {
// Allow path substitution as long as the result is external
result.resolveResults[importRecordIndex] = resolveResult
} else if !record.Flags.Has(ast.HandlesImportErrors) {
args.log.AddID(logger.MsgID_Bundler_RequireResolveNotExternal, logger.Warning, &tracker, record.Range,
fmt.Sprintf("%q should be marked as external for use with \"require.resolve\"", record.Path.Text))
}
continue
}
}
// Check whether we should log an error every time the result is nil,
// even if it's from the cache. Do this because the error may not
// have been logged for nil entries if the previous instances had
// the "HandlesImportErrors" flag.
if entry.resolveResult == nil {
// Failed imports inside a try/catch are silently turned into
// external imports instead of causing errors. This matches a common
// code pattern for conditionally importing a module with a graceful
// fallback.
if !entry.didLogError && !record.Flags.Has(ast.HandlesImportErrors) {
// Report an error
text, suggestion, notes := ResolveFailureErrorTextSuggestionNotes(
args.res, record.Path.Text, record.Kind, pluginName, args.fs, absResolveDir, args.options.Platform,
source.PrettyPaths, entry.debug.ModifiedImportPath, args.options.LogPathStyle)
entry.debug.LogErrorMsg(args.log, &source, record.Range, text, suggestion, notes)
// Only report this error once per unique import path in the file
entry.didLogError = true
resolverCache[cacheKey] = entry
} else if !entry.didLogError && record.Flags.Has(ast.HandlesImportErrors) {
// Report a debug message about why there was no error
args.log.AddIDWithNotes(logger.MsgID_Bundler_IgnoredDynamicImport, logger.Debug, &tracker, record.Range,
fmt.Sprintf("Importing %q was allowed even though it could not be resolved because dynamic import failures appear to be handled here:",
record.Path.Text), []logger.MsgData{tracker.MsgData(js_lexer.RangeOfIdentifier(source, record.ErrorHandlerLoc),
"The handler for dynamic import failures is here:")})
}
continue
}
// Forbid bundling of imports with explicit phases
if record.Phase != ast.EvaluationPhase {
reportExplicitPhaseImport(args.log, &tracker, record.Range,
record.Phase, entry.resolveResult.PathPair.IsExternal, args.options.OutputFormat)
}
result.resolveResults[importRecordIndex] = entry.resolveResult
}
}
}
// Attempt to parse the source map if present
if loader.CanHaveSourceMap() && args.options.SourceMap != config.SourceMapNone {
var sourceMapComment logger.Span
switch repr := result.file.inputFile.Repr.(type) {
case *graph.JSRepr:
sourceMapComment = repr.AST.SourceMapComment
case *graph.CSSRepr:
sourceMapComment = repr.AST.SourceMapComment
}
if sourceMapComment.Text != "" {
tracker := logger.MakeLineColumnTracker(&source)
if path, contents := extractSourceMapFromComment(args.log, args.fs, &args.caches.FSCache,
&source, &tracker, sourceMapComment, absResolveDir, args.options.LogPathStyle); contents != nil {
prettyPaths := resolver.MakePrettyPaths(args.fs, path)
log := logger.NewDeferLog(logger.DeferLogNoVerboseOrDebug, args.log.Overrides)
sourceMap := js_parser.ParseSourceMap(log, logger.Source{
KeyPath: path,
PrettyPaths: prettyPaths,
Contents: *contents,
})
if msgs := log.Done(); len(msgs) > 0 {
var text string
if path.Namespace == "file" {
text = fmt.Sprintf("The source map %q was referenced by the file %q here:",
prettyPaths.Select(args.options.LogPathStyle),
args.prettyPaths.Select(args.options.LogPathStyle))
} else {
text = fmt.Sprintf("This source map came from the file %q here:",
args.prettyPaths.Select(args.options.LogPathStyle))
}
note := tracker.MsgData(sourceMapComment.Range, text)
for _, msg := range msgs {
msg.Notes = append(msg.Notes, note)
args.log.AddMsg(msg)
}
}
// If "sourcesContent" entries aren't present, try filling them in
// using the file system. This includes both generating the entire
// "sourcesContent" array if it's absent as well as filling in
// individual null entries in the array if the array is present.
if sourceMap != nil && !args.options.ExcludeSourcesContent {
// Make sure "sourcesContent" is big enough
if len(sourceMap.SourcesContent) < len(sourceMap.Sources) {
slice := make([]sourcemap.SourceContent, len(sourceMap.Sources))
copy(slice, sourceMap.SourcesContent)
sourceMap.SourcesContent = slice
}
for i, source := range sourceMap.Sources {
// Convert absolute paths to "file://" URLs, which is especially important
// for Windows where file paths don't look like URLs at all (they use "\"
// as a path separator and start with a "C:\" volume label instead of "/").
//
// The new source map specification (https://tc39.es/ecma426/) says that
// each source is "a string that is a (potentially relative) URL". So we
// should technically not be finding absolute paths here in the first place.
//
// However, for a long time source maps was poorly-specified. The old source
// map specification (https://sourcemaps.info/spec.html) only says "sources"
// is "a list of original sources used by the mappings entry" which could
// be anything, really.
//
// So it makes sense that software which predates the formal specification
// of source maps might fill in the sources array with absolute file paths
// instead of URLs. Here are some cases where that happened:
//
// - https://github.com/mozilla/source-map/issues/355
// - https://github.com/webpack/webpack/issues/8226
//
if path.Namespace == "file" && args.fs.IsAbs(source) {
source = helpers.FileURLFromFilePath(source).String()
sourceMap.Sources[i] = source
}
// Attempt to fill in null entries using the file system
if sourceMap.SourcesContent[i].Value == nil {
if sourceURL, err := url.Parse(source); err == nil && helpers.IsFileURL(sourceURL) {
if contents, err, _ := args.caches.FSCache.ReadFile(args.fs, helpers.FilePathFromFileURL(args.fs, sourceURL)); err == nil {
sourceMap.SourcesContent[i].Value = helpers.StringToUTF16(contents)
}
}
}
}
}
result.file.inputFile.InputSourceMap = sourceMap
}
}
}
}
// Note: We must always send on the "inject" channel before we send on the
// "results" channel to avoid deadlock
if args.inject != nil {
var exports []config.InjectableExport
if repr, ok := result.file.inputFile.Repr.(*graph.JSRepr); ok {
aliases := make([]string, 0, len(repr.AST.NamedExports))
for alias := range repr.AST.NamedExports {
aliases = append(aliases, alias)
}
sort.Strings(aliases) // Sort for determinism
exports = make([]config.InjectableExport, len(aliases))
for i, alias := range aliases {
exports[i] = config.InjectableExport{
Alias: alias,
Loc: repr.AST.NamedExports[alias].AliasLoc,
}
}
}
// Once we send on the "inject" channel, the main thread may mutate the
// "options" object to populate the "InjectedFiles" field. So we must
// only send on the "inject" channel after we're done using the "options"
// object so we don't introduce a data race.
isCopyLoader := loader == config.LoaderCopy
if isCopyLoader && args.skipResolve {
// This is not allowed because the import path would have to be rewritten,
// but import paths are not rewritten when bundling isn't enabled.
args.log.AddError(nil, logger.Range{},
fmt.Sprintf("Cannot inject %q with the \"copy\" loader without bundling enabled",
source.PrettyPaths.Select(args.options.LogPathStyle)))
}
args.inject <- config.InjectedFile{
Source: source,
Exports: exports,
IsCopyLoader: isCopyLoader,
}
}
args.results <- result
}
// reportExplicitPhaseImport logs an error when a "defer" or "source" phase
// import can't be bundled. Phased imports require the ESM output format and
// must be left external; anything else is reported as an error.
func reportExplicitPhaseImport(
	log logger.Log,
	tracker *logger.LineColumnTracker,
	r logger.Range,
	phase ast.ImportPhase,
	isExternal bool,
	format config.Format,
) {
	var phaseText string
	switch phase {
	case ast.DeferPhase:
		phaseText = "deferred"
	case ast.SourcePhase:
		phaseText = "source phase"
	default:
		// Normal evaluation-phase imports are always allowed
		return
	}

	// Phased imports only exist in ESM, so other output formats can't express them
	if format != config.FormatESModule {
		log.AddError(tracker, r, fmt.Sprintf("Bundling %s imports with the %q output format is not supported", phaseText, format.String()))
		return
	}

	// Even with ESM output, the phased import must be preserved (i.e. external)
	if !isExternal {
		log.AddError(tracker, r, fmt.Sprintf("Bundling with %s imports is not supported unless they are external", phaseText))
	}
}
func ResolveFailureErrorTextSuggestionNotes(
res *resolver.Resolver,
path string,
kind ast.ImportKind,
pluginName string,
fs fs.FS,
absResolveDir string,
platform config.Platform,
originatingFilePaths logger.PrettyPaths,
modifiedImportPath string,
logPathStyle logger.PathStyle,
) (text string, suggestion string, notes []logger.MsgData) {
if modifiedImportPath != "" {
text = fmt.Sprintf("Could not resolve %q (originally %q)", modifiedImportPath, path)
notes = append(notes, logger.MsgData{Text: fmt.Sprintf(
"The path %q was remapped to %q using the alias feature, which then couldn't be resolved. "+
"Keep in mind that import path aliases are resolved in the current working directory.",
path, modifiedImportPath)})
path = modifiedImportPath
} else {
text = fmt.Sprintf("Could not resolve %q", path)
}
hint := ""
if resolver.IsPackagePath(path) && !fs.IsAbs(path) {
hint = fmt.Sprintf("You can mark the path %q as external to exclude it from the bundle, which will remove this error and leave the unresolved path in the bundle.", path)
if kind == ast.ImportRequire {
hint += " You can also surround this \"require\" call with a try/catch block to handle this failure at run-time instead of bundle-time."
} else if kind == ast.ImportDynamic {
hint += " You can also add \".catch()\" here to handle this failure at run-time instead of bundle-time."
}
if pluginName == "" && !fs.IsAbs(path) {
if query, _ := res.ProbeResolvePackageAsRelative(absResolveDir, path, kind); query != nil {
prettyPaths := resolver.MakePrettyPaths(fs, query.PathPair.Primary)
hint = fmt.Sprintf("Use the relative path %q to reference the file %q. "+
"Without the leading \"./\", the path %q is being interpreted as a package path instead.",
"./"+path, prettyPaths.Select(logPathStyle), path)
suggestion = string(helpers.QuoteForJSON("./"+path, false))
}
}
}
if platform != config.PlatformNode {
pkg := strings.TrimPrefix(path, "node:")
if resolver.BuiltInNodeModules[pkg] {
var how string
switch logger.API {
case logger.CLIAPI:
how = "--platform=node"
case logger.JSAPI:
how = "platform: 'node'"
case logger.GoAPI:
how = "Platform: api.PlatformNode"
}
hint = fmt.Sprintf("The package %q wasn't found on the file system but is built into node. "+
"Are you trying to bundle for node? You can use %q to do that, which will remove this error.", path, how)
}
}
if absResolveDir == "" && pluginName != "" {
where := ""
if originatingFilePaths != (logger.PrettyPaths{}) {
where = fmt.Sprintf(" for the file %q", originatingFilePaths.Select(logPathStyle))
}
hint = fmt.Sprintf("The plugin %q didn't set a resolve directory%s, "+
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | true |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/renamer/renamer.go | internal/renamer/renamer.go | package renamer
import (
"fmt"
"sort"
"strconv"
"sync"
"sync/atomic"
"github.com/evanw/esbuild/internal/ast"
"github.com/evanw/esbuild/internal/js_ast"
"github.com/evanw/esbuild/internal/js_lexer"
)
// ComputeReservedNames returns the set of names a renamer must never hand
// out: every keyword, every strict mode reserved word, and the original name
// of every unbound (or otherwise unrenamable) symbol in the module scopes.
func ComputeReservedNames(moduleScopes []*js_ast.Scope, symbols ast.SymbolMap) map[string]uint32 {
	names := make(map[string]uint32)

	// Language-level reserved words can never be used as identifiers
	for keyword := range js_lexer.Keywords {
		names[keyword] = 1
	}
	for word := range js_lexer.StrictModeReservedWords {
		names[word] = 1
	}

	// Symbols that refer to things outside the bundle must keep their names
	for _, moduleScope := range moduleScopes {
		computeReservedNamesForScope(moduleScope, symbols, names)
	}
	return names
}
// computeReservedNamesForScope adds the original names of all unbound or
// unrenamable symbols in this scope to "names". If the scope contains a
// direct "eval", nested scopes on the path to it are scanned too, since
// "eval" can observe any name visible where it runs.
func computeReservedNamesForScope(scope *js_ast.Scope, symbols ast.SymbolMap, names map[string]uint32) {
	// A name is reserved if the symbol refers to something outside the bundle
	// ("unbound") or has been explicitly pinned in place. This check applies
	// identically to both declared members and generated symbols.
	reserveIfNeeded := func(ref ast.Ref) {
		symbol := symbols.Get(ref)
		if symbol.Kind == ast.SymbolUnbound || symbol.Flags.Has(ast.MustNotBeRenamed) {
			names[symbol.OriginalName] = 1
		}
	}
	for _, member := range scope.Members {
		reserveIfNeeded(member.Ref)
	}
	for _, ref := range scope.Generated {
		reserveIfNeeded(ref)
	}

	// If there's a direct "eval" somewhere inside the current scope, continue
	// traversing down the scope tree until we find it to get all reserved names
	if scope.ContainsDirectEval {
		for _, child := range scope.Children {
			if child.ContainsDirectEval {
				computeReservedNamesForScope(child, symbols, names)
			}
		}
	}
}
// Renamer maps symbol references to the final names that should appear in
// the generated output.
type Renamer interface {
	NameForSymbol(ref ast.Ref) string
}
////////////////////////////////////////////////////////////////////////////////
// noOpRenamer
// noOpRenamer implements Renamer without doing any renaming: every symbol
// keeps its original name.
type noOpRenamer struct {
	symbols ast.SymbolMap
}
// NewNoOpRenamer creates a Renamer that leaves every symbol name untouched.
func NewNoOpRenamer(symbols ast.SymbolMap) Renamer {
	return &noOpRenamer{symbols: symbols}
}
// NameForSymbol resolves any symbol links, then reports the unmodified
// original name.
func (r *noOpRenamer) NameForSymbol(ref ast.Ref) string {
	return r.symbols.Get(ast.FollowSymbols(r.symbols, ref)).OriginalName
}
////////////////////////////////////////////////////////////////////////////////
// MinifyRenamer
// symbolSlot is one assignable name within a slot namespace. Use counts are
// accumulated from multiple goroutines, so the numeric fields are updated
// atomically (see AccumulateSymbolCount).
type symbolSlot struct {
	name               string
	count              uint32
	needsCapitalForJSX uint32 // This is really a bool but needs to be atomic
}
// MinifyRenamer assigns the shortest possible names to the most frequently
// used symbols. Slots are tracked separately for each of the 4 slot
// namespaces (indexed by the value of ast.Symbol.SlotNamespace()).
type MinifyRenamer struct {
	reservedNames        map[string]uint32  // Names that must never be generated
	slots                [4][]symbolSlot    // One slot list per slot namespace
	topLevelSymbolToSlot map[ast.Ref]uint32 // Slot index for each top-level symbol
	symbols              ast.SymbolMap
}
// NewMinifyRenamer creates a MinifyRenamer with each namespace's slot list
// pre-allocated to hold "firstTopLevelSlots" entries.
func NewMinifyRenamer(symbols ast.SymbolMap, firstTopLevelSlots ast.SlotCounts, reservedNames map[string]uint32) *MinifyRenamer {
	r := &MinifyRenamer{
		symbols:              symbols,
		reservedNames:        reservedNames,
		topLevelSymbolToSlot: make(map[ast.Ref]uint32),
	}
	for ns, count := range firstTopLevelSlots {
		r.slots[ns] = make([]symbolSlot, count)
	}
	return r
}
// NameForSymbol returns the minified name assigned to this symbol's slot, or
// the symbol's original name if it's pinned or never had a slot allocated.
func (r *MinifyRenamer) NameForSymbol(ref ast.Ref) string {
	// Follow links to get to the underlying symbol
	ref = ast.FollowSymbols(r.symbols, ref)
	symbol := r.symbols.Get(ref)

	// Skip this symbol if the name is pinned
	ns := symbol.SlotNamespace()
	if ns == ast.SlotMustNotBeRenamed {
		return symbol.OriginalName
	}

	// Check if it's a nested scope symbol
	i := symbol.NestedScopeSlot

	// If it's not (i.e. it's in a top-level scope), look up the slot
	if !i.IsValid() {
		index, ok := r.topLevelSymbolToSlot[ref]
		if !ok {
			// If we get here, then we're printing a symbol that never had any
			// recorded uses. This is odd but can happen in certain scenarios.
			// For example, code in a branch with dead control flow won't mark
			// any uses but may still be printed. In that case it doesn't matter
			// what name we use since it's dead code.
			return symbol.OriginalName
		}
		i = ast.MakeIndex32(index)
	}

	return r.slots[ns][i.GetIndex()].name
}
// The InnerIndex should be stable because the parser for a single file is
// single-threaded and deterministically assigns out InnerIndex values
// sequentially. But the SourceIndex should be unstable because the main thread
// assigns out source index values sequentially to newly-discovered dependencies
// in a multi-threaded producer/consumer relationship. So instead we use the
// index of the source in the DFS order over all entry points for stability.
type StableSymbolCount struct {
	StableSourceIndex uint32  // Index of the source in DFS order over entry points (stable; see above)
	Ref               ast.Ref // The top-level symbol whose uses were counted
	Count             uint32  // Accumulated use count for this symbol
}
// This type is just so we can use Go's native sort function
// StableSymbolCountArray exists so Go's native sort function can be used.
// The order is: most-used symbols first, with ties broken by stable source
// index and then by symbol inner index so the result is deterministic.
type StableSymbolCountArray []StableSymbolCount

func (a StableSymbolCountArray) Len() int          { return len(a) }
func (a StableSymbolCountArray) Swap(i int, j int) { a[i], a[j] = a[j], a[i] }

func (a StableSymbolCountArray) Less(i int, j int) bool {
	x, y := a[i], a[j]

	// Symbols with more uses sort first so they get the shortest names
	if x.Count != y.Count {
		return x.Count > y.Count
	}

	// Break ties deterministically: first by file, then by declaration order
	if x.StableSourceIndex != y.StableSourceIndex {
		return x.StableSourceIndex < y.StableSourceIndex
	}
	return x.Ref.InnerIndex < y.Ref.InnerIndex
}
// AccumulateSymbolUseCounts records the estimated use counts for all symbols
// used by one file by delegating each entry to AccumulateSymbolCount.
func (r *MinifyRenamer) AccumulateSymbolUseCounts(
	topLevelSymbols *StableSymbolCountArray,
	symbolUses map[ast.Ref]js_ast.SymbolUse,
	stableSourceIndices []uint32,
) {
	// NOTE: This function is run in parallel. Make sure to avoid data races.
	for symbolRef, use := range symbolUses {
		r.AccumulateSymbolCount(topLevelSymbols, symbolRef, use.CountEstimate, stableSourceIndices)
	}
}
// AccumulateSymbolCount adds "count" uses to the slot for one symbol.
// Nested-scope symbols are counted immediately using atomic operations;
// top-level symbols are appended to "topLevelSymbols" so they can be
// processed serially later (see AllocateTopLevelSymbolSlots).
func (r *MinifyRenamer) AccumulateSymbolCount(
	topLevelSymbols *StableSymbolCountArray,
	ref ast.Ref,
	count uint32,
	stableSourceIndices []uint32,
) {
	// NOTE: This function is run in parallel. Make sure to avoid data races.

	// Follow links to get to the underlying symbol
	ref = ast.FollowSymbols(r.symbols, ref)
	symbol := r.symbols.Get(ref)
	for symbol.NamespaceAlias != nil {
		// Property accesses off an import namespace count as uses of the
		// namespace symbol itself
		ref = ast.FollowSymbols(r.symbols, symbol.NamespaceAlias.NamespaceRef)
		symbol = r.symbols.Get(ref)
	}

	// Skip this symbol if the name is pinned
	ns := symbol.SlotNamespace()
	if ns == ast.SlotMustNotBeRenamed {
		return
	}

	// Check if it's a nested scope symbol
	if i := symbol.NestedScopeSlot; i.IsValid() {
		// If it is, accumulate the count using a parallel-safe atomic increment
		slot := &r.slots[ns][i.GetIndex()]
		atomic.AddUint32(&slot.count, count)
		if symbol.Flags.Has(ast.MustStartWithCapitalLetterForJSX) {
			atomic.StoreUint32(&slot.needsCapitalForJSX, 1)
		}
		return
	}

	// If it's a top-level symbol, defer it to later since we have
	// to allocate slots for these in serial instead of in parallel
	*topLevelSymbols = append(*topLevelSymbols, StableSymbolCount{
		StableSourceIndex: stableSourceIndices[ref.SourceIndex],
		Ref:               ref,
		Count:             count,
	})
}
// The parallel part of the symbol count accumulation algorithm above processes
// nested symbols and generates an array of top-level symbols to process later.
// After the parallel part has finished, that array of top-level symbols is passed
// to this function which processes them in serial.
// AllocateTopLevelSymbolSlots processes the top-level symbol counts gathered
// by the parallel accumulation phase, merging duplicate entries into their
// existing slot and allocating new slots for first-time symbols. Must run
// serially (it mutates shared slot state without synchronization).
func (r *MinifyRenamer) AllocateTopLevelSymbolSlots(topLevelSymbols StableSymbolCountArray) {
	for _, stable := range topLevelSymbols {
		symbol := r.symbols.Get(stable.Ref)
		slots := &r.slots[symbol.SlotNamespace()]

		needsCapitalForJSX := uint32(0)
		if symbol.Flags.Has(ast.MustStartWithCapitalLetterForJSX) {
			needsCapitalForJSX = 1
		}

		// Merge into the existing slot if this symbol was seen before
		if i, ok := r.topLevelSymbolToSlot[stable.Ref]; ok {
			slot := &(*slots)[i]
			slot.count += stable.Count
			if needsCapitalForJSX != 0 {
				slot.needsCapitalForJSX = 1
			}
			continue
		}

		// Otherwise allocate a fresh slot at the end of the namespace's list
		r.topLevelSymbolToSlot[stable.Ref] = uint32(len(*slots))
		*slots = append(*slots, symbolSlot{
			count:              stable.Count,
			needsCapitalForJSX: needsCapitalForJSX,
		})
	}
}
// AssignNamesByFrequency generates the actual minified names. Within each
// slot namespace, slots are sorted by descending use count and handed names
// in generator order so the most-used symbols get the shortest names.
// Unsuitable names (reserved words, lowercase names for JSX components) are
// skipped by advancing the generator until an acceptable name comes up.
func (r *MinifyRenamer) AssignNamesByFrequency(minifier *ast.NameMinifier) {
	for ns, slots := range r.slots {
		// Sort symbols by count
		sorted := make(slotAndCountArray, len(slots))
		for i, item := range slots {
			sorted[i] = slotAndCount{slot: uint32(i), count: item.count}
		}
		sort.Sort(sorted)

		// Assign names to symbols
		nextName := 0
		for _, data := range sorted {
			slot := &slots[data.slot]
			name := minifier.NumberToMinifiedName(nextName)
			nextName++

			// Make sure we never generate a reserved name. We only have to worry
			// about collisions with reserved identifiers for normal symbols, and we
			// only have to worry about collisions with keywords for labels. We do
			// not have to worry about either for private names because they start
			// with a "#" character.
			switch ast.SlotNamespace(ns) {
			case ast.SlotDefault:
				for r.reservedNames[name] != 0 {
					name = minifier.NumberToMinifiedName(nextName)
					nextName++
				}

				// Make sure names of symbols used in JSX elements start with a capital letter
				if slot.needsCapitalForJSX != 0 {
					for name[0] >= 'a' && name[0] <= 'z' {
						name = minifier.NumberToMinifiedName(nextName)
						nextName++
					}
				}

			case ast.SlotLabel:
				for js_lexer.Keywords[name] != 0 {
					name = minifier.NumberToMinifiedName(nextName)
					nextName++
				}
			}

			// Private names must be prefixed with "#"
			if ast.SlotNamespace(ns) == ast.SlotPrivateName {
				name = "#" + name
			}

			slot.name = name
		}
	}
}
// AssignNestedScopeSlots assigns namespace-relative slot indices to every
// renamable symbol declared in scopes nested inside the module scope, and
// returns the number of slots used in each namespace.
func AssignNestedScopeSlots(moduleScope *js_ast.Scope, symbols []ast.Symbol) (slotCounts ast.SlotCounts) {
	// Helper to stamp the same nested-scope-slot value onto every top-level
	// symbol (both declared members and generated symbols).
	setModuleScopeSlots := func(value ast.Index32) {
		for _, member := range moduleScope.Members {
			symbols[member.Ref.InnerIndex].NestedScopeSlot = value
		}
		for _, ref := range moduleScope.Generated {
			symbols[ref.InnerIndex].NestedScopeSlot = value
		}
	}

	// Temporarily mark top-level symbols with a valid slot so they aren't
	// renamed in nested scopes. This prevents us from accidentally assigning
	// nested scope slots to variables declared using "var" in a nested scope
	// that are actually hoisted up to the module scope to become top-level
	// symbols.
	setModuleScopeSlots(ast.MakeIndex32(1))

	// Assign nested scope slots independently for each nested scope
	for _, child := range moduleScope.Children {
		slotCounts.UnionMax(assignNestedScopeSlotsHelper(child, symbols, ast.SlotCounts{}))
	}

	// Then clear the nested scope slots of top-level symbols again. Top-level
	// symbols are not supposed to have nested scope slots.
	setModuleScopeSlots(ast.Index32{})
	return
}
// assignNestedScopeSlotsHelper assigns slot indices to the renamable symbols
// declared in "scope", continuing from the counts in "slot", then recurses
// into child scopes. Sibling scopes are each given the same starting counts
// (so their symbols can reuse the same slots), and the returned counts are
// the maximum reached over this scope and all of its descendants.
func assignNestedScopeSlotsHelper(scope *js_ast.Scope, symbols []ast.Symbol, slot ast.SlotCounts) ast.SlotCounts {
	// Sort member map keys for determinism
	sortedMembers := make([]int, 0, len(scope.Members))
	for _, member := range scope.Members {
		sortedMembers = append(sortedMembers, int(member.Ref.InnerIndex))
	}
	sort.Ints(sortedMembers)

	// Assign slots for this scope's symbols. Only do this if the slot is
	// not already assigned. Nested scopes have copies of symbols from parent
	// scopes and we want to use the slot from the parent scope, not child scopes.
	for _, innerIndex := range sortedMembers {
		symbol := &symbols[innerIndex]
		if ns := symbol.SlotNamespace(); ns != ast.SlotMustNotBeRenamed && !symbol.NestedScopeSlot.IsValid() {
			symbol.NestedScopeSlot = ast.MakeIndex32(slot[ns])
			slot[ns]++
		}
	}
	for _, ref := range scope.Generated {
		symbol := &symbols[ref.InnerIndex]
		if ns := symbol.SlotNamespace(); ns != ast.SlotMustNotBeRenamed && !symbol.NestedScopeSlot.IsValid() {
			symbol.NestedScopeSlot = ast.MakeIndex32(slot[ns])
			slot[ns]++
		}
	}

	// Labels are always declared in a nested scope, so we don't need to check.
	if scope.Label.Ref != ast.InvalidRef {
		symbol := &symbols[scope.Label.Ref.InnerIndex]
		symbol.NestedScopeSlot = ast.MakeIndex32(slot[ast.SlotLabel])
		slot[ast.SlotLabel]++
	}

	// Assign slots for the symbols of child scopes
	slotCounts := slot
	for _, child := range scope.Children {
		slotCounts.UnionMax(assignNestedScopeSlotsHelper(child, symbols, slot))
	}
	return slotCounts
}
// slotAndCount pairs a slot index with its accumulated use count so slots
// can be ordered by popularity.
type slotAndCount struct {
	slot  uint32
	count uint32
}

// slotAndCountArray implements sort.Interface: descending by count, with
// ties broken by ascending slot index for determinism.
type slotAndCountArray []slotAndCount

func (a slotAndCountArray) Len() int          { return len(a) }
func (a slotAndCountArray) Swap(i int, j int) { a[i], a[j] = a[j], a[i] }
func (a slotAndCountArray) Less(i int, j int) bool {
	x, y := a[i], a[j]
	if x.count != y.count {
		// More frequently used slots sort first
		return x.count > y.count
	}
	// Tie-break on slot index so the order is deterministic
	return x.slot < y.slot
}
////////////////////////////////////////////////////////////////////////////////
// NumberRenamer
// NumberRenamer assigns collision-free names without minifying them. A
// symbol keeps its original name where possible; collisions are resolved by
// numberScope.findUnusedName (presumably via a numeric suffix, hence the
// name — confirm in findUnusedName).
type NumberRenamer struct {
	symbols ast.SymbolMap
	root    numberScope
	names   [][]string // Assigned names indexed by [SourceIndex][InnerIndex]; "" means unassigned
}
// NewNumberRenamer creates a NumberRenamer whose root scope starts out
// pre-populated with "reservedNames" so those names are never chosen.
func NewNumberRenamer(symbols ast.SymbolMap, reservedNames map[string]uint32) *NumberRenamer {
	renamer := &NumberRenamer{
		symbols: symbols,
		root:    numberScope{nameCounts: reservedNames},
	}
	renamer.names = make([][]string, len(symbols.SymbolsForSource))
	return renamer
}
// NameForSymbol returns the name assigned to this symbol, falling back to
// the symbol's original name if it was never renamed.
func (r *NumberRenamer) NameForSymbol(ref ast.Ref) string {
	ref = ast.FollowSymbols(r.symbols, ref)

	// A missing per-file array or an empty string both mean "not renamed"
	perFile := r.names[ref.SourceIndex]
	if perFile != nil {
		if renamed := perFile[ref.InnerIndex]; renamed != "" {
			return renamed
		}
	}
	return r.symbols.Get(ref).OriginalName
}
// AddTopLevelSymbol assigns a name for the given symbol in the root
// (module-level) scope.
func (r *NumberRenamer) AddTopLevelSymbol(ref ast.Ref) {
	r.assignName(&r.root, ref)
}
// assignName picks a name for "ref" inside "scope" that doesn't collide with
// a name already used there (tracked by the numberScope chain) and records
// it in "r.names". A symbol is never renamed twice, and pinned/unbound
// symbols, labels, and other unrenamable namespaces are left alone.
func (r *NumberRenamer) assignName(scope *numberScope, ref ast.Ref) {
	ref = ast.FollowSymbols(r.symbols, ref)

	// Don't rename the same symbol more than once
	inner := r.names[ref.SourceIndex]
	if inner != nil && inner[ref.InnerIndex] != "" {
		return
	}

	// Don't rename unbound symbols, symbols marked as reserved names, labels, or private names
	symbol := r.symbols.Get(ref)
	ns := symbol.SlotNamespace()
	if ns != ast.SlotDefault && ns != ast.SlotPrivateName {
		return
	}

	// Make sure names of symbols used in JSX elements start with a capital letter
	originalName := symbol.OriginalName
	if symbol.Flags.Has(ast.MustStartWithCapitalLetterForJSX) {
		if first := rune(originalName[0]); first >= 'a' && first <= 'z' {
			originalName = fmt.Sprintf("%c%s", first+('A'-'a'), originalName[1:])
		}
	}

	// Compute a new name
	name := scope.findUnusedName(originalName, ns)

	// Store the new name
	if inner == nil {
		// Note: This should not be a data race even though this method is run from
		// multiple threads. The parallel part only looks at symbols defined in
		// nested scopes, and those can only ever be accessed from within the file.
		// References to those symbols should never spread across files.
		//
		// While we could avoid the data race by densely preallocating the entire
		// "names" array ahead of time, that will waste a lot more memory for
		// builds that make heavy use of code splitting and have many chunks. Doing
		// things lazily like this means we use less memory but still stay safe.
		inner = make([]string, len(r.symbols.SymbolsForSource[ref.SourceIndex]))
		r.names[ref.SourceIndex] = inner
	}
	inner[ref.InnerIndex] = name
}
// assignNamesInScope allocates a child numberScope under "parent" and assigns
// names to every member and generated symbol in "scope". The "sorted" slice
// is a caller-owned scratch buffer reused across calls to avoid allocations.
func (r *NumberRenamer) assignNamesInScope(scope *js_ast.Scope, sourceIndex uint32, parent *numberScope, sorted *[]int) *numberScope {
	s := &numberScope{parent: parent, nameCounts: make(map[string]uint32)}

	if len(scope.Members) > 0 {
		// Sort member map keys for determinism, reusing a shared memory buffer
		*sorted = (*sorted)[:0]
		for _, member := range scope.Members {
			*sorted = append(*sorted, int(member.Ref.InnerIndex))
		}
		sort.Ints(*sorted)

		// Rename all user-defined symbols in this scope
		for _, innerIndex := range *sorted {
			r.assignName(s, ast.Ref{SourceIndex: sourceIndex, InnerIndex: uint32(innerIndex)})
		}
	}

	// Also rename all generated symbols in this scope
	for _, ref := range scope.Generated {
		r.assignName(s, ref)
	}

	return s
}
// assignNamesRecursive assigns names to all symbols in "scope" and all of its
// descendants. Chains of singly-nested scopes are walked with iteration
// rather than recursion, and a numberScope level is only allocated for scopes
// that actually declare symbols (see the comments below for why).
func (r *NumberRenamer) assignNamesRecursive(scope *js_ast.Scope, sourceIndex uint32, parent *numberScope, sorted *[]int) {
	// For performance in extreme cases (e.g. 10,000 nested scopes), traversing
	// through singly-nested scopes uses iteration instead of recursion
	for {
		if len(scope.Members) > 0 || len(scope.Generated) > 0 {
			// For performance in extreme cases (e.g. 10,000 nested scopes), only
			// allocate a scope when it's necessary. I'm not quite sure why allocating
			// one scope per level is so much overhead. It's not that many objects.
			// Or at least there are already that many objects for the AST that we're
			// traversing, so I don't know why 80% of the time in these extreme cases
			// is taken by this function (if we don't avoid this allocation).
			parent = r.assignNamesInScope(scope, sourceIndex, parent, sorted)
		}
		if children := scope.Children; len(children) == 1 {
			scope = children[0]
		} else {
			break
		}
	}

	// Symbols in child scopes may also have to be renamed to avoid conflicts
	for _, child := range scope.Children {
		r.assignNamesRecursive(child, sourceIndex, parent, sorted)
	}
}
// AssignNamesByScope assigns names to the symbols in all of the given nested
// scopes. Scopes from different source files are processed on separate
// goroutines (see the safety note in "assignName" for why this is sound).
func (r *NumberRenamer) AssignNamesByScope(nestedScopes map[uint32][]*js_ast.Scope) {
	var waitGroup sync.WaitGroup
	waitGroup.Add(len(nestedScopes))

	// Rename nested scopes from separate files in parallel
	for sourceIndex, scopes := range nestedScopes {
		go func(sourceIndex uint32, scopes []*js_ast.Scope) {
			defer waitGroup.Done()
			var scratch []int
			for _, scope := range scopes {
				r.assignNamesRecursive(scope, sourceIndex, &r.root, &scratch)
			}
		}(sourceIndex, scopes)
	}

	waitGroup.Wait()
}
// numberScope is one level in a chain of scopes used to detect name
// collisions. Lookups walk the "parent" chain up toward the root scope.
type numberScope struct {
	parent *numberScope

	// This is used as a set of used names in this scope. This also maps the name
	// to the number of times the name has experienced a collision. When a name
	// collides with an already-used name, we need to rename it. This is done by
	// incrementing a number at the end until the name is unused. We save the
	// count here so that subsequent collisions can start counting from where the
	// previous collision ended instead of having to start counting from 1.
	nameCounts map[string]uint32
}
// nameUse describes where (if anywhere) a candidate name is already taken.
type nameUse uint8

const (
	nameUnused          nameUse = iota // not found in this scope or any ancestor
	nameUsed                           // found in an ancestor scope
	nameUsedInSameScope                // found directly in the queried scope
)
// findNameUse reports whether "name" is taken in this scope itself, in one of
// its ancestor scopes, or not at all.
func (s *numberScope) findNameUse(name string) nameUse {
	for scope := s; scope != nil; scope = scope.parent {
		if _, ok := scope.nameCounts[name]; ok {
			if scope == s {
				return nameUsedInSameScope
			}
			return nameUsed
		}
	}
	return nameUnused
}
// findUnusedName returns "name" (coerced into a valid identifier if needed),
// or a numbered variant of it ("name2", "name3", ...) that is not yet used in
// this scope or any enclosing scope. The chosen name is recorded in this
// scope's "nameCounts".
func (s *numberScope) findUnusedName(name string, ns ast.SlotNamespace) string {
	// We may not have a valid identifier if this is an internally-constructed name
	if ns == ast.SlotPrivateName {
		// Private names keep their leading "#"; only the rest must be an identifier
		if id := name[1:]; !js_ast.IsIdentifier(id) {
			name = js_ast.ForceValidIdentifier("#", id)
		}
	} else {
		if !js_ast.IsIdentifier(name) {
			name = js_ast.ForceValidIdentifier("", name)
		}
	}

	if use := s.findNameUse(name); use != nameUnused {
		// If the name is already in use, generate a new name by appending a number
		tries := uint32(1)
		if use == nameUsedInSameScope {
			// To avoid O(n^2) behavior, the number must start off being the number
			// that we used last time there was a collision with this name. Otherwise
			// if there are many collisions with the same name, each name collision
			// would have to increment the counter past all previous name collisions
			// which is a O(n^2) time algorithm. Only do this if this symbol comes
			// from the same scope as the previous one since sibling scopes can reuse
			// the same name without problems.
			tries = s.nameCounts[name]
		}
		prefix := name

		// Keep incrementing the number until the name is unused
		for {
			tries++
			name = prefix + strconv.Itoa(int(tries))

			// Make sure this new name is unused
			if s.findNameUse(name) == nameUnused {
				// Store the count so we can start here next time instead of starting
				// from 1. This means we avoid O(n^2) behavior.
				if use == nameUsedInSameScope {
					s.nameCounts[prefix] = tries
				}
				break
			}
		}
	}

	// Each name starts off with a count of 1 so that the first collision with
	// "name" is called "name2"
	s.nameCounts[name] = 1
	return name
}
////////////////////////////////////////////////////////////////////////////////
// ExportRenamer
// ExportRenamer generates unique export names, either by numbering duplicates
// of a requested name or by producing short minified names from a counter.
type ExportRenamer struct {
	used  map[string]uint32
	count int
}

// NextRenamedName returns "name" if it hasn't been handed out yet, otherwise
// the first unused numbered variant of it ("name2", "name3", ...).
func (r *ExportRenamer) NextRenamedName(name string) string {
	if r.used == nil {
		r.used = make(map[string]uint32)
	}

	tries, taken := r.used[name]
	if !taken {
		r.used[name] = 1
		return name
	}

	// Append an increasing number until the result is unused, resuming from
	// the count recorded for this name's last collision
	base := name
	for {
		tries++
		candidate := base + strconv.Itoa(int(tries))
		if _, exists := r.used[candidate]; !exists {
			r.used[candidate] = tries
			return candidate
		}
	}
}
// NextMinifiedName returns the next short name generated by the default JS
// name minifier and advances the internal counter.
func (r *ExportRenamer) NextMinifiedName() string {
	name := ast.DefaultNameMinifierJS.NumberToMinifiedName(r.count)
	r.count++
	return name
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/xxhash/xxhash.go | internal/xxhash/xxhash.go | // Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
// at http://cyan4973.github.io/xxHash/.
package xxhash
import (
"encoding/binary"
"errors"
"math/bits"
)
const (
	// Prime multipliers used by the XXH64 mixing steps. These values come
	// from the xxHash specification and must not be changed.
	prime1 uint64 = 11400714785074694791
	prime2 uint64 = 14029467366897019727
	prime3 uint64 = 1609587929392839161
	prime4 uint64 = 9650029242287828579
	prime5 uint64 = 2870177450012600261
)

// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
// possible in the Go code is worth a small (but measurable) performance boost
// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
// convenience in the Go code in a few places where we need to intentionally
// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
// result overflows a uint64).
var (
	prime1v = prime1
	prime2v = prime2
	prime3v = prime3
	prime4v = prime4
	prime5v = prime5
)
// Digest implements hash.Hash64.
type Digest struct {
	// v1..v4 are the four running accumulators updated once per 32-byte block
	v1    uint64
	v2    uint64
	v3    uint64
	v4    uint64
	total uint64   // total number of bytes written so far
	mem   [32]byte // buffered bytes of the current partial block
	n     int      // how much of mem is used
}
// New creates a new Digest that computes the 64-bit xxHash algorithm.
func New() *Digest {
	d := &Digest{}
	d.Reset()
	return d
}
// Reset clears the Digest's state so that it can be reused.
func (d *Digest) Reset() {
	// Seed values from the xxHash specification. "prime1v" (a var) is used
	// here because "prime1 + prime2" and "-prime1" are not representable in
	// uint64 constant arithmetic (see the NOTE near the prime declarations).
	d.v1 = prime1v + prime2
	d.v2 = prime2
	d.v3 = 0
	d.v4 = -prime1v
	d.total = 0
	d.n = 0
}
// Size always returns 8 bytes, the width of an XXH64 digest.
func (d *Digest) Size() int { return 8 }

// BlockSize always returns 32 bytes, the size of one XXH64 input block.
func (d *Digest) BlockSize() int { return 32 }
// Write adds more data to d. It always returns len(b), nil. Input is
// buffered in d.mem until a full 32-byte block is available to process.
func (d *Digest) Write(b []byte) (n int, err error) {
	n = len(b)
	d.total += uint64(n)

	if d.n+n < 32 {
		// This new data doesn't even fill the current block.
		copy(d.mem[d.n:], b)
		d.n += n
		return
	}

	if d.n > 0 {
		// Finish off the partial block.
		copy(d.mem[d.n:], b)
		d.v1 = round(d.v1, u64(d.mem[0:8]))
		d.v2 = round(d.v2, u64(d.mem[8:16]))
		d.v3 = round(d.v3, u64(d.mem[16:24]))
		d.v4 = round(d.v4, u64(d.mem[24:32]))
		b = b[32-d.n:]
		d.n = 0
	}

	if len(b) >= 32 {
		// One or more full blocks left.
		nw := writeBlocks(d, b)
		b = b[nw:]
	}

	// Store any remaining partial block.
	copy(d.mem[:], b)
	d.n = len(b)
	return
}
// Sum appends the current hash to b and returns the resulting slice. The
// hash is encoded in big-endian byte order.
func (d *Digest) Sum(b []byte) []byte {
	var digest [8]byte
	binary.BigEndian.PutUint64(digest[:], d.Sum64())
	return append(b, digest[:]...)
}
// Sum64 returns the current hash. It only reads the digest state, so more
// data may be written afterwards.
func (d *Digest) Sum64() uint64 {
	var h uint64

	if d.total >= 32 {
		// At least one full block was processed: merge the four accumulators
		v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
		h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
		h = mergeRound(h, v1)
		h = mergeRound(h, v2)
		h = mergeRound(h, v3)
		h = mergeRound(h, v4)
	} else {
		// Short input: the accumulators were never touched (d.v3 is 0 after
		// Reset, so this is just prime5)
		h = d.v3 + prime5
	}

	h += d.total

	// Fold in whatever remains buffered: 8 bytes at a time, then 4, then 1
	i, end := 0, d.n
	for ; i+8 <= end; i += 8 {
		k1 := round(0, u64(d.mem[i:i+8]))
		h ^= k1
		h = rol27(h)*prime1 + prime4
	}
	if i+4 <= end {
		h ^= uint64(u32(d.mem[i:i+4])) * prime1
		h = rol23(h)*prime2 + prime3
		i += 4
	}
	for i < end {
		h ^= uint64(d.mem[i]) * prime5
		h = rol11(h) * prime1
		i++
	}

	// Final "avalanche" mixing from the xxHash spec
	h ^= h >> 33
	h *= prime2
	h ^= h >> 29
	h *= prime3
	h ^= h >> 32

	return h
}
const (
	// magic identifies serialized digest state; marshaledSize is the fixed
	// length of that state: magic + the 5 uint64 fields + the 32-byte buffer.
	magic         = "xxh\x06"
	marshaledSize = len(magic) + 8*5 + 32
)
// MarshalBinary implements the encoding.BinaryMarshaler interface. The
// output is always exactly marshaledSize bytes.
func (d *Digest) MarshalBinary() ([]byte, error) {
	b := make([]byte, 0, marshaledSize)
	b = append(b, magic...)
	b = appendUint64(b, d.v1)
	b = appendUint64(b, d.v2)
	b = appendUint64(b, d.v3)
	b = appendUint64(b, d.v4)
	b = appendUint64(b, d.total)
	b = append(b, d.mem[:d.n]...)
	// Pad out to the fixed size. The backing array from make() is zeroed, so
	// extending the length writes the unused tail of "mem" as zeros.
	b = b[:len(b)+len(d.mem)-d.n]
	return b, nil
}
// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. It
// rejects input without the expected magic header or of the wrong size.
func (d *Digest) UnmarshalBinary(b []byte) error {
	if len(b) < len(magic) || string(b[:len(magic)]) != magic {
		return errors.New("xxhash: invalid hash state identifier")
	}
	if len(b) != marshaledSize {
		return errors.New("xxhash: invalid hash state size")
	}
	b = b[len(magic):]
	b, d.v1 = consumeUint64(b)
	b, d.v2 = consumeUint64(b)
	b, d.v3 = consumeUint64(b)
	b, d.v4 = consumeUint64(b)
	b, d.total = consumeUint64(b)
	copy(d.mem[:], b)
	// "n" is not serialized explicitly: the buffer holds total % 32 bytes
	// between writes, so it can be recomputed from the total
	d.n = int(d.total % uint64(len(d.mem)))
	return nil
}
// appendUint64 appends x to b in little-endian byte order and returns the
// extended slice.
func appendUint64(b []byte, x uint64) []byte {
	scratch := make([]byte, 8)
	binary.LittleEndian.PutUint64(scratch, x)
	return append(b, scratch...)
}
// consumeUint64 decodes the first 8 bytes of b as a little-endian uint64 and
// returns the remainder of b along with the decoded value.
func consumeUint64(b []byte) ([]byte, uint64) {
	value := u64(b)
	rest := b[8:]
	return rest, value
}
// u64 and u32 decode little-endian integers from the start of b.
func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }
func round(acc, input uint64) uint64 {
acc += input * prime2
acc = rol31(acc)
acc *= prime1
return acc
}
// mergeRound folds one of the four accumulators into the final hash value.
func mergeRound(acc, val uint64) uint64 {
	acc ^= round(0, val)
	return acc*prime1 + prime4
}
// Rotate-left helpers for the fixed shift amounts used by XXH64. These
// lower to single rotate instructions via math/bits.
func rol1(x uint64) uint64  { return bits.RotateLeft64(x, 1) }
func rol7(x uint64) uint64  { return bits.RotateLeft64(x, 7) }
func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/xxhash/xxhash_other.go | internal/xxhash/xxhash_other.go | package xxhash
// Sum64 computes the 64-bit xxHash digest of b.
func Sum64(b []byte) uint64 {
	// A simpler version would be
	//   d := New()
	//   d.Write(b)
	//   return d.Sum64()
	// but this is faster, particularly for small inputs.

	n := len(b)
	var h uint64

	if n >= 32 {
		// Same four-lane accumulator scheme as Digest, over 32-byte blocks.
		// The three-index slices pin the capacity, which helps the compiler
		// with bounds checking.
		v1 := prime1v + prime2
		v2 := prime2
		v3 := uint64(0)
		v4 := -prime1v
		for len(b) >= 32 {
			v1 = round(v1, u64(b[0:8:len(b)]))
			v2 = round(v2, u64(b[8:16:len(b)]))
			v3 = round(v3, u64(b[16:24:len(b)]))
			v4 = round(v4, u64(b[24:32:len(b)]))
			b = b[32:len(b):len(b)]
		}
		h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
		h = mergeRound(h, v1)
		h = mergeRound(h, v2)
		h = mergeRound(h, v3)
		h = mergeRound(h, v4)
	} else {
		h = prime5
	}

	h += uint64(n)

	// Fold in the remaining bytes: 8 at a time, then 4, then 1
	i, end := 0, len(b)
	for ; i+8 <= end; i += 8 {
		k1 := round(0, u64(b[i:i+8:len(b)]))
		h ^= k1
		h = rol27(h)*prime1 + prime4
	}
	if i+4 <= end {
		h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
		h = rol23(h)*prime2 + prime3
		i += 4
	}
	for ; i < end; i++ {
		h ^= uint64(b[i]) * prime5
		h = rol11(h) * prime1
	}

	// Final "avalanche" mixing from the xxHash spec
	h ^= h >> 33
	h *= prime2
	h ^= h >> 29
	h *= prime3
	h ^= h >> 32

	return h
}
// writeBlocks consumes as many whole 32-byte blocks from b as possible,
// folding them into the digest's accumulators, and returns how many bytes
// were consumed (always a multiple of 32).
func writeBlocks(d *Digest, b []byte) int {
	// Work on local copies of the accumulators inside the hot loop
	v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
	n := len(b)
	for len(b) >= 32 {
		v1 = round(v1, u64(b[0:8:len(b)]))
		v2 = round(v2, u64(b[8:16:len(b)]))
		v3 = round(v3, u64(b[16:24:len(b)]))
		v4 = round(v4, u64(b[24:32:len(b)]))
		b = b[32:len(b):len(b)]
	}
	d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4
	return n - len(b)
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/fs/iswin_windows.go | internal/fs/iswin_windows.go | //go:build windows
// +build windows
package fs
// CheckIfWindows reports whether this executable was built for Windows. This
// file is only compiled on Windows (see the build tags above), so this
// variant always returns true.
func CheckIfWindows() bool {
	return true
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/fs/fs_real.go | internal/fs/fs_real.go | package fs
import (
"fmt"
"io"
"io/ioutil"
"os"
"sort"
"strings"
"sync"
"syscall"
)
// realFS is an FS implementation backed by the operating system's file
// system, with a cache for directory listings and optional bookkeeping for
// watch mode.
type realFS struct {
	// Stores the file entries for directories we've listed before
	entries map[string]entriesOrErr

	// This stores data that will end up being returned by "WatchData()"
	watchData map[string]privateWatchData

	// When building with WebAssembly, the Go compiler doesn't correctly handle
	// platform-specific path behavior. Hack around these bugs by compiling
	// support for both Unix and Windows paths into all executables and switch
	// between them at run-time instead.
	fp goFilepath

	entriesMutex sync.Mutex // guards "entries"
	watchMutex   sync.Mutex // guards "watchData"

	// If true, do not use the "entries" cache
	doNotCacheEntries bool
}
// entriesOrErr is one cached result of listing a directory: either the
// entries, or the error pair from the failed attempt.
type entriesOrErr struct {
	canonicalError error
	originalError  error
	entries        DirEntries
}

// watchState selects the strategy used to decide whether a watched path has
// changed since it was last accessed.
type watchState uint8

const (
	stateNone                  watchState = iota
	stateDirHasAccessedEntries            // Compare "accessedEntries"
	stateDirUnreadable                    // Compare directory readability
	stateFileHasModKey                    // Compare "modKey"
	stateFileNeedModKey                   // Need to transition to "stateFileHasModKey" or "stateFileUnusableModKey" before "WatchData()" returns
	stateFileMissing                      // Compare file presence
	stateFileUnusableModKey               // Compare "fileContents"
)

// privateWatchData is the per-path bookkeeping behind watch mode.
type privateWatchData struct {
	accessedEntries *accessedEntries
	fileContents    string
	modKey          ModKey
	state           watchState
}

// RealFSOptions configures RealFS.
type RealFSOptions struct {
	AbsWorkingDir string // if non-empty, used instead of os.Getwd()
	WantWatchData bool   // if true, collect the data needed by WatchData()
	DoNotCache    bool   // if true, don't cache directory listings
}
// RealFS creates an FS backed by the real file system. The result is wrapped
// in a zipFS layer so that paths which traverse into ".zip" archives also
// work (this is yarn's package format in "PnP" mode).
func RealFS(options RealFSOptions) (FS, error) {
	// Select path rules at run time instead of compile time (see goFilepath)
	var fp goFilepath
	if CheckIfWindows() {
		fp.isWindows = true
		fp.pathSeparator = '\\'
	} else {
		fp.isWindows = false
		fp.pathSeparator = '/'
	}

	// Come up with a default working directory if one was not specified
	fp.cwd = options.AbsWorkingDir
	if fp.cwd == "" {
		if cwd, err := os.Getwd(); err == nil {
			fp.cwd = cwd
		} else if fp.isWindows {
			fp.cwd = "C:\\"
		} else {
			fp.cwd = "/"
		}
	} else if !fp.isAbs(fp.cwd) {
		return nil, fmt.Errorf("The working directory %q is not an absolute path", fp.cwd)
	}

	// Resolve symlinks in the current working directory. Symlinks are resolved
	// when input file paths are converted to absolute paths because we need to
	// recognize an input file as unique even if it has multiple symlinks
	// pointing to it. The build will generate relative paths from the current
	// working directory to the absolute input file paths for error messages,
	// so the current working directory should be processed the same way. Not
	// doing this causes test failures with esbuild when run from inside a
	// symlinked directory.
	//
	// This deliberately ignores errors due to e.g. infinite loops. If there is
	// an error, we will just use the original working directory and likely
	// encounter an error later anyway. And if we don't encounter an error
	// later, then the current working directory didn't even matter and the
	// error is unimportant.
	if path, err := fp.evalSymlinks(fp.cwd); err == nil {
		fp.cwd = path
	}

	// Only allocate memory for watch data if necessary
	var watchData map[string]privateWatchData
	if options.WantWatchData {
		watchData = make(map[string]privateWatchData)
	}

	var result FS = &realFS{
		entries:           make(map[string]entriesOrErr),
		fp:                fp,
		watchData:         watchData,
		doNotCacheEntries: options.DoNotCache,
	}

	// Add a wrapper that lets us traverse into ".zip" files. This is what yarn
	// uses as a package format when in yarn is in its "PnP" mode.
	result = &zipFS{
		inner:    result,
		zipFiles: make(map[string]*zipFile),
	}

	return result, nil
}
// ReadDirectory lists the entries of "dir", consulting and populating the
// in-memory cache unless caching is disabled. Failures are cached too so an
// inaccessible directory is not retried. Both a canonicalized error (for
// control flow) and the original error (for messages) are returned.
func (fs *realFS) ReadDirectory(dir string) (entries DirEntries, canonicalError error, originalError error) {
	if !fs.doNotCacheEntries {
		// First, check the cache
		cached, ok := func() (cached entriesOrErr, ok bool) {
			fs.entriesMutex.Lock()
			defer fs.entriesMutex.Unlock()
			cached, ok = fs.entries[dir]
			return
		}()
		if ok {
			// Cache hit: stop now
			return cached.entries, cached.canonicalError, cached.originalError
		}
	}

	// Cache miss: read the directory entries
	names, canonicalError, originalError := fs.readdir(dir)
	entries = DirEntries{dir: dir, data: make(map[string]*Entry)}

	// Unwrap to get the underlying error
	if pathErr, ok := canonicalError.(*os.PathError); ok {
		canonicalError = pathErr.Unwrap()
	}

	if canonicalError == nil {
		for _, name := range names {
			// Call "stat" lazily for performance. The "@material-ui/icons" package
			// contains a directory with over 11,000 entries in it and running "stat"
			// for each entry was a big performance issue for that package.
			//
			// Entries are keyed by lowercased name so lookups can be
			// case-insensitive (see also the lookup in "WatchData")
			entries.data[strings.ToLower(name)] = &Entry{
				dir:      dir,
				base:     name,
				needStat: true,
			}
		}
	}

	// Store data for watch mode
	if fs.watchData != nil {
		defer fs.watchMutex.Unlock()
		fs.watchMutex.Lock()
		state := stateDirHasAccessedEntries
		if canonicalError != nil {
			state = stateDirUnreadable
		}
		entries.accessedEntries = &accessedEntries{wasPresent: make(map[string]bool)}
		fs.watchData[dir] = privateWatchData{
			accessedEntries: entries.accessedEntries,
			state:           state,
		}
	}

	// Update the cache unconditionally. Even if the read failed, we don't want to
	// retry again later. The directory is inaccessible so trying again is wasted.
	if canonicalError != nil {
		entries.data = nil
	}
	if !fs.doNotCacheEntries {
		fs.entriesMutex.Lock()
		defer fs.entriesMutex.Unlock()
		fs.entries[dir] = entriesOrErr{
			entries:        entries,
			canonicalError: canonicalError,
			originalError:  originalError,
		}
	}
	return entries, canonicalError, originalError
}
// ReadFile reads the entire contents of "path" as a string, returning both a
// canonicalized error (for control flow) and the original error (for error
// messages), and records the result for watch mode.
func (fs *realFS) ReadFile(path string) (contents string, canonicalError error, originalError error) {
	BeforeFileOpen()
	defer AfterFileClose()
	buffer, originalError := ioutil.ReadFile(path)
	canonicalError = fs.canonicalizeError(originalError)

	// Allocate the string once
	fileContents := string(buffer)

	// Store data for watch mode
	if fs.watchData != nil {
		defer fs.watchMutex.Unlock()
		fs.watchMutex.Lock()
		data, ok := fs.watchData[path]
		if canonicalError != nil {
			data.state = stateFileMissing
		} else if !ok || data.state == stateDirUnreadable {
			// Note: If "ReadDirectory" is called before "ReadFile" with this same
			// path, then "data.state" will be "stateDirUnreadable". In that case
			// we want to transition to "stateFileNeedModKey" because it's a file.
			data.state = stateFileNeedModKey
		}
		data.fileContents = fileContents
		fs.watchData[path] = data
	}

	return fileContents, canonicalError, originalError
}
type realOpenedFile struct {
handle *os.File
len int
}
func (f *realOpenedFile) Len() int {
return f.len
}
func (f *realOpenedFile) Read(start int, end int) ([]byte, error) {
bytes := make([]byte, end-start)
remaining := bytes
_, err := f.handle.Seek(int64(start), io.SeekStart)
if err != nil {
return nil, err
}
for len(remaining) > 0 {
n, err := f.handle.Read(remaining)
if err != nil && n <= 0 {
return nil, err
}
remaining = remaining[n:]
}
return bytes, nil
}
func (f *realOpenedFile) Close() error {
return f.handle.Close()
}
// OpenFile opens "path" for random-access reads and returns a handle that
// also knows the file's size. The second return value is the canonicalized
// error and the third is the original error from the OS.
func (fs *realFS) OpenFile(path string) (OpenedFile, error, error) {
	BeforeFileOpen()
	defer AfterFileClose()
	f, err := os.Open(path)
	if err != nil {
		return nil, fs.canonicalizeError(err), err
	}
	info, err := f.Stat()
	if err != nil {
		// Don't leak the handle when the stat fails
		f.Close()
		return nil, fs.canonicalizeError(err), err
	}
	return &realOpenedFile{f, int(info.Size())}, nil, nil
}
// ModKey returns a modification key for "path" (see the platform-specific
// "modKey" function) and records the outcome for watch mode.
func (fs *realFS) ModKey(path string) (ModKey, error) {
	BeforeFileOpen()
	defer AfterFileClose()
	key, err := modKey(path)

	// Store data for watch mode
	if fs.watchData != nil {
		defer fs.watchMutex.Unlock()
		fs.watchMutex.Lock()
		data, ok := fs.watchData[path]
		if !ok {
			// First time this path is seen: classify it by how the call went
			if err == modKeyUnusable {
				data.state = stateFileUnusableModKey
			} else if err != nil {
				data.state = stateFileMissing
			} else {
				data.state = stateFileHasModKey
			}
		} else if data.state == stateFileNeedModKey {
			// The path was read earlier and was waiting for a mod key
			data.state = stateFileHasModKey
		}
		data.modKey = key
		fs.watchData[path] = data
	}

	return key, err
}
// IsAbs reports whether "p" is an absolute path.
func (fs *realFS) IsAbs(p string) bool {
	return fs.fp.isAbs(p)
}

// Abs converts "p" to an absolute path. The boolean reports success.
func (fs *realFS) Abs(p string) (string, bool) {
	abs, err := fs.fp.abs(p)
	return abs, err == nil
}

// Dir returns all but the last element of "p".
func (fs *realFS) Dir(p string) string {
	return fs.fp.dir(p)
}

// Base returns the last element of "p".
func (fs *realFS) Base(p string) string {
	return fs.fp.base(p)
}

// Ext returns the file name extension of "p".
func (fs *realFS) Ext(p string) string {
	return fs.fp.ext(p)
}

// Join joins the given path components and cleans the result.
func (fs *realFS) Join(parts ...string) string {
	return fs.fp.clean(fs.fp.join(parts))
}

// Cwd returns the absolute working directory this FS was configured with.
func (fs *realFS) Cwd() string {
	return fs.fp.cwd
}

// Rel makes "target" relative to "base". The boolean reports success.
func (fs *realFS) Rel(base string, target string) (string, bool) {
	if rel, err := fs.fp.rel(base, target); err == nil {
		return rel, true
	}
	return "", false
}

// EvalSymlinks resolves symbolic links in "path". The boolean reports success.
func (fs *realFS) EvalSymlinks(path string) (string, bool) {
	if path, err := fs.fp.evalSymlinks(path); err == nil {
		return path, true
	}
	return "", false
}
// readdir lists the entry names in "dirname" without caching, canonicalizing
// a few platform-specific error cases along the way.
func (fs *realFS) readdir(dirname string) (entries []string, canonicalError error, originalError error) {
	BeforeFileOpen()
	defer AfterFileClose()
	f, originalError := os.Open(dirname)
	canonicalError = fs.canonicalizeError(originalError)

	// Stop now if there was an error
	if canonicalError != nil {
		return nil, canonicalError, originalError
	}

	defer f.Close()
	entries, originalError = f.Readdirnames(-1)
	canonicalError = originalError

	// Unwrap to get the underlying error
	if syscallErr, ok := canonicalError.(*os.SyscallError); ok {
		canonicalError = syscallErr.Unwrap()
	}

	// Don't convert ENOTDIR to ENOENT here. ENOTDIR is a legitimate error
	// condition for Readdirnames() on non-Windows platforms.

	// Go's WebAssembly implementation returns EINVAL instead of ENOTDIR if we
	// call "readdir" on a file. Canonicalize this to ENOTDIR so esbuild's path
	// resolution code continues traversing instead of failing with an error.
	// https://github.com/golang/go/blob/2449bbb5e614954ce9e99c8a481ea2ee73d72d61/src/syscall/fs_js.go#L144
	if pathErr, ok := canonicalError.(*os.PathError); ok && pathErr.Unwrap() == syscall.EINVAL {
		canonicalError = syscall.ENOTDIR
	}

	return entries, canonicalError, originalError
}
// canonicalizeError maps OS- and platform-specific failures onto the
// canonical syscall error values that esbuild's path resolver checks for.
func (fs *realFS) canonicalizeError(err error) error {
	// Unwrap to get the underlying error
	if pathErr, ok := err.(*os.PathError); ok {
		err = pathErr.Unwrap()
	}

	// Windows is much more restrictive than Unix about file names. If a file name
	// is invalid, it will return ERROR_INVALID_NAME. Treat this as ENOENT (i.e.
	// "the file does not exist") so that the resolver continues trying to resolve
	// the path on this failure instead of aborting with an error.
	if fs.fp.isWindows && is_ERROR_INVALID_NAME(err) {
		err = syscall.ENOENT
	}

	// Windows returns ENOTDIR here even though nothing we've done yet has asked
	// for a directory. This really means ENOENT on Windows. Return ENOENT here
	// so callers that check for ENOENT will successfully detect this file as
	// missing.
	if err == syscall.ENOTDIR {
		err = syscall.ENOENT
	}

	return err
}
// kind classifies the entry "base" inside "dir" as a file or a directory,
// resolving symlinks to their final target. The zero EntryKind is returned
// for entries that cannot be stat'ed or resolved; "symlink" is the resolved
// target path when the entry was a symlink.
func (fs *realFS) kind(dir string, base string) (symlink string, kind EntryKind) {
	entryPath := fs.fp.join([]string{dir, base})

	// Use "lstat" since we want information about symbolic links
	BeforeFileOpen()
	defer AfterFileClose()
	stat, err := os.Lstat(entryPath)
	if err != nil {
		return
	}
	mode := stat.Mode()

	// Follow symlinks now so the cache contains the translation
	if (mode & os.ModeSymlink) != 0 {
		link, err := fs.fp.evalSymlinks(entryPath)
		if err != nil {
			return // Skip over this entry
		}

		// Re-run "lstat" on the symlink target to see if it's a file or not
		stat2, err2 := os.Lstat(link)
		if err2 != nil {
			return // Skip over this entry
		}
		mode = stat2.Mode()
		if (mode & os.ModeSymlink) != 0 {
			return // This should no longer be a symlink, so this is unexpected
		}
		symlink = link
	}

	// We consider the entry either a directory or a file
	if (mode & os.ModeDir) != 0 {
		kind = DirEntry
	} else {
		kind = FileEntry
	}
	return
}
// WatchData returns one change-detection closure per path accessed through
// this file system. Each closure re-checks its path against the recorded
// state and returns the path (or the specific changed child path) when a
// change is detected, or an empty string when nothing changed.
//
// NOTE(review): this iterates "fs.watchData" without holding "watchMutex",
// which assumes no build is mutating it concurrently — confirm that callers
// only invoke this between builds.
func (fs *realFS) WatchData() WatchData {
	paths := make(map[string]func() string)

	for path, data := range fs.watchData {
		// Each closure below needs its own copy of these loop variables
		path := path
		data := data

		// Resolve deferred mod keys now, before the state is captured by a
		// closure (see the comment on "stateFileNeedModKey")
		if data.state == stateFileNeedModKey {
			key, err := modKey(path)
			if err == modKeyUnusable {
				data.state = stateFileUnusableModKey
			} else if err != nil {
				data.state = stateFileMissing
			} else {
				data.state = stateFileHasModKey
				data.modKey = key
			}
		}

		switch data.state {
		case stateDirUnreadable:
			// The directory was unreadable: a change means it became readable
			paths[path] = func() string {
				_, err, _ := fs.readdir(path)
				if err == nil {
					return path
				}
				return ""
			}

		case stateDirHasAccessedEntries:
			// The directory was listed: a change means its entry set changed
			paths[path] = func() string {
				names, err, _ := fs.readdir(path)
				if err != nil {
					return path
				}
				data.accessedEntries.mutex.Lock()
				defer data.accessedEntries.mutex.Unlock()
				if allEntries := data.accessedEntries.allEntries; allEntries != nil {
					// Check all entries
					if len(names) != len(allEntries) {
						return path
					}
					sort.Strings(names)
					for i, s := range names {
						if s != allEntries[i] {
							return path
						}
					}
				} else {
					// Check individual entries
					lookup := make(map[string]string, len(names))
					for _, name := range names {
						lookup[strings.ToLower(name)] = name
					}
					for name, wasPresent := range data.accessedEntries.wasPresent {
						if originalName, isPresent := lookup[name]; wasPresent != isPresent {
							return fs.Join(path, originalName)
						}
					}
				}
				return ""
			}

		case stateFileMissing:
			// The file was missing: a change means it now exists as a file
			paths[path] = func() string {
				if info, err := os.Stat(path); err == nil && !info.IsDir() {
					return path
				}
				return ""
			}

		case stateFileHasModKey:
			// Cheap check: compare the modification keys
			paths[path] = func() string {
				if key, err := modKey(path); err != nil || key != data.modKey {
					return path
				}
				return ""
			}

		case stateFileUnusableModKey:
			// The mod key was unusable: fall back to comparing file contents
			paths[path] = func() string {
				if buffer, err := ioutil.ReadFile(path); err != nil || string(buffer) != data.fileContents {
					return path
				}
				return ""
			}
		}
	}

	return WatchData{
		Paths: paths,
	}
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/fs/modkey_unix.go | internal/fs/modkey_unix.go | //go:build darwin || freebsd || linux
// +build darwin freebsd linux
package fs
import (
"time"
"golang.org/x/sys/unix"
)
// modKey builds a change-detection key for "path" from its stat fields
// (inode, size, mtime, mode, uid). It returns modKeyUnusable when the mtime
// cannot be trusted: either the file system zeroed it out, or the file was
// modified within "modKeySafetyGap" seconds of now (constant defined
// elsewhere in this package).
func modKey(path string) (ModKey, error) {
	stat := unix.Stat_t{}
	if err := unix.Stat(path, &stat); err != nil {
		return ModKey{}, err
	}

	// We can't detect changes if the file system zeros out the modification time
	if stat.Mtim.Sec == 0 && stat.Mtim.Nsec == 0 {
		return ModKey{}, modKeyUnusable
	}

	// Don't generate a modification key if the file is too new
	now, err := unix.TimeToTimespec(time.Now())
	if err != nil {
		return ModKey{}, err
	}
	mtimeSec := stat.Mtim.Sec + modKeySafetyGap
	if mtimeSec > now.Sec || (mtimeSec == now.Sec && stat.Mtim.Nsec > now.Nsec) {
		return ModKey{}, modKeyUnusable
	}

	return ModKey{
		inode:      stat.Ino,
		size:       stat.Size,
		mtime_sec:  int64(stat.Mtim.Sec),
		mtime_nsec: int64(stat.Mtim.Nsec),
		mode:       uint32(stat.Mode),
		uid:        stat.Uid,
	}, nil
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/fs/filepath.go | internal/fs/filepath.go | // Code in this file has been forked from the "filepath" module in the Go
// source code to work around bugs with the WebAssembly build target. More
// information about why here: https://github.com/golang/go/issues/43768.
////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2009 The Go Authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package fs
import (
"errors"
"os"
"strings"
"syscall"
)
// goFilepath is a fork of the standard library's "path/filepath" logic with
// the path style (Unix vs. Windows) selected at run time instead of compile
// time. See the comment at the top of this file for why this fork exists.
type goFilepath struct {
	cwd           string // absolute working directory used to absolutize relative paths
	isWindows     bool   // use Windows path rules (volume names, backslashes)?
	pathSeparator byte   // the canonical separator: '\\' on Windows, '/' otherwise
}
// isSlash reports whether c is a forward or backward slash.
func isSlash(c uint8) bool {
	switch c {
	case '/', '\\':
		return true
	default:
		return false
	}
}
// reservedNames lists reserved Windows names. Search for PRN in
// https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file
// for details.
var reservedNames = []string{
	"CON", "PRN", "AUX", "NUL",
	"COM1", "COM2", "COM3", "COM4", "COM5", "COM6", "COM7", "COM8", "COM9",
	"LPT1", "LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8", "LPT9",
}

// isReservedName reports whether path is one of the reserved Windows device
// names above. The comparison is case-insensitive.
func isReservedName(path string) bool {
	if path == "" {
		return false
	}
	for i := range reservedNames {
		if strings.EqualFold(path, reservedNames[i]) {
			return true
		}
	}
	return false
}
// IsAbs reports whether the path is absolute.
func (fp goFilepath) isAbs(path string) bool {
	if !fp.isWindows {
		return strings.HasPrefix(path, "/")
	}
	// Reserved device names like "CON" count as absolute on Windows
	if isReservedName(path) {
		return true
	}
	l := fp.volumeNameLen(path)
	if l == 0 {
		return false
	}
	// A path with a volume name ("C:", "\\server\share") is only absolute if
	// the volume is followed by a separator
	path = path[l:]
	if path == "" {
		return false
	}
	return isSlash(path[0])
}
// Abs returns an absolute representation of path.
// If the path is not absolute it will be joined with the current
// working directory to turn it into an absolute path. The absolute
// path name for a given file is not guaranteed to be unique.
// Abs calls Clean on the result.
func (fp goFilepath) abs(path string) (string, error) {
	if fp.isAbs(path) {
		return fp.clean(path), nil
	}
	return fp.join([]string{fp.cwd, path}), nil
}

// IsPathSeparator reports whether c is a directory separator character.
// On Windows both '/' and '\' count; elsewhere only '/'.
func (fp goFilepath) isPathSeparator(c uint8) bool {
	return c == '/' || (fp.isWindows && c == '\\')
}
// volumeNameLen returns length of the leading volume name on Windows.
// It returns 0 elsewhere. For a drive-letter path ("C:\...") this is 2; for
// a UNC path ("\\server\share\...") it is the length of "\\server\share".
func (fp goFilepath) volumeNameLen(path string) int {
	if !fp.isWindows {
		return 0
	}
	if len(path) < 2 {
		return 0
	}
	// with drive letter
	c := path[0]
	if path[1] == ':' && ('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') {
		return 2
	}
	// is it UNC? https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx
	if l := len(path); l >= 5 && isSlash(path[0]) && isSlash(path[1]) &&
		!isSlash(path[2]) && path[2] != '.' {
		// first, leading `\\` and next shouldn't be `\`. its server name.
		for n := 3; n < l-1; n++ {
			// second, next '\' shouldn't be repeated.
			if isSlash(path[n]) {
				n++
				// third, following something characters. its share name.
				if !isSlash(path[n]) {
					if path[n] == '.' {
						break
					}
					for ; n < l; n++ {
						if isSlash(path[n]) {
							break
						}
					}
					return n
				}
				break
			}
		}
	}
	return 0
}
// EvalSymlinks returns the path name after the evaluation of any symbolic
// links.
// If path is relative the result will be relative to the current directory,
// unless one of the components is an absolute symbolic link.
// EvalSymlinks calls Clean on the result.
func (fp goFilepath) evalSymlinks(path string) (string, error) {
	volLen := fp.volumeNameLen(path)
	pathSeparator := string(fp.pathSeparator)
	if volLen < len(path) && fp.isPathSeparator(path[volLen]) {
		volLen++
	}
	vol := path[:volLen]
	dest := vol
	linksWalked := 0
	// Walk "path" one component at a time, accumulating the resolved result
	// in "dest". Whenever a symlink is found, its target is spliced in front
	// of the remaining components and walking continues from there.
	for start, end := volLen, volLen; start < len(path); start = end {
		for start < len(path) && fp.isPathSeparator(path[start]) {
			start++
		}
		end = start
		for end < len(path) && !fp.isPathSeparator(path[end]) {
			end++
		}
		// On Windows, "." can be a symlink.
		// We look it up, and use the value if it is absolute.
		// If not, we just return ".".
		isWindowsDot := fp.isWindows && path[fp.volumeNameLen(path):] == "."
		// The next path component is in path[start:end].
		if end == start {
			// No more path components.
			break
		} else if path[start:end] == "." && !isWindowsDot {
			// Ignore path component ".".
			continue
		} else if path[start:end] == ".." {
			// Back up to previous component if possible.
			// Note that volLen includes any leading slash.
			// Set r to the index of the last slash in dest,
			// after the volume.
			var r int
			for r = len(dest) - 1; r >= volLen; r-- {
				if fp.isPathSeparator(dest[r]) {
					break
				}
			}
			if r < volLen || dest[r+1:] == ".." {
				// Either path has no slashes
				// (it's empty or just "C:")
				// or it ends in a ".." we had to keep.
				// Either way, keep this "..".
				if len(dest) > volLen {
					dest += pathSeparator
				}
				dest += ".."
			} else {
				// Discard everything since the last slash.
				dest = dest[:r]
			}
			continue
		}
		// Ordinary path component. Add it to result.
		if len(dest) > fp.volumeNameLen(dest) && !fp.isPathSeparator(dest[len(dest)-1]) {
			dest += pathSeparator
		}
		dest += path[start:end]
		// Resolve symlink.
		fi, err := os.Lstat(dest)
		if err != nil {
			return "", err
		}
		if fi.Mode()&os.ModeSymlink == 0 {
			if !fi.Mode().IsDir() && end < len(path) {
				// A non-directory with components remaining after it
				// cannot be traversed
				return "", syscall.ENOTDIR
			}
			continue
		}
		// Found symlink.
		linksWalked++
		if linksWalked > 255 {
			// Guard against symlink cycles
			return "", errors.New("EvalSymlinks: too many links")
		}
		link, err := os.Readlink(dest)
		if err != nil {
			return "", err
		}
		if isWindowsDot && !fp.isAbs(link) {
			// On Windows, if "." is a relative symlink,
			// just return ".".
			break
		}
		// Splice the link target in front of the remaining components
		path = link + path[end:]
		v := fp.volumeNameLen(link)
		if v > 0 {
			// Symlink to drive name is an absolute path.
			if v < len(link) && fp.isPathSeparator(link[v]) {
				v++
			}
			vol = link[:v]
			dest = vol
			end = len(vol)
		} else if len(link) > 0 && fp.isPathSeparator(link[0]) {
			// Symlink to absolute path.
			dest = link[:1]
			end = 1
		} else {
			// Symlink to relative path; replace last
			// path component in dest.
			var r int
			for r = len(dest) - 1; r >= volLen; r-- {
				if fp.isPathSeparator(dest[r]) {
					break
				}
			}
			if r < volLen {
				dest = vol
			} else {
				dest = dest[:r]
			}
			end = 0
		}
	}
	return fp.clean(dest), nil
}
// A lazybuf is a lazily constructed path buffer.
// It supports append, reading previously appended bytes,
// and retrieving the final string. It does not allocate a buffer
// to hold the output until that output diverges from s.
type lazybuf struct {
	path       string // the input path being rewritten (volume already stripped)
	volAndPath string // the original input including any leading volume name
	buf        []byte // nil until the output first diverges from "path"
	w          int    // number of output bytes written so far
	volLen     int    // length of the volume prefix within "volAndPath"
}
// index returns the i'th byte of the output produced so far.
func (b *lazybuf) index(i int) byte {
	if b.buf != nil {
		return b.buf[i]
	}
	// Output is still identical to the input; read from it directly
	return b.path[i]
}

// append writes one byte to the output. The backing buffer is only
// allocated (and the prefix copied) when the output first diverges
// from the input path.
func (b *lazybuf) append(c byte) {
	if b.buf == nil {
		if b.w < len(b.path) && b.path[b.w] == c {
			// Output still matches the input byte-for-byte; just advance
			b.w++
			return
		}
		b.buf = make([]byte, len(b.path))
		copy(b.buf, b.path[:b.w])
	}
	b.buf[b.w] = c
	b.w++
}

// string returns the final output with the volume name prepended.
func (b *lazybuf) string() string {
	if b.buf == nil {
		// Never diverged from the input; slice the original string (no copy)
		return b.volAndPath[:b.volLen+b.w]
	}
	return b.volAndPath[:b.volLen] + string(b.buf[:b.w])
}
// FromSlash returns the result of replacing each slash ('/') character
// in path with a separator character. Multiple slashes are replaced
// by multiple separators. On non-Windows the path is returned unchanged.
func (fp goFilepath) fromSlash(path string) string {
	if fp.isWindows {
		return strings.ReplaceAll(path, "/", "\\")
	}
	return path
}
// Clean returns the shortest path name equivalent to path
// by purely lexical processing. It applies the following rules
// iteratively until no further processing can be done:
//
//  1. Replace multiple Separator elements with a single one.
//  2. Eliminate each . path name element (the current directory).
//  3. Eliminate each inner .. path name element (the parent directory)
//     along with the non-.. element that precedes it.
//  4. Eliminate .. elements that begin a rooted path:
//     that is, replace "/.." by "/" at the beginning of a path,
//     assuming Separator is '/'.
//
// The returned path ends in a slash only if it represents a root directory,
// such as "/" on Unix or `C:\` on Windows.
//
// Finally, any occurrences of slash are replaced by Separator.
//
// If the result of this process is an empty string, Clean
// returns the string ".".
//
// See also Rob Pike, "Lexical File Names in Plan 9 or
// Getting Dot-Dot Right,"
// https://9p.io/sys/doc/lexnames.html
func (fp goFilepath) clean(path string) string {
	originalPath := path
	volLen := fp.volumeNameLen(path)
	path = path[volLen:]
	if path == "" {
		if volLen > 1 && originalPath[1] != ':' {
			// should be UNC
			return fp.fromSlash(originalPath)
		}
		// A bare volume like "C:" cleans to "C:."
		return originalPath + "."
	}
	rooted := fp.isPathSeparator(path[0])
	// Invariants:
	//	reading from path; r is index of next byte to process.
	//	writing to buf; w is index of next byte to write.
	//	dotdot is index in buf where .. must stop, either because
	//		it is the leading slash or it is a leading ../../.. prefix.
	n := len(path)
	out := lazybuf{path: path, volAndPath: originalPath, volLen: volLen}
	r, dotdot := 0, 0
	if rooted {
		out.append(fp.pathSeparator)
		r, dotdot = 1, 1
	}
	for r < n {
		switch {
		case fp.isPathSeparator(path[r]):
			// empty path element
			r++
		case path[r] == '.' && (r+1 == n || fp.isPathSeparator(path[r+1])):
			// . element
			r++
		case path[r] == '.' && path[r+1] == '.' && (r+2 == n || fp.isPathSeparator(path[r+2])):
			// .. element: remove to last separator
			r += 2
			switch {
			case out.w > dotdot:
				// can backtrack
				out.w--
				for out.w > dotdot && !fp.isPathSeparator(out.index(out.w)) {
					out.w--
				}
			case !rooted:
				// cannot backtrack, but not rooted, so append .. element.
				if out.w > 0 {
					out.append(fp.pathSeparator)
				}
				out.append('.')
				out.append('.')
				dotdot = out.w
			}
		default:
			// real path element.
			// add slash if needed
			if rooted && out.w != 1 || !rooted && out.w != 0 {
				out.append(fp.pathSeparator)
			}
			// copy element
			for ; r < n && !fp.isPathSeparator(path[r]); r++ {
				out.append(path[r])
			}
		}
	}
	// Turn empty string into "."
	if out.w == 0 {
		out.append('.')
	}
	return fp.fromSlash(out.string())
}
// VolumeName returns leading volume name.
// Given "C:\foo\bar" it returns "C:" on Windows.
// Given "\\host\share\foo" it returns "\\host\share".
// On other platforms it returns "".
func (fp goFilepath) volumeName(path string) string {
	n := fp.volumeNameLen(path)
	return path[:n]
}
// Base returns the last element of path.
// Trailing path separators are removed before extracting the last element.
// If the path is empty, Base returns ".".
// If the path consists entirely of separators, Base returns a single separator.
func (fp goFilepath) base(path string) string {
	if path == "" {
		return "."
	}
	// Strip trailing slashes.
	for len(path) > 0 && fp.isPathSeparator(path[len(path)-1]) {
		path = path[0 : len(path)-1]
	}
	// Throw away volume name (e.g. "C:" or "\\server\share" on Windows)
	path = path[len(fp.volumeName(path)):]
	// Find the last element
	i := len(path) - 1
	for i >= 0 && !fp.isPathSeparator(path[i]) {
		i--
	}
	if i >= 0 {
		path = path[i+1:]
	}
	// If empty now, it had only slashes.
	if path == "" {
		return string(fp.pathSeparator)
	}
	return path
}
// Dir returns all but the last element of path, typically the path's directory.
// After dropping the final element, Dir calls Clean on the path and trailing
// slashes are removed.
// If the path is empty, Dir returns ".".
// If the path consists entirely of separators, Dir returns a single separator.
// The returned path does not end in a separator unless it is the root directory.
func (fp goFilepath) dir(path string) string {
	vol := fp.volumeName(path)
	// Scan backwards for the last separator after the volume prefix
	i := len(path) - 1
	for i >= len(vol) && !fp.isPathSeparator(path[i]) {
		i--
	}
	dir := fp.clean(path[len(vol) : i+1])
	if dir == "." && len(vol) > 2 {
		// must be UNC (a volume longer than "C:" is "\\server\share")
		return vol
	}
	return vol + dir
}
// Ext returns the file name extension used by path.
// The extension is the suffix beginning at the final dot
// in the final element of path; it is empty if there is
// no dot.
func (fp goFilepath) ext(path string) string {
	// Scan backwards, stopping at the first separator (start of the
	// final path element)
	i := len(path) - 1
	for i >= 0 && !fp.isPathSeparator(path[i]) {
		if path[i] == '.' {
			return path[i:]
		}
		i--
	}
	return ""
}
// Join joins any number of path elements into a single path,
// separating them with an OS specific Separator. Empty elements
// are ignored. The result is Cleaned. However, if the argument
// list is empty or all its elements are empty, Join returns
// an empty string.
// On Windows, the result will only be a UNC path if the first
// non-empty element is a UNC path.
func (fp goFilepath) join(elem []string) string {
	// Skip leading empty elements, then join everything from the first
	// non-empty one onwards
	for i := range elem {
		if elem[i] == "" {
			continue
		}
		if fp.isWindows {
			return fp.joinNonEmpty(elem[i:])
		}
		return fp.clean(strings.Join(elem[i:], string(fp.pathSeparator)))
	}
	return ""
}
// joinNonEmpty is like join, but it assumes that the first element is non-empty.
func (fp goFilepath) joinNonEmpty(elem []string) string {
	if len(elem[0]) == 2 && elem[0][1] == ':' {
		// First element is drive letter without terminating slash.
		// Keep path relative to current directory on that drive.
		// Skip empty elements.
		i := 1
		for ; i < len(elem); i++ {
			if elem[i] != "" {
				break
			}
		}
		return fp.clean(elem[0] + strings.Join(elem[i:], string(fp.pathSeparator)))
	}
	// The following logic prevents Join from inadvertently creating a
	// UNC path on Windows. Unless the first element is a UNC path, Join
	// shouldn't create a UNC path. See golang.org/issue/9167.
	p := fp.clean(strings.Join(elem, string(fp.pathSeparator)))
	if !fp.isUNC(p) {
		return p
	}
	// p == UNC only allowed when the first element is a UNC path.
	head := fp.clean(elem[0])
	if fp.isUNC(head) {
		return p
	}
	// head + tail == UNC, but joining two non-UNC paths should not result
	// in a UNC path. Undo creation of UNC path.
	tail := fp.clean(strings.Join(elem[1:], string(fp.pathSeparator)))
	if head[len(head)-1] == fp.pathSeparator {
		return head + tail
	}
	return head + string(fp.pathSeparator) + tail
}
// isUNC reports whether path is a UNC path. Any volume name longer than a
// two-character drive prefix ("C:") must be a "\\server\share" volume.
func (fp goFilepath) isUNC(path string) bool {
	const driveVolumeLen = 2 // length of a "C:"-style volume
	return fp.volumeNameLen(path) > driveVolumeLen
}
// Rel returns a relative path that is lexically equivalent to targpath when
// joined to basepath with an intervening separator. That is,
// Join(basepath, Rel(basepath, targpath)) is equivalent to targpath itself.
// On success, the returned path will always be relative to basepath,
// even if basepath and targpath share no elements.
// An error is returned if targpath can't be made relative to basepath or if
// knowing the current working directory would be necessary to compute it.
// Rel calls Clean on the result.
func (fp goFilepath) rel(basepath, targpath string) (string, error) {
	baseVol := fp.volumeName(basepath)
	targVol := fp.volumeName(targpath)
	base := fp.clean(basepath)
	targ := fp.clean(targpath)
	if fp.sameWord(targ, base) {
		return ".", nil
	}
	base = base[len(baseVol):]
	targ = targ[len(targVol):]
	if base == "." {
		base = ""
	}
	// Can't use IsAbs - `\a` and `a` are both relative in Windows.
	baseSlashed := len(base) > 0 && base[0] == fp.pathSeparator
	targSlashed := len(targ) > 0 && targ[0] == fp.pathSeparator
	if baseSlashed != targSlashed || !fp.sameWord(baseVol, targVol) {
		// One rooted and one relative (or different volumes): no lexical
		// relative path exists between them
		return "", errors.New("Rel: can't make " + targpath + " relative to " + basepath)
	}
	// Position base[b0:bi] and targ[t0:ti] at the first differing elements.
	bl := len(base)
	tl := len(targ)
	var b0, bi, t0, ti int
	for {
		for bi < bl && base[bi] != fp.pathSeparator {
			bi++
		}
		for ti < tl && targ[ti] != fp.pathSeparator {
			ti++
		}
		if !fp.sameWord(targ[t0:ti], base[b0:bi]) {
			break
		}
		if bi < bl {
			bi++
		}
		if ti < tl {
			ti++
		}
		b0 = bi
		t0 = ti
	}
	if base[b0:bi] == ".." {
		return "", errors.New("Rel: can't make " + targpath + " relative to " + basepath)
	}
	if b0 != bl {
		// Base elements left. Must go up before going down.
		seps := strings.Count(base[b0:bl], string(fp.pathSeparator))
		// One ".." plus a "/.." per remaining separator, then the target tail
		size := 2 + seps*3
		if tl != t0 {
			size += 1 + tl - t0
		}
		buf := make([]byte, size)
		n := copy(buf, "..")
		for i := 0; i < seps; i++ {
			buf[n] = fp.pathSeparator
			copy(buf[n+1:], "..")
			n += 3
		}
		if t0 != tl {
			buf[n] = fp.pathSeparator
			copy(buf[n+1:], targ[t0:])
		}
		return string(buf), nil
	}
	return targ[t0:], nil
}
// sameWord compares two path components for equality: case-insensitively
// on Windows and exactly everywhere else.
func (fp goFilepath) sameWord(a, b string) bool {
	if fp.isWindows {
		return strings.EqualFold(a, b)
	}
	return a == b
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/fs/iswin_wasm.go | internal/fs/iswin_wasm.go | //go:build js && wasm
// +build js,wasm
package fs
import (
"os"
)
// checkedIfWindows records whether the probe below has run yet.
var checkedIfWindows bool

// cachedIfWindows memoizes the result of the "C:\" existence probe.
var cachedIfWindows bool

// CheckIfWindows reports whether this WebAssembly build appears to be
// running on a Windows host. The result is computed once and cached.
// NOTE(review): the lazy initialization here is not synchronized —
// presumably this is only ever called from a single goroutine; verify
// before calling it concurrently.
func CheckIfWindows() bool {
	if !checkedIfWindows {
		checkedIfWindows = true
		// Hack: Assume that we're on Windows if we're running WebAssembly and
		// the "C:\\" directory exists. This is a workaround for a bug in Go's
		// WebAssembly support: https://github.com/golang/go/issues/43768.
		_, err := os.Stat("C:\\")
		cachedIfWindows = err == nil
	}
	return cachedIfWindows
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/fs/fs_mock.go | internal/fs/fs_mock.go | package fs
// This is a mock implementation of the "fs" module for use with tests. It does
// not actually read from the file system. Instead, it reads from a pre-specified
// map of file paths to files.
import (
"errors"
"path"
"strings"
"syscall"
)
// MockKind selects which operating system's path rules a mock file
// system emulates.
type MockKind uint8

const (
	MockUnix    MockKind = iota // forward slashes, paths rooted at "/"
	MockWindows                 // backslashes and drive letters (e.g. "C:\")
)

// mockFS is an in-memory implementation of the FS interface for tests.
// It serves everything from pre-built maps and never touches the disk.
type mockFS struct {
	dirs          map[string]DirEntries // directory path -> its listing
	files         map[string]string     // file path -> file contents
	absWorkingDir string                // value returned by Cwd()
	defaultVolume string                // drive letter for rooted paths lacking one (Windows only)
	Kind          MockKind
}
// MockFS creates a mock file system from a map of file paths to contents.
// The listing for every ancestor directory of every file is derived up
// front so that later lookups are simple map accesses.
func MockFS(input map[string]string, kind MockKind, absWorkingDir string) FS {
	dirs := make(map[string]DirEntries)
	files := make(map[string]string)
	var defaultVolume string
	if kind == MockWindows {
		// Take the default drive letter from the working directory
		_, defaultVolume = win2unix(absWorkingDir)
	}
	for k, v := range input {
		var volume string
		files[k] = v
		// Work in slash-separated form internally; convert keys back to
		// Windows form when storing them
		if kind == MockWindows {
			k, volume = win2unix(k)
		}
		original := k
		// Build the directory map
		for {
			kDir := path.Dir(k)
			key := kDir
			if kind == MockWindows {
				key = unix2win(key, volume, defaultVolume)
			}
			dir, ok := dirs[key]
			if !ok {
				dir = DirEntries{dir: key, data: make(map[string]*Entry)}
				dirs[key] = dir
			}
			if kDir == k {
				// Reached the root ("path.Dir" returned its input unchanged)
				break
			}
			base := path.Base(k)
			if k == original {
				// Only the deepest component is the file itself; every
				// ancestor component is a directory
				dir.data[strings.ToLower(base)] = &Entry{kind: FileEntry, base: base}
			} else {
				dir.data[strings.ToLower(base)] = &Entry{kind: DirEntry, base: base}
			}
			k = kDir
		}
	}
	return &mockFS{dirs, files, absWorkingDir, defaultVolume, kind}
}
// ReadDirectory returns the pre-computed listing for the given directory,
// or ENOENT if it was never created by MockFS. Trailing separators (other
// than a root separator) are stripped before the lookup.
func (fs *mockFS) ReadDirectory(path string) (DirEntries, error, error) {
	if fs.Kind == MockWindows {
		path = strings.ReplaceAll(path, "/", "\\")
	}
	var slash byte = '/'
	if fs.Kind == MockWindows {
		slash = '\\'
	}
	// Trim trailing slashes before lookup
	firstSlash := strings.IndexByte(path, slash)
	for {
		i := strings.LastIndexByte(path, slash)
		if i != len(path)-1 || i <= firstSlash {
			// Stop once the last separator is no longer trailing, or once
			// removing it would strip the root separator itself
			break
		}
		path = path[:i]
	}
	if dir, ok := fs.dirs[path]; ok {
		return dir, nil, nil
	}
	return DirEntries{}, syscall.ENOENT, syscall.ENOENT
}
// ReadFile returns the contents of the given file from the in-memory map.
// Missing files report syscall.ENOENT, just like a real file system.
func (fs *mockFS) ReadFile(path string) (string, error, error) {
	if fs.Kind == MockWindows {
		path = strings.ReplaceAll(path, "/", "\\")
	}
	contents, ok := fs.files[path]
	if !ok {
		return "", syscall.ENOENT, syscall.ENOENT
	}
	return contents, nil, nil
}
// OpenFile wraps the in-memory file contents in an InMemoryOpenedFile.
// Missing files report syscall.ENOENT, just like a real file system.
func (fs *mockFS) OpenFile(path string) (OpenedFile, error, error) {
	if fs.Kind == MockWindows {
		path = strings.ReplaceAll(path, "/", "\\")
	}
	contents, ok := fs.files[path]
	if !ok {
		return nil, syscall.ENOENT, syscall.ENOENT
	}
	return &InMemoryOpenedFile{Contents: []byte(contents)}, nil, nil
}
// ModKey is unsupported by the mock file system: there is no real file
// metadata to derive a modification key from during tests.
func (fs *mockFS) ModKey(path string) (ModKey, error) {
	return ModKey{}, errors.New("This is not available during tests")
}
// win2unix converts a Windows-style path into a slash-separated path,
// splitting off a leading drive letter (returned separately as "volume",
// without the colon) when one is present.
func win2unix(p string) (result string, volume string) {
	if len(p) >= 3 && p[1] == ':' && p[2] == '\\' {
		if c := p[0]; ('A' <= c && c <= 'Z') || ('a' <= c && c <= 'z') {
			volume = p[:1]
			p = p[2:]
		}
	}
	return strings.ReplaceAll(p, "\\", "/"), volume
}
// unix2win converts a slash-separated path back into Windows form. A rooted
// path gains a drive prefix: the given volume if non-empty, otherwise the
// default volume.
func unix2win(p string, volume string, defaultVolume string) string {
	p = strings.ReplaceAll(p, "/", "\\")
	if !strings.HasPrefix(p, "\\") {
		return p
	}
	if volume == "" {
		volume = defaultVolume
	}
	return volume + ":" + p
}
// IsAbs reports whether the given path is absolute under the mock's rules.
func (fs *mockFS) IsAbs(p string) bool {
	if fs.Kind != MockWindows {
		return path.IsAbs(p)
	}
	unix, _ := win2unix(p)
	return path.IsAbs(unix)
}
// Abs roots the given path at "/" and cleans it. On Windows the original
// (or default) drive letter is re-applied afterwards. Always succeeds.
func (fs *mockFS) Abs(p string) (string, bool) {
	if fs.Kind != MockWindows {
		return path.Clean(path.Join("/", p)), true
	}
	unix, volume := win2unix(p)
	unix = path.Clean(path.Join("/", unix))
	return unix2win(unix, volume, fs.defaultVolume), true
}
// Dir returns all but the last element of the path, using the mock OS's
// separator conventions.
func (fs *mockFS) Dir(p string) string {
	if fs.Kind != MockWindows {
		return path.Dir(p)
	}
	unix, volume := win2unix(p)
	return unix2win(path.Dir(unix), volume, fs.defaultVolume)
}
// Base returns the last element of the path. The root of a Windows drive
// keeps its volume (e.g. "C:\").
func (fs *mockFS) Base(p string) string {
	if fs.Kind != MockWindows {
		return path.Base(p)
	}
	unix, volume := win2unix(p)
	base := path.Base(unix)
	if base == "/" {
		return volume + ":\\"
	}
	return base
}
// Ext returns the file extension of the final path element.
func (fs *mockFS) Ext(p string) string {
	if fs.Kind == MockWindows {
		unix, _ := win2unix(p)
		return path.Ext(unix)
	}
	return path.Ext(p)
}
// Join joins path components with the mock OS's separator and cleans the
// result. On Windows, only the first component's drive letter is kept.
func (fs *mockFS) Join(parts ...string) string {
	var volume string
	if fs.Kind == MockWindows {
		converted := make([]string, len(parts))
		for i, part := range parts {
			var v string
			converted[i], v = win2unix(part)
			if i == 0 {
				// Only the first component may contribute a drive letter
				volume = v
			}
		}
		parts = converted
	}
	p := path.Clean(path.Join(parts...))
	if fs.Kind == MockWindows {
		p = unix2win(p, volume, fs.defaultVolume)
	}
	return p
}
// Cwd returns the fixed working directory this mock was constructed with.
func (fs *mockFS) Cwd() string {
	return fs.absWorkingDir
}
// splitOnSlash splits the path at its first "/" into a head and a tail.
// A path with no slash is returned whole with an empty tail.
func splitOnSlash(path string) (string, string) {
	slash := strings.IndexByte(path, '/')
	if slash == -1 {
		return path, ""
	}
	return path[:slash], path[slash+1:]
}
// Rel returns a path that reaches "target" relative to "base", or false if
// no such path exists (e.g. one path is rooted and the other isn't, or the
// two Windows paths are on different drives).
func (fs *mockFS) Rel(base string, target string) (string, bool) {
	var volume string
	if fs.Kind == MockWindows {
		var v string
		base, volume = win2unix(base)
		target, v = win2unix(target)
		if volume == "" {
			volume = fs.defaultVolume
		}
		if v == "" {
			v = fs.defaultVolume
		}
		// Drive letters compare case-insensitively. Use "EqualFold" instead
		// of comparing two "ToUpper" copies (avoids the allocations and
		// matches how "goFilepath.sameWord" compares paths on Windows).
		if !strings.EqualFold(v, volume) {
			return "", false
		}
	}
	base = path.Clean(base)
	target = path.Clean(target)
	// Go's implementation does these checks
	if base == target {
		return ".", true
	}
	if base == "." {
		base = ""
	}
	// Go's implementation fails when this condition is false. I believe this is
	// because of this part of the contract, from Go's documentation: "An error
	// is returned if targpath can't be made relative to basepath or if knowing
	// the current working directory would be necessary to compute it."
	if (len(base) > 0 && base[0] == '/') != (len(target) > 0 && target[0] == '/') {
		return "", false
	}
	// Find the common parent directory
	for {
		bHead, bTail := splitOnSlash(base)
		tHead, tTail := splitOnSlash(target)
		if bHead != tHead {
			break
		}
		base = bTail
		target = tTail
	}
	// Stop now if base is a subpath of target
	if base == "" {
		if fs.Kind == MockWindows {
			target = unix2win(target, volume, fs.defaultVolume)
		}
		return target, true
	}
	// Traverse up to the common parent
	commonParent := strings.Repeat("../", strings.Count(base, "/")+1)
	// Stop now if target is a subpath of base
	if target == "" {
		target = commonParent[:len(commonParent)-1]
		if fs.Kind == MockWindows {
			target = unix2win(target, volume, fs.defaultVolume)
		}
		return target, true
	}
	// Otherwise, down to the parent
	target = commonParent + target
	if fs.Kind == MockWindows {
		target = unix2win(target, volume, fs.defaultVolume)
	}
	return target, true
}
// EvalSymlinks always fails: the mock file system contains no symlinks.
func (fs *mockFS) EvalSymlinks(path string) (string, bool) {
	return "", false
}

// kind is only needed for lazily-statted entries, which MockFS never
// creates (it fills in every entry's kind eagerly), so this is unreachable.
func (fs *mockFS) kind(dir string, base string) (symlink string, kind EntryKind) {
	panic("This should never be called")
}

// WatchData is only used by watch mode, which is not exercised with the
// mock file system, so this is unreachable.
func (fs *mockFS) WatchData() WatchData {
	panic("This should never be called")
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/fs/error_other.go | internal/fs/error_other.go | //go:build (!js || !wasm) && !windows
// +build !js !wasm
// +build !windows
package fs
// is_ERROR_INVALID_NAME is the stub for non-Windows targets: the
// ERROR_INVALID_NAME error code only exists on Windows, so it never matches.
func is_ERROR_INVALID_NAME(err error) bool {
	return false
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/fs/iswin_other.go | internal/fs/iswin_other.go | //go:build (!js || !wasm) && !windows
// +build !js !wasm
// +build !windows
package fs
// CheckIfWindows reports whether we're running on Windows. This variant is
// compiled for non-Windows, non-WebAssembly targets, so it's always false.
func CheckIfWindows() bool {
	return false
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/fs/modkey_other.go | internal/fs/modkey_other.go | //go:build !darwin && !freebsd && !linux
// +build !darwin,!freebsd,!linux
package fs
import (
"os"
"time"
)
// modKey derives a modification key for the file at "path" from its stat
// metadata (size, modification time, and mode). It returns "modKeyUnusable"
// when the modification time can't be trusted for change detection.
func modKey(path string) (ModKey, error) {
	info, err := os.Stat(path)
	if err != nil {
		return ModKey{}, err
	}
	// We can't detect changes if the file system zeros out the modification
	// time. Use "IsZero" instead of comparing with "==" since the time
	// package documents that Time values should not be compared with "==".
	mtime := info.ModTime()
	if mtime.IsZero() || mtime.Unix() == 0 {
		return ModKey{}, modKeyUnusable
	}
	// Don't generate a modification key if the file is too new
	if mtime.Add(modKeySafetyGap * time.Second).After(time.Now()) {
		return ModKey{}, modKeyUnusable
	}
	return ModKey{
		size:      info.Size(),
		mtime_sec: mtime.Unix(),
		mode:      uint32(info.Mode()),
	}, nil
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/fs/fs_zip.go | internal/fs/fs_zip.go | package fs
// The Yarn package manager (https://yarnpkg.com/) has a custom installation
// strategy called "Plug'n'Play" where they install packages as zip files
// instead of directory trees, and then modify node to treat zip files like
// directories. This reduces package installation time because Yarn now only
// has to copy a single file per package instead of a whole directory tree.
// However, it introduces overhead at run-time because the virtual file system
// is written in JavaScript.
//
// This file contains esbuild's implementation of the behavior that treats zip
// files like directories. It implements the "FS" interface and wraps an inner
// "FS" interface that treats zip files like files. That way it can run both on
// a real file system and a mock file system.
//
// This file also implements another Yarn-specific behavior where certain paths
// containing the special path segments "__virtual__" or "$$virtual" have some
// unusual behavior. See the code below for details.
import (
"archive/zip"
"io/ioutil"
"strconv"
"strings"
"sync"
"syscall"
)
// zipFS wraps an inner FS and overlays the contents of ".zip" archives so
// they can be traversed as if they were directories (Yarn Plug'n'Play).
type zipFS struct {
	inner FS // the non-zip-aware file system being wrapped

	zipFilesMutex sync.Mutex
	zipFiles      map[string]*zipFile // archive path -> cached parse result (or failure)
}

// zipFile is one zip archive that was opened (or failed to open).
type zipFile struct {
	reader *zip.ReadCloser
	err    error // non-nil if opening/reading the archive failed

	dirs  map[string]*compressedDir  // lowercased dir path (with and without trailing "/") -> dir
	files map[string]*compressedFile // lowercased file path -> file
	wait  sync.WaitGroup             // released once the archive has been fully indexed
}

// compressedDir is one directory inside a zip archive.
type compressedDir struct {
	entries map[string]EntryKind // base name -> file or directory
	path    string

	// Compatible entries are decoded lazily
	mutex      sync.Mutex
	dirEntries DirEntries // built from "entries" on first ReadDirectory call
}

// compressedFile is one file inside a zip archive.
type compressedFile struct {
	compressed *zip.File

	// The file is decompressed lazily
	mutex    sync.Mutex
	contents string
	err      error
	wasRead  bool // true once "contents" and "err" are valid
}
// checkForZip checks whether "path" refers to something inside (or, for
// directories, exactly at) a ".zip" archive. If so it returns the opened
// archive plus the remaining path inside it. Archives are opened at most
// once; concurrent callers for the same archive wait on the first one.
func (fs *zipFS) checkForZip(path string, kind EntryKind) (*zipFile, string) {
	var zipPath string
	var pathTail string

	// Do a quick check for a ".zip" in the path at all
	path = strings.ReplaceAll(path, "\\", "/")
	if i := strings.Index(path, ".zip/"); i != -1 {
		zipPath = path[:i+len(".zip")]
		pathTail = path[i+len(".zip/"):]
	} else if kind == DirEntry && strings.HasSuffix(path, ".zip") {
		zipPath = path
	} else {
		return nil, ""
	}

	// If there is one, then check whether it's a file on the file system or not
	fs.zipFilesMutex.Lock()
	archive := fs.zipFiles[zipPath]
	if archive != nil {
		// Another goroutine is (or was) reading this archive; wait for it
		fs.zipFilesMutex.Unlock()
		archive.wait.Wait()
	} else {
		// We're the first: publish a placeholder so other goroutines block
		// on "wait" instead of re-reading the archive
		archive = &zipFile{}
		archive.wait.Add(1)
		fs.zipFiles[zipPath] = archive
		fs.zipFilesMutex.Unlock()
		defer archive.wait.Done()

		// Try reading the zip archive if it's not in the cache
		tryToReadZipArchive(zipPath, archive)
	}

	if archive.err != nil {
		return nil, ""
	}
	return archive, pathTail
}
// tryToReadZipArchive opens the zip file at "zipPath" and fills in "archive"
// with an index of every file and directory it contains. On failure only
// "archive.err" is set.
func tryToReadZipArchive(zipPath string, archive *zipFile) {
	reader, err := zip.OpenReader(zipPath)
	if err != nil {
		archive.err = err
		return
	}

	dirs := make(map[string]*compressedDir)
	files := make(map[string]*compressedFile)
	seeds := []string{} // lowercased dir paths whose ancestors still need entries

	// Build an index of all files in the archive
	for _, file := range reader.File {
		baseName := strings.TrimSuffix(file.Name, "/")
		dirPath := ""
		if slash := strings.LastIndexByte(baseName, '/'); slash != -1 {
			dirPath = baseName[:slash]
			baseName = baseName[slash+1:]
		}
		if file.FileInfo().IsDir() {
			// Handle a directory
			lowerDir := strings.ToLower(dirPath)
			if _, ok := dirs[lowerDir]; !ok {
				dir := &compressedDir{
					path:    dirPath,
					entries: make(map[string]EntryKind),
				}

				// List the same directory both with and without the slash
				dirs[lowerDir] = dir
				dirs[lowerDir+"/"] = dir
				seeds = append(seeds, lowerDir)
			}
		} else {
			// Handle a file
			files[strings.ToLower(file.Name)] = &compressedFile{compressed: file}
			lowerDir := strings.ToLower(dirPath)
			dir, ok := dirs[lowerDir]
			if !ok {
				dir = &compressedDir{
					path:    dirPath,
					entries: make(map[string]EntryKind),
				}

				// List the same directory both with and without the slash
				dirs[lowerDir] = dir
				dirs[lowerDir+"/"] = dir
				seeds = append(seeds, lowerDir)
			}
			dir.entries[baseName] = FileEntry
		}
	}

	// Populate child directories: walk each seed directory up to the root,
	// creating any ancestor directories the archive didn't list explicitly
	// and registering each directory inside its parent
	for _, baseName := range seeds {
		for baseName != "" {
			dirPath := ""
			if slash := strings.LastIndexByte(baseName, '/'); slash != -1 {
				dirPath = baseName[:slash]
				baseName = baseName[slash+1:]
			}
			lowerDir := strings.ToLower(dirPath)
			dir, ok := dirs[lowerDir]
			if !ok {
				dir = &compressedDir{
					path:    dirPath,
					entries: make(map[string]EntryKind),
				}

				// List the same directory both with and without the slash
				dirs[lowerDir] = dir
				dirs[lowerDir+"/"] = dir
			}
			dir.entries[baseName] = DirEntry
			baseName = dirPath
		}
	}

	archive.dirs = dirs
	archive.files = files
	archive.reader = reader
}
// ReadDirectory reads a directory listing, falling back to the contents of
// an enclosing zip archive when the real path doesn't exist as a directory.
func (fs *zipFS) ReadDirectory(path string) (entries DirEntries, canonicalError error, originalError error) {
	path = mangleYarnPnPVirtualPath(path)

	entries, canonicalError, originalError = fs.inner.ReadDirectory(path)

	// Only continue if reading this path as a directory caused an error that's
	// consistent with trying to read a zip file as a directory. Note that EINVAL
	// is produced by the file system in Go's WebAssembly implementation.
	if canonicalError != syscall.ENOENT && canonicalError != syscall.ENOTDIR && canonicalError != syscall.EINVAL {
		return
	}

	// If the directory doesn't exist, try reading from an enclosing zip archive
	zip, pathTail := fs.checkForZip(path, DirEntry)
	if zip == nil {
		return
	}

	// Does the zip archive have this directory?
	dir, ok := zip.dirs[strings.ToLower(pathTail)]
	if !ok {
		return DirEntries{}, syscall.ENOENT, syscall.ENOENT
	}

	// Check whether it has already been converted
	dir.mutex.Lock()
	defer dir.mutex.Unlock()
	if dir.dirEntries.data != nil {
		return dir.dirEntries, nil, nil
	}

	// Otherwise, fill in the entries
	dir.dirEntries = DirEntries{dir: path, data: make(map[string]*Entry, len(dir.entries))}
	for name, kind := range dir.entries {
		// Entry keys are lowercased for case-insensitive lookup; "base"
		// preserves the original casing
		dir.dirEntries.data[strings.ToLower(name)] = &Entry{
			dir:  path,
			base: name,
			kind: kind,
		}
	}

	return dir.dirEntries, nil, nil
}
// ReadFile reads a file's contents, falling back to decompressing it from
// an enclosing zip archive when the real path doesn't exist. Decompressed
// contents (or the decompression error) are cached per file.
func (fs *zipFS) ReadFile(path string) (contents string, canonicalError error, originalError error) {
	path = mangleYarnPnPVirtualPath(path)

	contents, canonicalError, originalError = fs.inner.ReadFile(path)
	if canonicalError != syscall.ENOENT {
		return
	}

	// If the file doesn't exist, try reading from an enclosing zip archive
	zip, pathTail := fs.checkForZip(path, FileEntry)
	if zip == nil {
		return
	}

	// Does the zip archive have this file?
	file, ok := zip.files[strings.ToLower(pathTail)]
	if !ok {
		return "", syscall.ENOENT, syscall.ENOENT
	}

	// Check whether it has already been read
	file.mutex.Lock()
	defer file.mutex.Unlock()
	if file.wasRead {
		return file.contents, file.err, file.err
	}
	file.wasRead = true

	// If not, try to open it
	reader, err := file.compressed.Open()
	if err != nil {
		file.err = err
		return "", err, err
	}
	defer reader.Close()

	// Then try to read it
	bytes, err := ioutil.ReadAll(reader)
	if err != nil {
		file.err = err
		return "", err, err
	}

	file.contents = string(bytes)
	return file.contents, nil, nil
}
// OpenFile delegates to the inner file system after stripping Yarn virtual
// path segments. Note that unlike ReadFile there is no fallback here to an
// enclosing zip archive.
func (fs *zipFS) OpenFile(path string) (result OpenedFile, canonicalError error, originalError error) {
	path = mangleYarnPnPVirtualPath(path)

	result, canonicalError, originalError = fs.inner.OpenFile(path)
	return
}

// ModKey delegates to the inner file system after stripping Yarn virtual
// path segments.
func (fs *zipFS) ModKey(path string) (modKey ModKey, err error) {
	path = mangleYarnPnPVirtualPath(path)

	modKey, err = fs.inner.ModKey(path)
	return
}
// IsAbs delegates to the inner file system.
func (fs *zipFS) IsAbs(path string) bool {
	return fs.inner.IsAbs(path)
}

// Abs delegates to the inner file system.
func (fs *zipFS) Abs(path string) (string, bool) {
	return fs.inner.Abs(path)
}

// Dir returns the parent directory. A path that ends exactly at a Yarn
// virtual segment (i.e. the parsed suffix is empty) resolves directly to
// that segment's computed prefix.
func (fs *zipFS) Dir(path string) string {
	if prefix, suffix, ok := ParseYarnPnPVirtualPath(path); ok && suffix == "" {
		return prefix
	}
	return fs.inner.Dir(path)
}

// Base delegates to the inner file system.
func (fs *zipFS) Base(path string) string {
	return fs.inner.Base(path)
}

// Ext delegates to the inner file system.
func (fs *zipFS) Ext(path string) string {
	return fs.inner.Ext(path)
}

// Join delegates to the inner file system.
func (fs *zipFS) Join(parts ...string) string {
	return fs.inner.Join(parts...)
}

// Cwd delegates to the inner file system.
func (fs *zipFS) Cwd() string {
	return fs.inner.Cwd()
}

// Rel delegates to the inner file system.
func (fs *zipFS) Rel(base string, target string) (string, bool) {
	return fs.inner.Rel(base, target)
}

// EvalSymlinks delegates to the inner file system.
func (fs *zipFS) EvalSymlinks(path string) (string, bool) {
	return fs.inner.EvalSymlinks(path)
}

// kind delegates to the inner file system.
func (fs *zipFS) kind(dir string, base string) (symlink string, kind EntryKind) {
	return fs.inner.kind(dir, base)
}

// WatchData delegates to the inner file system.
func (fs *zipFS) WatchData() WatchData {
	return fs.inner.WatchData()
}
// ParseYarnPnPVirtualPath detects a Yarn Plug'n'Play virtual path segment of
// the form "__virtual__/<hash>/<n>" (or the older "$$virtual/<hash>/<n>") and
// resolves it by applying <n> ".." operations to the part of the path before
// the segment. On success it returns the resolved prefix, the remainder of
// the path after the count segment ("" if there is none), and true. Paths
// without such a segment return ("", "", false). Both "/" and "\\" are
// treated as separators.
func ParseYarnPnPVirtualPath(path string) (string, string, bool) {
	i := 0
	for {
		start := i
		slash := strings.IndexAny(path[i:], "/\\")
		if slash == -1 {
			break
		}
		i += slash + 1
		// Replace the segments "__virtual__/<segment>/<n>" with N times the ".."
		// operation. Note: The "__virtual__" folder name appeared with Yarn 3.0.
		// Earlier releases used "$$virtual", but it was changed after discovering
		// that this pattern triggered bugs in software where paths were used as
		// either regexps or replacement. For example, "$$" found in the second
		// parameter of "String.prototype.replace" silently turned into "$".
		if segment := path[start : i-1]; segment == "__virtual__" || segment == "$$virtual" {
			// The segment after the marker is the hash; the count comes after it
			if slash := strings.IndexAny(path[i:], "/\\"); slash != -1 {
				var count string
				var suffix string
				j := i + slash + 1
				// Find the range of the count
				if slash := strings.IndexAny(path[j:], "/\\"); slash != -1 {
					count = path[j : j+slash]
					suffix = path[j+slash:]
				} else {
					count = path[j:]
				}
				// Parse the count
				if n, err := strconv.ParseInt(count, 10, 64); err == nil {
					prefix := path[:start]
					// Apply N times the ".." operator
					for n > 0 && (strings.HasSuffix(prefix, "/") || strings.HasSuffix(prefix, "\\")) {
						slash := strings.LastIndexAny(prefix[:len(prefix)-1], "/\\")
						if slash == -1 {
							break
						}
						prefix = prefix[:slash+1]
						n--
					}
					// Make sure the prefix and suffix work well when joined together
					if suffix == "" && strings.IndexAny(prefix, "/\\") != strings.LastIndexAny(prefix, "/\\") {
						// More than one separator: safe to drop the trailing one
						prefix = prefix[:len(prefix)-1]
					} else if prefix == "" {
						prefix = "."
					} else if strings.HasPrefix(suffix, "/") || strings.HasPrefix(suffix, "\\") {
						suffix = suffix[1:]
					}
					return prefix, suffix, true
				}
			}
		}
	}
	return "", "", false
}
// mangleYarnPnPVirtualPath collapses any Yarn PnP virtual segment in "path"
// into the equivalent real path, and returns the path unchanged when no
// virtual segment is present.
func mangleYarnPnPVirtualPath(path string) string {
	prefix, suffix, ok := ParseYarnPnPVirtualPath(path)
	if !ok {
		return path
	}
	return prefix + suffix
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/fs/fs.go | internal/fs/fs.go | package fs
// Most of esbuild's internals use this file system abstraction instead of
// using native file system APIs. This lets us easily mock the file system
// for tests and also implement Yarn's virtual ".zip" file system overlay.
import (
"errors"
"os"
"sort"
"strings"
"sync"
"syscall"
)
// EntryKind classifies a directory entry as either a directory or a file.
type EntryKind uint8

const (
	DirEntry  EntryKind = 1
	FileEntry EntryKind = 2
)

// Entry is a lazily-stat'ed directory entry: the symlink target and kind are
// only computed by calling fs.kind on first access via Kind or Symlink, then
// cached. Access is guarded by an internal mutex.
type Entry struct {
	symlink  string
	dir      string
	base     string
	mutex    sync.Mutex
	kind     EntryKind
	needStat bool
}

// Kind returns the entry's kind, calling fs.kind on first use and caching the
// result. Safe for concurrent use.
func (e *Entry) Kind(fs FS) EntryKind {
	e.mutex.Lock()
	defer e.mutex.Unlock()
	if e.needStat {
		e.needStat = false
		e.symlink, e.kind = fs.kind(e.dir, e.base)
	}
	return e.symlink, e.kind // NOTE: see below — original line kept
	return e.kind
}

// Symlink returns the cached symlink target reported by fs.kind, calling
// fs.kind on first use. Safe for concurrent use.
func (e *Entry) Symlink(fs FS) string {
	e.mutex.Lock()
	defer e.mutex.Unlock()
	if e.needStat {
		e.needStat = false
		e.symlink, e.kind = fs.kind(e.dir, e.base)
	}
	return e.symlink
}
// accessedEntries records which directory entries the build looked at, so
// that watch mode can invalidate rebuilds as narrowly as possible.
type accessedEntries struct {
	wasPresent map[string]bool
	// If this is nil, "SortedKeys()" was not accessed. This means we should
	// check for whether this directory has changed or not by seeing if any of
	// the entries in the "wasPresent" map have changed in "present or not"
	// status, since the only access was to individual entries via "Get()".
	//
	// If this is non-nil, "SortedKeys()" was accessed. This means we should
	// check for whether this directory has changed or not by checking the
	// "allEntries" array for equality with the existing entries list, since the
	// code asked for all entries and may have used the presence or absence of
	// entries in that list.
	//
	// The goal of having these two checks is to be as narrow as possible to
	// avoid unnecessary rebuilds. If only "Get()" is called on a few entries,
	// then we won't invalidate the build if random unrelated entries are added
	// or removed. But if "SortedKeys()" is called, we need to invalidate the
	// build if anything about the set of entries in this directory is changed.
	allEntries []string
	mutex      sync.Mutex
}

// DirEntries is the result of reading a directory: a map from lowercased
// entry names to entries (lookups are case-insensitive), plus optional
// access tracking for watch mode.
type DirEntries struct {
	data            map[string]*Entry
	accessedEntries *accessedEntries
	dir             string
}

// MakeEmptyDirEntries returns a DirEntries for "dir" containing no entries.
func MakeEmptyDirEntries(dir string) DirEntries {
	return DirEntries{dir: dir, data: make(map[string]*Entry)}
}

// DifferentCase describes a query that matched an entry whose on-disk name
// differs from the queried name only by case.
type DifferentCase struct {
	Dir    string
	Query  string
	Actual string
}
// Get looks up "query" case-insensitively. When the entry exists but its
// on-disk name differs from the query by case, a non-nil *DifferentCase
// describes the mismatch. For watch mode, the presence or absence of this
// specific entry is recorded in the accessed-entries tracker.
func (entries DirEntries) Get(query string) (*Entry, *DifferentCase) {
	if entries.data == nil {
		return nil, nil
	}
	key := strings.ToLower(query)
	entry := entries.data[key]

	// Track whether this specific entry was present or absent for watch mode
	if accessed := entries.accessedEntries; accessed != nil {
		accessed.mutex.Lock()
		accessed.wasPresent[key] = entry != nil
		accessed.mutex.Unlock()
	}

	if entry == nil {
		return nil, nil
	}
	if entry.base == query {
		return entry, nil
	}
	return entry, &DifferentCase{
		Dir:    entries.dir,
		Query:  query,
		Actual: entry.base,
	}
}
// This function lets you "peek" at the number of entries without watch mode
// considering the number of entries as having been observed. This is used when
// generating debug log messages to log the number of entries without causing
// watch mode to rebuild when the number of entries has been changed.
func (entries DirEntries) PeekEntryCount() int {
	if entries.data == nil {
		return 0
	}
	return len(entries.data)
}
// SortedKeys returns the original-case names of all entries, sorted. For
// watch mode it records that the full entry set was observed, so any change
// to the set invalidates the build.
func (entries DirEntries) SortedKeys() (keys []string) {
	if entries.data == nil {
		return nil
	}
	keys = make([]string, 0, len(entries.data))
	for _, entry := range entries.data {
		keys = append(keys, entry.base)
	}
	sort.Strings(keys)

	// Track the exact set of all entries for watch mode
	if accessed := entries.accessedEntries; accessed != nil {
		accessed.mutex.Lock()
		accessed.allEntries = keys
		accessed.mutex.Unlock()
	}
	return keys
}
// OpenedFile abstracts random access to an opened file's contents.
type OpenedFile interface {
	Len() int
	Read(start int, end int) ([]byte, error)
	Close() error
}

// InMemoryOpenedFile is an OpenedFile backed entirely by a byte slice.
type InMemoryOpenedFile struct {
	Contents []byte
}

// Len returns the total number of bytes held in memory.
func (f *InMemoryOpenedFile) Len() int {
	return len(f.Contents)
}

// Read returns the bytes in the half-open range [start, end).
func (f *InMemoryOpenedFile) Read(start int, end int) ([]byte, error) {
	return f.Contents[start:end], nil
}

// Close is a no-op because there is no underlying file handle.
func (f *InMemoryOpenedFile) Close() error {
	return nil
}
// FS is the file system abstraction used throughout esbuild instead of the
// native OS APIs, which makes it possible to mock the file system for tests
// and to layer Yarn's virtual ".zip" overlay on top of it.
type FS interface {
	// The returned map is immutable and is cached across invocations. Do not
	// mutate it.
	ReadDirectory(path string) (entries DirEntries, canonicalError error, originalError error)
	ReadFile(path string) (contents string, canonicalError error, originalError error)
	OpenFile(path string) (result OpenedFile, canonicalError error, originalError error)

	// This is a key made from the information returned by "stat". It is intended
	// to be different if the file has been edited, and to otherwise be equal if
	// the file has not been edited. It should usually work, but no guarantees.
	//
	// See https://apenwarr.ca/log/20181113 for more information about why this
	// can be broken. For example, writing to a file with mmap on WSL on Windows
	// won't change this key. Hopefully this isn't too much of an issue.
	//
	// Additional reading:
	// - https://github.com/npm/npm/pull/20027
	// - https://github.com/golang/go/commit/7dea509703eb5ad66a35628b12a678110fbb1f72
	ModKey(path string) (ModKey, error)

	// This is part of the interface because the mock interface used for tests
	// should not depend on file system behavior (i.e. different slashes for
	// Windows) while the real interface should.
	IsAbs(path string) bool
	Abs(path string) (string, bool)
	Dir(path string) string
	Base(path string) string
	Ext(path string) string
	Join(parts ...string) string
	Cwd() string
	Rel(base string, target string) (string, bool)
	EvalSymlinks(path string) (string, bool)

	// This is used in the implementation of "Entry"
	kind(dir string, base string) (symlink string, kind EntryKind)

	// This is a set of all files used and all directories checked. The build
	// must be invalidated if any of these watched files change.
	WatchData() WatchData
}

// WatchData maps watched paths to change detectors for watch mode.
type WatchData struct {
	// These functions return a non-empty path as a string if the file system
	// entry has been modified. For files, the returned path is the same as the
	// file path. For directories, the returned path is either the directory
	// itself or a file in the directory that was changed.
	Paths map[string]func() string
}

// ModKey summarizes a file's "stat" information for change detection.
type ModKey struct {
	// What gets filled in here is OS-dependent
	inode      uint64
	size       int64
	mtime_sec  int64
	mtime_nsec int64
	mode       uint32
	uid        uint32
}

// Some file systems have a time resolution of only a few seconds. If a mtime
// value is too new, we won't be able to tell if it has been recently modified
// or not. So we only use mtimes for comparison if they are sufficiently old.
// Apparently the FAT file system has a resolution of two seconds according to
// this article: https://en.wikipedia.org/wiki/Stat_(system_call).
const modKeySafetyGap = 3 // In seconds
var modKeyUnusable = errors.New("The modification key is unusable")

// Limit the number of files open simultaneously to avoid ulimit issues
var fileOpenLimit = make(chan bool, 32)

// BeforeFileOpen acquires a slot from the open-file limiter. Always pair it
// with a later AfterFileClose.
func BeforeFileOpen() {
	// This will block if the number of open files is already at the limit
	fileOpenLimit <- false
}

// AfterFileClose releases the slot acquired by BeforeFileOpen.
func AfterFileClose() {
	<-fileOpenLimit
}
// This is a fork of "os.MkdirAll" to work around bugs with the WebAssembly
// build target. More information here: https://github.com/golang/go/issues/43768.

// MkdirAll creates the directory at "path" along with any missing parents.
// The FS is used only for path manipulation (Join/Dir); the actual stat and
// mkdir calls still go through the "os" package.
func MkdirAll(fs FS, path string, perm os.FileMode) error {
	// Run "Join" once to run "Clean" on the path, which removes trailing slashes
	return mkdirAll(fs, fs.Join(path), perm)
}

// mkdirAll is the recursive helper behind MkdirAll: it creates the parent
// chain top-down, then this directory.
func mkdirAll(fs FS, path string, perm os.FileMode) error {
	// Fast path: if we can tell whether path is a directory or file, stop with success or error.
	if dir, err := os.Stat(path); err == nil {
		if dir.IsDir() {
			return nil
		}
		return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR}
	}

	// Slow path: make sure parent exists and then call Mkdir for path.
	if parent := fs.Dir(path); parent != path {
		// Create parent.
		if err := mkdirAll(fs, parent, perm); err != nil {
			return err
		}
	}

	// Parent now exists; invoke Mkdir and use its result.
	if err := os.Mkdir(path, perm); err != nil {
		// Handle arguments like "foo/." by
		// double-checking that directory doesn't exist.
		dir, err1 := os.Lstat(path)
		if err1 == nil && dir.IsDir() {
			return nil
		}
		return err
	}
	return nil
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/fs/fs_mock_test.go | internal/fs/fs_mock_test.go | package fs
import (
"fmt"
"testing"
)
func TestMockFSBasicUnix(t *testing.T) {
fs := MockFS(map[string]string{
"/README.md": "// README.md",
"/package.json": "// package.json",
"/src/index.js": "// src/index.js",
"/src/util.js": "// src/util.js",
}, MockUnix, "/")
// Test a missing file
_, err, _ := fs.ReadFile("/missing.txt")
if err == nil {
t.Fatal("Unexpectedly found /missing.txt")
}
// Test an existing file
readme, err, _ := fs.ReadFile("/README.md")
if err != nil {
t.Fatal("Expected to find /README.md")
}
if readme != "// README.md" {
t.Fatalf("Incorrect contents for /README.md: %q", readme)
}
// Test an existing nested file
index, err, _ := fs.ReadFile("/src/index.js")
if err != nil {
t.Fatal("Expected to find /src/index.js")
}
if index != "// src/index.js" {
t.Fatalf("Incorrect contents for /src/index.js: %q", index)
}
// Test a missing directory
_, err, _ = fs.ReadDirectory("/missing")
if err == nil {
t.Fatal("Unexpectedly found /missing")
}
// Test a nested directory
src, err, _ := fs.ReadDirectory("/src")
if err != nil {
t.Fatal("Expected to find /src")
}
indexEntry, _ := src.Get("index.js")
utilEntry, _ := src.Get("util.js")
if len(src.data) != 2 ||
indexEntry == nil || indexEntry.Kind(fs) != FileEntry ||
utilEntry == nil || utilEntry.Kind(fs) != FileEntry {
t.Fatalf("Incorrect contents for /src: %v", src)
}
// Test the top-level directory
slash, err, _ := fs.ReadDirectory("/")
if err != nil {
t.Fatal("Expected to find /")
}
srcEntry, _ := slash.Get("src")
readmeEntry, _ := slash.Get("README.md")
packageEntry, _ := slash.Get("package.json")
if len(slash.data) != 3 ||
srcEntry == nil || srcEntry.Kind(fs) != DirEntry ||
readmeEntry == nil || readmeEntry.Kind(fs) != FileEntry ||
packageEntry == nil || packageEntry.Kind(fs) != FileEntry {
t.Fatalf("Incorrect contents for /: %v", slash)
}
}
func TestMockFSBasicWindows(t *testing.T) {
fs := MockFS(map[string]string{
"C:\\README.md": "// README.md",
"C:\\package.json": "// package.json",
"C:\\src\\index.js": "// src/index.js",
"C:\\src\\util.js": "// src/util.js",
"D:\\other\\file.txt": "// other/file.txt",
}, MockWindows, "C:\\")
// Test a missing file
_, err, _ := fs.ReadFile("C:\\missing.txt")
if err == nil {
t.Fatal("Unexpectedly found C:\\missing.txt")
}
// Test an existing file
readme, err, _ := fs.ReadFile("C:\\README.md")
if err != nil {
t.Fatal("Expected to find C:\\README.md")
}
if readme != "// README.md" {
t.Fatalf("Incorrect contents for C:\\README.md: %q", readme)
}
// Test an existing nested file
index, err, _ := fs.ReadFile("C:\\src\\index.js")
if err != nil {
t.Fatal("Expected to find C:\\src\\index.js")
}
if index != "// src/index.js" {
t.Fatalf("Incorrect contents for C:\\src\\index.js: %q", index)
}
// Test an existing nested file on another drive
file, err, _ := fs.ReadFile("D:\\other\\file.txt")
if err != nil {
t.Fatal("Expected to find D:\\other\\file.txt")
}
if file != "// other/file.txt" {
t.Fatalf("Incorrect contents for D:\\other/file.txt: %q", file)
}
// Should not find a file on another drive
_, err, _ = fs.ReadFile("C:\\other\\file.txt")
if err == nil {
t.Fatal("Unexpectedly found C:\\other\\file.txt")
}
// Test a missing directory
_, err, _ = fs.ReadDirectory("C:\\missing")
if err == nil {
t.Fatal("Unexpectedly found C:\\missing")
}
// Test a nested directory
src, err, _ := fs.ReadDirectory("C:\\src")
if err != nil {
t.Fatal("Expected to find C:\\src")
}
indexEntry, _ := src.Get("index.js")
utilEntry, _ := src.Get("util.js")
if len(src.data) != 2 ||
indexEntry == nil || indexEntry.Kind(fs) != FileEntry ||
utilEntry == nil || utilEntry.Kind(fs) != FileEntry {
t.Fatalf("Incorrect contents for C:\\src: %v", src)
}
// Test a nested directory on another drive
other, err, _ := fs.ReadDirectory("D:\\other")
if err != nil {
t.Fatal("Expected to find D:\\other")
}
fileEntry, _ := other.Get("file.txt")
if len(other.data) != 1 ||
fileEntry == nil || fileEntry.Kind(fs) != FileEntry {
t.Fatalf("Incorrect contents for D:\\other: %v", other)
}
// Test the top-level directory
slash, err, _ := fs.ReadDirectory("C:\\")
if err != nil {
t.Fatal("Expected to find C:\\")
}
srcEntry, _ := slash.Get("src")
readmeEntry, _ := slash.Get("README.md")
packageEntry, _ := slash.Get("package.json")
if len(slash.data) != 3 ||
srcEntry == nil || srcEntry.Kind(fs) != DirEntry ||
readmeEntry == nil || readmeEntry.Kind(fs) != FileEntry ||
packageEntry == nil || packageEntry.Kind(fs) != FileEntry {
t.Fatalf("Incorrect contents for C:\\: %v", slash)
}
}
func TestMockFSRelUnix(t *testing.T) {
fs := MockFS(map[string]string{}, MockUnix, "/")
expect := func(a string, b string, c string) {
t.Helper()
t.Run(fmt.Sprintf("Rel(%q, %q) == %q", a, b, c), func(t *testing.T) {
t.Helper()
rel, ok := fs.Rel(a, b)
if !ok {
t.Fatalf("!ok")
}
if rel != c {
t.Fatalf("Expected %q, got %q", c, rel)
}
})
}
expect("/a/b", "/a/b", ".")
expect("/a/b", "/a/b/c", "c")
expect("/a/b", "/a/b/c/d", "c/d")
expect("/a/b/c", "/a/b", "..")
expect("/a/b/c/d", "/a/b", "../..")
expect("/a/b/c", "/a/b/x", "../x")
expect("/a/b/c/d", "/a/b/x", "../../x")
expect("/a/b/c", "/a/b/x/y", "../x/y")
expect("/a/b/c/d", "/a/b/x/y", "../../x/y")
expect("a/b", "a/c", "../c")
expect("./a/b", "./a/c", "../c")
expect(".", "./a/b", "a/b")
expect(".", ".//a/b", "a/b")
expect(".", "././a/b", "a/b")
expect(".", "././/a/b", "a/b")
}
func TestMockFSRelWindows(t *testing.T) {
fs := MockFS(map[string]string{}, MockWindows, "C:\\")
expect := func(a string, b string, works bool, c string) {
t.Helper()
t.Run(fmt.Sprintf("Rel(%q, %q) == %q", a, b, c), func(t *testing.T) {
t.Helper()
rel, ok := fs.Rel(a, b)
if works {
if !ok {
t.Fatalf("!ok")
}
if rel != c {
t.Fatalf("Expected %q, got %q", c, rel)
}
} else {
if ok {
t.Fatalf("ok")
}
}
})
}
expect("C:\\a\\b", "C:\\a\\b", true, ".")
expect("C:\\a\\b", "C:\\a\\b\\c", true, "c")
expect("C:\\a\\b", "C:\\a\\b\\c\\d", true, "c\\d")
expect("C:\\a\\b\\c", "C:\\a\\b", true, "..")
expect("C:\\a\\b\\c\\d", "C:\\a\\b", true, "..\\..")
expect("C:\\a\\b\\c", "C:\\a\\b\\x", true, "..\\x")
expect("C:\\a\\b\\c\\d", "C:\\a\\b\\x", true, "..\\..\\x")
expect("C:\\a\\b\\c", "C:\\a\\b\\x\\y", true, "..\\x\\y")
expect("C:\\a\\b\\c\\d", "C:\\a\\b\\x\\y", true, "..\\..\\x\\y")
expect("a\\b", "a\\c", true, "..\\c")
expect(".\\a\\b", ".\\a\\c", true, "..\\c")
expect(".", ".\\a\\b", true, "a\\b")
expect(".", ".\\\\a\\b", true, "a\\b")
expect(".", ".\\.\\a\\b", true, "a\\b")
expect(".", ".\\.\\\\a\\b", true, "a\\b")
expect("C:\\a\\b", "\\a\\b", true, ".")
expect("\\a", "\\b", true, "..\\b")
expect("C:\\a", "D:\\a", false, "")
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
evanw/esbuild | https://github.com/evanw/esbuild/blob/cd832972927f1f67b6d2cc895c06a8759c1cf309/internal/fs/error_wasm+windows.go | internal/fs/error_wasm+windows.go | //go:build (js && wasm) || windows
// +build js,wasm windows
package fs
import "syscall"
// This check is here in a conditionally-compiled file because Go's standard
// library for Plan 9 doesn't define a type called "syscall.Errno". Plan 9 is
// not a supported operating system but someone wanted to be able to compile
// esbuild for Plan 9 anyway.
// is_ERROR_INVALID_NAME reports whether err is the Windows error code
// ERROR_INVALID_NAME (123).
func is_ERROR_INVALID_NAME(err error) bool {
	// Value copied from golang.org/x/sys/windows to avoid the dependency
	const errorInvalidName syscall.Errno = 123
	return err == errorInvalidName
}
| go | MIT | cd832972927f1f67b6d2cc895c06a8759c1cf309 | 2026-01-07T08:35:49.242278Z | false |
astaxie/build-web-application-with-golang | https://github.com/astaxie/build-web-application-with-golang/blob/c294b087b96d99d2593ad8f470151af5354bb57f/de/code/src/apps/ch.5.4/main.go | de/code/src/apps/ch.5.4/main.go | // Example code for Chapter 5.4 from "Build Web Application with Golang"
// Purpose: Show how to perform CRUD operations using a postgres driver
package main
import (
"database/sql"
"fmt"
"time"
_ "github.com/lib/pq"
)
const (
DB_USER = "user"
DB_PASSWORD = ""
DB_NAME = "test"
)
// main demonstrates basic CRUD against PostgreSQL with database/sql and the
// pq driver: INSERT ... RETURNING, UPDATE via a prepared statement, SELECT
// with row iteration, and DELETE. Fixes over the original: prepared
// statements and the result set are closed, and rows.Err() is checked after
// iteration so errors that abort the loop are not silently dropped.
func main() {
	dbinfo := fmt.Sprintf("user=%s password=%s dbname=%s sslmode=disable",
		DB_USER, DB_PASSWORD, DB_NAME)
	db, err := sql.Open("postgres", dbinfo)
	checkErr(err)
	defer db.Close()

	fmt.Println("# Inserting values")
	var lastInsertId int
	// lib/pq does not support LastInsertId, so use "RETURNING uid" instead
	err = db.QueryRow("INSERT INTO userinfo(username,departname,created) VALUES($1,$2,$3) returning uid;",
		"astaxie", "software developement", "2012-12-09").Scan(&lastInsertId)
	checkErr(err)
	fmt.Println("id of last inserted row =", lastInsertId)

	fmt.Println("# Updating")
	stmt, err := db.Prepare("update userinfo set username=$1 where uid=$2")
	checkErr(err)
	res, err := stmt.Exec("astaxieupdate", lastInsertId)
	checkErr(err)
	affect, err := res.RowsAffected()
	checkErr(err)
	fmt.Println(affect, "row(s) changed")
	// Release the prepared statement before preparing the next one
	checkErr(stmt.Close())

	fmt.Println("# Querying")
	rows, err := db.Query("SELECT * FROM userinfo")
	checkErr(err)
	defer rows.Close()
	for rows.Next() {
		var uid int
		var username string
		var department string
		var created time.Time
		err = rows.Scan(&uid, &username, &department, &created)
		checkErr(err)
		fmt.Println("uid | username | department | created ")
		fmt.Printf("%3v | %8v | %6v | %6v\n", uid, username, department, created)
	}
	// Surface any error that terminated the iteration early
	checkErr(rows.Err())

	fmt.Println("# Deleting")
	stmt, err = db.Prepare("delete from userinfo where uid=$1")
	checkErr(err)
	defer stmt.Close()
	res, err = stmt.Exec(lastInsertId)
	checkErr(err)
	affect, err = res.RowsAffected()
	checkErr(err)
	fmt.Println(affect, "row(s) changed")
}
// checkErr aborts the example by panicking when err is non-nil.
func checkErr(err error) {
	if err == nil {
		return
	}
	panic(err)
}
| go | BSD-3-Clause | c294b087b96d99d2593ad8f470151af5354bb57f | 2026-01-07T08:35:46.175972Z | false |
astaxie/build-web-application-with-golang | https://github.com/astaxie/build-web-application-with-golang/blob/c294b087b96d99d2593ad8f470151af5354bb57f/de/code/src/apps/ch.4.1/main.go | de/code/src/apps/ch.4.1/main.go | // Example code for Chapter 4.1 from "Build Web Application with Golang"
// Purpose: Shows how to create a simple login using a template
// Run: `go run main.go`, then access `http://localhost:9090` and `http://localhost:9090/login`
package main
import (
"fmt"
"html/template"
"log"
"net/http"
"strings"
)
// sayhelloName dumps the parsed request parameters to the server's stdout and
// answers every request with a fixed greeting.
func sayhelloName(w http.ResponseWriter, r *http.Request) {
	r.ParseForm() // parse URL query parameters and, for POST, the request body
	// attention: if you do not call ParseForm, r.Form below stays empty
	fmt.Println(r.Form) // print information on server side.
	fmt.Println("path", r.URL.Path)
	fmt.Println("scheme", r.URL.Scheme)
	fmt.Println(r.Form["url_long"])
	for k, v := range r.Form {
		fmt.Println("key:", k)
		fmt.Println("val:", strings.Join(v, ""))
	}
	fmt.Fprintf(w, "Hello astaxie!") // write data to response
}

// login serves the login form on GET and logs the submitted credentials on
// any other method (the template parse error is deliberately ignored in this
// tutorial example).
func login(w http.ResponseWriter, r *http.Request) {
	fmt.Println("method:", r.Method) // get request method
	if r.Method == "GET" {
		t, _ := template.ParseFiles("login.gtpl")
		t.Execute(w, nil)
	} else {
		r.ParseForm()
		// logic part of log in
		fmt.Println("username:", r.Form["username"])
		fmt.Println("password:", r.Form["password"])
	}
}

// main registers both handlers and serves HTTP on port 9090.
func main() {
	http.HandleFunc("/", sayhelloName) // setting router rule
	http.HandleFunc("/login", login)
	err := http.ListenAndServe(":9090", nil) // setting listening port
	if err != nil {
		log.Fatal("ListenAndServe: ", err)
	}
}
| go | BSD-3-Clause | c294b087b96d99d2593ad8f470151af5354bb57f | 2026-01-07T08:35:46.175972Z | false |
astaxie/build-web-application-with-golang | https://github.com/astaxie/build-web-application-with-golang/blob/c294b087b96d99d2593ad8f470151af5354bb57f/de/code/src/apps/ch.5.5/main.go | de/code/src/apps/ch.5.5/main.go | // Example code for Chapter 5.5
// Purpose is to show to use BeeDB ORM for basic CRUD operations for sqlite3
package main
import (
"database/sql"
"fmt"
"github.com/astaxie/beedb"
_ "github.com/mattn/go-sqlite3"
"time"
)
// orm is the package-wide BeeDB model handle, initialized in main.
var orm beedb.Model

// Userinfo maps to the "userinfo" table; Uid is tagged as the primary key.
type Userinfo struct {
	Uid        int `beedb:"PK"`
	Username   string
	Department string
	Created    string
}

// DB_PATH is the SQLite database file used by this example.
const DB_PATH = "./foo.db"

// checkError aborts the example by panicking when err is non-nil.
func checkError(err error) {
	if err != nil {
		panic(err)
	}
}
// getTimeStamp formats the current local time as "YYYY-MM-DD HH:MM:SS".
func getTimeStamp() string {
	const layout = "2006-01-02 15:04:05"
	return time.Now().Format(layout)
}
// insertUsingStruct inserts a row by saving a Userinfo struct and returns the
// generated primary key.
func insertUsingStruct() int64 {
	fmt.Println("insertUsingStruct()")
	var obj Userinfo
	obj.Username = "Test Add User"
	obj.Department = "Test Add Department"
	obj.Created = getTimeStamp()
	checkError(orm.Save(&obj))
	fmt.Printf("%+v\n", obj)
	return int64(obj.Uid)
}

// insertUsingMap inserts a row from a column-name-to-value map and returns
// the id reported by the driver.
func insertUsingMap() int64 {
	fmt.Println("insertUsingMap()")
	add := make(map[string]interface{})
	add["username"] = "astaxie"
	add["department"] = "cloud develop"
	add["created"] = getTimeStamp()
	id, err := orm.SetTable("userinfo").Insert(add)
	checkError(err)
	fmt.Println("Last row inserted id =", id)
	return id
}

// getOneUserInfo fetches the single row with the given uid.
func getOneUserInfo(id int64) Userinfo {
	fmt.Println("getOneUserInfo()")
	var obj Userinfo
	checkError(orm.Where("uid=?", id).Find(&obj))
	return obj
}

// getAllUserInfo fetches up to 10 rows whose uid is greater than id.
func getAllUserInfo(id int64) []Userinfo {
	fmt.Println("getAllUserInfo()")
	var alluser []Userinfo
	checkError(orm.Limit(10).Where("uid>?", id).FindAll(&alluser))
	return alluser
}

// updateUserinfo overwrites the row with the given uid via a struct save.
func updateUserinfo(id int64) {
	fmt.Println("updateUserinfo()")
	var obj Userinfo
	obj.Uid = int(id)
	obj.Username = "Update Username"
	obj.Department = "Update Department"
	obj.Created = getTimeStamp()
	checkError(orm.Save(&obj))
	fmt.Printf("%+v\n", obj)
}

// updateUsingMap batch-updates the username column for all rows with uid > id.
func updateUsingMap(id int64) {
	fmt.Println("updateUsingMap()")
	t := make(map[string]interface{})
	t["username"] = "updateastaxie"
	//update one
	// id, err := orm.SetTable("userinfo").SetPK("uid").Where(2).Update(t)
	//update batch
	lastId, err := orm.SetTable("userinfo").Where("uid>?", id).Update(t)
	checkError(err)
	fmt.Println("Last row updated id =", lastId)
}

// getMapsFromSelect runs a raw column-projection query and returns the rows
// as maps from column name to raw bytes.
func getMapsFromSelect(id int64) []map[string][]byte {
	fmt.Println("getMapsFromSelect()")
	//Original SQL Backinfo resultsSlice []map[string][]byte
	//default PrimaryKey id
	c, err := orm.SetTable("userinfo").SetPK("uid").Where(id).Select("uid,username").FindMap()
	checkError(err)
	fmt.Printf("%+v\n", c)
	return c
}

// groupby demonstrates a GROUP BY ... HAVING query.
func groupby() {
	fmt.Println("groupby()")
	//Original SQL Group By
	b, err := orm.SetTable("userinfo").GroupBy("username").Having("username='updateastaxie'").FindMap()
	checkError(err)
	fmt.Printf("%+v\n", b)
}

// joinTables demonstrates a LEFT JOIN between userinfo and userdetail.
func joinTables(id int64) {
	fmt.Println("joinTables()")
	//Original SQL Join Table
	a, err := orm.SetTable("userinfo").Join("LEFT", "userdetail", "userinfo.uid=userdetail.uid").Where("userinfo.uid=?", id).Select("userinfo.uid,userinfo.username,userdetail.profile").FindMap()
	checkError(err)
	fmt.Printf("%+v\n", a)
}

// deleteWithUserinfo loads the row with the given uid and deletes it via the
// struct API. Note: the ":=" below shadows the "id" parameter with the
// driver's result value.
func deleteWithUserinfo(id int64) {
	fmt.Println("deleteWithUserinfo()")
	obj := getOneUserInfo(id)
	id, err := orm.Delete(&obj)
	checkError(err)
	fmt.Println("Last row deleted id =", id)
}

// deleteRows demonstrates a raw conditional DELETE (uid > 2).
func deleteRows() {
	fmt.Println("deleteRows()")
	//original SQL delete
	id, err := orm.SetTable("userinfo").Where("uid>?", 2).DeleteRow()
	checkError(err)
	fmt.Println("Last row updated id =", id)
}

// deleteAllUserinfo deletes every row returned by getAllUserInfo(id).
func deleteAllUserinfo(id int64) {
	fmt.Println("deleteAllUserinfo()")
	//delete all data
	alluser := getAllUserInfo(id)
	id, err := orm.DeleteAll(&alluser)
	checkError(err)
	fmt.Println("Last row updated id =", id)
}

// main opens the SQLite database and walks through every CRUD demo above.
func main() {
	db, err := sql.Open("sqlite3", DB_PATH)
	checkError(err)
	orm = beedb.New(db)
	var lastIdInserted int64
	fmt.Println("Inserting")
	lastIdInserted = insertUsingStruct()
	insertUsingMap()
	a := getOneUserInfo(lastIdInserted)
	fmt.Println(a)
	b := getAllUserInfo(lastIdInserted)
	fmt.Println(b)
	fmt.Println("Updating")
	updateUserinfo(lastIdInserted)
	updateUsingMap(lastIdInserted)
	fmt.Println("Querying")
	getMapsFromSelect(lastIdInserted)
	groupby()
	joinTables(lastIdInserted)
	fmt.Println("Deleting")
	deleteWithUserinfo(lastIdInserted)
	deleteRows()
	deleteAllUserinfo(lastIdInserted)
}
| go | BSD-3-Clause | c294b087b96d99d2593ad8f470151af5354bb57f | 2026-01-07T08:35:46.175972Z | false |
astaxie/build-web-application-with-golang | https://github.com/astaxie/build-web-application-with-golang/blob/c294b087b96d99d2593ad8f470151af5354bb57f/de/code/src/apps/ch.2.1/main.go | de/code/src/apps/ch.2.1/main.go | // Example code for Chapter ? from "Build Web Application with Golang"
// Purpose: Hello world example demonstrating UTF-8 support.
// To run in the console, type `go run main.go`
// You're missing language fonts, if you're seeing squares or question marks.
package main
import "fmt"
// main prints a greeting in several scripts to demonstrate Go's native UTF-8
// source and output support.
func main() {
	fmt.Printf("Hello, world or 你好,世界 or καλημ ́ρα κóσμ or こんにちは世界\n")
}
| go | BSD-3-Clause | c294b087b96d99d2593ad8f470151af5354bb57f | 2026-01-07T08:35:46.175972Z | false |
astaxie/build-web-application-with-golang | https://github.com/astaxie/build-web-application-with-golang/blob/c294b087b96d99d2593ad8f470151af5354bb57f/de/code/src/apps/ch.4.5/main.go | de/code/src/apps/ch.4.5/main.go | // Example code for Chapter 4.5
// Purpose is to create a server to handle uploading files.
package main
import (
"apps/ch.4.4/nonce"
"apps/ch.4.4/validator"
"fmt"
"html/template"
"io"
"mime/multipart"
"net/http"
"os"
)
// MiB_UNIT is the number of bytes in one mebibyte.
const MiB_UNIT = 1 << 20

// t holds the parsed page templates ("index" and "upload"), set up in init.
var t *template.Template

// submissions tracks upload tokens so duplicate form submissions can be rejected.
var submissions nonce.Nonces = nonce.New()

// checkError aborts the request by panicking when err is non-nil.
func checkError(err error) {
	if err != nil {
		panic(err)
	}
}

// indexHandler renders the upload form with a fresh anti-resubmission token.
func indexHandler(w http.ResponseWriter, r *http.Request) {
	err := t.ExecuteTemplate(w, "index", submissions.NewToken())
	checkError(err)
}

// uploadHandler validates the submission token and, when it is fresh, saves
// the uploaded file; it then renders the "upload" page with any errors.
func uploadHandler(w http.ResponseWriter, r *http.Request) {
	var errs validator.Errors
	r.ParseMultipartForm(32 * MiB_UNIT) // buffer up to 32 MiB in memory
	token := r.Form.Get("token")
	if err := submissions.CheckThenMarkToken(token); err != nil {
		errs = validator.Errors{[]error{err}}
	} else {
		file, handler, err := r.FormFile("uploadfile")
		checkError(err)
		saveUpload(file, handler)
	}
	err := t.ExecuteTemplate(w, "upload", errs)
	checkError(err)
}

// saveUpload copies the uploaded file into ./uploads using the original file
// name plus a unique token suffix.
func saveUpload(file multipart.File, handler *multipart.FileHeader) {
	defer file.Close()
	fmt.Printf("Uploaded file info: %#v", handler.Header)
	localFilename := fmt.Sprintf("./uploads/%v.%v", handler.Filename, submissions.NewToken())
	f, err := os.OpenFile(localFilename, os.O_WRONLY|os.O_CREATE, 0666)
	checkError(err)
	defer f.Close()
	_, err = io.Copy(f, file)
	checkError(err)
}

// init parses the templates once at startup.
func init() {
	var err error
	t, err = template.ParseFiles("index.gtpl", "upload.gtpl")
	checkError(err)
}

// main registers the handlers and serves HTTP on port 9090.
func main() {
	http.HandleFunc("/", indexHandler)
	http.HandleFunc("/upload", uploadHandler)
	err := http.ListenAndServe(":9090", nil)
	checkError(err)
}
| go | BSD-3-Clause | c294b087b96d99d2593ad8f470151af5354bb57f | 2026-01-07T08:35:46.175972Z | false |
astaxie/build-web-application-with-golang | https://github.com/astaxie/build-web-application-with-golang/blob/c294b087b96d99d2593ad8f470151af5354bb57f/de/code/src/apps/ch.4.5/nonce/main.go | de/code/src/apps/ch.4.5/nonce/main.go | // A nonce is a number or string used only once.
// This is useful for generating a unique token for login pages to prevent duplicate submissions.
package nonce
import (
"crypto/md5"
"errors"
"fmt"
"io"
"math/rand"
"strconv"
"time"
)
// Nonce wraps a single-use token.
type Nonce struct {
	Token string
}

// Nonces records which tokens have already been used.
type Nonces struct {
	hashs map[string]bool
}

// New returns an empty token registry.
func New() Nonces {
	return Nonces{hashs: make(map[string]bool)}
}

// NewNonce returns a Nonce holding a fresh, not-yet-marked token.
func (n *Nonces) NewNonce() Nonce {
	return Nonce{Token: n.NewToken()}
}

// NewToken generates tokens until one is found that has not been marked yet.
func (n *Nonces) NewToken() string {
	for {
		if t := createToken(); !n.HasToken(t) {
			return t
		}
	}
}

// HasToken reports whether token has been marked as used.
func (n *Nonces) HasToken(token string) bool {
	return n.hashs[token]
}

// MarkToken records token as used.
func (n *Nonces) MarkToken(token string) {
	n.hashs[token] = true
}

// CheckToken fails for empty or already-used tokens and accepts the rest.
func (n *Nonces) CheckToken(token string) error {
	switch {
	case token == "":
		return errors.New("No token supplied")
	case n.HasToken(token):
		return errors.New("Duplicate submission.")
	}
	return nil
}

// CheckThenMarkToken validates token and then marks it as used regardless of
// the validation outcome (the deferred mark runs even on failure).
func (n *Nonces) CheckThenMarkToken(token string) error {
	defer n.MarkToken(token)
	return n.CheckToken(token)
}

// createToken hashes the current Unix time plus a random number into a
// 32-character MD5 hex string.
func createToken() string {
	h := md5.New()
	io.WriteString(h, strconv.FormatInt(time.Now().Unix(), 10))
	io.WriteString(h, strconv.FormatInt(rand.Int63(), 10))
	return fmt.Sprintf("%x", h.Sum(nil))
}
| go | BSD-3-Clause | c294b087b96d99d2593ad8f470151af5354bb57f | 2026-01-07T08:35:46.175972Z | false |
astaxie/build-web-application-with-golang | https://github.com/astaxie/build-web-application-with-golang/blob/c294b087b96d99d2593ad8f470151af5354bb57f/de/code/src/apps/ch.4.5/validator/main.go | de/code/src/apps/ch.4.5/validator/main.go | // This file contains all the validators to validate the profile page.
package validator
import (
"errors"
"fmt"
"net/url"
"regexp"
"strconv"
"strings"
"time"
)
// ProfilePage wraps the submitted profile form values for validation.
type ProfilePage struct {
	Form *url.Values
}

// Errors collects every validation failure found in one submission.
type Errors struct {
	Errors []error
}

// Goes through the form object and validates each element.
// Attachs an error to the output if validation fails. Each field name is
// looked up first in stringValidator (single-valued fields) and then in
// stringsValidator (multi-valued fields); unknown fields are ignored.
func (p *ProfilePage) GetErrors() Errors {
	errs := make([]error, 0, 10)
	if *p.Form == nil || len(*p.Form) < 1 {
		errs = append(errs, errors.New("No data was received. Please submit from the profile page."))
	}
	for name, val := range *p.Form {
		if fn, ok := stringValidator[name]; ok {
			if err := fn(strings.Join(val, "")); err != nil {
				errs = append(errs, err)
			}
		} else {
			if fn, ok := stringsValidator[name]; ok {
				if err := fn(val); err != nil {
					errs = append(errs, err)
				}
			}
		}
	}
	return Errors{errs}
}

const (
	// Used for parsing the time
	mmddyyyyForm = "01/02/2006" // we want the date sent in this format
	yyyymmddForm = "2006-01-02" // However, HTML5 pages send the date in this format
)

// stringValidator dispatches single-valued form fields to their validators.
var stringValidator map[string]func(string) error = map[string]func(string) error{
	// parameter name : validator reference
	"age":         checkAge,
	"birthday":    checkDate,
	"chineseName": checkChineseName,
	"email":       checkEmail,
	"gender":      checkGender,
	"shirtsize":   checkShirtSize,
	"username":    checkUsername,
}

// stringsValidator dispatches multi-valued form fields to their validators.
var stringsValidator map[string]func([]string) error = map[string]func([]string) error{
	// parameter name : validator reference
	"sibling": checkSibling,
}
// doSlicesIntersect reports whether s1 and s2 share at least one element.
// Nil slices never intersect anything.
func doSlicesIntersect(s1, s2 []string) bool {
	if s1 == nil || s2 == nil {
		return false
	}
	for _, candidate := range s1 {
		if isElementInSlice(candidate, s2) {
			return true
		}
	}
	return false
}

// isElementInSlice reports whether str occurs in sl. Empty strings and nil
// slices never match.
func isElementInSlice(str string, sl []string) bool {
	if sl == nil || str == "" {
		return false
	}
	for _, elem := range sl {
		if elem == str {
			return true
		}
	}
	return false
}
// chineseNameRE matches strings made up entirely of CJK unified ideographs.
// Compiled once at package init: the original recompiled it on every call and
// silently discarded the compile error; MustCompile would instead fail loudly
// at startup if the pattern were ever broken.
var chineseNameRE = regexp.MustCompile(`^[\x{4e00}-\x{9fa5}]+$`)

// checkChineseName verifies that a non-empty name consists only of Chinese
// characters (after trimming surrounding spaces). Empty input is accepted.
func checkChineseName(str string) error {
	if str == "" {
		return nil
	}
	if !chineseNameRE.MatchString(strings.Trim(str, " ")) {
		return errors.New("Please make sure that the chinese name only contains chinese characters.")
	}
	return nil
}
// checkUsername requires a username that is not empty after trimming spaces.
func checkUsername(str string) error {
	trimmed := strings.Trim(str, " ")
	if trimmed == "" {
		return errors.New("Please enter a username.")
	}
	return nil
}
// checkAge requires a numeric age in the inclusive range [13, 130].
func checkAge(str string) error {
	age, err := strconv.Atoi(str)
	switch {
	case str == "" || err != nil:
		return errors.New("Please enter a valid age.")
	case age < 13:
		return errors.New("You must be at least 13 years of age to submit.")
	case age > 130:
		return errors.New("You're too old to register, grandpa.")
	}
	return nil
}
// checkEmail validates that str looks like an email address: exactly one "@"
// with non-empty text on both sides. The empty string is rejected too.
func checkEmail(str string) error {
	matched, err := regexp.MatchString(`^[^@]+@[^@]+$`, str)
	if err != nil || !matched {
		// Fix: removed a stray debug fmt.Println that leaked to stdout on
		// every invalid address; a MatchString error (only possible for a
		// bad pattern) is now treated as a validation failure as well.
		return fmt.Errorf("Please enter a valid email address.")
	}
	return nil
}
// checkDate validates that str parses as MM/DD/YYYY or as the HTML5
// YYYY-MM-DD layout. An empty string is rejected (it parses as neither).
func checkDate(str string) error {
	if _, err := time.Parse(mmddyyyyForm, str); err == nil {
		return nil
	}
	if _, err := time.Parse(yyyymmddForm, str); err == nil {
		return nil
	}
	return errors.New("Please enter a valid Date.")
}
// checkGender validates the gender selection. An empty value is allowed;
// otherwise it must be one of the known options "m", "f" or "na".
func checkGender(str string) error {
	if str == "" {
		return nil
	}
	// Fix: the options were previously held in a slice misnamed "siblings"
	// (an apparent copy/paste from checkSibling); the membership test is now
	// a direct switch.
	switch str {
	case "m", "f", "na":
		return nil
	}
	return errors.New("Please select a valid gender.")
}
// checkSibling validates the sibling multi-select. Missing/empty input is
// allowed; otherwise at least one submitted value must be a known option
// ("m" or "f").
func checkSibling(strs []string) error {
	// len(nil) == 0, so the previous explicit "strs == nil" test was
	// redundant; so was the nil check on a freshly built slice literal.
	if len(strs) < 1 {
		return nil
	}
	for _, v := range strs {
		if v == "m" || v == "f" {
			return nil
		}
	}
	return errors.New("Please select a valid sibling")
}
// checkShirtSize validates the shirt-size selection. An empty value is
// allowed; otherwise it must be one of s, m, l, xl, xxl.
func checkShirtSize(str string) error {
	if str == "" {
		return nil
	}
	for _, size := range []string{"s", "m", "l", "xl", "xxl"} {
		if size == str {
			return nil
		}
	}
	return errors.New("Please select a valid shirt size")
}
| go | BSD-3-Clause | c294b087b96d99d2593ad8f470151af5354bb57f | 2026-01-07T08:35:46.175972Z | false |
astaxie/build-web-application-with-golang | https://github.com/astaxie/build-web-application-with-golang/blob/c294b087b96d99d2593ad8f470151af5354bb57f/de/code/src/apps/ch.4.5/client_upload/main.go | de/code/src/apps/ch.4.5/client_upload/main.go | package main
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"mime/multipart"
"net/http"
"os"
)
// checkError aborts the program by panicking when err is non-nil.
func checkError(err error) {
	if err == nil {
		return
	}
	panic(err)
}
// postFile uploads the file at filename to targetUrl as a multipart/form-data
// POST (form field "uploadfile") and prints the server's status line and
// response body. Any failure along the way panics via checkError.
func postFile(filename string, targetUrl string) {
	bodyBuf := &bytes.Buffer{}
	bodyWriter := multipart.NewWriter(bodyBuf)

	fileWriter, err := bodyWriter.CreateFormFile("uploadfile", filename)
	checkError(err)

	fh, err := os.Open(filename)
	checkError(err)
	// Fix: the opened file was previously never closed (descriptor leak).
	defer fh.Close()

	_, err = io.Copy(fileWriter, fh)
	checkError(err)

	// The multipart writer must be closed before posting so the terminating
	// boundary is written into bodyBuf.
	contentType := bodyWriter.FormDataContentType()
	bodyWriter.Close()

	resp, err := http.Post(targetUrl, contentType, bodyBuf)
	checkError(err)
	defer resp.Body.Close()
	respBody, err := ioutil.ReadAll(resp.Body)
	checkError(err)
	fmt.Println(resp.Status)
	fmt.Println(string(respBody))
}
// main uploads ../file.txt to the local upload server's /upload endpoint.
func main() {
	// Idiom fix: locals renamed from snake_case (target_url) to Go's
	// mixedCaps convention.
	targetURL := "http://localhost:9090/upload"
	filename := "../file.txt"
	postFile(filename, targetURL)
}
| go | BSD-3-Clause | c294b087b96d99d2593ad8f470151af5354bb57f | 2026-01-07T08:35:46.175972Z | false |
astaxie/build-web-application-with-golang | https://github.com/astaxie/build-web-application-with-golang/blob/c294b087b96d99d2593ad8f470151af5354bb57f/de/code/src/apps/ch.5.3/main.go | de/code/src/apps/ch.5.3/main.go | // Example code for Chapter 5.3 from "Build Web Application with Golang"
// Purpose: Shows how to run simple CRUD operations using a sqlite driver
package main
import (
"database/sql"
"fmt"
_ "github.com/mattn/go-sqlite3"
"time"
)
// DB_PATH locates the SQLite database file.
// NOTE(review): assumes foo.db already exists with a userinfo table — confirm.
const DB_PATH = "./foo.db"
// main demonstrates basic CRUD against a local SQLite database via the
// mattn/go-sqlite3 driver: insert one row into userinfo, update it, list
// every row, then delete it. All errors abort via checkErr (panic).
// NOTE(review): assumes userinfo(uid, username, department, created) already
// exists in foo.db — confirm before running.
func main() {
	db, err := sql.Open("sqlite3", DB_PATH)
	checkErr(err)
	defer db.Close()

	// INSERT: created is stored as a YYYY-MM-DD string.
	fmt.Println("Inserting")
	stmt, err := db.Prepare("INSERT INTO userinfo(username, department, created) values(?,?,?)")
	checkErr(err)
	res, err := stmt.Exec("astaxie", "software developement", time.Now().Format("2006-01-02"))
	checkErr(err)
	id, err := res.LastInsertId()
	checkErr(err)
	fmt.Println("id of last inserted row =", id)

	// UPDATE the row we just inserted, keyed by its rowid.
	fmt.Println("Updating")
	stmt, err = db.Prepare("update userinfo set username=? where uid=?")
	checkErr(err)
	res, err = stmt.Exec("astaxieupdate", id)
	checkErr(err)
	affect, err := res.RowsAffected()
	checkErr(err)
	fmt.Println(affect, "row(s) changed")

	// SELECT every row; the header line is (re)printed per row as-is.
	fmt.Println("Querying")
	rows, err := db.Query("SELECT * FROM userinfo")
	checkErr(err)
	for rows.Next() {
		var uid int
		var username, department, created string
		err = rows.Scan(&uid, &username, &department, &created)
		checkErr(err)
		fmt.Println("uid | username | department | created")
		fmt.Printf("%3v | %6v | %8v | %6v\n", uid, username, department, created)
	}

	// DELETE the demo row again so the database is left unchanged.
	fmt.Println("Deleting")
	stmt, err = db.Prepare("delete from userinfo where uid=?")
	checkErr(err)
	res, err = stmt.Exec(id)
	checkErr(err)
	affect, err = res.RowsAffected()
	checkErr(err)
	fmt.Println(affect, "row(s) changed")
}
// checkErr panics on any non-nil error, aborting the demo immediately.
func checkErr(err error) {
	if err == nil {
		return
	}
	panic(err)
}
| go | BSD-3-Clause | c294b087b96d99d2593ad8f470151af5354bb57f | 2026-01-07T08:35:46.175972Z | false |
astaxie/build-web-application-with-golang | https://github.com/astaxie/build-web-application-with-golang/blob/c294b087b96d99d2593ad8f470151af5354bb57f/de/code/src/apps/ch.4.4/main.go | de/code/src/apps/ch.4.4/main.go | // Example code for Chapter 3.2 from "Build Web Application with Golang"
// Purpose: Shows how to prevent duplicate submissions by using tokens
// Example code for Chapter 4.4 based off the code from Chapter 4.2
// Run `go run main.go` then access http://localhost:9090
package main
import (
"apps/ch.4.4/nonce"
"apps/ch.4.4/validator"
"html/template"
"log"
"net/http"
)
const (
	// PORT is the TCP port the server listens on.
	PORT = "9090"
	// HOST_URL is the externally visible base URL, used for redirects.
	HOST_URL = "http://localhost:" + PORT
)
// submissions tracks issued/used one-time form tokens (see init).
// NOTE(review): nonce.Nonces wraps a plain map mutated from HTTP handlers;
// concurrent requests would race — confirm and add locking if needed.
var submissions nonce.Nonces

// t holds the parsed profile/submission templates (see init).
var t *template.Template
// index redirects every request for "/" to the profile form.
func index(w http.ResponseWriter, r *http.Request) {
	http.Redirect(w, r, HOST_URL+"/profile", http.StatusTemporaryRedirect)
}
// profileHandler renders the profile form, embedding a fresh one-time token
// so checkProfile can reject duplicate submissions.
func profileHandler(w http.ResponseWriter, r *http.Request) {
	t.ExecuteTemplate(w, "profile", submissions.NewNonce())
}
// checkProfile handles a profile-form submission. The embedded one-time
// token is checked (and marked used) first; only a fresh token proceeds to
// field validation. Collected errors are rendered via the "submission"
// template either way.
func checkProfile(w http.ResponseWriter, r *http.Request) {
	var errs validator.Errors
	r.ParseForm()
	token := r.Form.Get("token")
	if err := submissions.CheckThenMarkToken(token); err != nil {
		// Missing or duplicate token: report only that error.
		errs = validator.Errors{[]error{err}}
	} else {
		p := validator.ProfilePage{&r.Form}
		errs = p.GetErrors()
	}
	t.ExecuteTemplate(w, "submission", errs)
}
// init wires up package state: the nonce registry and the parsed templates.
// template.Must panics if the .gtpl files are missing, aborting startup —
// intentional fail-fast behavior.
func init() {
	submissions = nonce.New()
	t = template.Must(template.ParseFiles("profile.gtpl", "submission.gtpl"))
}
// main registers the three HTTP handlers and serves on PORT until
// ListenAndServe returns a fatal error.
func main() {
	http.HandleFunc("/", index)
	http.HandleFunc("/profile", profileHandler)
	http.HandleFunc("/checkprofile", checkProfile)
	err := http.ListenAndServe(":"+PORT, nil) // setting listening port
	if err != nil {
		log.Fatal("ListenAndServe: ", err)
	}
}
| go | BSD-3-Clause | c294b087b96d99d2593ad8f470151af5354bb57f | 2026-01-07T08:35:46.175972Z | false |
astaxie/build-web-application-with-golang | https://github.com/astaxie/build-web-application-with-golang/blob/c294b087b96d99d2593ad8f470151af5354bb57f/de/code/src/apps/ch.4.4/nonce/main.go | de/code/src/apps/ch.4.4/nonce/main.go | // A nonce is a number or string used only once.
// This is useful for generating a unique token for login pages to prevent duplicate submissions.
package nonce
import (
"crypto/md5"
"errors"
"fmt"
"io"
"math/rand"
"strconv"
"time"
)
// Nonce carries a single one-time token, ready to be embedded in a form.
type Nonce struct {
	Token string
}
// Nonces keeps track of marked/used tokens.
// NOTE(review): the underlying map is unsynchronized; concurrent use from
// multiple goroutines (e.g. HTTP handlers) would be a data race — confirm.
type Nonces struct {
	hashs map[string]bool
}
// New returns an empty, ready-to-use token registry.
func New() Nonces {
	registry := Nonces{hashs: map[string]bool{}}
	return registry
}
// NewNonce wraps a freshly generated unique token in a Nonce value.
func (n *Nonces) NewNonce() Nonce {
	return Nonce{Token: n.NewToken()}
}
// NewToken generates candidate tokens until it finds one that is not already
// tracked, and returns it. The token is not marked as used here.
func (n *Nonces) NewToken() string {
	for {
		if candidate := createToken(); !n.HasToken(candidate) {
			return candidate
		}
	}
}
// HasToken reports whether token has been marked as used.
func (n *Nonces) HasToken(token string) bool {
	// Idiom fix: the map value is already a bool, so comparing "== true"
	// was redundant; a missing key yields false.
	return n.hashs[token]
}
// MarkToken records token as used.
func (n *Nonces) MarkToken(token string) {
	n.hashs[token] = true
}
// CheckToken returns an error when token is empty or already used,
// and nil when it is fresh.
func (n *Nonces) CheckToken(token string) error {
	switch {
	case token == "":
		return errors.New("No token supplied")
	case n.HasToken(token):
		return errors.New("Duplicate submission.")
	}
	return nil
}
// CheckThenMarkToken validates token, then marks it as used regardless of
// the outcome: the deferred MarkToken runs even on the error path, so even
// empty/duplicate tokens end up recorded (preserved original behavior).
func (n *Nonces) CheckThenMarkToken(token string) error {
	defer n.MarkToken(token)
	// Idiom fix: collapsed the redundant
	// "if err := ...; err != nil { return err }; return nil" wrapper.
	return n.CheckToken(token)
}
// createToken derives a 32-character hex token from the current Unix time
// plus a pseudo-random number.
// NOTE(review): md5 + math/rand is adequate for duplicate-submission nonces
// but NOT cryptographically secure; do not reuse for auth/session tokens.
func createToken() string {
	digest := md5.New()
	io.WriteString(digest, strconv.FormatInt(time.Now().Unix(), 10))
	io.WriteString(digest, strconv.FormatInt(rand.Int63(), 10))
	return fmt.Sprintf("%x", digest.Sum(nil))
}
| go | BSD-3-Clause | c294b087b96d99d2593ad8f470151af5354bb57f | 2026-01-07T08:35:46.175972Z | false |
astaxie/build-web-application-with-golang | https://github.com/astaxie/build-web-application-with-golang/blob/c294b087b96d99d2593ad8f470151af5354bb57f/de/code/src/apps/ch.4.4/validator/main.go | de/code/src/apps/ch.4.4/validator/main.go | // This file contains all the validators to validate the profile page.
package validator
import (
"errors"
"fmt"
"net/url"
"regexp"
"strconv"
"strings"
"time"
)
// ProfilePage wraps the submitted profile form values for validation.
type ProfilePage struct {
	Form *url.Values
}
// Errors collects all validation failures from one form submission.
type Errors struct {
	Errors []error
}
// GetErrors runs every registered validator against the form and returns the
// collected failures. Single-valued parameters dispatch through
// stringValidator; multi-valued ones (e.g. checkbox groups) through
// stringsValidator. Parameter names with no registered validator are
// silently ignored. An empty or nil form yields a single "no data" error.
func (p *ProfilePage) GetErrors() Errors {
	errs := make([]error, 0, 10)
	if *p.Form == nil || len(*p.Form) < 1 {
		errs = append(errs, errors.New("No data was received. Please submit from the profile page."))
	}
	for name, val := range *p.Form {
		if fn, ok := stringValidator[name]; ok {
			// Join collapses the value slice; these validators expect one string.
			if err := fn(strings.Join(val, "")); err != nil {
				errs = append(errs, err)
			}
		} else {
			if fn, ok := stringsValidator[name]; ok {
				if err := fn(val); err != nil {
					errs = append(errs, err)
				}
			}
		}
	}
	return Errors{errs}
}
const (
	// Accepted date layouts for birthday parsing (see checkDate); both use
	// Go's reference time 2006-01-02 15:04:05.
	mmddyyyyForm = "01/02/2006" // we want the date sent in this format
	yyyymmddForm = "2006-01-02" // However, HTML5 pages send the date in this format
)
// stringValidator maps single-valued form parameter names to the function
// that validates that field. Used by ProfilePage.GetErrors.
var stringValidator map[string]func(string) error = map[string]func(string) error{
	// parameter name : validator reference
	"age":         checkAge,
	"birthday":    checkDate,
	"chineseName": checkChineseName,
	"email":       checkEmail,
	"gender":      checkGender,
	"shirtsize":   checkShirtSize,
	"username":    checkUsername,
}
// stringsValidator maps multi-valued form parameters (e.g. checkbox groups)
// to their validating function. Used by ProfilePage.GetErrors.
var stringsValidator map[string]func([]string) error = map[string]func([]string) error{
	// parameter name : validator reference
	"sibling": checkSibling,
}
// doSlicesIntersect reports whether s1 and s2 share a common element.
// Nil slices intersect nothing, and — matching isElementInSlice — the empty
// string is never considered a match.
func doSlicesIntersect(s1, s2 []string) bool {
	if s1 == nil || s2 == nil {
		return false
	}
	for _, left := range s1 {
		if left == "" {
			continue
		}
		for _, right := range s2 {
			if left == right {
				return true
			}
		}
	}
	return false
}
// isElementInSlice reports whether str occurs in sl. The empty string is
// never considered present, and a nil slice contains nothing.
func isElementInSlice(str string, sl []string) bool {
	if str == "" || sl == nil {
		return false
	}
	for _, elem := range sl {
		if elem == str {
			return true
		}
	}
	return false
}
// checkChineseName verifies that str (after trimming spaces) consists solely
// of characters in the CJK range U+4E00..U+9FA5. Empty input is accepted.
func checkChineseName(str string) error {
	if str == "" {
		return nil
	}
	candidate := strings.Trim(str, " ")
	if ok, _ := regexp.MatchString("^[\\x{4e00}-\\x{9fa5}]+$", candidate); ok {
		return nil
	}
	return errors.New("Please make sure that the chinese name only contains chinese characters.")
}
// checkUsername requires a non-blank username (spaces alone do not count).
func checkUsername(str string) error {
	if len(strings.Trim(str, " ")) > 0 {
		return nil
	}
	return errors.New("Please enter a username.")
}
// checkAge validates that str is an integer between 13 and 130 inclusive.
func checkAge(str string) error {
	age, convErr := strconv.Atoi(str)
	switch {
	case str == "" || convErr != nil:
		return errors.New("Please enter a valid age.")
	case age < 13:
		return errors.New("You must be at least 13 years of age to submit.")
	case age > 130:
		return errors.New("You're too old to register, grandpa.")
	}
	return nil
}
// checkEmail validates that str looks like an email address: exactly one "@"
// with non-empty text on both sides. The empty string is rejected too.
func checkEmail(str string) error {
	matched, err := regexp.MatchString(`^[^@]+@[^@]+$`, str)
	if err != nil || !matched {
		// Fix: removed a stray debug fmt.Println that leaked to stdout on
		// every invalid address; a MatchString error (only possible for a
		// bad pattern) is now treated as a validation failure as well.
		return fmt.Errorf("Please enter a valid email address.")
	}
	return nil
}
// Checks if a valid date was passed.
func checkDate(str string) error {
_, err := time.Parse(mmddyyyyForm, str)
if err != nil {
_, err = time.Parse(yyyymmddForm, str)
}
if str == "" || err != nil {
return errors.New("Please enter a valid Date.")
}
return nil
}
// Checks if the passed input is a known gender option
func checkGender(str string) error {
if str == "" {
return nil
}
siblings := []string{"m", "f", "na"}
if !isElementInSlice(str, siblings) {
return errors.New("Please select a valid gender.")
}
return nil
}
// checkSibling validates the sibling multi-select. Missing/empty input is
// allowed; otherwise at least one submitted value must be a known option
// ("m" or "f").
func checkSibling(strs []string) error {
	// len(nil) == 0, so the previous explicit "strs == nil" test was
	// redundant; so was the nil check on a freshly built slice literal.
	if len(strs) < 1 {
		return nil
	}
	for _, v := range strs {
		if v == "m" || v == "f" {
			return nil
		}
	}
	return errors.New("Please select a valid sibling")
}
// checkShirtSize validates the shirt-size selection. An empty value is
// allowed; otherwise it must be one of s, m, l, xl, xxl.
func checkShirtSize(str string) error {
	if str == "" {
		return nil
	}
	for _, known := range []string{"s", "m", "l", "xl", "xxl"} {
		if known == str {
			return nil
		}
	}
	return errors.New("Please select a valid shirt size")
}
| go | BSD-3-Clause | c294b087b96d99d2593ad8f470151af5354bb57f | 2026-01-07T08:35:46.175972Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.