diff --git a/go.mod b/go.mod index 079c9a80a0..de19c9805c 100644 --- a/go.mod +++ b/go.mod @@ -24,7 +24,9 @@ require ( github.com/dgrijalva/jwt-go v3.2.0+incompatible github.com/dgryski/dgoogauth v0.0.0-20190221195224-5a805980a5f3 github.com/disintegration/imaging v1.6.0 + github.com/dop251/goja v0.0.0-20210726224656-a55e4cfac4cf github.com/edwvee/exiffix v0.0.0-20180602190213-b57537c92a6b + github.com/evanw/esbuild v0.12.16 github.com/fastly/go-utils v0.0.0-20180712184237-d95a45783239 // indirect github.com/fsnotify/fsnotify v1.4.9 github.com/gabriel-vasile/mimetype v1.1.2 @@ -80,12 +82,10 @@ require ( google.golang.org/grpc v1.32.0 google.golang.org/protobuf v1.25.0 gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect - gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect gopkg.in/ini.v1 v1.51.0 // indirect gopkg.in/mail.v2 v2.3.1 gopkg.in/square/go-jose.v2 v2.3.1 // indirect gopkg.in/urfave/cli.v1 v1.20.0 // indirect - gopkg.in/yaml.v2 v2.3.0 gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 rsc.io/qr v0.2.0 ) diff --git a/go.sum b/go.sum index 61bb3031cf..e1a33a2bcf 100644 --- a/go.sum +++ b/go.sum @@ -96,6 +96,7 @@ github.com/codegangsta/envy v0.0.0-20141216192214-4b78388c8ce4 h1:ihrIKrLQzm6Q6N github.com/codegangsta/envy v0.0.0-20141216192214-4b78388c8ce4/go.mod h1:X7wHz0C25Lga6CnJ4WAQNbUQ9P/8eWSNv8qIO71YkSM= github.com/codegangsta/gin v0.0.0-20171026143024-cafe2ce98974 h1:ysuVNDVE4LIky6I+6JlgAKG+wBNKMpVv3m3neVpvFVw= github.com/codegangsta/gin v0.0.0-20171026143024-cafe2ce98974/go.mod h1:UBYuwaH3dMw91EZ7tGVaFF6GDj5j46S7zqB9lZPIe58= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/crewjam/httperr v0.0.0-20190612203328-a946449404da h1:WXnT88cFG2davqSFqvaFfzkSMC0lqh/8/rKZ+z7tYvI= github.com/crewjam/httperr v0.0.0-20190612203328-a946449404da/go.mod h1:+rmNIXRvYMqLQeR4DHyTvs6y0MEMymTz4vyFpFkKTPs= github.com/crewjam/saml v0.4.5 
h1:H9u+6CZAESUKHxMyxUbVn0IawYvKZn4nt3d4ccV4O/M= @@ -115,6 +116,11 @@ github.com/dgryski/dgoogauth v0.0.0-20190221195224-5a805980a5f3/go.mod h1:hEfFau github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/disintegration/imaging v1.6.0 h1:nVPXRUUQ36Z7MNf0O77UzgnOb1mkMMor7lmJMJXc/mA= github.com/disintegration/imaging v1.6.0/go.mod h1:xuIt+sRxDFrHS0drzXUlCJthkJ8k7lkkUojDSR247MQ= +github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91 h1:Izz0+t1Z5nI16/II7vuEo/nHjodOg0p7+OiDpjX5t1E= +github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= +github.com/dop251/goja v0.0.0-20210726224656-a55e4cfac4cf h1:eK64KqjIBLpCtzIbzciHtJ3Al9t3PSVYKJiaB6E4bB8= +github.com/dop251/goja v0.0.0-20210726224656-a55e4cfac4cf/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= +github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/edwvee/exiffix v0.0.0-20180602190213-b57537c92a6b h1:6CBzNasH8+bKeFwr5Bt5JtALHLFN4iQp7sf4ShlP/ik= github.com/edwvee/exiffix v0.0.0-20180602190213-b57537c92a6b/go.mod h1:KoE3Ti1qbQXCb3s/XGj0yApHnbnNnn1bXTtB5Auq/Vc= @@ -122,6 +128,8 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanw/esbuild v0.12.16 h1:UxvizOzRZk0gnlal2g2MulpCjIiAPtciLr674nOKtcI= +github.com/evanw/esbuild v0.12.16/go.mod h1:y2AFBAGVelPqPodpdtxWWqe6n2jYf5FrsJbligmRmuw= github.com/fasthttp-contrib/websocket 
v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= github.com/fastly/go-utils v0.0.0-20180712184237-d95a45783239 h1:Ghm4eQYC0nEPnSJdVkTrXpu9KtoVCSo1hg7mtI7G9KU= github.com/fastly/go-utils v0.0.0-20180712184237-d95a45783239/go.mod h1:Gdwt2ce0yfBxPvZrHkprdPPTTS3N5rwmLE8T22KBXlw= @@ -157,6 +165,8 @@ github.com/go-oauth2/oauth2/v4 v4.2.0/go.mod h1:+rsyi0o/ZbSfhL/3Xr/sAtL4brS+IdGj github.com/go-oauth2/oauth2/v4 v4.3.0 h1:vp4goUmrq1YaPzpm34FDLlZiAkIqK3LsuNTTRyTnPbo= github.com/go-oauth2/oauth2/v4 v4.3.0/go.mod h1:+rsyi0o/ZbSfhL/3Xr/sAtL4brS+IdGj86PHVlPjE+4= github.com/go-session/session v3.1.2+incompatible/go.mod h1:8B3iivBQjrz/JtC68Np2T1yBBLxTan3mn/3OM0CyRt0= +github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU= +github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= @@ -275,6 +285,7 @@ github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfn github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw= github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk= @@ -572,6 +583,7 @@ golang.org/x/sys 
v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501145240-bc7a7d42d5c3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -737,6 +749,7 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= @@ -754,6 +767,7 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= 
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/pkg/jsenv/func.go b/pkg/jsenv/func.go new file mode 100644 index 0000000000..f8bb376332 --- /dev/null +++ b/pkg/jsenv/func.go @@ -0,0 +1,23 @@ +package jsenv + +import ( + "fmt" + + "github.com/dop251/goja" +) + +type ( + fn struct { + f goja.Callable + } +) + +func (f fn) Exec(i ...goja.Value) (interface{}, error) { + ret, err := f.f(goja.Undefined(), i...) + + if err != nil { + return nil, fmt.Errorf("could not run function: %s", err) + } + + return ret.Export(), nil +} diff --git a/pkg/jsenv/scope.go b/pkg/jsenv/scope.go new file mode 100644 index 0000000000..26f8c15ff0 --- /dev/null +++ b/pkg/jsenv/scope.go @@ -0,0 +1,17 @@ +package jsenv + +type ( + globalScope map[string]interface{} +) + +func (gs globalScope) Set(k string, i interface{}) { + gs[k] = i +} + +func (gs globalScope) Get(k string) interface{} { + if v, ok := gs[k]; ok { + return v + } + + return nil +} diff --git a/pkg/jsenv/transformer.go b/pkg/jsenv/transformer.go new file mode 100644 index 0000000000..a4df281b66 --- /dev/null +++ b/pkg/jsenv/transformer.go @@ -0,0 +1,64 @@ +package jsenv + +import ( + "fmt" + + "github.com/evanw/esbuild/pkg/api" +) + +const ( + // limiting the support for loaders and targets + LoaderJS TransformLoader = TransformLoader(api.LoaderJS) + LoaderJSX TransformLoader = TransformLoader(api.LoaderJSX) + LoaderTS TransformLoader = TransformLoader(api.LoaderTS) + + TargetNoop TransformTarget = 0 + TargetES5 TransformTarget = TransformTarget(api.ES5) + TargetES2016 TransformTarget = TransformTarget(api.ES2016) +) + +type ( + TransformLoader uint8 + TransformTarget uint8 + + t struct { + ldr TransformLoader + tr TransformTarget + } + + noop struct{} + 
+ Transformer interface { + Transform(string) ([]byte, error) + } +) + +func NewTransformer(loader TransformLoader, target TransformTarget) Transformer { + if target == TargetNoop { + return &noop{} + } + + return &t{ + ldr: loader, + tr: target, + } +} + +// Transform uses the loaders and targets and transpiles +func (tt t) Transform(p string) (b []byte, err error) { + result := api.Transform(p, api.TransformOptions{ + Loader: api.Loader(tt.ldr), + Target: api.Target(tt.tr), + }) + + if len(result.Errors) > 0 { + return []byte{}, fmt.Errorf("could not transform payload: %s", result.Errors[0].Text) + } + + return result.Code, nil +} + +// Fallback transform that keeps the original intact +func (tt noop) Transform(p string) ([]byte, error) { + return []byte(p), nil +} diff --git a/pkg/jsenv/vm.go b/pkg/jsenv/vm.go new file mode 100644 index 0000000000..45db449063 --- /dev/null +++ b/pkg/jsenv/vm.go @@ -0,0 +1,113 @@ +package jsenv + +import ( + "crypto/md5" + "encoding/hex" + "errors" + "fmt" + + "github.com/dop251/goja" +) + +const ( + // placeholder for functions in js env + exportFuncDescriptor = "_expFunc_" +) + +type ( + Vm struct { + g *goja.Runtime + t Transformer + } +) + +func New(t Transformer) Vm { + return Vm{ + g: goja.New(), + t: t, + } +} + +// New creates a goja internal type +func (ss Vm) New(i interface{}) goja.Value { + return ss.g.ToValue(i) +} + +// Register the value in vm +func (ss Vm) Register(key string, i interface{}) error { + return ss.g.Set(key, i) +} + +// Fetch value from vm +func (ss Vm) Fetch(key string) goja.Value { + return ss.g.Get(key) +} + +// RegisterFunction registers the function to the vm and returns the +// function that can be used in go +func (ss Vm) RegisterFunction(s string, wrapperFn ...func() string) (f *fn, err error) { + + if len(wrapperFn) > 0 { + for _, wfn := range wrapperFn { + s = fmt.Sprintf(wfn(), s) + } + } else { + s = fmt.Sprintf("function (input) { %s }", s) + } + + desc := ss.funcDescriptor(s) + run := 
fmt.Sprintf("var %s=%s;", desc, s) + + err = ss.Eval(run) + + if err != nil { + return + } + + internalF := ss.Fetch(desc) + + if internalF == nil { + err = errors.New("could not fetch registered value") + return + } + + fnn, ok := goja.AssertFunction(internalF) + + if !ok { + err = errors.New("could not assert function") + return + } + + return &fn{ + f: fnn, + }, nil +} + +// Eval transforms the input js to the specified +// version and evals in vm +func (ss Vm) Eval(p string) (err error) { + tr, err := ss.t.Transform(p) + + if err != nil { + return + } + + _, err = ss.g.RunString(string(tr)) + return +} + +// Compile is used only when parsing the +// input evaluation without actually running it +func (ss Vm) Compile(p string) (err error) { + _, err = goja.Parse("", p) + return +} + +func (ss Vm) genID(s string) string { + h := md5.Sum([]byte(s)) + return hex.EncodeToString(h[:]) +} + +func (ss Vm) funcDescriptor(s string) string { + return fmt.Sprintf("%s%s", exportFuncDescriptor, ss.genID(s)) +} diff --git a/pkg/jsenv/vm_test.go b/pkg/jsenv/vm_test.go new file mode 100644 index 0000000000..078613246e --- /dev/null +++ b/pkg/jsenv/vm_test.go @@ -0,0 +1,101 @@ +package jsenv + +import ( + "reflect" + "testing" + + "github.com/stretchr/testify/require" +) + +func Test_registerFunction(t *testing.T) { + type ( + tf struct { + name string + err string + fn string + wrapperFn func() string + } + ) + + var ( + tcc = []tf{ + { + name: "register js func", + fn: "return 1;", + }, + { + name: "register js custom wrapper fn", + fn: "return second;", + wrapperFn: func() string { return "function (first, second, third) { %s }" }, + }, + { + name: "register js func error", + fn: "function () {return 1", + err: "SyntaxError: SyntaxError: (anonymous): Line 1:69 Unexpected end of input (and 1 more errors)", + }, + } + ) + + for _, tc := range tcc { + t.Run(tc.name, func(t *testing.T) { + var ( + f *fn + err error + + req = require.New(t) + tr = NewTransformer(LoaderJS, 
TargetNoop) + vm = New(tr) + ) + + if tc.wrapperFn != nil { + f, err = vm.RegisterFunction(tc.fn, tc.wrapperFn) + } else { + f, err = vm.RegisterFunction(tc.fn) + } + + if tc.err == "" { + req.NoError(err) + req.Equal(reflect.Func, reflect.TypeOf(f.f).Kind()) + req.Equal("Callable", reflect.TypeOf(f.f).Name()) + } else { + req.Errorf(err, tc.err) + req.Nil(f) + } + + }) + } +} + +func Test_compile(t *testing.T) { + var ( + req = require.New(t) + tr = NewTransformer(LoaderJS, TargetNoop) + vm = New(tr) + ) + + err := vm.Compile(`function () {return 1`) + + req.Errorf(err, "SyntaxError: SyntaxError: (anonymous): Line 1:69 Unexpected end of input (and 1 more errors)") +} + +func Test_funcDescriptor(t *testing.T) { + var ( + req = require.New(t) + tr = NewTransformer(LoaderJS, TargetNoop) + vm = New(tr) + ) + + req.Equal("_expFunc_1a6126e35863d2e16ba8e40f40668fdb", vm.funcDescriptor(`function () {return 1}`)) +} + +func Test_fetchEval(t *testing.T) { + var ( + req = require.New(t) + tr = NewTransformer(LoaderJS, TargetNoop) + vm = New(tr) + ) + + req.NoError(vm.Eval(`foo = 'bar'`)) + req.Equal("bar", vm.Fetch("foo").Export()) + req.Nil(vm.Fetch("bar")) +} diff --git a/vendor/github.com/dlclark/regexp2/.gitignore b/vendor/github.com/dlclark/regexp2/.gitignore new file mode 100644 index 0000000000..fb844c330c --- /dev/null +++ b/vendor/github.com/dlclark/regexp2/.gitignore @@ -0,0 +1,27 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +*.out + +.DS_Store diff --git a/vendor/github.com/dlclark/regexp2/.travis.yml b/vendor/github.com/dlclark/regexp2/.travis.yml new file mode 100644 index 0000000000..a2da6be473 --- /dev/null +++ b/vendor/github.com/dlclark/regexp2/.travis.yml @@ -0,0 +1,7 @@ +language: go +arch: + - AMD64 + - ppc64le 
+go: + - 1.9 + - tip diff --git a/vendor/github.com/dlclark/regexp2/ATTRIB b/vendor/github.com/dlclark/regexp2/ATTRIB new file mode 100644 index 0000000000..cdf4560b9e --- /dev/null +++ b/vendor/github.com/dlclark/regexp2/ATTRIB @@ -0,0 +1,133 @@ +============ +These pieces of code were ported from dotnet/corefx: + +syntax/charclass.go (from RegexCharClass.cs): ported to use the built-in Go unicode classes. Canonicalize is + a direct port, but most of the other code required large changes because the C# implementation + used a string to represent the CharSet data structure and I cleaned that up in my implementation. + +syntax/code.go (from RegexCode.cs): ported literally with various cleanups and layout to make it more Go-ish. + +syntax/escape.go (from RegexParser.cs): ported Escape method and added some optimizations. Unescape is inspired by + the C# implementation but couldn't be directly ported because of the lack of do-while syntax in Go. + +syntax/parser.go (from RegexpParser.cs and RegexOptions.cs): ported parser struct and associated methods as + literally as possible. Several language differences required changes. E.g. lack pre/post-fix increments as + expressions, lack of do-while loops, lack of overloads, etc. + +syntax/prefix.go (from RegexFCD.cs and RegexBoyerMoore.cs): ported as literally as possible and added support + for unicode chars that are longer than the 16-bit char in C# for the 32-bit rune in Go. + +syntax/replacerdata.go (from RegexReplacement.cs): conceptually ported and re-organized to handle differences + in charclass implementation, and fix odd code layout between RegexParser.cs, Regex.cs, and RegexReplacement.cs. + +syntax/tree.go (from RegexTree.cs and RegexNode.cs): ported literally as possible. + +syntax/writer.go (from RegexWriter.cs): ported literally with minor changes to make it more Go-ish. + +match.go (from RegexMatch.cs): ported, simplified, and changed to handle Go's lack of inheritence. 
+ +regexp.go (from Regex.cs and RegexOptions.cs): conceptually serves the same "starting point", but is simplified + and changed to handle differences in C# strings and Go strings/runes. + +replace.go (from RegexReplacement.cs): ported closely and then cleaned up to combine the MatchEvaluator and + simple string replace implementations. + +runner.go (from RegexRunner.cs): ported literally as possible. + +regexp_test.go (from CaptureTests.cs and GroupNamesAndNumbers.cs): conceptually ported, but the code was + manually structured like Go tests. + +replace_test.go (from RegexReplaceStringTest0.cs): conceptually ported + +rtl_test.go (from RightToLeft.cs): conceptually ported +--- +dotnet/corefx was released under this license: + +The MIT License (MIT) + +Copyright (c) Microsoft Corporation + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ +============ +These pieces of code are copied from the Go framework: + +- The overall directory structure of regexp2 was inspired by the Go runtime regexp package. +- The optimization in the escape method of syntax/escape.go is from the Go runtime QuoteMeta() func in regexp/regexp.go +- The method signatures in regexp.go are designed to match the Go framework regexp methods closely +- func regexp2.MustCompile and func quote are almost identifical to the regexp package versions +- BenchmarkMatch* and TestProgramTooLong* funcs in regexp_performance_test.go were copied from the framework + regexp/exec_test.go +--- +The Go framework was released under this license: + +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +============ +Some test data were gathered from the Mono project. + +regexp_mono_test.go: ported from https://github.com/mono/mono/blob/master/mcs/class/System/Test/System.Text.RegularExpressions/PerlTrials.cs +--- +Mono tests released under this license: + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ diff --git a/vendor/github.com/dlclark/regexp2/LICENSE b/vendor/github.com/dlclark/regexp2/LICENSE new file mode 100644 index 0000000000..fe83dfdc92 --- /dev/null +++ b/vendor/github.com/dlclark/regexp2/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) Doug Clark + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/dlclark/regexp2/README.md b/vendor/github.com/dlclark/regexp2/README.md new file mode 100644 index 0000000000..f92f8b10be --- /dev/null +++ b/vendor/github.com/dlclark/regexp2/README.md @@ -0,0 +1,101 @@ +# regexp2 - full featured regular expressions for Go +Regexp2 is a feature-rich RegExp engine for Go. It doesn't have constant time guarantees like the built-in `regexp` package, but it allows backtracking and is compatible with Perl5 and .NET. You'll likely be better off with the RE2 engine from the `regexp` package and should only use this if you need to write very complex patterns or require compatibility with .NET. 
+ +## Basis of the engine +The engine is ported from the .NET framework's System.Text.RegularExpressions.Regex engine. That engine was open sourced in 2015 under the MIT license. There are some fundamental differences between .NET strings and Go strings that required a bit of borrowing from the Go framework regex engine as well. I cleaned up a couple of the dirtier bits during the port (regexcharclass.cs was terrible), but the parse tree, code emmitted, and therefore patterns matched should be identical. + +## Installing +This is a go-gettable library, so install is easy: + + go get github.com/dlclark/regexp2/... + +## Usage +Usage is similar to the Go `regexp` package. Just like in `regexp`, you start by converting a regex into a state machine via the `Compile` or `MustCompile` methods. They ultimately do the same thing, but `MustCompile` will panic if the regex is invalid. You can then use the provided `Regexp` struct to find matches repeatedly. A `Regexp` struct is safe to use across goroutines. + +```go +re := regexp2.MustCompile(`Your pattern`, 0) +if isMatch, _ := re.MatchString(`Something to match`); isMatch { + //do something +} +``` + +The only error that the `*Match*` methods *should* return is a Timeout if you set the `re.MatchTimeout` field. Any other error is a bug in the `regexp2` package. If you need more details about capture groups in a match then use the `FindStringMatch` method, like so: + +```go +if m, _ := re.FindStringMatch(`Something to match`); m != nil { + // the whole match is always group 0 + fmt.Printf("Group 0: %v\n", m.String()) + + // you can get all the groups too + gps := m.Groups() + + // a group can be captured multiple times, so each cap is separately addressable + fmt.Printf("Group 1, first capture", gps[1].Captures[0].String()) + fmt.Printf("Group 1, second capture", gps[1].Captures[1].String()) +} +``` + +Group 0 is embedded in the Match. Group 0 is an automatically-assigned group that encompasses the whole pattern. 
This means that `m.String()` is the same as `m.Group.String()` and `m.Groups()[0].String()` + +The __last__ capture is embedded in each group, so `g.String()` will return the same thing as `g.Capture.String()` and `g.Captures[len(g.Captures)-1].String()`. + +If you want to find multiple matches from a single input string you should use the `FindNextMatch` method. For example, to implement a function similar to `regexp.FindAllString`: + +```go +func regexp2FindAllString(re *regexp2.Regexp, s string) []string { + var matches []string + m, _ := re.FindStringMatch(s) + for m != nil { + matches = append(matches, m.String()) + m, _ = re.FindNextMatch(m) + } + return matches +} +``` + +`FindNextMatch` is optmized so that it re-uses the underlying string/rune slice. + +The internals of `regexp2` always operate on `[]rune` so `Index` and `Length` data in a `Match` always reference a position in `rune`s rather than `byte`s (even if the input was given as a string). This is a dramatic difference between `regexp` and `regexp2`. It's advisable to use the provided `String()` methods to avoid having to work with indices. 
+ +## Compare `regexp` and `regexp2` +| Category | regexp | regexp2 | +| --- | --- | --- | +| Catastrophic backtracking possible | no, constant execution time guarantees | yes, if your pattern is at risk you can use the `re.MatchTimeout` field | +| Python-style capture groups `(?Pre)` | yes | no (yes in RE2 compat mode) | +| .NET-style capture groups `(?re)` or `(?'name're)` | no | yes | +| comments `(?#comment)` | no | yes | +| branch numbering reset `(?\|a\|b)` | no | no | +| possessive match `(?>re)` | no | yes | +| positive lookahead `(?=re)` | no | yes | +| negative lookahead `(?!re)` | no | yes | +| positive lookbehind `(?<=re)` | no | yes | +| negative lookbehind `(?re)`) +* change singleline behavior for `$` to only match end of string (like RE2) (see [#24](https://github.com/dlclark/regexp2/issues/24)) + +```go +re := regexp2.MustCompile(`Your RE2-compatible pattern`, regexp2.RE2) +if isMatch, _ := re.MatchString(`Something to match`); isMatch { + //do something +} +``` + +This feature is a work in progress and I'm open to ideas for more things to put here (maybe more relaxed character escaping rules?). + + +## Library features that I'm still working on +- Regex split + +## Potential bugs +I've run a battery of tests against regexp2 from various sources and found the debug output matches the .NET engine, but .NET and Go handle strings very differently. I've attempted to handle these differences, but most of my testing deals with basic ASCII with a little bit of multi-byte Unicode. There's a chance that there are bugs in the string handling related to character sets with supplementary Unicode chars. Right-to-Left support is coded, but not well tested either. + +## Find a bug? +I'm open to new issues and pull requests with tests if you find something odd! 
diff --git a/vendor/github.com/dlclark/regexp2/match.go b/vendor/github.com/dlclark/regexp2/match.go new file mode 100644 index 0000000000..1871cffe30 --- /dev/null +++ b/vendor/github.com/dlclark/regexp2/match.go @@ -0,0 +1,347 @@ +package regexp2 + +import ( + "bytes" + "fmt" +) + +// Match is a single regex result match that contains groups and repeated captures +// -Groups +// -Capture +type Match struct { + Group //embeded group 0 + + regex *Regexp + otherGroups []Group + + // input to the match + textpos int + textstart int + + capcount int + caps []int + sparseCaps map[int]int + + // output from the match + matches [][]int + matchcount []int + + // whether we've done any balancing with this match. If we + // have done balancing, we'll need to do extra work in Tidy(). + balancing bool +} + +// Group is an explicit or implit (group 0) matched group within the pattern +type Group struct { + Capture // the last capture of this group is embeded for ease of use + + Name string // group name + Captures []Capture // captures of this group +} + +// Capture is a single capture of text within the larger original string +type Capture struct { + // the original string + text []rune + // the position in the original string where the first character of + // captured substring was found. + Index int + // the length of the captured substring. 
+ Length int +} + +// String returns the captured text as a String +func (c *Capture) String() string { + return string(c.text[c.Index : c.Index+c.Length]) +} + +// Runes returns the captured text as a rune slice +func (c *Capture) Runes() []rune { + return c.text[c.Index : c.Index+c.Length] +} + +func newMatch(regex *Regexp, capcount int, text []rune, startpos int) *Match { + m := Match{ + regex: regex, + matchcount: make([]int, capcount), + matches: make([][]int, capcount), + textstart: startpos, + balancing: false, + } + m.Name = "0" + m.text = text + m.matches[0] = make([]int, 2) + return &m +} + +func newMatchSparse(regex *Regexp, caps map[int]int, capcount int, text []rune, startpos int) *Match { + m := newMatch(regex, capcount, text, startpos) + m.sparseCaps = caps + return m +} + +func (m *Match) reset(text []rune, textstart int) { + m.text = text + m.textstart = textstart + for i := 0; i < len(m.matchcount); i++ { + m.matchcount[i] = 0 + } + m.balancing = false +} + +func (m *Match) tidy(textpos int) { + + interval := m.matches[0] + m.Index = interval[0] + m.Length = interval[1] + m.textpos = textpos + m.capcount = m.matchcount[0] + //copy our root capture to the list + m.Group.Captures = []Capture{m.Group.Capture} + + if m.balancing { + // The idea here is that we want to compact all of our unbalanced captures. To do that we + // use j basically as a count of how many unbalanced captures we have at any given time + // (really j is an index, but j/2 is the count). First we skip past all of the real captures + // until we find a balance captures. Then we check each subsequent entry. If it's a balance + // capture (it's negative), we decrement j. If it's a real capture, we increment j and copy + // it down to the last free position. 
+ for cap := 0; cap < len(m.matchcount); cap++ { + limit := m.matchcount[cap] * 2 + matcharray := m.matches[cap] + + var i, j int + + for i = 0; i < limit; i++ { + if matcharray[i] < 0 { + break + } + } + + for j = i; i < limit; i++ { + if matcharray[i] < 0 { + // skip negative values + j-- + } else { + // but if we find something positive (an actual capture), copy it back to the last + // unbalanced position. + if i != j { + matcharray[j] = matcharray[i] + } + j++ + } + } + + m.matchcount[cap] = j / 2 + } + + m.balancing = false + } +} + +// isMatched tells if a group was matched by capnum +func (m *Match) isMatched(cap int) bool { + return cap < len(m.matchcount) && m.matchcount[cap] > 0 && m.matches[cap][m.matchcount[cap]*2-1] != (-3+1) +} + +// matchIndex returns the index of the last specified matched group by capnum +func (m *Match) matchIndex(cap int) int { + i := m.matches[cap][m.matchcount[cap]*2-2] + if i >= 0 { + return i + } + + return m.matches[cap][-3-i] +} + +// matchLength returns the length of the last specified matched group by capnum +func (m *Match) matchLength(cap int) int { + i := m.matches[cap][m.matchcount[cap]*2-1] + if i >= 0 { + return i + } + + return m.matches[cap][-3-i] +} + +// Nonpublic builder: add a capture to the group specified by "c" +func (m *Match) addMatch(c, start, l int) { + + if m.matches[c] == nil { + m.matches[c] = make([]int, 2) + } + + capcount := m.matchcount[c] + + if capcount*2+2 > len(m.matches[c]) { + oldmatches := m.matches[c] + newmatches := make([]int, capcount*8) + copy(newmatches, oldmatches[:capcount*2]) + m.matches[c] = newmatches + } + + m.matches[c][capcount*2] = start + m.matches[c][capcount*2+1] = l + m.matchcount[c] = capcount + 1 + //log.Printf("addMatch: c=%v, i=%v, l=%v ... matches: %v", c, start, l, m.matches) +} + +// Nonpublic builder: Add a capture to balance the specified group. This is used by the +// balanced match construct. (?...) 
+// +// If there were no such thing as backtracking, this would be as simple as calling RemoveMatch(c). +// However, since we have backtracking, we need to keep track of everything. +func (m *Match) balanceMatch(c int) { + m.balancing = true + + // we'll look at the last capture first + capcount := m.matchcount[c] + target := capcount*2 - 2 + + // first see if it is negative, and therefore is a reference to the next available + // capture group for balancing. If it is, we'll reset target to point to that capture. + if m.matches[c][target] < 0 { + target = -3 - m.matches[c][target] + } + + // move back to the previous capture + target -= 2 + + // if the previous capture is a reference, just copy that reference to the end. Otherwise, point to it. + if target >= 0 && m.matches[c][target] < 0 { + m.addMatch(c, m.matches[c][target], m.matches[c][target+1]) + } else { + m.addMatch(c, -3-target, -4-target /* == -3 - (target + 1) */) + } +} + +// Nonpublic builder: removes a group match by capnum +func (m *Match) removeMatch(c int) { + m.matchcount[c]-- +} + +// GroupCount returns the number of groups this match has matched +func (m *Match) GroupCount() int { + return len(m.matchcount) +} + +// GroupByName returns a group based on the name of the group, or nil if the group name does not exist +func (m *Match) GroupByName(name string) *Group { + num := m.regex.GroupNumberFromName(name) + if num < 0 { + return nil + } + return m.GroupByNumber(num) +} + +// GroupByNumber returns a group based on the number of the group, or nil if the group number does not exist +func (m *Match) GroupByNumber(num int) *Group { + // check our sparse map + if m.sparseCaps != nil { + if newNum, ok := m.sparseCaps[num]; ok { + num = newNum + } + } + if num >= len(m.matchcount) || num < 0 { + return nil + } + + if num == 0 { + return &m.Group + } + + m.populateOtherGroups() + + return &m.otherGroups[num-1] +} + +// Groups returns all the capture groups, starting with group 0 (the full match) +func 
(m *Match) Groups() []Group { + m.populateOtherGroups() + g := make([]Group, len(m.otherGroups)+1) + g[0] = m.Group + copy(g[1:], m.otherGroups) + return g +} + +func (m *Match) populateOtherGroups() { + // Construct all the Group objects first time called + if m.otherGroups == nil { + m.otherGroups = make([]Group, len(m.matchcount)-1) + for i := 0; i < len(m.otherGroups); i++ { + m.otherGroups[i] = newGroup(m.regex.GroupNameFromNumber(i+1), m.text, m.matches[i+1], m.matchcount[i+1]) + } + } +} + +func (m *Match) groupValueAppendToBuf(groupnum int, buf *bytes.Buffer) { + c := m.matchcount[groupnum] + if c == 0 { + return + } + + matches := m.matches[groupnum] + + index := matches[(c-1)*2] + last := index + matches[(c*2)-1] + + for ; index < last; index++ { + buf.WriteRune(m.text[index]) + } +} + +func newGroup(name string, text []rune, caps []int, capcount int) Group { + g := Group{} + g.text = text + if capcount > 0 { + g.Index = caps[(capcount-1)*2] + g.Length = caps[(capcount*2)-1] + } + g.Name = name + g.Captures = make([]Capture, capcount) + for i := 0; i < capcount; i++ { + g.Captures[i] = Capture{ + text: text, + Index: caps[i*2], + Length: caps[i*2+1], + } + } + //log.Printf("newGroup! 
capcount %v, %+v", capcount, g) + + return g +} + +func (m *Match) dump() string { + buf := &bytes.Buffer{} + buf.WriteRune('\n') + if len(m.sparseCaps) > 0 { + for k, v := range m.sparseCaps { + fmt.Fprintf(buf, "Slot %v -> %v\n", k, v) + } + } + + for i, g := range m.Groups() { + fmt.Fprintf(buf, "Group %v (%v), %v caps:\n", i, g.Name, len(g.Captures)) + + for _, c := range g.Captures { + fmt.Fprintf(buf, " (%v, %v) %v\n", c.Index, c.Length, c.String()) + } + } + /* + for i := 0; i < len(m.matchcount); i++ { + fmt.Fprintf(buf, "\nGroup %v (%v):\n", i, m.regex.GroupNameFromNumber(i)) + + for j := 0; j < m.matchcount[i]; j++ { + text := "" + + if m.matches[i][j*2] >= 0 { + start := m.matches[i][j*2] + text = m.text[start : start+m.matches[i][j*2+1]] + } + + fmt.Fprintf(buf, " (%v, %v) %v\n", m.matches[i][j*2], m.matches[i][j*2+1], text) + } + } + */ + return buf.String() +} diff --git a/vendor/github.com/dlclark/regexp2/regexp.go b/vendor/github.com/dlclark/regexp2/regexp.go new file mode 100644 index 0000000000..7c7b01d879 --- /dev/null +++ b/vendor/github.com/dlclark/regexp2/regexp.go @@ -0,0 +1,355 @@ +/* +Package regexp2 is a regexp package that has an interface similar to Go's framework regexp engine but uses a +more feature full regex engine behind the scenes. + +It doesn't have constant time guarantees, but it allows backtracking and is compatible with Perl5 and .NET. +You'll likely be better off with the RE2 engine from the regexp package and should only use this if you +need to write very complex patterns or require compatibility with .NET. +*/ +package regexp2 + +import ( + "errors" + "math" + "strconv" + "sync" + "time" + + "github.com/dlclark/regexp2/syntax" +) + +// Default timeout used when running regexp matches -- "forever" +var DefaultMatchTimeout = time.Duration(math.MaxInt64) + +// Regexp is the representation of a compiled regular expression. +// A Regexp is safe for concurrent use by multiple goroutines. 
+type Regexp struct { + //timeout when trying to find matches + MatchTimeout time.Duration + + // read-only after Compile + pattern string // as passed to Compile + options RegexOptions // options + + caps map[int]int // capnum->index + capnames map[string]int //capture group name -> index + capslist []string //sorted list of capture group names + capsize int // size of the capture array + + code *syntax.Code // compiled program + + // cache of machines for running regexp + muRun sync.Mutex + runner []*runner +} + +// Compile parses a regular expression and returns, if successful, +// a Regexp object that can be used to match against text. +func Compile(expr string, opt RegexOptions) (*Regexp, error) { + // parse it + tree, err := syntax.Parse(expr, syntax.RegexOptions(opt)) + if err != nil { + return nil, err + } + + // translate it to code + code, err := syntax.Write(tree) + if err != nil { + return nil, err + } + + // return it + return &Regexp{ + pattern: expr, + options: opt, + caps: code.Caps, + capnames: tree.Capnames, + capslist: tree.Caplist, + capsize: code.Capsize, + code: code, + MatchTimeout: DefaultMatchTimeout, + }, nil +} + +// MustCompile is like Compile but panics if the expression cannot be parsed. +// It simplifies safe initialization of global variables holding compiled regular +// expressions. +func MustCompile(str string, opt RegexOptions) *Regexp { + regexp, error := Compile(str, opt) + if error != nil { + panic(`regexp2: Compile(` + quote(str) + `): ` + error.Error()) + } + return regexp +} + +// Escape adds backslashes to any special characters in the input string +func Escape(input string) string { + return syntax.Escape(input) +} + +// Unescape removes any backslashes from previously-escaped special characters in the input string +func Unescape(input string) (string, error) { + return syntax.Unescape(input) +} + +// String returns the source text used to compile the regular expression. 
+func (re *Regexp) String() string { + return re.pattern +} + +func quote(s string) string { + if strconv.CanBackquote(s) { + return "`" + s + "`" + } + return strconv.Quote(s) +} + +// RegexOptions impact the runtime and parsing behavior +// for each specific regex. They are setable in code as well +// as in the regex pattern itself. +type RegexOptions int32 + +const ( + None RegexOptions = 0x0 + IgnoreCase = 0x0001 // "i" + Multiline = 0x0002 // "m" + ExplicitCapture = 0x0004 // "n" + Compiled = 0x0008 // "c" + Singleline = 0x0010 // "s" + IgnorePatternWhitespace = 0x0020 // "x" + RightToLeft = 0x0040 // "r" + Debug = 0x0080 // "d" + ECMAScript = 0x0100 // "e" + RE2 = 0x0200 // RE2 (regexp package) compatibility mode +) + +func (re *Regexp) RightToLeft() bool { + return re.options&RightToLeft != 0 +} + +func (re *Regexp) Debug() bool { + return re.options&Debug != 0 +} + +// Replace searches the input string and replaces each match found with the replacement text. +// Count will limit the number of matches attempted and startAt will allow +// us to skip past possible matches at the start of the input (left or right depending on RightToLeft option). +// Set startAt and count to -1 to go through the whole string +func (re *Regexp) Replace(input, replacement string, startAt, count int) (string, error) { + data, err := syntax.NewReplacerData(replacement, re.caps, re.capsize, re.capnames, syntax.RegexOptions(re.options)) + if err != nil { + return "", err + } + //TODO: cache ReplacerData + + return replace(re, data, nil, input, startAt, count) +} + +// ReplaceFunc searches the input string and replaces each match found using the string from the evaluator +// Count will limit the number of matches attempted and startAt will allow +// us to skip past possible matches at the start of the input (left or right depending on RightToLeft option). +// Set startAt and count to -1 to go through the whole string. 
+func (re *Regexp) ReplaceFunc(input string, evaluator MatchEvaluator, startAt, count int) (string, error) { + return replace(re, nil, evaluator, input, startAt, count) +} + +// FindStringMatch searches the input string for a Regexp match +func (re *Regexp) FindStringMatch(s string) (*Match, error) { + // convert string to runes + return re.run(false, -1, getRunes(s)) +} + +// FindRunesMatch searches the input rune slice for a Regexp match +func (re *Regexp) FindRunesMatch(r []rune) (*Match, error) { + return re.run(false, -1, r) +} + +// FindStringMatchStartingAt searches the input string for a Regexp match starting at the startAt index +func (re *Regexp) FindStringMatchStartingAt(s string, startAt int) (*Match, error) { + if startAt > len(s) { + return nil, errors.New("startAt must be less than the length of the input string") + } + r, startAt := re.getRunesAndStart(s, startAt) + if startAt == -1 { + // we didn't find our start index in the string -- that's a problem + return nil, errors.New("startAt must align to the start of a valid rune in the input string") + } + + return re.run(false, startAt, r) +} + +// FindRunesMatchStartingAt searches the input rune slice for a Regexp match starting at the startAt index +func (re *Regexp) FindRunesMatchStartingAt(r []rune, startAt int) (*Match, error) { + return re.run(false, startAt, r) +} + +// FindNextMatch returns the next match in the same input string as the match parameter. +// Will return nil if there is no next match or if given a nil match. 
+func (re *Regexp) FindNextMatch(m *Match) (*Match, error) { + if m == nil { + return nil, nil + } + + // If previous match was empty, advance by one before matching to prevent + // infinite loop + startAt := m.textpos + if m.Length == 0 { + if m.textpos == len(m.text) { + return nil, nil + } + + if re.RightToLeft() { + startAt-- + } else { + startAt++ + } + } + return re.run(false, startAt, m.text) +} + +// MatchString return true if the string matches the regex +// error will be set if a timeout occurs +func (re *Regexp) MatchString(s string) (bool, error) { + m, err := re.run(true, -1, getRunes(s)) + if err != nil { + return false, err + } + return m != nil, nil +} + +func (re *Regexp) getRunesAndStart(s string, startAt int) ([]rune, int) { + if startAt < 0 { + if re.RightToLeft() { + r := getRunes(s) + return r, len(r) + } + return getRunes(s), 0 + } + ret := make([]rune, len(s)) + i := 0 + runeIdx := -1 + for strIdx, r := range s { + if strIdx == startAt { + runeIdx = i + } + ret[i] = r + i++ + } + if startAt == len(s) { + runeIdx = i + } + return ret[:i], runeIdx +} + +func getRunes(s string) []rune { + return []rune(s) +} + +// MatchRunes return true if the runes matches the regex +// error will be set if a timeout occurs +func (re *Regexp) MatchRunes(r []rune) (bool, error) { + m, err := re.run(true, -1, r) + if err != nil { + return false, err + } + return m != nil, nil +} + +// GetGroupNames Returns the set of strings used to name capturing groups in the expression. +func (re *Regexp) GetGroupNames() []string { + var result []string + + if re.capslist == nil { + result = make([]string, re.capsize) + + for i := 0; i < len(result); i++ { + result[i] = strconv.Itoa(i) + } + } else { + result = make([]string, len(re.capslist)) + copy(result, re.capslist) + } + + return result +} + +// GetGroupNumbers returns the integer group numbers corresponding to a group name. 
+func (re *Regexp) GetGroupNumbers() []int { + var result []int + + if re.caps == nil { + result = make([]int, re.capsize) + + for i := 0; i < len(result); i++ { + result[i] = i + } + } else { + result = make([]int, len(re.caps)) + + for k, v := range re.caps { + result[v] = k + } + } + + return result +} + +// GroupNameFromNumber retrieves a group name that corresponds to a group number. +// It will return "" for and unknown group number. Unnamed groups automatically +// receive a name that is the decimal string equivalent of its number. +func (re *Regexp) GroupNameFromNumber(i int) string { + if re.capslist == nil { + if i >= 0 && i < re.capsize { + return strconv.Itoa(i) + } + + return "" + } + + if re.caps != nil { + var ok bool + if i, ok = re.caps[i]; !ok { + return "" + } + } + + if i >= 0 && i < len(re.capslist) { + return re.capslist[i] + } + + return "" +} + +// GroupNumberFromName returns a group number that corresponds to a group name. +// Returns -1 if the name is not a recognized group name. Numbered groups +// automatically get a group name that is the decimal string equivalent of its number. 
+func (re *Regexp) GroupNumberFromName(name string) int { + // look up name if we have a hashtable of names + if re.capnames != nil { + if k, ok := re.capnames[name]; ok { + return k + } + + return -1 + } + + // convert to an int if it looks like a number + result := 0 + for i := 0; i < len(name); i++ { + ch := name[i] + + if ch > '9' || ch < '0' { + return -1 + } + + result *= 10 + result += int(ch - '0') + } + + // return int if it's in range + if result >= 0 && result < re.capsize { + return result + } + + return -1 +} diff --git a/vendor/github.com/dlclark/regexp2/replace.go b/vendor/github.com/dlclark/regexp2/replace.go new file mode 100644 index 0000000000..0376bd9d37 --- /dev/null +++ b/vendor/github.com/dlclark/regexp2/replace.go @@ -0,0 +1,177 @@ +package regexp2 + +import ( + "bytes" + "errors" + + "github.com/dlclark/regexp2/syntax" +) + +const ( + replaceSpecials = 4 + replaceLeftPortion = -1 + replaceRightPortion = -2 + replaceLastGroup = -3 + replaceWholeString = -4 +) + +// MatchEvaluator is a function that takes a match and returns a replacement string to be used +type MatchEvaluator func(Match) string + +// Three very similar algorithms appear below: replace (pattern), +// replace (evaluator), and split. + +// Replace Replaces all occurrences of the regex in the string with the +// replacement pattern. +// +// Note that the special case of no matches is handled on its own: +// with no matches, the input string is returned unchanged. +// The right-to-left case is split out because StringBuilder +// doesn't handle right-to-left string building directly very well. 
+func replace(regex *Regexp, data *syntax.ReplacerData, evaluator MatchEvaluator, input string, startAt, count int) (string, error) { + if count < -1 { + return "", errors.New("Count too small") + } + if count == 0 { + return "", nil + } + + m, err := regex.FindStringMatchStartingAt(input, startAt) + + if err != nil { + return "", err + } + if m == nil { + return input, nil + } + + buf := &bytes.Buffer{} + text := m.text + + if !regex.RightToLeft() { + prevat := 0 + for m != nil { + if m.Index != prevat { + buf.WriteString(string(text[prevat:m.Index])) + } + prevat = m.Index + m.Length + if evaluator == nil { + replacementImpl(data, buf, m) + } else { + buf.WriteString(evaluator(*m)) + } + + count-- + if count == 0 { + break + } + m, err = regex.FindNextMatch(m) + if err != nil { + return "", nil + } + } + + if prevat < len(text) { + buf.WriteString(string(text[prevat:])) + } + } else { + prevat := len(text) + var al []string + + for m != nil { + if m.Index+m.Length != prevat { + al = append(al, string(text[m.Index+m.Length:prevat])) + } + prevat = m.Index + if evaluator == nil { + replacementImplRTL(data, &al, m) + } else { + al = append(al, evaluator(*m)) + } + + count-- + if count == 0 { + break + } + m, err = regex.FindNextMatch(m) + if err != nil { + return "", nil + } + } + + if prevat > 0 { + buf.WriteString(string(text[:prevat])) + } + + for i := len(al) - 1; i >= 0; i-- { + buf.WriteString(al[i]) + } + } + + return buf.String(), nil +} + +// Given a Match, emits into the StringBuilder the evaluated +// substitution pattern. 
+func replacementImpl(data *syntax.ReplacerData, buf *bytes.Buffer, m *Match) { + for _, r := range data.Rules { + + if r >= 0 { // string lookup + buf.WriteString(data.Strings[r]) + } else if r < -replaceSpecials { // group lookup + m.groupValueAppendToBuf(-replaceSpecials-1-r, buf) + } else { + switch -replaceSpecials - 1 - r { // special insertion patterns + case replaceLeftPortion: + for i := 0; i < m.Index; i++ { + buf.WriteRune(m.text[i]) + } + case replaceRightPortion: + for i := m.Index + m.Length; i < len(m.text); i++ { + buf.WriteRune(m.text[i]) + } + case replaceLastGroup: + m.groupValueAppendToBuf(m.GroupCount()-1, buf) + case replaceWholeString: + for i := 0; i < len(m.text); i++ { + buf.WriteRune(m.text[i]) + } + } + } + } +} + +func replacementImplRTL(data *syntax.ReplacerData, al *[]string, m *Match) { + l := *al + buf := &bytes.Buffer{} + + for _, r := range data.Rules { + buf.Reset() + if r >= 0 { // string lookup + l = append(l, data.Strings[r]) + } else if r < -replaceSpecials { // group lookup + m.groupValueAppendToBuf(-replaceSpecials-1-r, buf) + l = append(l, buf.String()) + } else { + switch -replaceSpecials - 1 - r { // special insertion patterns + case replaceLeftPortion: + for i := 0; i < m.Index; i++ { + buf.WriteRune(m.text[i]) + } + case replaceRightPortion: + for i := m.Index + m.Length; i < len(m.text); i++ { + buf.WriteRune(m.text[i]) + } + case replaceLastGroup: + m.groupValueAppendToBuf(m.GroupCount()-1, buf) + case replaceWholeString: + for i := 0; i < len(m.text); i++ { + buf.WriteRune(m.text[i]) + } + } + l = append(l, buf.String()) + } + } + + *al = l +} diff --git a/vendor/github.com/dlclark/regexp2/runner.go b/vendor/github.com/dlclark/regexp2/runner.go new file mode 100644 index 0000000000..4d7f9b0611 --- /dev/null +++ b/vendor/github.com/dlclark/regexp2/runner.go @@ -0,0 +1,1634 @@ +package regexp2 + +import ( + "bytes" + "errors" + "fmt" + "math" + "strconv" + "strings" + "time" + "unicode" + + 
"github.com/dlclark/regexp2/syntax" +) + +type runner struct { + re *Regexp + code *syntax.Code + + runtextstart int // starting point for search + + runtext []rune // text to search + runtextpos int // current position in text + runtextend int + + // The backtracking stack. Opcodes use this to store data regarding + // what they have matched and where to backtrack to. Each "frame" on + // the stack takes the form of [CodePosition Data1 Data2...], where + // CodePosition is the position of the current opcode and + // the data values are all optional. The CodePosition can be negative, and + // these values (also called "back2") are used by the BranchMark family of opcodes + // to indicate whether they are backtracking after a successful or failed + // match. + // When we backtrack, we pop the CodePosition off the stack, set the current + // instruction pointer to that code position, and mark the opcode + // with a backtracking flag ("Back"). Each opcode then knows how to + // handle its own data. + runtrack []int + runtrackpos int + + // This stack is used to track text positions across different opcodes. + // For example, in /(a*b)+/, the parentheses result in a SetMark/CaptureMark + // pair. SetMark records the text position before we match a*b. Then + // CaptureMark uses that position to figure out where the capture starts. + // Opcodes which push onto this stack are always paired with other opcodes + // which will pop the value from it later. A successful match should mean + // that this stack is empty. + runstack []int + runstackpos int + + // The crawl stack is used to keep track of captures. Every time a group + // has a capture, we push its group number onto the runcrawl stack. In + // the case of a balanced match, we push BOTH groups onto the stack. 
+ runcrawl []int + runcrawlpos int + + runtrackcount int // count of states that may do backtracking + + runmatch *Match // result object + + ignoreTimeout bool + timeout time.Duration // timeout in milliseconds (needed for actual) + timeoutChecksToSkip int + timeoutAt time.Time + + operator syntax.InstOp + codepos int + rightToLeft bool + caseInsensitive bool +} + +// run searches for matches and can continue from the previous match +// +// quick is usually false, but can be true to not return matches, just put it in caches +// textstart is -1 to start at the "beginning" (depending on Right-To-Left), otherwise an index in input +// input is the string to search for our regex pattern +func (re *Regexp) run(quick bool, textstart int, input []rune) (*Match, error) { + + // get a cached runner + runner := re.getRunner() + defer re.putRunner(runner) + + if textstart < 0 { + if re.RightToLeft() { + textstart = len(input) + } else { + textstart = 0 + } + } + + return runner.scan(input, textstart, quick, re.MatchTimeout) +} + +// Scans the string to find the first match. Uses the Match object +// both to feed text in and as a place to store matches that come out. +// +// All the action is in the Go() method. Our +// responsibility is to load up the class members before +// calling Go. +// +// The optimizer can compute a set of candidate starting characters, +// and we could use a separate method Skip() that will quickly scan past +// any characters that we know can't match. 
+func (r *runner) scan(rt []rune, textstart int, quick bool, timeout time.Duration) (*Match, error) { + r.timeout = timeout + r.ignoreTimeout = (time.Duration(math.MaxInt64) == timeout) + r.runtextstart = textstart + r.runtext = rt + r.runtextend = len(rt) + + stoppos := r.runtextend + bump := 1 + + if r.re.RightToLeft() { + bump = -1 + stoppos = 0 + } + + r.runtextpos = textstart + initted := false + + r.startTimeoutWatch() + for { + if r.re.Debug() { + //fmt.Printf("\nSearch content: %v\n", string(r.runtext)) + fmt.Printf("\nSearch range: from 0 to %v\n", r.runtextend) + fmt.Printf("Firstchar search starting at %v stopping at %v\n", r.runtextpos, stoppos) + } + + if r.findFirstChar() { + if err := r.checkTimeout(); err != nil { + return nil, err + } + + if !initted { + r.initMatch() + initted = true + } + + if r.re.Debug() { + fmt.Printf("Executing engine starting at %v\n\n", r.runtextpos) + } + + if err := r.execute(); err != nil { + return nil, err + } + + if r.runmatch.matchcount[0] > 0 { + // We'll return a match even if it touches a previous empty match + return r.tidyMatch(quick), nil + } + + // reset state for another go + r.runtrackpos = len(r.runtrack) + r.runstackpos = len(r.runstack) + r.runcrawlpos = len(r.runcrawl) + } + + // failure! 
+ + if r.runtextpos == stoppos { + r.tidyMatch(true) + return nil, nil + } + + // Recognize leading []* and various anchors, and bump on failure accordingly + + // r.bump by one and start again + + r.runtextpos += bump + } + // We never get here +} + +func (r *runner) execute() error { + + r.goTo(0) + + for { + + if r.re.Debug() { + r.dumpState() + } + + if err := r.checkTimeout(); err != nil { + return err + } + + switch r.operator { + case syntax.Stop: + return nil + + case syntax.Nothing: + break + + case syntax.Goto: + r.goTo(r.operand(0)) + continue + + case syntax.Testref: + if !r.runmatch.isMatched(r.operand(0)) { + break + } + r.advance(1) + continue + + case syntax.Lazybranch: + r.trackPush1(r.textPos()) + r.advance(1) + continue + + case syntax.Lazybranch | syntax.Back: + r.trackPop() + r.textto(r.trackPeek()) + r.goTo(r.operand(0)) + continue + + case syntax.Setmark: + r.stackPush(r.textPos()) + r.trackPush() + r.advance(0) + continue + + case syntax.Nullmark: + r.stackPush(-1) + r.trackPush() + r.advance(0) + continue + + case syntax.Setmark | syntax.Back, syntax.Nullmark | syntax.Back: + r.stackPop() + break + + case syntax.Getmark: + r.stackPop() + r.trackPush1(r.stackPeek()) + r.textto(r.stackPeek()) + r.advance(0) + continue + + case syntax.Getmark | syntax.Back: + r.trackPop() + r.stackPush(r.trackPeek()) + break + + case syntax.Capturemark: + if r.operand(1) != -1 && !r.runmatch.isMatched(r.operand(1)) { + break + } + r.stackPop() + if r.operand(1) != -1 { + r.transferCapture(r.operand(0), r.operand(1), r.stackPeek(), r.textPos()) + } else { + r.capture(r.operand(0), r.stackPeek(), r.textPos()) + } + r.trackPush1(r.stackPeek()) + + r.advance(2) + + continue + + case syntax.Capturemark | syntax.Back: + r.trackPop() + r.stackPush(r.trackPeek()) + r.uncapture() + if r.operand(0) != -1 && r.operand(1) != -1 { + r.uncapture() + } + + break + + case syntax.Branchmark: + r.stackPop() + + matched := r.textPos() - r.stackPeek() + + if matched != 0 { // 
Nonempty match -> loop now + r.trackPush2(r.stackPeek(), r.textPos()) // Save old mark, textpos + r.stackPush(r.textPos()) // Make new mark + r.goTo(r.operand(0)) // Loop + } else { // Empty match -> straight now + r.trackPushNeg1(r.stackPeek()) // Save old mark + r.advance(1) // Straight + } + continue + + case syntax.Branchmark | syntax.Back: + r.trackPopN(2) + r.stackPop() + r.textto(r.trackPeekN(1)) // Recall position + r.trackPushNeg1(r.trackPeek()) // Save old mark + r.advance(1) // Straight + continue + + case syntax.Branchmark | syntax.Back2: + r.trackPop() + r.stackPush(r.trackPeek()) // Recall old mark + break // Backtrack + + case syntax.Lazybranchmark: + { + // We hit this the first time through a lazy loop and after each + // successful match of the inner expression. It simply continues + // on and doesn't loop. + r.stackPop() + + oldMarkPos := r.stackPeek() + + if r.textPos() != oldMarkPos { // Nonempty match -> try to loop again by going to 'back' state + if oldMarkPos != -1 { + r.trackPush2(oldMarkPos, r.textPos()) // Save old mark, textpos + } else { + r.trackPush2(r.textPos(), r.textPos()) + } + } else { + // The inner expression found an empty match, so we'll go directly to 'back2' if we + // backtrack. In this case, we need to push something on the stack, since back2 pops. + // However, in the case of ()+? or similar, this empty match may be legitimate, so push the text + // position associated with that empty match. + r.stackPush(oldMarkPos) + + r.trackPushNeg1(r.stackPeek()) // Save old mark + } + r.advance(1) + continue + } + + case syntax.Lazybranchmark | syntax.Back: + + // After the first time, Lazybranchmark | syntax.Back occurs + // with each iteration of the loop, and therefore with every attempted + // match of the inner expression. We'll try to match the inner expression, + // then go back to Lazybranchmark if successful. 
If the inner expression + // fails, we go to Lazybranchmark | syntax.Back2 + + r.trackPopN(2) + pos := r.trackPeekN(1) + r.trackPushNeg1(r.trackPeek()) // Save old mark + r.stackPush(pos) // Make new mark + r.textto(pos) // Recall position + r.goTo(r.operand(0)) // Loop + continue + + case syntax.Lazybranchmark | syntax.Back2: + // The lazy loop has failed. We'll do a true backtrack and + // start over before the lazy loop. + r.stackPop() + r.trackPop() + r.stackPush(r.trackPeek()) // Recall old mark + break + + case syntax.Setcount: + r.stackPush2(r.textPos(), r.operand(0)) + r.trackPush() + r.advance(1) + continue + + case syntax.Nullcount: + r.stackPush2(-1, r.operand(0)) + r.trackPush() + r.advance(1) + continue + + case syntax.Setcount | syntax.Back: + r.stackPopN(2) + break + + case syntax.Nullcount | syntax.Back: + r.stackPopN(2) + break + + case syntax.Branchcount: + // r.stackPush: + // 0: Mark + // 1: Count + + r.stackPopN(2) + mark := r.stackPeek() + count := r.stackPeekN(1) + matched := r.textPos() - mark + + if count >= r.operand(1) || (matched == 0 && count >= 0) { // Max loops or empty match -> straight now + r.trackPushNeg2(mark, count) // Save old mark, count + r.advance(2) // Straight + } else { // Nonempty match -> count+loop now + r.trackPush1(mark) // remember mark + r.stackPush2(r.textPos(), count+1) // Make new mark, incr count + r.goTo(r.operand(0)) // Loop + } + continue + + case syntax.Branchcount | syntax.Back: + // r.trackPush: + // 0: Previous mark + // r.stackPush: + // 0: Mark (= current pos, discarded) + // 1: Count + r.trackPop() + r.stackPopN(2) + if r.stackPeekN(1) > 0 { // Positive -> can go straight + r.textto(r.stackPeek()) // Zap to mark + r.trackPushNeg2(r.trackPeek(), r.stackPeekN(1)-1) // Save old mark, old count + r.advance(2) // Straight + continue + } + r.stackPush2(r.trackPeek(), r.stackPeekN(1)-1) // recall old mark, old count + break + + case syntax.Branchcount | syntax.Back2: + // r.trackPush: + // 0: Previous mark + 
// 1: Previous count + r.trackPopN(2) + r.stackPush2(r.trackPeek(), r.trackPeekN(1)) // Recall old mark, old count + break // Backtrack + + case syntax.Lazybranchcount: + // r.stackPush: + // 0: Mark + // 1: Count + + r.stackPopN(2) + mark := r.stackPeek() + count := r.stackPeekN(1) + + if count < 0 { // Negative count -> loop now + r.trackPushNeg1(mark) // Save old mark + r.stackPush2(r.textPos(), count+1) // Make new mark, incr count + r.goTo(r.operand(0)) // Loop + } else { // Nonneg count -> straight now + r.trackPush3(mark, count, r.textPos()) // Save mark, count, position + r.advance(2) // Straight + } + continue + + case syntax.Lazybranchcount | syntax.Back: + // r.trackPush: + // 0: Mark + // 1: Count + // 2: r.textPos + + r.trackPopN(3) + mark := r.trackPeek() + textpos := r.trackPeekN(2) + + if r.trackPeekN(1) < r.operand(1) && textpos != mark { // Under limit and not empty match -> loop + r.textto(textpos) // Recall position + r.stackPush2(textpos, r.trackPeekN(1)+1) // Make new mark, incr count + r.trackPushNeg1(mark) // Save old mark + r.goTo(r.operand(0)) // Loop + continue + } else { // Max loops or empty match -> backtrack + r.stackPush2(r.trackPeek(), r.trackPeekN(1)) // Recall old mark, count + break // backtrack + } + + case syntax.Lazybranchcount | syntax.Back2: + // r.trackPush: + // 0: Previous mark + // r.stackPush: + // 0: Mark (== current pos, discarded) + // 1: Count + r.trackPop() + r.stackPopN(2) + r.stackPush2(r.trackPeek(), r.stackPeekN(1)-1) // Recall old mark, count + break // Backtrack + + case syntax.Setjump: + r.stackPush2(r.trackpos(), r.crawlpos()) + r.trackPush() + r.advance(0) + continue + + case syntax.Setjump | syntax.Back: + r.stackPopN(2) + break + + case syntax.Backjump: + // r.stackPush: + // 0: Saved trackpos + // 1: r.crawlpos + r.stackPopN(2) + r.trackto(r.stackPeek()) + + for r.crawlpos() != r.stackPeekN(1) { + r.uncapture() + } + + break + + case syntax.Forejump: + // r.stackPush: + // 0: Saved trackpos + // 1: 
r.crawlpos + r.stackPopN(2) + r.trackto(r.stackPeek()) + r.trackPush1(r.stackPeekN(1)) + r.advance(0) + continue + + case syntax.Forejump | syntax.Back: + // r.trackPush: + // 0: r.crawlpos + r.trackPop() + + for r.crawlpos() != r.trackPeek() { + r.uncapture() + } + + break + + case syntax.Bol: + if r.leftchars() > 0 && r.charAt(r.textPos()-1) != '\n' { + break + } + r.advance(0) + continue + + case syntax.Eol: + if r.rightchars() > 0 && r.charAt(r.textPos()) != '\n' { + break + } + r.advance(0) + continue + + case syntax.Boundary: + if !r.isBoundary(r.textPos(), 0, r.runtextend) { + break + } + r.advance(0) + continue + + case syntax.Nonboundary: + if r.isBoundary(r.textPos(), 0, r.runtextend) { + break + } + r.advance(0) + continue + + case syntax.ECMABoundary: + if !r.isECMABoundary(r.textPos(), 0, r.runtextend) { + break + } + r.advance(0) + continue + + case syntax.NonECMABoundary: + if r.isECMABoundary(r.textPos(), 0, r.runtextend) { + break + } + r.advance(0) + continue + + case syntax.Beginning: + if r.leftchars() > 0 { + break + } + r.advance(0) + continue + + case syntax.Start: + if r.textPos() != r.textstart() { + break + } + r.advance(0) + continue + + case syntax.EndZ: + rchars := r.rightchars() + if rchars > 1 { + break + } + // RE2 and EcmaScript define $ as "asserts position at the end of the string" + // PCRE/.NET adds "or before the line terminator right at the end of the string (if any)" + if (r.re.options & (RE2 | ECMAScript)) != 0 { + // RE2/Ecmascript mode + if rchars > 0 { + break + } + } else if rchars == 1 && r.charAt(r.textPos()) != '\n' { + // "regular" mode + break + } + + r.advance(0) + continue + + case syntax.End: + if r.rightchars() > 0 { + break + } + r.advance(0) + continue + + case syntax.One: + if r.forwardchars() < 1 || r.forwardcharnext() != rune(r.operand(0)) { + break + } + + r.advance(1) + continue + + case syntax.Notone: + if r.forwardchars() < 1 || r.forwardcharnext() == rune(r.operand(0)) { + break + } + + r.advance(1) + 
continue + + case syntax.Set: + + if r.forwardchars() < 1 || !r.code.Sets[r.operand(0)].CharIn(r.forwardcharnext()) { + break + } + + r.advance(1) + continue + + case syntax.Multi: + if !r.runematch(r.code.Strings[r.operand(0)]) { + break + } + + r.advance(1) + continue + + case syntax.Ref: + + capnum := r.operand(0) + + if r.runmatch.isMatched(capnum) { + if !r.refmatch(r.runmatch.matchIndex(capnum), r.runmatch.matchLength(capnum)) { + break + } + } else { + if (r.re.options & ECMAScript) == 0 { + break + } + } + + r.advance(1) + continue + + case syntax.Onerep: + + c := r.operand(1) + + if r.forwardchars() < c { + break + } + + ch := rune(r.operand(0)) + + for c > 0 { + if r.forwardcharnext() != ch { + goto BreakBackward + } + c-- + } + + r.advance(2) + continue + + case syntax.Notonerep: + + c := r.operand(1) + + if r.forwardchars() < c { + break + } + ch := rune(r.operand(0)) + + for c > 0 { + if r.forwardcharnext() == ch { + goto BreakBackward + } + c-- + } + + r.advance(2) + continue + + case syntax.Setrep: + + c := r.operand(1) + + if r.forwardchars() < c { + break + } + + set := r.code.Sets[r.operand(0)] + + for c > 0 { + if !set.CharIn(r.forwardcharnext()) { + goto BreakBackward + } + c-- + } + + r.advance(2) + continue + + case syntax.Oneloop: + + c := r.operand(1) + + if c > r.forwardchars() { + c = r.forwardchars() + } + + ch := rune(r.operand(0)) + i := c + + for ; i > 0; i-- { + if r.forwardcharnext() != ch { + r.backwardnext() + break + } + } + + if c > i { + r.trackPush2(c-i-1, r.textPos()-r.bump()) + } + + r.advance(2) + continue + + case syntax.Notoneloop: + + c := r.operand(1) + + if c > r.forwardchars() { + c = r.forwardchars() + } + + ch := rune(r.operand(0)) + i := c + + for ; i > 0; i-- { + if r.forwardcharnext() == ch { + r.backwardnext() + break + } + } + + if c > i { + r.trackPush2(c-i-1, r.textPos()-r.bump()) + } + + r.advance(2) + continue + + case syntax.Setloop: + + c := r.operand(1) + + if c > r.forwardchars() { + c = r.forwardchars() 
+ } + + set := r.code.Sets[r.operand(0)] + i := c + + for ; i > 0; i-- { + if !set.CharIn(r.forwardcharnext()) { + r.backwardnext() + break + } + } + + if c > i { + r.trackPush2(c-i-1, r.textPos()-r.bump()) + } + + r.advance(2) + continue + + case syntax.Oneloop | syntax.Back, syntax.Notoneloop | syntax.Back: + + r.trackPopN(2) + i := r.trackPeek() + pos := r.trackPeekN(1) + + r.textto(pos) + + if i > 0 { + r.trackPush2(i-1, pos-r.bump()) + } + + r.advance(2) + continue + + case syntax.Setloop | syntax.Back: + + r.trackPopN(2) + i := r.trackPeek() + pos := r.trackPeekN(1) + + r.textto(pos) + + if i > 0 { + r.trackPush2(i-1, pos-r.bump()) + } + + r.advance(2) + continue + + case syntax.Onelazy, syntax.Notonelazy: + + c := r.operand(1) + + if c > r.forwardchars() { + c = r.forwardchars() + } + + if c > 0 { + r.trackPush2(c-1, r.textPos()) + } + + r.advance(2) + continue + + case syntax.Setlazy: + + c := r.operand(1) + + if c > r.forwardchars() { + c = r.forwardchars() + } + + if c > 0 { + r.trackPush2(c-1, r.textPos()) + } + + r.advance(2) + continue + + case syntax.Onelazy | syntax.Back: + + r.trackPopN(2) + pos := r.trackPeekN(1) + r.textto(pos) + + if r.forwardcharnext() != rune(r.operand(0)) { + break + } + + i := r.trackPeek() + + if i > 0 { + r.trackPush2(i-1, pos+r.bump()) + } + + r.advance(2) + continue + + case syntax.Notonelazy | syntax.Back: + + r.trackPopN(2) + pos := r.trackPeekN(1) + r.textto(pos) + + if r.forwardcharnext() == rune(r.operand(0)) { + break + } + + i := r.trackPeek() + + if i > 0 { + r.trackPush2(i-1, pos+r.bump()) + } + + r.advance(2) + continue + + case syntax.Setlazy | syntax.Back: + + r.trackPopN(2) + pos := r.trackPeekN(1) + r.textto(pos) + + if !r.code.Sets[r.operand(0)].CharIn(r.forwardcharnext()) { + break + } + + i := r.trackPeek() + + if i > 0 { + r.trackPush2(i-1, pos+r.bump()) + } + + r.advance(2) + continue + + default: + return errors.New("unknown state in regex runner") + } + + BreakBackward: + ; + + // "break Backward" 
comes here: + r.backtrack() + } +} + +// increase the size of stack and track storage +func (r *runner) ensureStorage() { + if r.runstackpos < r.runtrackcount*4 { + doubleIntSlice(&r.runstack, &r.runstackpos) + } + if r.runtrackpos < r.runtrackcount*4 { + doubleIntSlice(&r.runtrack, &r.runtrackpos) + } +} + +func doubleIntSlice(s *[]int, pos *int) { + oldLen := len(*s) + newS := make([]int, oldLen*2) + + copy(newS[oldLen:], *s) + *pos += oldLen + *s = newS +} + +// Save a number on the longjump unrolling stack +func (r *runner) crawl(i int) { + if r.runcrawlpos == 0 { + doubleIntSlice(&r.runcrawl, &r.runcrawlpos) + } + r.runcrawlpos-- + r.runcrawl[r.runcrawlpos] = i +} + +// Remove a number from the longjump unrolling stack +func (r *runner) popcrawl() int { + val := r.runcrawl[r.runcrawlpos] + r.runcrawlpos++ + return val +} + +// Get the height of the stack +func (r *runner) crawlpos() int { + return len(r.runcrawl) - r.runcrawlpos +} + +func (r *runner) advance(i int) { + r.codepos += (i + 1) + r.setOperator(r.code.Codes[r.codepos]) +} + +func (r *runner) goTo(newpos int) { + // when branching backward or in place, ensure storage + if newpos <= r.codepos { + r.ensureStorage() + } + + r.setOperator(r.code.Codes[newpos]) + r.codepos = newpos +} + +func (r *runner) textto(newpos int) { + r.runtextpos = newpos +} + +func (r *runner) trackto(newpos int) { + r.runtrackpos = len(r.runtrack) - newpos +} + +func (r *runner) textstart() int { + return r.runtextstart +} + +func (r *runner) textPos() int { + return r.runtextpos +} + +// push onto the backtracking stack +func (r *runner) trackpos() int { + return len(r.runtrack) - r.runtrackpos +} + +func (r *runner) trackPush() { + r.runtrackpos-- + r.runtrack[r.runtrackpos] = r.codepos +} + +func (r *runner) trackPush1(I1 int) { + r.runtrackpos-- + r.runtrack[r.runtrackpos] = I1 + r.runtrackpos-- + r.runtrack[r.runtrackpos] = r.codepos +} + +func (r *runner) trackPush2(I1, I2 int) { + r.runtrackpos-- + 
r.runtrack[r.runtrackpos] = I1 + r.runtrackpos-- + r.runtrack[r.runtrackpos] = I2 + r.runtrackpos-- + r.runtrack[r.runtrackpos] = r.codepos +} + +func (r *runner) trackPush3(I1, I2, I3 int) { + r.runtrackpos-- + r.runtrack[r.runtrackpos] = I1 + r.runtrackpos-- + r.runtrack[r.runtrackpos] = I2 + r.runtrackpos-- + r.runtrack[r.runtrackpos] = I3 + r.runtrackpos-- + r.runtrack[r.runtrackpos] = r.codepos +} + +func (r *runner) trackPushNeg1(I1 int) { + r.runtrackpos-- + r.runtrack[r.runtrackpos] = I1 + r.runtrackpos-- + r.runtrack[r.runtrackpos] = -r.codepos +} + +func (r *runner) trackPushNeg2(I1, I2 int) { + r.runtrackpos-- + r.runtrack[r.runtrackpos] = I1 + r.runtrackpos-- + r.runtrack[r.runtrackpos] = I2 + r.runtrackpos-- + r.runtrack[r.runtrackpos] = -r.codepos +} + +func (r *runner) backtrack() { + newpos := r.runtrack[r.runtrackpos] + r.runtrackpos++ + + if r.re.Debug() { + if newpos < 0 { + fmt.Printf(" Backtracking (back2) to code position %v\n", -newpos) + } else { + fmt.Printf(" Backtracking to code position %v\n", newpos) + } + } + + if newpos < 0 { + newpos = -newpos + r.setOperator(r.code.Codes[newpos] | syntax.Back2) + } else { + r.setOperator(r.code.Codes[newpos] | syntax.Back) + } + + // When branching backward, ensure storage + if newpos < r.codepos { + r.ensureStorage() + } + + r.codepos = newpos +} + +func (r *runner) setOperator(op int) { + r.caseInsensitive = (0 != (op & syntax.Ci)) + r.rightToLeft = (0 != (op & syntax.Rtl)) + r.operator = syntax.InstOp(op & ^(syntax.Rtl | syntax.Ci)) +} + +func (r *runner) trackPop() { + r.runtrackpos++ +} + +// pop framesize items from the backtracking stack +func (r *runner) trackPopN(framesize int) { + r.runtrackpos += framesize +} + +// Technically we are actually peeking at items already popped. 
So if you want to +// get and pop the top item from the stack, you do +// r.trackPop(); +// r.trackPeek(); +func (r *runner) trackPeek() int { + return r.runtrack[r.runtrackpos-1] +} + +// get the ith element down on the backtracking stack +func (r *runner) trackPeekN(i int) int { + return r.runtrack[r.runtrackpos-i-1] +} + +// Push onto the grouping stack +func (r *runner) stackPush(I1 int) { + r.runstackpos-- + r.runstack[r.runstackpos] = I1 +} + +func (r *runner) stackPush2(I1, I2 int) { + r.runstackpos-- + r.runstack[r.runstackpos] = I1 + r.runstackpos-- + r.runstack[r.runstackpos] = I2 +} + +func (r *runner) stackPop() { + r.runstackpos++ +} + +// pop framesize items from the grouping stack +func (r *runner) stackPopN(framesize int) { + r.runstackpos += framesize +} + +// Technically we are actually peeking at items already popped. So if you want to +// get and pop the top item from the stack, you do +// r.stackPop(); +// r.stackPeek(); +func (r *runner) stackPeek() int { + return r.runstack[r.runstackpos-1] +} + +// get the ith element down on the grouping stack +func (r *runner) stackPeekN(i int) int { + return r.runstack[r.runstackpos-i-1] +} + +func (r *runner) operand(i int) int { + return r.code.Codes[r.codepos+i+1] +} + +func (r *runner) leftchars() int { + return r.runtextpos +} + +func (r *runner) rightchars() int { + return r.runtextend - r.runtextpos +} + +func (r *runner) bump() int { + if r.rightToLeft { + return -1 + } + return 1 +} + +func (r *runner) forwardchars() int { + if r.rightToLeft { + return r.runtextpos + } + return r.runtextend - r.runtextpos +} + +func (r *runner) forwardcharnext() rune { + var ch rune + if r.rightToLeft { + r.runtextpos-- + ch = r.runtext[r.runtextpos] + } else { + ch = r.runtext[r.runtextpos] + r.runtextpos++ + } + + if r.caseInsensitive { + return unicode.ToLower(ch) + } + return ch +} + +func (r *runner) runematch(str []rune) bool { + var pos int + + c := len(str) + if !r.rightToLeft { + if 
r.runtextend-r.runtextpos < c { + return false + } + + pos = r.runtextpos + c + } else { + if r.runtextpos-0 < c { + return false + } + + pos = r.runtextpos + } + + if !r.caseInsensitive { + for c != 0 { + c-- + pos-- + if str[c] != r.runtext[pos] { + return false + } + } + } else { + for c != 0 { + c-- + pos-- + if str[c] != unicode.ToLower(r.runtext[pos]) { + return false + } + } + } + + if !r.rightToLeft { + pos += len(str) + } + + r.runtextpos = pos + + return true +} + +func (r *runner) refmatch(index, len int) bool { + var c, pos, cmpos int + + if !r.rightToLeft { + if r.runtextend-r.runtextpos < len { + return false + } + + pos = r.runtextpos + len + } else { + if r.runtextpos-0 < len { + return false + } + + pos = r.runtextpos + } + cmpos = index + len + + c = len + + if !r.caseInsensitive { + for c != 0 { + c-- + cmpos-- + pos-- + if r.runtext[cmpos] != r.runtext[pos] { + return false + } + + } + } else { + for c != 0 { + c-- + cmpos-- + pos-- + + if unicode.ToLower(r.runtext[cmpos]) != unicode.ToLower(r.runtext[pos]) { + return false + } + } + } + + if !r.rightToLeft { + pos += len + } + + r.runtextpos = pos + + return true +} + +func (r *runner) backwardnext() { + if r.rightToLeft { + r.runtextpos++ + } else { + r.runtextpos-- + } +} + +func (r *runner) charAt(j int) rune { + return r.runtext[j] +} + +func (r *runner) findFirstChar() bool { + + if 0 != (r.code.Anchors & (syntax.AnchorBeginning | syntax.AnchorStart | syntax.AnchorEndZ | syntax.AnchorEnd)) { + if !r.code.RightToLeft { + if (0 != (r.code.Anchors&syntax.AnchorBeginning) && r.runtextpos > 0) || + (0 != (r.code.Anchors&syntax.AnchorStart) && r.runtextpos > r.runtextstart) { + r.runtextpos = r.runtextend + return false + } + if 0 != (r.code.Anchors&syntax.AnchorEndZ) && r.runtextpos < r.runtextend-1 { + r.runtextpos = r.runtextend - 1 + } else if 0 != (r.code.Anchors&syntax.AnchorEnd) && r.runtextpos < r.runtextend { + r.runtextpos = r.runtextend + } + } else { + if (0 != 
(r.code.Anchors&syntax.AnchorEnd) && r.runtextpos < r.runtextend) || + (0 != (r.code.Anchors&syntax.AnchorEndZ) && (r.runtextpos < r.runtextend-1 || + (r.runtextpos == r.runtextend-1 && r.charAt(r.runtextpos) != '\n'))) || + (0 != (r.code.Anchors&syntax.AnchorStart) && r.runtextpos < r.runtextstart) { + r.runtextpos = 0 + return false + } + if 0 != (r.code.Anchors&syntax.AnchorBeginning) && r.runtextpos > 0 { + r.runtextpos = 0 + } + } + + if r.code.BmPrefix != nil { + return r.code.BmPrefix.IsMatch(r.runtext, r.runtextpos, 0, r.runtextend) + } + + return true // found a valid start or end anchor + } else if r.code.BmPrefix != nil { + r.runtextpos = r.code.BmPrefix.Scan(r.runtext, r.runtextpos, 0, r.runtextend) + + if r.runtextpos == -1 { + if r.code.RightToLeft { + r.runtextpos = 0 + } else { + r.runtextpos = r.runtextend + } + return false + } + + return true + } else if r.code.FcPrefix == nil { + return true + } + + r.rightToLeft = r.code.RightToLeft + r.caseInsensitive = r.code.FcPrefix.CaseInsensitive + + set := r.code.FcPrefix.PrefixSet + if set.IsSingleton() { + ch := set.SingletonChar() + for i := r.forwardchars(); i > 0; i-- { + if ch == r.forwardcharnext() { + r.backwardnext() + return true + } + } + } else { + for i := r.forwardchars(); i > 0; i-- { + n := r.forwardcharnext() + //fmt.Printf("%v in %v: %v\n", string(n), set.String(), set.CharIn(n)) + if set.CharIn(n) { + r.backwardnext() + return true + } + } + } + + return false +} + +func (r *runner) initMatch() { + // Use a hashtable'ed Match object if the capture numbers are sparse + + if r.runmatch == nil { + if r.re.caps != nil { + r.runmatch = newMatchSparse(r.re, r.re.caps, r.re.capsize, r.runtext, r.runtextstart) + } else { + r.runmatch = newMatch(r.re, r.re.capsize, r.runtext, r.runtextstart) + } + } else { + r.runmatch.reset(r.runtext, r.runtextstart) + } + + // note we test runcrawl, because it is the last one to be allocated + // If there is an alloc failure in the middle of the three 
allocations, + // we may still return to reuse this instance, and we want to behave + // as if the allocations didn't occur. (we used to test _trackcount != 0) + + if r.runcrawl != nil { + r.runtrackpos = len(r.runtrack) + r.runstackpos = len(r.runstack) + r.runcrawlpos = len(r.runcrawl) + return + } + + r.initTrackCount() + + tracksize := r.runtrackcount * 8 + stacksize := r.runtrackcount * 8 + + if tracksize < 32 { + tracksize = 32 + } + if stacksize < 16 { + stacksize = 16 + } + + r.runtrack = make([]int, tracksize) + r.runtrackpos = tracksize + + r.runstack = make([]int, stacksize) + r.runstackpos = stacksize + + r.runcrawl = make([]int, 32) + r.runcrawlpos = 32 +} + +func (r *runner) tidyMatch(quick bool) *Match { + if !quick { + match := r.runmatch + + r.runmatch = nil + + match.tidy(r.runtextpos) + return match + } else { + // send back our match -- it's not leaving the package, so it's safe to not clean it up + // this reduces allocs for frequent calls to the "IsMatch" bool-only functions + return r.runmatch + } +} + +// capture captures a subexpression. Note that the +// capnum used here has already been mapped to a non-sparse +// index (by the code generator RegexWriter). +func (r *runner) capture(capnum, start, end int) { + if end < start { + T := end + end = start + start = T + } + + r.crawl(capnum) + r.runmatch.addMatch(capnum, start, end-start) +} + +// transferCapture captures a subexpression. Note that the +// capnum used here has already been mapped to a non-sparse +// index (by the code generator RegexWriter). 
+func (r *runner) transferCapture(capnum, uncapnum, start, end int) { + var start2, end2 int + + // these are the two intervals that are cancelling each other + + if end < start { + T := end + end = start + start = T + } + + start2 = r.runmatch.matchIndex(uncapnum) + end2 = start2 + r.runmatch.matchLength(uncapnum) + + // The new capture gets the innermost defined interval + + if start >= end2 { + end = start + start = end2 + } else if end <= start2 { + start = start2 + } else { + if end > end2 { + end = end2 + } + if start2 > start { + start = start2 + } + } + + r.crawl(uncapnum) + r.runmatch.balanceMatch(uncapnum) + + if capnum != -1 { + r.crawl(capnum) + r.runmatch.addMatch(capnum, start, end-start) + } +} + +// revert the last capture +func (r *runner) uncapture() { + capnum := r.popcrawl() + r.runmatch.removeMatch(capnum) +} + +//debug + +func (r *runner) dumpState() { + back := "" + if r.operator&syntax.Back != 0 { + back = " Back" + } + if r.operator&syntax.Back2 != 0 { + back += " Back2" + } + fmt.Printf("Text: %v\nTrack: %v\nStack: %v\n %s%s\n\n", + r.textposDescription(), + r.stackDescription(r.runtrack, r.runtrackpos), + r.stackDescription(r.runstack, r.runstackpos), + r.code.OpcodeDescription(r.codepos), + back) +} + +func (r *runner) stackDescription(a []int, index int) string { + buf := &bytes.Buffer{} + + fmt.Fprintf(buf, "%v/%v", len(a)-index, len(a)) + if buf.Len() < 8 { + buf.WriteString(strings.Repeat(" ", 8-buf.Len())) + } + + buf.WriteRune('(') + for i := index; i < len(a); i++ { + if i > index { + buf.WriteRune(' ') + } + + buf.WriteString(strconv.Itoa(a[i])) + } + + buf.WriteRune(')') + + return buf.String() +} + +func (r *runner) textposDescription() string { + buf := &bytes.Buffer{} + + buf.WriteString(strconv.Itoa(r.runtextpos)) + + if buf.Len() < 8 { + buf.WriteString(strings.Repeat(" ", 8-buf.Len())) + } + + if r.runtextpos > 0 { + buf.WriteString(syntax.CharDescription(r.runtext[r.runtextpos-1])) + } else { + buf.WriteRune('^') + } + + 
buf.WriteRune('>') + + for i := r.runtextpos; i < r.runtextend; i++ { + buf.WriteString(syntax.CharDescription(r.runtext[i])) + } + if buf.Len() >= 64 { + buf.Truncate(61) + buf.WriteString("...") + } else { + buf.WriteRune('$') + } + + return buf.String() +} + +// decide whether the pos +// at the specified index is a boundary or not. It's just not worth +// emitting inline code for this logic. +func (r *runner) isBoundary(index, startpos, endpos int) bool { + return (index > startpos && syntax.IsWordChar(r.runtext[index-1])) != + (index < endpos && syntax.IsWordChar(r.runtext[index])) +} + +func (r *runner) isECMABoundary(index, startpos, endpos int) bool { + return (index > startpos && syntax.IsECMAWordChar(r.runtext[index-1])) != + (index < endpos && syntax.IsECMAWordChar(r.runtext[index])) +} + +// this seems like a comment to justify randomly picking 1000 :-P +// We have determined this value in a series of experiments where x86 retail +// builds (ono-lab-optimized) were run on different pattern/input pairs. Larger values +// of TimeoutCheckFrequency did not tend to increase performance; smaller values +// of TimeoutCheckFrequency tended to slow down the execution. 
+const timeoutCheckFrequency int = 1000 + +func (r *runner) startTimeoutWatch() { + if r.ignoreTimeout { + return + } + + r.timeoutChecksToSkip = timeoutCheckFrequency + r.timeoutAt = time.Now().Add(r.timeout) +} + +func (r *runner) checkTimeout() error { + if r.ignoreTimeout { + return nil + } + r.timeoutChecksToSkip-- + if r.timeoutChecksToSkip != 0 { + return nil + } + + r.timeoutChecksToSkip = timeoutCheckFrequency + return r.doCheckTimeout() +} + +func (r *runner) doCheckTimeout() error { + current := time.Now() + + if current.Before(r.timeoutAt) { + return nil + } + + if r.re.Debug() { + //Debug.WriteLine("") + //Debug.WriteLine("RegEx match timeout occurred!") + //Debug.WriteLine("Specified timeout: " + TimeSpan.FromMilliseconds(_timeout).ToString()) + //Debug.WriteLine("Timeout check frequency: " + TimeoutCheckFrequency) + //Debug.WriteLine("Search pattern: " + _runregex._pattern) + //Debug.WriteLine("Input: " + r.runtext) + //Debug.WriteLine("About to throw RegexMatchTimeoutException.") + } + + return fmt.Errorf("match timeout after %v on input `%v`", r.timeout, string(r.runtext)) +} + +func (r *runner) initTrackCount() { + r.runtrackcount = r.code.TrackCount +} + +// getRunner returns a run to use for matching re. +// It uses the re's runner cache if possible, to avoid +// unnecessary allocation. +func (re *Regexp) getRunner() *runner { + re.muRun.Lock() + if n := len(re.runner); n > 0 { + z := re.runner[n-1] + re.runner = re.runner[:n-1] + re.muRun.Unlock() + return z + } + re.muRun.Unlock() + z := &runner{ + re: re, + code: re.code, + } + return z +} + +// putRunner returns a runner to the re's cache. +// There is no attempt to limit the size of the cache, so it will +// grow to the maximum number of simultaneous matches +// run using re. (The cache empties when re gets garbage collected.) 
+func (re *Regexp) putRunner(r *runner) { + re.muRun.Lock() + re.runner = append(re.runner, r) + re.muRun.Unlock() +} diff --git a/vendor/github.com/dlclark/regexp2/syntax/charclass.go b/vendor/github.com/dlclark/regexp2/syntax/charclass.go new file mode 100644 index 0000000000..53974d1013 --- /dev/null +++ b/vendor/github.com/dlclark/regexp2/syntax/charclass.go @@ -0,0 +1,854 @@ +package syntax + +import ( + "bytes" + "encoding/binary" + "fmt" + "sort" + "unicode" + "unicode/utf8" +) + +// CharSet combines start-end rune ranges and unicode categories representing a set of characters +type CharSet struct { + ranges []singleRange + categories []category + sub *CharSet //optional subtractor + negate bool + anything bool +} + +type category struct { + negate bool + cat string +} + +type singleRange struct { + first rune + last rune +} + +const ( + spaceCategoryText = " " + wordCategoryText = "W" +) + +var ( + ecmaSpace = []rune{0x0009, 0x000e, 0x0020, 0x0021, 0x00a0, 0x00a1, 0x1680, 0x1681, 0x2000, 0x200b, 0x2028, 0x202a, 0x202f, 0x2030, 0x205f, 0x2060, 0x3000, 0x3001, 0xfeff, 0xff00} + ecmaWord = []rune{0x0030, 0x003a, 0x0041, 0x005b, 0x005f, 0x0060, 0x0061, 0x007b} + ecmaDigit = []rune{0x0030, 0x003a} +) + +var ( + AnyClass = getCharSetFromOldString([]rune{0}, false) + ECMAAnyClass = getCharSetFromOldString([]rune{0, 0x000a, 0x000b, 0x000d, 0x000e}, false) + NoneClass = getCharSetFromOldString(nil, false) + ECMAWordClass = getCharSetFromOldString(ecmaWord, false) + NotECMAWordClass = getCharSetFromOldString(ecmaWord, true) + ECMASpaceClass = getCharSetFromOldString(ecmaSpace, false) + NotECMASpaceClass = getCharSetFromOldString(ecmaSpace, true) + ECMADigitClass = getCharSetFromOldString(ecmaDigit, false) + NotECMADigitClass = getCharSetFromOldString(ecmaDigit, true) + + WordClass = getCharSetFromCategoryString(false, false, wordCategoryText) + NotWordClass = getCharSetFromCategoryString(true, false, wordCategoryText) + SpaceClass = 
getCharSetFromCategoryString(false, false, spaceCategoryText) + NotSpaceClass = getCharSetFromCategoryString(true, false, spaceCategoryText) + DigitClass = getCharSetFromCategoryString(false, false, "Nd") + NotDigitClass = getCharSetFromCategoryString(false, true, "Nd") +) + +var unicodeCategories = func() map[string]*unicode.RangeTable { + retVal := make(map[string]*unicode.RangeTable) + for k, v := range unicode.Scripts { + retVal[k] = v + } + for k, v := range unicode.Categories { + retVal[k] = v + } + for k, v := range unicode.Properties { + retVal[k] = v + } + return retVal +}() + +func getCharSetFromCategoryString(negateSet bool, negateCat bool, cats ...string) func() *CharSet { + if negateCat && negateSet { + panic("BUG! You should only negate the set OR the category in a constant setup, but not both") + } + + c := CharSet{negate: negateSet} + + c.categories = make([]category, len(cats)) + for i, cat := range cats { + c.categories[i] = category{cat: cat, negate: negateCat} + } + return func() *CharSet { + //make a copy each time + local := c + //return that address + return &local + } +} + +func getCharSetFromOldString(setText []rune, negate bool) func() *CharSet { + c := CharSet{} + if len(setText) > 0 { + fillFirst := false + l := len(setText) + if negate { + if setText[0] == 0 { + setText = setText[1:] + } else { + l++ + fillFirst = true + } + } + + if l%2 == 0 { + c.ranges = make([]singleRange, l/2) + } else { + c.ranges = make([]singleRange, l/2+1) + } + + first := true + if fillFirst { + c.ranges[0] = singleRange{first: 0} + first = false + } + + i := 0 + for _, r := range setText { + if first { + // lower bound in a new range + c.ranges[i] = singleRange{first: r} + first = false + } else { + c.ranges[i].last = r - 1 + i++ + first = true + } + } + if !first { + c.ranges[i].last = utf8.MaxRune + } + } + + return func() *CharSet { + local := c + return &local + } +} + +// Copy makes a deep copy to prevent accidental mutation of a set +func (c CharSet) 
Copy() CharSet { + ret := CharSet{ + anything: c.anything, + negate: c.negate, + } + + ret.ranges = append(ret.ranges, c.ranges...) + ret.categories = append(ret.categories, c.categories...) + + if c.sub != nil { + sub := c.sub.Copy() + ret.sub = &sub + } + + return ret +} + +// gets a human-readable description for a set string +func (c CharSet) String() string { + buf := &bytes.Buffer{} + buf.WriteRune('[') + + if c.IsNegated() { + buf.WriteRune('^') + } + + for _, r := range c.ranges { + + buf.WriteString(CharDescription(r.first)) + if r.first != r.last { + if r.last-r.first != 1 { + //groups that are 1 char apart skip the dash + buf.WriteRune('-') + } + buf.WriteString(CharDescription(r.last)) + } + } + + for _, c := range c.categories { + buf.WriteString(c.String()) + } + + if c.sub != nil { + buf.WriteRune('-') + buf.WriteString(c.sub.String()) + } + + buf.WriteRune(']') + + return buf.String() +} + +// mapHashFill converts a charset into a buffer for use in maps +func (c CharSet) mapHashFill(buf *bytes.Buffer) { + if c.negate { + buf.WriteByte(0) + } else { + buf.WriteByte(1) + } + + binary.Write(buf, binary.LittleEndian, len(c.ranges)) + binary.Write(buf, binary.LittleEndian, len(c.categories)) + for _, r := range c.ranges { + buf.WriteRune(r.first) + buf.WriteRune(r.last) + } + for _, ct := range c.categories { + buf.WriteString(ct.cat) + if ct.negate { + buf.WriteByte(1) + } else { + buf.WriteByte(0) + } + } + + if c.sub != nil { + c.sub.mapHashFill(buf) + } +} + +// CharIn returns true if the rune is in our character set (either ranges or categories). +// It handles negations and subtracted sub-charsets. 
+func (c CharSet) CharIn(ch rune) bool { + val := false + // in s && !s.subtracted + + //check ranges + for _, r := range c.ranges { + if ch < r.first { + continue + } + if ch <= r.last { + val = true + break + } + } + + //check categories if we haven't already found a range + if !val && len(c.categories) > 0 { + for _, ct := range c.categories { + // special categories...then unicode + if ct.cat == spaceCategoryText { + if unicode.IsSpace(ch) { + // we found a space so we're done + // negate means this is a "bad" thing + val = !ct.negate + break + } else if ct.negate { + val = true + break + } + } else if ct.cat == wordCategoryText { + if IsWordChar(ch) { + val = !ct.negate + break + } else if ct.negate { + val = true + break + } + } else if unicode.Is(unicodeCategories[ct.cat], ch) { + // if we're in this unicode category then we're done + // if negate=true on this category then we "failed" our test + // otherwise we're good that we found it + val = !ct.negate + break + } else if ct.negate { + val = true + break + } + } + } + + // negate the whole char set + if c.negate { + val = !val + } + + // get subtracted recurse + if val && c.sub != nil { + val = !c.sub.CharIn(ch) + } + + //log.Printf("Char '%v' in %v == %v", string(ch), c.String(), val) + return val +} + +func (c category) String() string { + switch c.cat { + case spaceCategoryText: + if c.negate { + return "\\S" + } + return "\\s" + case wordCategoryText: + if c.negate { + return "\\W" + } + return "\\w" + } + if _, ok := unicodeCategories[c.cat]; ok { + + if c.negate { + return "\\P{" + c.cat + "}" + } + return "\\p{" + c.cat + "}" + } + return "Unknown category: " + c.cat +} + +// CharDescription Produces a human-readable description for a single character. 
+func CharDescription(ch rune) string { + /*if ch == '\\' { + return "\\\\" + } + + if ch > ' ' && ch <= '~' { + return string(ch) + } else if ch == '\n' { + return "\\n" + } else if ch == ' ' { + return "\\ " + }*/ + + b := &bytes.Buffer{} + escape(b, ch, false) //fmt.Sprintf("%U", ch) + return b.String() +} + +// According to UTS#18 Unicode Regular Expressions (http://www.unicode.org/reports/tr18/) +// RL 1.4 Simple Word Boundaries The class of includes all Alphabetic +// values from the Unicode character database, from UnicodeData.txt [UData], plus the U+200C +// ZERO WIDTH NON-JOINER and U+200D ZERO WIDTH JOINER. +func IsWordChar(r rune) bool { + //"L", "Mn", "Nd", "Pc" + return unicode.In(r, + unicode.Categories["L"], unicode.Categories["Mn"], + unicode.Categories["Nd"], unicode.Categories["Pc"]) || r == '\u200D' || r == '\u200C' + //return 'A' <= r && r <= 'Z' || 'a' <= r && r <= 'z' || '0' <= r && r <= '9' || r == '_' +} + +func IsECMAWordChar(r rune) bool { + return unicode.In(r, + unicode.Categories["L"], unicode.Categories["Mn"], + unicode.Categories["Nd"], unicode.Categories["Pc"]) + + //return 'A' <= r && r <= 'Z' || 'a' <= r && r <= 'z' || '0' <= r && r <= '9' || r == '_' +} + +// SingletonChar will return the char from the first range without validation. 
+// It assumes you have checked for IsSingleton or IsSingletonInverse and will panic given bad input +func (c CharSet) SingletonChar() rune { + return c.ranges[0].first +} + +func (c CharSet) IsSingleton() bool { + return !c.negate && //negated is multiple chars + len(c.categories) == 0 && len(c.ranges) == 1 && // multiple ranges and unicode classes represent multiple chars + c.sub == nil && // subtraction means we've got multiple chars + c.ranges[0].first == c.ranges[0].last // first and last equal means we're just 1 char +} + +func (c CharSet) IsSingletonInverse() bool { + return c.negate && //same as above, but requires negated + len(c.categories) == 0 && len(c.ranges) == 1 && // multiple ranges and unicode classes represent multiple chars + c.sub == nil && // subtraction means we've got multiple chars + c.ranges[0].first == c.ranges[0].last // first and last equal means we're just 1 char +} + +func (c CharSet) IsMergeable() bool { + return !c.IsNegated() && !c.HasSubtraction() +} + +func (c CharSet) IsNegated() bool { + return c.negate +} + +func (c CharSet) HasSubtraction() bool { + return c.sub != nil +} + +func (c CharSet) IsEmpty() bool { + return len(c.ranges) == 0 && len(c.categories) == 0 && c.sub == nil +} + +func (c *CharSet) addDigit(ecma, negate bool, pattern string) { + if ecma { + if negate { + c.addRanges(NotECMADigitClass().ranges) + } else { + c.addRanges(ECMADigitClass().ranges) + } + } else { + c.addCategories(category{cat: "Nd", negate: negate}) + } +} + +func (c *CharSet) addChar(ch rune) { + c.addRange(ch, ch) +} + +func (c *CharSet) addSpace(ecma, negate bool) { + if ecma { + if negate { + c.addRanges(NotECMASpaceClass().ranges) + } else { + c.addRanges(ECMASpaceClass().ranges) + } + } else { + c.addCategories(category{cat: spaceCategoryText, negate: negate}) + } +} + +func (c *CharSet) addWord(ecma, negate bool) { + if ecma { + if negate { + c.addRanges(NotECMAWordClass().ranges) + } else { + c.addRanges(ECMAWordClass().ranges) + } + } 
else { + c.addCategories(category{cat: wordCategoryText, negate: negate}) + } +} + +// Add set ranges and categories into ours -- no deduping or anything +func (c *CharSet) addSet(set CharSet) { + if c.anything { + return + } + if set.anything { + c.makeAnything() + return + } + // just append here to prevent double-canon + c.ranges = append(c.ranges, set.ranges...) + c.addCategories(set.categories...) + c.canonicalize() +} + +func (c *CharSet) makeAnything() { + c.anything = true + c.categories = []category{} + c.ranges = AnyClass().ranges +} + +func (c *CharSet) addCategories(cats ...category) { + // don't add dupes and remove positive+negative + if c.anything { + // if we've had a previous positive+negative group then + // just return, we're as broad as we can get + return + } + + for _, ct := range cats { + found := false + for _, ct2 := range c.categories { + if ct.cat == ct2.cat { + if ct.negate != ct2.negate { + // oposite negations...this mean we just + // take us as anything and move on + c.makeAnything() + return + } + found = true + break + } + } + + if !found { + c.categories = append(c.categories, ct) + } + } +} + +// Merges new ranges to our own +func (c *CharSet) addRanges(ranges []singleRange) { + if c.anything { + return + } + c.ranges = append(c.ranges, ranges...) 
+ c.canonicalize() +} + +// Merges everything but the new ranges into our own +func (c *CharSet) addNegativeRanges(ranges []singleRange) { + if c.anything { + return + } + + var hi rune + + // convert incoming ranges into opposites, assume they are in order + for _, r := range ranges { + if hi < r.first { + c.ranges = append(c.ranges, singleRange{hi, r.first - 1}) + } + hi = r.last + 1 + } + + if hi < utf8.MaxRune { + c.ranges = append(c.ranges, singleRange{hi, utf8.MaxRune}) + } + + c.canonicalize() +} + +func isValidUnicodeCat(catName string) bool { + _, ok := unicodeCategories[catName] + return ok +} + +func (c *CharSet) addCategory(categoryName string, negate, caseInsensitive bool, pattern string) { + if !isValidUnicodeCat(categoryName) { + // unknown unicode category, script, or property "blah" + panic(fmt.Errorf("Unknown unicode category, script, or property '%v'", categoryName)) + + } + + if caseInsensitive && (categoryName == "Ll" || categoryName == "Lu" || categoryName == "Lt") { + // when RegexOptions.IgnoreCase is specified then {Ll} {Lu} and {Lt} cases should all match + c.addCategories( + category{cat: "Ll", negate: negate}, + category{cat: "Lu", negate: negate}, + category{cat: "Lt", negate: negate}) + } + c.addCategories(category{cat: categoryName, negate: negate}) +} + +func (c *CharSet) addSubtraction(sub *CharSet) { + c.sub = sub +} + +func (c *CharSet) addRange(chMin, chMax rune) { + c.ranges = append(c.ranges, singleRange{first: chMin, last: chMax}) + c.canonicalize() +} + +func (c *CharSet) addNamedASCII(name string, negate bool) bool { + var rs []singleRange + + switch name { + case "alnum": + rs = []singleRange{singleRange{'0', '9'}, singleRange{'A', 'Z'}, singleRange{'a', 'z'}} + case "alpha": + rs = []singleRange{singleRange{'A', 'Z'}, singleRange{'a', 'z'}} + case "ascii": + rs = []singleRange{singleRange{0, 0x7f}} + case "blank": + rs = []singleRange{singleRange{'\t', '\t'}, singleRange{' ', ' '}} + case "cntrl": + rs = 
[]singleRange{singleRange{0, 0x1f}, singleRange{0x7f, 0x7f}} + case "digit": + c.addDigit(false, negate, "") + case "graph": + rs = []singleRange{singleRange{'!', '~'}} + case "lower": + rs = []singleRange{singleRange{'a', 'z'}} + case "print": + rs = []singleRange{singleRange{' ', '~'}} + case "punct": //[!-/:-@[-`{-~] + rs = []singleRange{singleRange{'!', '/'}, singleRange{':', '@'}, singleRange{'[', '`'}, singleRange{'{', '~'}} + case "space": + c.addSpace(true, negate) + case "upper": + rs = []singleRange{singleRange{'A', 'Z'}} + case "word": + c.addWord(true, negate) + case "xdigit": + rs = []singleRange{singleRange{'0', '9'}, singleRange{'A', 'F'}, singleRange{'a', 'f'}} + default: + return false + } + + if len(rs) > 0 { + if negate { + c.addNegativeRanges(rs) + } else { + c.addRanges(rs) + } + } + + return true +} + +type singleRangeSorter []singleRange + +func (p singleRangeSorter) Len() int { return len(p) } +func (p singleRangeSorter) Less(i, j int) bool { return p[i].first < p[j].first } +func (p singleRangeSorter) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +// Logic to reduce a character class to a unique, sorted form. +func (c *CharSet) canonicalize() { + var i, j int + var last rune + + // + // Find and eliminate overlapping or abutting ranges + // + + if len(c.ranges) > 1 { + sort.Sort(singleRangeSorter(c.ranges)) + + done := false + + for i, j = 1, 0; ; i++ { + for last = c.ranges[j].last; ; i++ { + if i == len(c.ranges) || last == utf8.MaxRune { + done = true + break + } + + CurrentRange := c.ranges[i] + if CurrentRange.first > last+1 { + break + } + + if last < CurrentRange.last { + last = CurrentRange.last + } + } + + c.ranges[j] = singleRange{first: c.ranges[j].first, last: last} + + j++ + + if done { + break + } + + if j < i { + c.ranges[j] = c.ranges[i] + } + } + + c.ranges = append(c.ranges[:j], c.ranges[len(c.ranges):]...) + } +} + +// Adds to the class any lowercase versions of characters already +// in the class. 
Used for case-insensitivity. +func (c *CharSet) addLowercase() { + if c.anything { + return + } + toAdd := []singleRange{} + for i := 0; i < len(c.ranges); i++ { + r := c.ranges[i] + if r.first == r.last { + lower := unicode.ToLower(r.first) + c.ranges[i] = singleRange{first: lower, last: lower} + } else { + toAdd = append(toAdd, r) + } + } + + for _, r := range toAdd { + c.addLowercaseRange(r.first, r.last) + } + c.canonicalize() +} + +/************************************************************************** + Let U be the set of Unicode character values and let L be the lowercase + function, mapping from U to U. To perform case insensitive matching of + character sets, we need to be able to map an interval I in U, say + + I = [chMin, chMax] = { ch : chMin <= ch <= chMax } + + to a set A such that A contains L(I) and A is contained in the union of + I and L(I). + + The table below partitions U into intervals on which L is non-decreasing. + Thus, for any interval J = [a, b] contained in one of these intervals, + L(J) is contained in [L(a), L(b)]. + + It is also true that for any such J, [L(a), L(b)] is contained in the + union of J and L(J). This does not follow from L being non-decreasing on + these intervals. It follows from the nature of the L on each interval. + On each interval, L has one of the following forms: + + (1) L(ch) = constant (LowercaseSet) + (2) L(ch) = ch + offset (LowercaseAdd) + (3) L(ch) = ch | 1 (LowercaseBor) + (4) L(ch) = ch + (ch & 1) (LowercaseBad) + + It is easy to verify that for any of these forms [L(a), L(b)] is + contained in the union of [a, b] and L([a, b]). +***************************************************************************/ + +const ( + LowercaseSet = 0 // Set to arg. + LowercaseAdd = 1 // Add arg. + LowercaseBor = 2 // Bitwise or with 1. + LowercaseBad = 3 // Bitwise and with 1 and add original. 
+) + +type lcMap struct { + chMin, chMax rune + op, data int32 +} + +var lcTable = []lcMap{ + lcMap{'\u0041', '\u005A', LowercaseAdd, 32}, + lcMap{'\u00C0', '\u00DE', LowercaseAdd, 32}, + lcMap{'\u0100', '\u012E', LowercaseBor, 0}, + lcMap{'\u0130', '\u0130', LowercaseSet, 0x0069}, + lcMap{'\u0132', '\u0136', LowercaseBor, 0}, + lcMap{'\u0139', '\u0147', LowercaseBad, 0}, + lcMap{'\u014A', '\u0176', LowercaseBor, 0}, + lcMap{'\u0178', '\u0178', LowercaseSet, 0x00FF}, + lcMap{'\u0179', '\u017D', LowercaseBad, 0}, + lcMap{'\u0181', '\u0181', LowercaseSet, 0x0253}, + lcMap{'\u0182', '\u0184', LowercaseBor, 0}, + lcMap{'\u0186', '\u0186', LowercaseSet, 0x0254}, + lcMap{'\u0187', '\u0187', LowercaseSet, 0x0188}, + lcMap{'\u0189', '\u018A', LowercaseAdd, 205}, + lcMap{'\u018B', '\u018B', LowercaseSet, 0x018C}, + lcMap{'\u018E', '\u018E', LowercaseSet, 0x01DD}, + lcMap{'\u018F', '\u018F', LowercaseSet, 0x0259}, + lcMap{'\u0190', '\u0190', LowercaseSet, 0x025B}, + lcMap{'\u0191', '\u0191', LowercaseSet, 0x0192}, + lcMap{'\u0193', '\u0193', LowercaseSet, 0x0260}, + lcMap{'\u0194', '\u0194', LowercaseSet, 0x0263}, + lcMap{'\u0196', '\u0196', LowercaseSet, 0x0269}, + lcMap{'\u0197', '\u0197', LowercaseSet, 0x0268}, + lcMap{'\u0198', '\u0198', LowercaseSet, 0x0199}, + lcMap{'\u019C', '\u019C', LowercaseSet, 0x026F}, + lcMap{'\u019D', '\u019D', LowercaseSet, 0x0272}, + lcMap{'\u019F', '\u019F', LowercaseSet, 0x0275}, + lcMap{'\u01A0', '\u01A4', LowercaseBor, 0}, + lcMap{'\u01A7', '\u01A7', LowercaseSet, 0x01A8}, + lcMap{'\u01A9', '\u01A9', LowercaseSet, 0x0283}, + lcMap{'\u01AC', '\u01AC', LowercaseSet, 0x01AD}, + lcMap{'\u01AE', '\u01AE', LowercaseSet, 0x0288}, + lcMap{'\u01AF', '\u01AF', LowercaseSet, 0x01B0}, + lcMap{'\u01B1', '\u01B2', LowercaseAdd, 217}, + lcMap{'\u01B3', '\u01B5', LowercaseBad, 0}, + lcMap{'\u01B7', '\u01B7', LowercaseSet, 0x0292}, + lcMap{'\u01B8', '\u01B8', LowercaseSet, 0x01B9}, + lcMap{'\u01BC', '\u01BC', LowercaseSet, 0x01BD}, + lcMap{'\u01C4', 
'\u01C5', LowercaseSet, 0x01C6}, + lcMap{'\u01C7', '\u01C8', LowercaseSet, 0x01C9}, + lcMap{'\u01CA', '\u01CB', LowercaseSet, 0x01CC}, + lcMap{'\u01CD', '\u01DB', LowercaseBad, 0}, + lcMap{'\u01DE', '\u01EE', LowercaseBor, 0}, + lcMap{'\u01F1', '\u01F2', LowercaseSet, 0x01F3}, + lcMap{'\u01F4', '\u01F4', LowercaseSet, 0x01F5}, + lcMap{'\u01FA', '\u0216', LowercaseBor, 0}, + lcMap{'\u0386', '\u0386', LowercaseSet, 0x03AC}, + lcMap{'\u0388', '\u038A', LowercaseAdd, 37}, + lcMap{'\u038C', '\u038C', LowercaseSet, 0x03CC}, + lcMap{'\u038E', '\u038F', LowercaseAdd, 63}, + lcMap{'\u0391', '\u03AB', LowercaseAdd, 32}, + lcMap{'\u03E2', '\u03EE', LowercaseBor, 0}, + lcMap{'\u0401', '\u040F', LowercaseAdd, 80}, + lcMap{'\u0410', '\u042F', LowercaseAdd, 32}, + lcMap{'\u0460', '\u0480', LowercaseBor, 0}, + lcMap{'\u0490', '\u04BE', LowercaseBor, 0}, + lcMap{'\u04C1', '\u04C3', LowercaseBad, 0}, + lcMap{'\u04C7', '\u04C7', LowercaseSet, 0x04C8}, + lcMap{'\u04CB', '\u04CB', LowercaseSet, 0x04CC}, + lcMap{'\u04D0', '\u04EA', LowercaseBor, 0}, + lcMap{'\u04EE', '\u04F4', LowercaseBor, 0}, + lcMap{'\u04F8', '\u04F8', LowercaseSet, 0x04F9}, + lcMap{'\u0531', '\u0556', LowercaseAdd, 48}, + lcMap{'\u10A0', '\u10C5', LowercaseAdd, 48}, + lcMap{'\u1E00', '\u1EF8', LowercaseBor, 0}, + lcMap{'\u1F08', '\u1F0F', LowercaseAdd, -8}, + lcMap{'\u1F18', '\u1F1F', LowercaseAdd, -8}, + lcMap{'\u1F28', '\u1F2F', LowercaseAdd, -8}, + lcMap{'\u1F38', '\u1F3F', LowercaseAdd, -8}, + lcMap{'\u1F48', '\u1F4D', LowercaseAdd, -8}, + lcMap{'\u1F59', '\u1F59', LowercaseSet, 0x1F51}, + lcMap{'\u1F5B', '\u1F5B', LowercaseSet, 0x1F53}, + lcMap{'\u1F5D', '\u1F5D', LowercaseSet, 0x1F55}, + lcMap{'\u1F5F', '\u1F5F', LowercaseSet, 0x1F57}, + lcMap{'\u1F68', '\u1F6F', LowercaseAdd, -8}, + lcMap{'\u1F88', '\u1F8F', LowercaseAdd, -8}, + lcMap{'\u1F98', '\u1F9F', LowercaseAdd, -8}, + lcMap{'\u1FA8', '\u1FAF', LowercaseAdd, -8}, + lcMap{'\u1FB8', '\u1FB9', LowercaseAdd, -8}, + lcMap{'\u1FBA', '\u1FBB', LowercaseAdd, 
-74}, + lcMap{'\u1FBC', '\u1FBC', LowercaseSet, 0x1FB3}, + lcMap{'\u1FC8', '\u1FCB', LowercaseAdd, -86}, + lcMap{'\u1FCC', '\u1FCC', LowercaseSet, 0x1FC3}, + lcMap{'\u1FD8', '\u1FD9', LowercaseAdd, -8}, + lcMap{'\u1FDA', '\u1FDB', LowercaseAdd, -100}, + lcMap{'\u1FE8', '\u1FE9', LowercaseAdd, -8}, + lcMap{'\u1FEA', '\u1FEB', LowercaseAdd, -112}, + lcMap{'\u1FEC', '\u1FEC', LowercaseSet, 0x1FE5}, + lcMap{'\u1FF8', '\u1FF9', LowercaseAdd, -128}, + lcMap{'\u1FFA', '\u1FFB', LowercaseAdd, -126}, + lcMap{'\u1FFC', '\u1FFC', LowercaseSet, 0x1FF3}, + lcMap{'\u2160', '\u216F', LowercaseAdd, 16}, + lcMap{'\u24B6', '\u24D0', LowercaseAdd, 26}, + lcMap{'\uFF21', '\uFF3A', LowercaseAdd, 32}, +} + +func (c *CharSet) addLowercaseRange(chMin, chMax rune) { + var i, iMax, iMid int + var chMinT, chMaxT rune + var lc lcMap + + for i, iMax = 0, len(lcTable); i < iMax; { + iMid = (i + iMax) / 2 + if lcTable[iMid].chMax < chMin { + i = iMid + 1 + } else { + iMax = iMid + } + } + + for ; i < len(lcTable); i++ { + lc = lcTable[i] + if lc.chMin > chMax { + return + } + chMinT = lc.chMin + if chMinT < chMin { + chMinT = chMin + } + + chMaxT = lc.chMax + if chMaxT > chMax { + chMaxT = chMax + } + + switch lc.op { + case LowercaseSet: + chMinT = rune(lc.data) + chMaxT = rune(lc.data) + break + case LowercaseAdd: + chMinT += lc.data + chMaxT += lc.data + break + case LowercaseBor: + chMinT |= 1 + chMaxT |= 1 + break + case LowercaseBad: + chMinT += (chMinT & 1) + chMaxT += (chMaxT & 1) + break + } + + if chMinT < chMin || chMaxT > chMax { + c.addRange(chMinT, chMaxT) + } + } +} diff --git a/vendor/github.com/dlclark/regexp2/syntax/code.go b/vendor/github.com/dlclark/regexp2/syntax/code.go new file mode 100644 index 0000000000..686e822af8 --- /dev/null +++ b/vendor/github.com/dlclark/regexp2/syntax/code.go @@ -0,0 +1,274 @@ +package syntax + +import ( + "bytes" + "fmt" + "math" +) + +// similar to prog.go in the go regex package...also with comment 'may not belong in this package' + +// File 
provides operator constants for use by the Builder and the Machine. + +// Implementation notes: +// +// Regexps are built into RegexCodes, which contain an operation array, +// a string table, and some constants. +// +// Each operation is one of the codes below, followed by the integer +// operands specified for each op. +// +// Strings and sets are indices into a string table. + +type InstOp int + +const ( + // lef/back operands description + + Onerep InstOp = 0 // lef,back char,min,max a {n} + Notonerep = 1 // lef,back char,min,max .{n} + Setrep = 2 // lef,back set,min,max [\d]{n} + + Oneloop = 3 // lef,back char,min,max a {,n} + Notoneloop = 4 // lef,back char,min,max .{,n} + Setloop = 5 // lef,back set,min,max [\d]{,n} + + Onelazy = 6 // lef,back char,min,max a {,n}? + Notonelazy = 7 // lef,back char,min,max .{,n}? + Setlazy = 8 // lef,back set,min,max [\d]{,n}? + + One = 9 // lef char a + Notone = 10 // lef char [^a] + Set = 11 // lef set [a-z\s] \w \s \d + + Multi = 12 // lef string abcd + Ref = 13 // lef group \# + + Bol = 14 // ^ + Eol = 15 // $ + Boundary = 16 // \b + Nonboundary = 17 // \B + Beginning = 18 // \A + Start = 19 // \G + EndZ = 20 // \Z + End = 21 // \Z + + Nothing = 22 // Reject! 
+ + // Primitive control structures + + Lazybranch = 23 // back jump straight first + Branchmark = 24 // back jump branch first for loop + Lazybranchmark = 25 // back jump straight first for loop + Nullcount = 26 // back val set counter, null mark + Setcount = 27 // back val set counter, make mark + Branchcount = 28 // back jump,limit branch++ if zero<=c impl group slots + Capsize int // number of impl group slots + FcPrefix *Prefix // the set of candidate first characters (may be null) + BmPrefix *BmPrefix // the fixed prefix string as a Boyer-Moore machine (may be null) + Anchors AnchorLoc // the set of zero-length start anchors (RegexFCD.Bol, etc) + RightToLeft bool // true if right to left +} + +func opcodeBacktracks(op InstOp) bool { + op &= Mask + + switch op { + case Oneloop, Notoneloop, Setloop, Onelazy, Notonelazy, Setlazy, Lazybranch, Branchmark, Lazybranchmark, + Nullcount, Setcount, Branchcount, Lazybranchcount, Setmark, Capturemark, Getmark, Setjump, Backjump, + Forejump, Goto: + return true + + default: + return false + } +} + +func opcodeSize(op InstOp) int { + op &= Mask + + switch op { + case Nothing, Bol, Eol, Boundary, Nonboundary, ECMABoundary, NonECMABoundary, Beginning, Start, EndZ, + End, Nullmark, Setmark, Getmark, Setjump, Backjump, Forejump, Stop: + return 1 + + case One, Notone, Multi, Ref, Testref, Goto, Nullcount, Setcount, Lazybranch, Branchmark, Lazybranchmark, + Prune, Set: + return 2 + + case Capturemark, Branchcount, Lazybranchcount, Onerep, Notonerep, Oneloop, Notoneloop, Onelazy, Notonelazy, + Setlazy, Setrep, Setloop: + return 3 + + default: + panic(fmt.Errorf("Unexpected op code: %v", op)) + } +} + +var codeStr = []string{ + "Onerep", "Notonerep", "Setrep", + "Oneloop", "Notoneloop", "Setloop", + "Onelazy", "Notonelazy", "Setlazy", + "One", "Notone", "Set", + "Multi", "Ref", + "Bol", "Eol", "Boundary", "Nonboundary", "Beginning", "Start", "EndZ", "End", + "Nothing", + "Lazybranch", "Branchmark", "Lazybranchmark", + "Nullcount", 
"Setcount", "Branchcount", "Lazybranchcount", + "Nullmark", "Setmark", "Capturemark", "Getmark", + "Setjump", "Backjump", "Forejump", "Testref", "Goto", + "Prune", "Stop", + "ECMABoundary", "NonECMABoundary", +} + +func operatorDescription(op InstOp) string { + desc := codeStr[op&Mask] + if (op & Ci) != 0 { + desc += "-Ci" + } + if (op & Rtl) != 0 { + desc += "-Rtl" + } + if (op & Back) != 0 { + desc += "-Back" + } + if (op & Back2) != 0 { + desc += "-Back2" + } + + return desc +} + +// OpcodeDescription is a humman readable string of the specific offset +func (c *Code) OpcodeDescription(offset int) string { + buf := &bytes.Buffer{} + + op := InstOp(c.Codes[offset]) + fmt.Fprintf(buf, "%06d ", offset) + + if opcodeBacktracks(op & Mask) { + buf.WriteString("*") + } else { + buf.WriteString(" ") + } + buf.WriteString(operatorDescription(op)) + buf.WriteString("(") + op &= Mask + + switch op { + case One, Notone, Onerep, Notonerep, Oneloop, Notoneloop, Onelazy, Notonelazy: + buf.WriteString("Ch = ") + buf.WriteString(CharDescription(rune(c.Codes[offset+1]))) + + case Set, Setrep, Setloop, Setlazy: + buf.WriteString("Set = ") + buf.WriteString(c.Sets[c.Codes[offset+1]].String()) + + case Multi: + fmt.Fprintf(buf, "String = %s", string(c.Strings[c.Codes[offset+1]])) + + case Ref, Testref: + fmt.Fprintf(buf, "Index = %d", c.Codes[offset+1]) + + case Capturemark: + fmt.Fprintf(buf, "Index = %d", c.Codes[offset+1]) + if c.Codes[offset+2] != -1 { + fmt.Fprintf(buf, ", Unindex = %d", c.Codes[offset+2]) + } + + case Nullcount, Setcount: + fmt.Fprintf(buf, "Value = %d", c.Codes[offset+1]) + + case Goto, Lazybranch, Branchmark, Lazybranchmark, Branchcount, Lazybranchcount: + fmt.Fprintf(buf, "Addr = %d", c.Codes[offset+1]) + } + + switch op { + case Onerep, Notonerep, Oneloop, Notoneloop, Onelazy, Notonelazy, Setrep, Setloop, Setlazy: + buf.WriteString(", Rep = ") + if c.Codes[offset+2] == math.MaxInt32 { + buf.WriteString("inf") + } else { + fmt.Fprintf(buf, "%d", 
c.Codes[offset+2]) + } + + case Branchcount, Lazybranchcount: + buf.WriteString(", Limit = ") + if c.Codes[offset+2] == math.MaxInt32 { + buf.WriteString("inf") + } else { + fmt.Fprintf(buf, "%d", c.Codes[offset+2]) + } + + } + + buf.WriteString(")") + + return buf.String() +} + +func (c *Code) Dump() string { + buf := &bytes.Buffer{} + + if c.RightToLeft { + fmt.Fprintln(buf, "Direction: right-to-left") + } else { + fmt.Fprintln(buf, "Direction: left-to-right") + } + if c.FcPrefix == nil { + fmt.Fprintln(buf, "Firstchars: n/a") + } else { + fmt.Fprintf(buf, "Firstchars: %v\n", c.FcPrefix.PrefixSet.String()) + } + + if c.BmPrefix == nil { + fmt.Fprintln(buf, "Prefix: n/a") + } else { + fmt.Fprintf(buf, "Prefix: %v\n", Escape(c.BmPrefix.String())) + } + + fmt.Fprintf(buf, "Anchors: %v\n", c.Anchors) + fmt.Fprintln(buf) + + if c.BmPrefix != nil { + fmt.Fprintln(buf, "BoyerMoore:") + fmt.Fprintln(buf, c.BmPrefix.Dump(" ")) + } + for i := 0; i < len(c.Codes); i += opcodeSize(InstOp(c.Codes[i])) { + fmt.Fprintln(buf, c.OpcodeDescription(i)) + } + + return buf.String() +} diff --git a/vendor/github.com/dlclark/regexp2/syntax/escape.go b/vendor/github.com/dlclark/regexp2/syntax/escape.go new file mode 100644 index 0000000000..609df10731 --- /dev/null +++ b/vendor/github.com/dlclark/regexp2/syntax/escape.go @@ -0,0 +1,94 @@ +package syntax + +import ( + "bytes" + "strconv" + "strings" + "unicode" +) + +func Escape(input string) string { + b := &bytes.Buffer{} + for _, r := range input { + escape(b, r, false) + } + return b.String() +} + +const meta = `\.+*?()|[]{}^$# ` + +func escape(b *bytes.Buffer, r rune, force bool) { + if unicode.IsPrint(r) { + if strings.IndexRune(meta, r) >= 0 || force { + b.WriteRune('\\') + } + b.WriteRune(r) + return + } + + switch r { + case '\a': + b.WriteString(`\a`) + case '\f': + b.WriteString(`\f`) + case '\n': + b.WriteString(`\n`) + case '\r': + b.WriteString(`\r`) + case '\t': + b.WriteString(`\t`) + case '\v': + b.WriteString(`\v`) + 
default: + if r < 0x100 { + b.WriteString(`\x`) + s := strconv.FormatInt(int64(r), 16) + if len(s) == 1 { + b.WriteRune('0') + } + b.WriteString(s) + break + } + b.WriteString(`\u`) + b.WriteString(strconv.FormatInt(int64(r), 16)) + } +} + +func Unescape(input string) (string, error) { + idx := strings.IndexRune(input, '\\') + // no slashes means no unescape needed + if idx == -1 { + return input, nil + } + + buf := bytes.NewBufferString(input[:idx]) + // get the runes for the rest of the string -- we're going full parser scan on this + + p := parser{} + p.setPattern(input[idx+1:]) + for { + if p.rightMost() { + return "", p.getErr(ErrIllegalEndEscape) + } + r, err := p.scanCharEscape() + if err != nil { + return "", err + } + buf.WriteRune(r) + // are we done? + if p.rightMost() { + return buf.String(), nil + } + + r = p.moveRightGetChar() + for r != '\\' { + buf.WriteRune(r) + if p.rightMost() { + // we're done, no more slashes + return buf.String(), nil + } + // keep scanning until we get another slash + r = p.moveRightGetChar() + } + } +} diff --git a/vendor/github.com/dlclark/regexp2/syntax/fuzz.go b/vendor/github.com/dlclark/regexp2/syntax/fuzz.go new file mode 100644 index 0000000000..ee863866db --- /dev/null +++ b/vendor/github.com/dlclark/regexp2/syntax/fuzz.go @@ -0,0 +1,20 @@ +// +build gofuzz + +package syntax + +// Fuzz is the input point for go-fuzz +func Fuzz(data []byte) int { + sdata := string(data) + tree, err := Parse(sdata, RegexOptions(0)) + if err != nil { + return 0 + } + + // translate it to code + _, err = Write(tree) + if err != nil { + panic(err) + } + + return 1 +} diff --git a/vendor/github.com/dlclark/regexp2/syntax/parser.go b/vendor/github.com/dlclark/regexp2/syntax/parser.go new file mode 100644 index 0000000000..d86f332823 --- /dev/null +++ b/vendor/github.com/dlclark/regexp2/syntax/parser.go @@ -0,0 +1,2232 @@ +package syntax + +import ( + "fmt" + "math" + "os" + "sort" + "strconv" + "unicode" +) + +type RegexOptions int32 + 
+const ( + IgnoreCase RegexOptions = 0x0001 // "i" + Multiline = 0x0002 // "m" + ExplicitCapture = 0x0004 // "n" + Compiled = 0x0008 // "c" + Singleline = 0x0010 // "s" + IgnorePatternWhitespace = 0x0020 // "x" + RightToLeft = 0x0040 // "r" + Debug = 0x0080 // "d" + ECMAScript = 0x0100 // "e" + RE2 = 0x0200 // RE2 compat mode +) + +func optionFromCode(ch rune) RegexOptions { + // case-insensitive + switch ch { + case 'i', 'I': + return IgnoreCase + case 'r', 'R': + return RightToLeft + case 'm', 'M': + return Multiline + case 'n', 'N': + return ExplicitCapture + case 's', 'S': + return Singleline + case 'x', 'X': + return IgnorePatternWhitespace + case 'd', 'D': + return Debug + case 'e', 'E': + return ECMAScript + default: + return 0 + } +} + +// An Error describes a failure to parse a regular expression +// and gives the offending expression. +type Error struct { + Code ErrorCode + Expr string + Args []interface{} +} + +func (e *Error) Error() string { + if len(e.Args) == 0 { + return "error parsing regexp: " + e.Code.String() + " in `" + e.Expr + "`" + } + return "error parsing regexp: " + fmt.Sprintf(e.Code.String(), e.Args...) + " in `" + e.Expr + "`" +} + +// An ErrorCode describes a failure to parse a regular expression. 
+type ErrorCode string + +const ( + // internal issue + ErrInternalError ErrorCode = "regexp/syntax: internal error" + // Parser errors + ErrUnterminatedComment = "unterminated comment" + ErrInvalidCharRange = "invalid character class range" + ErrInvalidRepeatSize = "invalid repeat count" + ErrInvalidUTF8 = "invalid UTF-8" + ErrCaptureGroupOutOfRange = "capture group number out of range" + ErrUnexpectedParen = "unexpected )" + ErrMissingParen = "missing closing )" + ErrMissingBrace = "missing closing }" + ErrInvalidRepeatOp = "invalid nested repetition operator" + ErrMissingRepeatArgument = "missing argument to repetition operator" + ErrConditionalExpression = "illegal conditional (?(...)) expression" + ErrTooManyAlternates = "too many | in (?()|)" + ErrUnrecognizedGrouping = "unrecognized grouping construct: (%v" + ErrInvalidGroupName = "invalid group name: group names must begin with a word character and have a matching terminator" + ErrCapNumNotZero = "capture number cannot be zero" + ErrUndefinedBackRef = "reference to undefined group number %v" + ErrUndefinedNameRef = "reference to undefined group name %v" + ErrAlternationCantCapture = "alternation conditions do not capture and cannot be named" + ErrAlternationCantHaveComment = "alternation conditions cannot be comments" + ErrMalformedReference = "(?(%v) ) malformed" + ErrUndefinedReference = "(?(%v) ) reference to undefined group" + ErrIllegalEndEscape = "illegal \\ at end of pattern" + ErrMalformedSlashP = "malformed \\p{X} character escape" + ErrIncompleteSlashP = "incomplete \\p{X} character escape" + ErrUnknownSlashP = "unknown unicode category, script, or property '%v'" + ErrUnrecognizedEscape = "unrecognized escape sequence \\%v" + ErrMissingControl = "missing control character" + ErrUnrecognizedControl = "unrecognized control character" + ErrTooFewHex = "insufficient hexadecimal digits" + ErrInvalidHex = "hex values may not be larger than 0x10FFFF" + ErrMalformedNameRef = "malformed \\k<...> named back 
reference" + ErrBadClassInCharRange = "cannot include class \\%v in character range" + ErrUnterminatedBracket = "unterminated [] set" + ErrSubtractionMustBeLast = "a subtraction must be the last element in a character class" + ErrReversedCharRange = "[x-y] range in reverse order" +) + +func (e ErrorCode) String() string { + return string(e) +} + +type parser struct { + stack *regexNode + group *regexNode + alternation *regexNode + concatenation *regexNode + unit *regexNode + + patternRaw string + pattern []rune + + currentPos int + specialCase *unicode.SpecialCase + + autocap int + capcount int + captop int + capsize int + + caps map[int]int + capnames map[string]int + + capnumlist []int + capnamelist []string + + options RegexOptions + optionsStack []RegexOptions + ignoreNextParen bool +} + +const ( + maxValueDiv10 int = math.MaxInt32 / 10 + maxValueMod10 = math.MaxInt32 % 10 +) + +// Parse converts a regex string into a parse tree +func Parse(re string, op RegexOptions) (*RegexTree, error) { + p := parser{ + options: op, + caps: make(map[int]int), + } + p.setPattern(re) + + if err := p.countCaptures(); err != nil { + return nil, err + } + + p.reset(op) + root, err := p.scanRegex() + + if err != nil { + return nil, err + } + tree := &RegexTree{ + root: root, + caps: p.caps, + capnumlist: p.capnumlist, + captop: p.captop, + Capnames: p.capnames, + Caplist: p.capnamelist, + options: op, + } + + if tree.options&Debug > 0 { + os.Stdout.WriteString(tree.Dump()) + } + + return tree, nil +} + +func (p *parser) setPattern(pattern string) { + p.patternRaw = pattern + p.pattern = make([]rune, 0, len(pattern)) + + //populate our rune array to handle utf8 encoding + for _, r := range pattern { + p.pattern = append(p.pattern, r) + } +} +func (p *parser) getErr(code ErrorCode, args ...interface{}) error { + return &Error{Code: code, Expr: p.patternRaw, Args: args} +} + +func (p *parser) noteCaptureSlot(i, pos int) { + if _, ok := p.caps[i]; !ok { + // the rhs of the hashtable 
isn't used in the parser + p.caps[i] = pos + p.capcount++ + + if p.captop <= i { + if i == math.MaxInt32 { + p.captop = i + } else { + p.captop = i + 1 + } + } + } +} + +func (p *parser) noteCaptureName(name string, pos int) { + if p.capnames == nil { + p.capnames = make(map[string]int) + } + + if _, ok := p.capnames[name]; !ok { + p.capnames[name] = pos + p.capnamelist = append(p.capnamelist, name) + } +} + +func (p *parser) assignNameSlots() { + if p.capnames != nil { + for _, name := range p.capnamelist { + for p.isCaptureSlot(p.autocap) { + p.autocap++ + } + pos := p.capnames[name] + p.capnames[name] = p.autocap + p.noteCaptureSlot(p.autocap, pos) + + p.autocap++ + } + } + + // if the caps array has at least one gap, construct the list of used slots + if p.capcount < p.captop { + p.capnumlist = make([]int, p.capcount) + i := 0 + + for k := range p.caps { + p.capnumlist[i] = k + i++ + } + + sort.Ints(p.capnumlist) + } + + // merge capsnumlist into capnamelist + if p.capnames != nil || p.capnumlist != nil { + var oldcapnamelist []string + var next int + var k int + + if p.capnames == nil { + oldcapnamelist = nil + p.capnames = make(map[string]int) + p.capnamelist = []string{} + next = -1 + } else { + oldcapnamelist = p.capnamelist + p.capnamelist = []string{} + next = p.capnames[oldcapnamelist[0]] + } + + for i := 0; i < p.capcount; i++ { + j := i + if p.capnumlist != nil { + j = p.capnumlist[i] + } + + if next == j { + p.capnamelist = append(p.capnamelist, oldcapnamelist[k]) + k++ + + if k == len(oldcapnamelist) { + next = -1 + } else { + next = p.capnames[oldcapnamelist[k]] + } + + } else { + //feature: culture? + str := strconv.Itoa(j) + p.capnamelist = append(p.capnamelist, str) + p.capnames[str] = j + } + } + } +} + +func (p *parser) consumeAutocap() int { + r := p.autocap + p.autocap++ + return r +} + +// CountCaptures is a prescanner for deducing the slots used for +// captures by doing a partial tokenization of the pattern. 
+func (p *parser) countCaptures() error { + var ch rune + + p.noteCaptureSlot(0, 0) + + p.autocap = 1 + + for p.charsRight() > 0 { + pos := p.textpos() + ch = p.moveRightGetChar() + switch ch { + case '\\': + if p.charsRight() > 0 { + p.scanBackslash(true) + } + + case '#': + if p.useOptionX() { + p.moveLeft() + p.scanBlank() + } + + case '[': + p.scanCharSet(false, true) + + case ')': + if !p.emptyOptionsStack() { + p.popOptions() + } + + case '(': + if p.charsRight() >= 2 && p.rightChar(1) == '#' && p.rightChar(0) == '?' { + p.moveLeft() + p.scanBlank() + } else { + p.pushOptions() + if p.charsRight() > 0 && p.rightChar(0) == '?' { + // we have (?... + p.moveRight(1) + + if p.charsRight() > 1 && (p.rightChar(0) == '<' || p.rightChar(0) == '\'') { + // named group: (?<... or (?'... + + p.moveRight(1) + ch = p.rightChar(0) + + if ch != '0' && IsWordChar(ch) { + if ch >= '1' && ch <= '9' { + dec, err := p.scanDecimal() + if err != nil { + return err + } + p.noteCaptureSlot(dec, pos) + } else { + p.noteCaptureName(p.scanCapname(), pos) + } + } + } else if p.useRE2() && p.charsRight() > 2 && (p.rightChar(0) == 'P' && p.rightChar(1) == '<') { + // RE2-compat (?P<) + p.moveRight(2) + ch = p.rightChar(0) + if IsWordChar(ch) { + p.noteCaptureName(p.scanCapname(), pos) + } + + } else { + // (?... + + // get the options if it's an option construct (?cimsx-cimsx...) 
+ p.scanOptions() + + if p.charsRight() > 0 { + if p.rightChar(0) == ')' { + // (?cimsx-cimsx) + p.moveRight(1) + p.popKeepOptions() + } else if p.rightChar(0) == '(' { + // alternation construct: (?(foo)yes|no) + // ignore the next paren so we don't capture the condition + p.ignoreNextParen = true + + // break from here so we don't reset ignoreNextParen + continue + } + } + } + } else { + if !p.useOptionN() && !p.ignoreNextParen { + p.noteCaptureSlot(p.consumeAutocap(), pos) + } + } + } + + p.ignoreNextParen = false + + } + } + + p.assignNameSlots() + return nil +} + +func (p *parser) reset(topopts RegexOptions) { + p.currentPos = 0 + p.autocap = 1 + p.ignoreNextParen = false + + if len(p.optionsStack) > 0 { + p.optionsStack = p.optionsStack[:0] + } + + p.options = topopts + p.stack = nil +} + +func (p *parser) scanRegex() (*regexNode, error) { + ch := '@' // nonspecial ch, means at beginning + isQuant := false + + p.startGroup(newRegexNodeMN(ntCapture, p.options, 0, -1)) + + for p.charsRight() > 0 { + wasPrevQuantifier := isQuant + isQuant = false + + if err := p.scanBlank(); err != nil { + return nil, err + } + + startpos := p.textpos() + + // move past all of the normal characters. We'll stop when we hit some kind of control character, + // or if IgnorePatternWhiteSpace is on, we'll stop when we see some whitespace. + if p.useOptionX() { + for p.charsRight() > 0 { + ch = p.rightChar(0) + //UGLY: clean up, this is ugly + if !(!isStopperX(ch) || (ch == '{' && !p.isTrueQuantifier())) { + break + } + p.moveRight(1) + } + } else { + for p.charsRight() > 0 { + ch = p.rightChar(0) + if !(!isSpecial(ch) || ch == '{' && !p.isTrueQuantifier()) { + break + } + p.moveRight(1) + } + } + + endpos := p.textpos() + + p.scanBlank() + + if p.charsRight() == 0 { + ch = '!' 
// nonspecial, means at end + } else if ch = p.rightChar(0); isSpecial(ch) { + isQuant = isQuantifier(ch) + p.moveRight(1) + } else { + ch = ' ' // nonspecial, means at ordinary char + } + + if startpos < endpos { + cchUnquantified := endpos - startpos + if isQuant { + cchUnquantified-- + } + wasPrevQuantifier = false + + if cchUnquantified > 0 { + p.addToConcatenate(startpos, cchUnquantified, false) + } + + if isQuant { + p.addUnitOne(p.charAt(endpos - 1)) + } + } + + switch ch { + case '!': + goto BreakOuterScan + + case ' ': + goto ContinueOuterScan + + case '[': + cc, err := p.scanCharSet(p.useOptionI(), false) + if err != nil { + return nil, err + } + p.addUnitSet(cc) + + case '(': + p.pushOptions() + + if grouper, err := p.scanGroupOpen(); err != nil { + return nil, err + } else if grouper == nil { + p.popKeepOptions() + } else { + p.pushGroup() + p.startGroup(grouper) + } + + continue + + case '|': + p.addAlternate() + goto ContinueOuterScan + + case ')': + if p.emptyStack() { + return nil, p.getErr(ErrUnexpectedParen) + } + + if err := p.addGroup(); err != nil { + return nil, err + } + if err := p.popGroup(); err != nil { + return nil, err + } + p.popOptions() + + if p.unit == nil { + goto ContinueOuterScan + } + + case '\\': + n, err := p.scanBackslash(false) + if err != nil { + return nil, err + } + p.addUnitNode(n) + + case '^': + if p.useOptionM() { + p.addUnitType(ntBol) + } else { + p.addUnitType(ntBeginning) + } + + case '$': + if p.useOptionM() { + p.addUnitType(ntEol) + } else { + p.addUnitType(ntEndZ) + } + + case '.': + if p.useOptionE() { + p.addUnitSet(ECMAAnyClass()) + } else if p.useOptionS() { + p.addUnitSet(AnyClass()) + } else { + p.addUnitNotone('\n') + } + + case '{', '*', '+', '?': + if p.unit == nil { + if wasPrevQuantifier { + return nil, p.getErr(ErrInvalidRepeatOp) + } else { + return nil, p.getErr(ErrMissingRepeatArgument) + } + } + p.moveLeft() + + default: + return nil, p.getErr(ErrInternalError) + } + + if err := p.scanBlank(); 
err != nil { + return nil, err + } + + if p.charsRight() > 0 { + isQuant = p.isTrueQuantifier() + } + if p.charsRight() == 0 || !isQuant { + //maintain odd C# assignment order -- not sure if required, could clean up? + p.addConcatenate() + goto ContinueOuterScan + } + + ch = p.moveRightGetChar() + + // Handle quantifiers + for p.unit != nil { + var min, max int + var lazy bool + + switch ch { + case '*': + min = 0 + max = math.MaxInt32 + + case '?': + min = 0 + max = 1 + + case '+': + min = 1 + max = math.MaxInt32 + + case '{': + { + var err error + startpos = p.textpos() + if min, err = p.scanDecimal(); err != nil { + return nil, err + } + max = min + if startpos < p.textpos() { + if p.charsRight() > 0 && p.rightChar(0) == ',' { + p.moveRight(1) + if p.charsRight() == 0 || p.rightChar(0) == '}' { + max = math.MaxInt32 + } else { + if max, err = p.scanDecimal(); err != nil { + return nil, err + } + } + } + } + + if startpos == p.textpos() || p.charsRight() == 0 || p.moveRightGetChar() != '}' { + p.addConcatenate() + p.textto(startpos - 1) + goto ContinueOuterScan + } + } + + default: + return nil, p.getErr(ErrInternalError) + } + + if err := p.scanBlank(); err != nil { + return nil, err + } + + if p.charsRight() == 0 || p.rightChar(0) != '?' 
{ + lazy = false + } else { + p.moveRight(1) + lazy = true + } + + if min > max { + return nil, p.getErr(ErrInvalidRepeatSize) + } + + p.addConcatenate3(lazy, min, max) + } + + ContinueOuterScan: + } + +BreakOuterScan: + ; + + if !p.emptyStack() { + return nil, p.getErr(ErrMissingParen) + } + + if err := p.addGroup(); err != nil { + return nil, err + } + + return p.unit, nil + +} + +/* + * Simple parsing for replacement patterns + */ +func (p *parser) scanReplacement() (*regexNode, error) { + var c, startpos int + + p.concatenation = newRegexNode(ntConcatenate, p.options) + + for { + c = p.charsRight() + if c == 0 { + break + } + + startpos = p.textpos() + + for c > 0 && p.rightChar(0) != '$' { + p.moveRight(1) + c-- + } + + p.addToConcatenate(startpos, p.textpos()-startpos, true) + + if c > 0 { + if p.moveRightGetChar() == '$' { + n, err := p.scanDollar() + if err != nil { + return nil, err + } + p.addUnitNode(n) + } + p.addConcatenate() + } + } + + return p.concatenation, nil +} + +/* + * Scans $ patterns recognized within replacement patterns + */ +func (p *parser) scanDollar() (*regexNode, error) { + if p.charsRight() == 0 { + return newRegexNodeCh(ntOne, p.options, '$'), nil + } + + ch := p.rightChar(0) + angled := false + backpos := p.textpos() + lastEndPos := backpos + + // Note angle + + if ch == '{' && p.charsRight() > 1 { + angled = true + p.moveRight(1) + ch = p.rightChar(0) + } + + // Try to parse backreference: \1 or \{1} or \{cap} + + if ch >= '0' && ch <= '9' { + if !angled && p.useOptionE() { + capnum := -1 + newcapnum := int(ch - '0') + p.moveRight(1) + if p.isCaptureSlot(newcapnum) { + capnum = newcapnum + lastEndPos = p.textpos() + } + + for p.charsRight() > 0 { + ch = p.rightChar(0) + if ch < '0' || ch > '9' { + break + } + digit := int(ch - '0') + if newcapnum > maxValueDiv10 || (newcapnum == maxValueDiv10 && digit > maxValueMod10) { + return nil, p.getErr(ErrCaptureGroupOutOfRange) + } + + newcapnum = newcapnum*10 + digit + + p.moveRight(1) + 
if p.isCaptureSlot(newcapnum) { + capnum = newcapnum + lastEndPos = p.textpos() + } + } + p.textto(lastEndPos) + if capnum >= 0 { + return newRegexNodeM(ntRef, p.options, capnum), nil + } + } else { + capnum, err := p.scanDecimal() + if err != nil { + return nil, err + } + if !angled || p.charsRight() > 0 && p.moveRightGetChar() == '}' { + if p.isCaptureSlot(capnum) { + return newRegexNodeM(ntRef, p.options, capnum), nil + } + } + } + } else if angled && IsWordChar(ch) { + capname := p.scanCapname() + + if p.charsRight() > 0 && p.moveRightGetChar() == '}' { + if p.isCaptureName(capname) { + return newRegexNodeM(ntRef, p.options, p.captureSlotFromName(capname)), nil + } + } + } else if !angled { + capnum := 1 + + switch ch { + case '$': + p.moveRight(1) + return newRegexNodeCh(ntOne, p.options, '$'), nil + case '&': + capnum = 0 + case '`': + capnum = replaceLeftPortion + case '\'': + capnum = replaceRightPortion + case '+': + capnum = replaceLastGroup + case '_': + capnum = replaceWholeString + } + + if capnum != 1 { + p.moveRight(1) + return newRegexNodeM(ntRef, p.options, capnum), nil + } + } + + // unrecognized $: literalize + + p.textto(backpos) + return newRegexNodeCh(ntOne, p.options, '$'), nil +} + +// scanGroupOpen scans chars following a '(' (not counting the '('), and returns +// a RegexNode for the type of group scanned, or nil if the group +// simply changed options (?cimsx-cimsx) or was a comment (#...). +func (p *parser) scanGroupOpen() (*regexNode, error) { + var ch rune + var nt nodeType + var err error + close := '>' + start := p.textpos() + + // just return a RegexNode if we have: + // 1. "(" followed by nothing + // 2. "(x" where x != ? + // 3. "(?)" + if p.charsRight() == 0 || p.rightChar(0) != '?' || (p.rightChar(0) == '?' 
&& (p.charsRight() > 1 && p.rightChar(1) == ')')) { + if p.useOptionN() || p.ignoreNextParen { + p.ignoreNextParen = false + return newRegexNode(ntGroup, p.options), nil + } + return newRegexNodeMN(ntCapture, p.options, p.consumeAutocap(), -1), nil + } + + p.moveRight(1) + + for { + if p.charsRight() == 0 { + break + } + + switch ch = p.moveRightGetChar(); ch { + case ':': + nt = ntGroup + + case '=': + p.options &= ^RightToLeft + nt = ntRequire + + case '!': + p.options &= ^RightToLeft + nt = ntPrevent + + case '>': + nt = ntGreedy + + case '\'': + close = '\'' + fallthrough + + case '<': + if p.charsRight() == 0 { + goto BreakRecognize + } + + switch ch = p.moveRightGetChar(); ch { + case '=': + if close == '\'' { + goto BreakRecognize + } + + p.options |= RightToLeft + nt = ntRequire + + case '!': + if close == '\'' { + goto BreakRecognize + } + + p.options |= RightToLeft + nt = ntPrevent + + default: + p.moveLeft() + capnum := -1 + uncapnum := -1 + proceed := false + + // grab part before - + + if ch >= '0' && ch <= '9' { + if capnum, err = p.scanDecimal(); err != nil { + return nil, err + } + + if !p.isCaptureSlot(capnum) { + capnum = -1 + } + + // check if we have bogus characters after the number + if p.charsRight() > 0 && !(p.rightChar(0) == close || p.rightChar(0) == '-') { + return nil, p.getErr(ErrInvalidGroupName) + } + if capnum == 0 { + return nil, p.getErr(ErrCapNumNotZero) + } + } else if IsWordChar(ch) { + capname := p.scanCapname() + + if p.isCaptureName(capname) { + capnum = p.captureSlotFromName(capname) + } + + // check if we have bogus character after the name + if p.charsRight() > 0 && !(p.rightChar(0) == close || p.rightChar(0) == '-') { + return nil, p.getErr(ErrInvalidGroupName) + } + } else if ch == '-' { + proceed = true + } else { + // bad group name - starts with something other than a word character and isn't a number + return nil, p.getErr(ErrInvalidGroupName) + } + + // grab part after - if any + + if (capnum != -1 || proceed == 
true) && p.charsRight() > 0 && p.rightChar(0) == '-' { + p.moveRight(1) + + //no more chars left, no closing char, etc + if p.charsRight() == 0 { + return nil, p.getErr(ErrInvalidGroupName) + } + + ch = p.rightChar(0) + if ch >= '0' && ch <= '9' { + if uncapnum, err = p.scanDecimal(); err != nil { + return nil, err + } + + if !p.isCaptureSlot(uncapnum) { + return nil, p.getErr(ErrUndefinedBackRef, uncapnum) + } + + // check if we have bogus characters after the number + if p.charsRight() > 0 && p.rightChar(0) != close { + return nil, p.getErr(ErrInvalidGroupName) + } + } else if IsWordChar(ch) { + uncapname := p.scanCapname() + + if !p.isCaptureName(uncapname) { + return nil, p.getErr(ErrUndefinedNameRef, uncapname) + } + uncapnum = p.captureSlotFromName(uncapname) + + // check if we have bogus character after the name + if p.charsRight() > 0 && p.rightChar(0) != close { + return nil, p.getErr(ErrInvalidGroupName) + } + } else { + // bad group name - starts with something other than a word character and isn't a number + return nil, p.getErr(ErrInvalidGroupName) + } + } + + // actually make the node + + if (capnum != -1 || uncapnum != -1) && p.charsRight() > 0 && p.moveRightGetChar() == close { + return newRegexNodeMN(ntCapture, p.options, capnum, uncapnum), nil + } + goto BreakRecognize + } + + case '(': + // alternation construct (?(...) 
| ) + + parenPos := p.textpos() + if p.charsRight() > 0 { + ch = p.rightChar(0) + + // check if the alternation condition is a backref + if ch >= '0' && ch <= '9' { + var capnum int + if capnum, err = p.scanDecimal(); err != nil { + return nil, err + } + if p.charsRight() > 0 && p.moveRightGetChar() == ')' { + if p.isCaptureSlot(capnum) { + return newRegexNodeM(ntTestref, p.options, capnum), nil + } + return nil, p.getErr(ErrUndefinedReference, capnum) + } + + return nil, p.getErr(ErrMalformedReference, capnum) + + } else if IsWordChar(ch) { + capname := p.scanCapname() + + if p.isCaptureName(capname) && p.charsRight() > 0 && p.moveRightGetChar() == ')' { + return newRegexNodeM(ntTestref, p.options, p.captureSlotFromName(capname)), nil + } + } + } + // not a backref + nt = ntTestgroup + p.textto(parenPos - 1) // jump to the start of the parentheses + p.ignoreNextParen = true // but make sure we don't try to capture the insides + + charsRight := p.charsRight() + if charsRight >= 3 && p.rightChar(1) == '?' { + rightchar2 := p.rightChar(2) + // disallow comments in the condition + if rightchar2 == '#' { + return nil, p.getErr(ErrAlternationCantHaveComment) + } + + // disallow named capture group (?<..>..) in the condition + if rightchar2 == '\'' { + return nil, p.getErr(ErrAlternationCantCapture) + } + + if charsRight >= 4 && (rightchar2 == '<' && p.rightChar(3) != '!' 
&& p.rightChar(3) != '=') { + return nil, p.getErr(ErrAlternationCantCapture) + } + } + + case 'P': + if p.useRE2() { + // support for P syntax + if p.charsRight() < 3 { + goto BreakRecognize + } + + ch = p.moveRightGetChar() + if ch != '<' { + goto BreakRecognize + } + + ch = p.moveRightGetChar() + p.moveLeft() + + if IsWordChar(ch) { + capnum := -1 + capname := p.scanCapname() + + if p.isCaptureName(capname) { + capnum = p.captureSlotFromName(capname) + } + + // check if we have bogus character after the name + if p.charsRight() > 0 && p.rightChar(0) != '>' { + return nil, p.getErr(ErrInvalidGroupName) + } + + // actually make the node + + if capnum != -1 && p.charsRight() > 0 && p.moveRightGetChar() == '>' { + return newRegexNodeMN(ntCapture, p.options, capnum, -1), nil + } + goto BreakRecognize + + } else { + // bad group name - starts with something other than a word character and isn't a number + return nil, p.getErr(ErrInvalidGroupName) + } + } + // if we're not using RE2 compat mode then + // we just behave like normal + fallthrough + + default: + p.moveLeft() + + nt = ntGroup + // disallow options in the children of a testgroup node + if p.group.t != ntTestgroup { + p.scanOptions() + } + if p.charsRight() == 0 { + goto BreakRecognize + } + + if ch = p.moveRightGetChar(); ch == ')' { + return nil, nil + } + + if ch != ':' { + goto BreakRecognize + } + + } + + return newRegexNode(nt, p.options), nil + } + +BreakRecognize: + + // break Recognize comes here + + return nil, p.getErr(ErrUnrecognizedGrouping, string(p.pattern[start:p.textpos()])) +} + +// scans backslash specials and basics +func (p *parser) scanBackslash(scanOnly bool) (*regexNode, error) { + + if p.charsRight() == 0 { + return nil, p.getErr(ErrIllegalEndEscape) + } + + switch ch := p.rightChar(0); ch { + case 'b', 'B', 'A', 'G', 'Z', 'z': + p.moveRight(1) + return newRegexNode(p.typeFromCode(ch), p.options), nil + + case 'w': + p.moveRight(1) + if p.useOptionE() { + return 
newRegexNodeSet(ntSet, p.options, ECMAWordClass()), nil + } + return newRegexNodeSet(ntSet, p.options, WordClass()), nil + + case 'W': + p.moveRight(1) + if p.useOptionE() { + return newRegexNodeSet(ntSet, p.options, NotECMAWordClass()), nil + } + return newRegexNodeSet(ntSet, p.options, NotWordClass()), nil + + case 's': + p.moveRight(1) + if p.useOptionE() { + return newRegexNodeSet(ntSet, p.options, ECMASpaceClass()), nil + } + return newRegexNodeSet(ntSet, p.options, SpaceClass()), nil + + case 'S': + p.moveRight(1) + if p.useOptionE() { + return newRegexNodeSet(ntSet, p.options, NotECMASpaceClass()), nil + } + return newRegexNodeSet(ntSet, p.options, NotSpaceClass()), nil + + case 'd': + p.moveRight(1) + if p.useOptionE() { + return newRegexNodeSet(ntSet, p.options, ECMADigitClass()), nil + } + return newRegexNodeSet(ntSet, p.options, DigitClass()), nil + + case 'D': + p.moveRight(1) + if p.useOptionE() { + return newRegexNodeSet(ntSet, p.options, NotECMADigitClass()), nil + } + return newRegexNodeSet(ntSet, p.options, NotDigitClass()), nil + + case 'p', 'P': + p.moveRight(1) + prop, err := p.parseProperty() + if err != nil { + return nil, err + } + cc := &CharSet{} + cc.addCategory(prop, (ch != 'p'), p.useOptionI(), p.patternRaw) + if p.useOptionI() { + cc.addLowercase() + } + + return newRegexNodeSet(ntSet, p.options, cc), nil + + default: + return p.scanBasicBackslash(scanOnly) + } +} + +// Scans \-style backreferences and character escapes +func (p *parser) scanBasicBackslash(scanOnly bool) (*regexNode, error) { + if p.charsRight() == 0 { + return nil, p.getErr(ErrIllegalEndEscape) + } + angled := false + k := false + close := '\x00' + + backpos := p.textpos() + ch := p.rightChar(0) + + // Allow \k instead of \, which is now deprecated. + + // According to ECMAScript specification, \k is only parsed as a named group reference if + // there is at least one group name in the regexp. 
+ // See https://www.ecma-international.org/ecma-262/#sec-isvalidregularexpressionliteral, step 7. + // Note, during the first (scanOnly) run we may not have all group names scanned, but that's ok. + if ch == 'k' && (!p.useOptionE() || len(p.capnames) > 0) { + if p.charsRight() >= 2 { + p.moveRight(1) + ch = p.moveRightGetChar() + + if ch == '<' || (!p.useOptionE() && ch == '\'') { // No support for \k'name' in ECMAScript + angled = true + if ch == '\'' { + close = '\'' + } else { + close = '>' + } + } + } + + if !angled || p.charsRight() <= 0 { + return nil, p.getErr(ErrMalformedNameRef) + } + + ch = p.rightChar(0) + k = true + + } else if !p.useOptionE() && (ch == '<' || ch == '\'') && p.charsRight() > 1 { // Note angle without \g + angled = true + if ch == '\'' { + close = '\'' + } else { + close = '>' + } + + p.moveRight(1) + ch = p.rightChar(0) + } + + // Try to parse backreference: \<1> or \ + + if angled && ch >= '0' && ch <= '9' { + capnum, err := p.scanDecimal() + if err != nil { + return nil, err + } + + if p.charsRight() > 0 && p.moveRightGetChar() == close { + if p.isCaptureSlot(capnum) { + return newRegexNodeM(ntRef, p.options, capnum), nil + } + return nil, p.getErr(ErrUndefinedBackRef, capnum) + } + } else if !angled && ch >= '1' && ch <= '9' { // Try to parse backreference or octal: \1 + capnum, err := p.scanDecimal() + if err != nil { + return nil, err + } + + if scanOnly { + return nil, nil + } + + if p.isCaptureSlot(capnum) { + return newRegexNodeM(ntRef, p.options, capnum), nil + } + if capnum <= 9 && !p.useOptionE() { + return nil, p.getErr(ErrUndefinedBackRef, capnum) + } + + } else if angled { + capname := p.scanCapname() + + if capname != "" && p.charsRight() > 0 && p.moveRightGetChar() == close { + + if scanOnly { + return nil, nil + } + + if p.isCaptureName(capname) { + return newRegexNodeM(ntRef, p.options, p.captureSlotFromName(capname)), nil + } + return nil, p.getErr(ErrUndefinedNameRef, capname) + } else { + if k { + return nil, 
p.getErr(ErrMalformedNameRef) + } + } + } + + // Not backreference: must be char code + + p.textto(backpos) + ch, err := p.scanCharEscape() + if err != nil { + return nil, err + } + + if scanOnly { + return nil, nil + } + + if p.useOptionI() { + ch = unicode.ToLower(ch) + } + + return newRegexNodeCh(ntOne, p.options, ch), nil +} + +// Scans X for \p{X} or \P{X} +func (p *parser) parseProperty() (string, error) { + if p.charsRight() < 3 { + return "", p.getErr(ErrIncompleteSlashP) + } + ch := p.moveRightGetChar() + if ch != '{' { + return "", p.getErr(ErrMalformedSlashP) + } + + startpos := p.textpos() + for p.charsRight() > 0 { + ch = p.moveRightGetChar() + if !(IsWordChar(ch) || ch == '-') { + p.moveLeft() + break + } + } + capname := string(p.pattern[startpos:p.textpos()]) + + if p.charsRight() == 0 || p.moveRightGetChar() != '}' { + return "", p.getErr(ErrIncompleteSlashP) + } + + if !isValidUnicodeCat(capname) { + return "", p.getErr(ErrUnknownSlashP, capname) + } + + return capname, nil +} + +// Returns ReNode type for zero-length assertions with a \ code. +func (p *parser) typeFromCode(ch rune) nodeType { + switch ch { + case 'b': + if p.useOptionE() { + return ntECMABoundary + } + return ntBoundary + case 'B': + if p.useOptionE() { + return ntNonECMABoundary + } + return ntNonboundary + case 'A': + return ntBeginning + case 'G': + return ntStart + case 'Z': + return ntEndZ + case 'z': + return ntEnd + default: + return ntNothing + } +} + +// Scans whitespace or x-mode comments. +func (p *parser) scanBlank() error { + if p.useOptionX() { + for { + for p.charsRight() > 0 && isSpace(p.rightChar(0)) { + p.moveRight(1) + } + + if p.charsRight() == 0 { + break + } + + if p.rightChar(0) == '#' { + for p.charsRight() > 0 && p.rightChar(0) != '\n' { + p.moveRight(1) + } + } else if p.charsRight() >= 3 && p.rightChar(2) == '#' && + p.rightChar(1) == '?' 
&& p.rightChar(0) == '(' { + for p.charsRight() > 0 && p.rightChar(0) != ')' { + p.moveRight(1) + } + if p.charsRight() == 0 { + return p.getErr(ErrUnterminatedComment) + } + p.moveRight(1) + } else { + break + } + } + } else { + for { + if p.charsRight() < 3 || p.rightChar(2) != '#' || + p.rightChar(1) != '?' || p.rightChar(0) != '(' { + return nil + } + + for p.charsRight() > 0 && p.rightChar(0) != ')' { + p.moveRight(1) + } + if p.charsRight() == 0 { + return p.getErr(ErrUnterminatedComment) + } + p.moveRight(1) + } + } + return nil +} + +func (p *parser) scanCapname() string { + startpos := p.textpos() + + for p.charsRight() > 0 { + if !IsWordChar(p.moveRightGetChar()) { + p.moveLeft() + break + } + } + + return string(p.pattern[startpos:p.textpos()]) +} + +//Scans contents of [] (not including []'s), and converts to a set. +func (p *parser) scanCharSet(caseInsensitive, scanOnly bool) (*CharSet, error) { + ch := '\x00' + chPrev := '\x00' + inRange := false + firstChar := true + closed := false + + var cc *CharSet + if !scanOnly { + cc = &CharSet{} + } + + if p.charsRight() > 0 && p.rightChar(0) == '^' { + p.moveRight(1) + if !scanOnly { + cc.negate = true + } + } + + for ; p.charsRight() > 0; firstChar = false { + fTranslatedChar := false + ch = p.moveRightGetChar() + if ch == ']' { + if !firstChar { + closed = true + break + } else if p.useOptionE() { + if !scanOnly { + cc.addRanges(NoneClass().ranges) + } + closed = true + break + } + + } else if ch == '\\' && p.charsRight() > 0 { + switch ch = p.moveRightGetChar(); ch { + case 'D', 'd': + if !scanOnly { + if inRange { + return nil, p.getErr(ErrBadClassInCharRange, ch) + } + cc.addDigit(p.useOptionE(), ch == 'D', p.patternRaw) + } + continue + + case 'S', 's': + if !scanOnly { + if inRange { + return nil, p.getErr(ErrBadClassInCharRange, ch) + } + cc.addSpace(p.useOptionE(), ch == 'S') + } + continue + + case 'W', 'w': + if !scanOnly { + if inRange { + return nil, p.getErr(ErrBadClassInCharRange, ch) + } + + 
cc.addWord(p.useOptionE(), ch == 'W') + } + continue + + case 'p', 'P': + if !scanOnly { + if inRange { + return nil, p.getErr(ErrBadClassInCharRange, ch) + } + prop, err := p.parseProperty() + if err != nil { + return nil, err + } + cc.addCategory(prop, (ch != 'p'), caseInsensitive, p.patternRaw) + } else { + p.parseProperty() + } + + continue + + case '-': + if !scanOnly { + cc.addRange(ch, ch) + } + continue + + default: + p.moveLeft() + var err error + ch, err = p.scanCharEscape() // non-literal character + if err != nil { + return nil, err + } + fTranslatedChar = true + break // this break will only break out of the switch + } + } else if ch == '[' { + // This is code for Posix style properties - [:Ll:] or [:IsTibetan:]. + // It currently doesn't do anything other than skip the whole thing! + if p.charsRight() > 0 && p.rightChar(0) == ':' && !inRange { + savePos := p.textpos() + + p.moveRight(1) + negate := false + if p.charsRight() > 1 && p.rightChar(0) == '^' { + negate = true + p.moveRight(1) + } + + nm := p.scanCapname() // snag the name + if !scanOnly && p.useRE2() { + // look up the name since these are valid for RE2 + // add the group based on the name + if ok := cc.addNamedASCII(nm, negate); !ok { + return nil, p.getErr(ErrInvalidCharRange) + } + } + if p.charsRight() < 2 || p.moveRightGetChar() != ':' || p.moveRightGetChar() != ']' { + p.textto(savePos) + } else if p.useRE2() { + // move on + continue + } + } + } + + if inRange { + inRange = false + if !scanOnly { + if ch == '[' && !fTranslatedChar && !firstChar { + // We thought we were in a range, but we're actually starting a subtraction. + // In that case, we'll add chPrev to our char class, skip the opening [, and + // scan the new character class recursively. 
+ cc.addChar(chPrev) + sub, err := p.scanCharSet(caseInsensitive, false) + if err != nil { + return nil, err + } + cc.addSubtraction(sub) + + if p.charsRight() > 0 && p.rightChar(0) != ']' { + return nil, p.getErr(ErrSubtractionMustBeLast) + } + } else { + // a regular range, like a-z + if chPrev > ch { + return nil, p.getErr(ErrReversedCharRange) + } + cc.addRange(chPrev, ch) + } + } + } else if p.charsRight() >= 2 && p.rightChar(0) == '-' && p.rightChar(1) != ']' { + // this could be the start of a range + chPrev = ch + inRange = true + p.moveRight(1) + } else if p.charsRight() >= 1 && ch == '-' && !fTranslatedChar && p.rightChar(0) == '[' && !firstChar { + // we aren't in a range, and now there is a subtraction. Usually this happens + // only when a subtraction follows a range, like [a-z-[b]] + if !scanOnly { + p.moveRight(1) + sub, err := p.scanCharSet(caseInsensitive, false) + if err != nil { + return nil, err + } + cc.addSubtraction(sub) + + if p.charsRight() > 0 && p.rightChar(0) != ']' { + return nil, p.getErr(ErrSubtractionMustBeLast) + } + } else { + p.moveRight(1) + p.scanCharSet(caseInsensitive, true) + } + } else { + if !scanOnly { + cc.addRange(ch, ch) + } + } + } + + if !closed { + return nil, p.getErr(ErrUnterminatedBracket) + } + + if !scanOnly && caseInsensitive { + cc.addLowercase() + } + + return cc, nil +} + +// Scans any number of decimal digits (pegs value at 2^31-1 if too large) +func (p *parser) scanDecimal() (int, error) { + i := 0 + var d int + + for p.charsRight() > 0 { + d = int(p.rightChar(0) - '0') + if d < 0 || d > 9 { + break + } + p.moveRight(1) + + if i > maxValueDiv10 || (i == maxValueDiv10 && d > maxValueMod10) { + return 0, p.getErr(ErrCaptureGroupOutOfRange) + } + + i *= 10 + i += d + } + + return int(i), nil +} + +// Returns true for options allowed only at the top level +func isOnlyTopOption(option RegexOptions) bool { + return option == RightToLeft || option == ECMAScript || option == RE2 +} + +// Scans cimsx-cimsx option 
string, stops at the first unrecognized char. +func (p *parser) scanOptions() { + + for off := false; p.charsRight() > 0; p.moveRight(1) { + ch := p.rightChar(0) + + if ch == '-' { + off = true + } else if ch == '+' { + off = false + } else { + option := optionFromCode(ch) + if option == 0 || isOnlyTopOption(option) { + return + } + + if off { + p.options &= ^option + } else { + p.options |= option + } + } + } +} + +// Scans \ code for escape codes that map to single unicode chars. +func (p *parser) scanCharEscape() (r rune, err error) { + + ch := p.moveRightGetChar() + + if ch >= '0' && ch <= '7' { + p.moveLeft() + return p.scanOctal(), nil + } + + pos := p.textpos() + + switch ch { + case 'x': + // support for \x{HEX} syntax from Perl and PCRE + if p.charsRight() > 0 && p.rightChar(0) == '{' { + if p.useOptionE() { + return ch, nil + } + p.moveRight(1) + return p.scanHexUntilBrace() + } else { + r, err = p.scanHex(2) + } + case 'u': + r, err = p.scanHex(4) + case 'a': + return '\u0007', nil + case 'b': + return '\b', nil + case 'e': + return '\u001B', nil + case 'f': + return '\f', nil + case 'n': + return '\n', nil + case 'r': + return '\r', nil + case 't': + return '\t', nil + case 'v': + return '\u000B', nil + case 'c': + r, err = p.scanControl() + default: + if !p.useOptionE() && IsWordChar(ch) { + return 0, p.getErr(ErrUnrecognizedEscape, string(ch)) + } + return ch, nil + } + if err != nil && p.useOptionE() { + p.textto(pos) + return ch, nil + } + return +} + +// Grabs and converts an ascii control character +func (p *parser) scanControl() (rune, error) { + if p.charsRight() <= 0 { + return 0, p.getErr(ErrMissingControl) + } + + ch := p.moveRightGetChar() + + // \ca interpreted as \cA + + if ch >= 'a' && ch <= 'z' { + ch = (ch - ('a' - 'A')) + } + ch = (ch - '@') + if ch >= 0 && ch < ' ' { + return ch, nil + } + + return 0, p.getErr(ErrUnrecognizedControl) + +} + +// Scan hex digits until we hit a closing brace. 
+// Non-hex digits, hex value too large for UTF-8, or running out of chars are errors +func (p *parser) scanHexUntilBrace() (rune, error) { + // PCRE spec reads like unlimited hex digits are allowed, but unicode has a limit + // so we can enforce that + i := 0 + hasContent := false + + for p.charsRight() > 0 { + ch := p.moveRightGetChar() + if ch == '}' { + // hit our close brace, we're done here + // prevent \x{} + if !hasContent { + return 0, p.getErr(ErrTooFewHex) + } + return rune(i), nil + } + hasContent = true + // no brace needs to be hex digit + d := hexDigit(ch) + if d < 0 { + return 0, p.getErr(ErrMissingBrace) + } + + i *= 0x10 + i += d + + if i > unicode.MaxRune { + return 0, p.getErr(ErrInvalidHex) + } + } + + // we only make it here if we run out of digits without finding the brace + return 0, p.getErr(ErrMissingBrace) +} + +// Scans exactly c hex digits (c=2 for \xFF, c=4 for \uFFFF) +func (p *parser) scanHex(c int) (rune, error) { + + i := 0 + + if p.charsRight() >= c { + for c > 0 { + d := hexDigit(p.moveRightGetChar()) + if d < 0 { + break + } + i *= 0x10 + i += d + c-- + } + } + + if c > 0 { + return 0, p.getErr(ErrTooFewHex) + } + + return rune(i), nil +} + +// Returns n <= 0xF for a hex digit. +func hexDigit(ch rune) int { + + if d := uint(ch - '0'); d <= 9 { + return int(d) + } + + if d := uint(ch - 'a'); d <= 5 { + return int(d + 0xa) + } + + if d := uint(ch - 'A'); d <= 5 { + return int(d + 0xa) + } + + return -1 +} + +// Scans up to three octal digits (stops before exceeding 0377). 
+func (p *parser) scanOctal() rune { + // Consume octal chars only up to 3 digits and value 0377 + + c := 3 + + if c > p.charsRight() { + c = p.charsRight() + } + + //we know the first char is good because the caller had to check + i := 0 + d := int(p.rightChar(0) - '0') + for c > 0 && d <= 7 && d >= 0 { + if i >= 0x20 && p.useOptionE() { + break + } + i *= 8 + i += d + c-- + + p.moveRight(1) + if !p.rightMost() { + d = int(p.rightChar(0) - '0') + } + } + + // Octal codes only go up to 255. Any larger and the behavior that Perl follows + // is simply to truncate the high bits. + i &= 0xFF + + return rune(i) +} + +// Returns the current parsing position. +func (p *parser) textpos() int { + return p.currentPos +} + +// Zaps to a specific parsing position. +func (p *parser) textto(pos int) { + p.currentPos = pos +} + +// Returns the char at the right of the current parsing position and advances to the right. +func (p *parser) moveRightGetChar() rune { + ch := p.pattern[p.currentPos] + p.currentPos++ + return ch +} + +// Moves the current position to the right. +func (p *parser) moveRight(i int) { + // default would be 1 + p.currentPos += i +} + +// Moves the current parsing position one to the left. +func (p *parser) moveLeft() { + p.currentPos-- +} + +// Returns the char left of the current parsing position. +func (p *parser) charAt(i int) rune { + return p.pattern[i] +} + +// Returns the char i chars right of the current parsing position. +func (p *parser) rightChar(i int) rune { + // default would be 0 + return p.pattern[p.currentPos+i] +} + +// Number of characters to the right of the current parsing position. 
+func (p *parser) charsRight() int { + return len(p.pattern) - p.currentPos +} + +func (p *parser) rightMost() bool { + return p.currentPos == len(p.pattern) +} + +// Looks up the slot number for a given name +func (p *parser) captureSlotFromName(capname string) int { + return p.capnames[capname] +} + +// True if the capture slot was noted +func (p *parser) isCaptureSlot(i int) bool { + if p.caps != nil { + _, ok := p.caps[i] + return ok + } + + return (i >= 0 && i < p.capsize) +} + +// Looks up the slot number for a given name +func (p *parser) isCaptureName(capname string) bool { + if p.capnames == nil { + return false + } + + _, ok := p.capnames[capname] + return ok +} + +// option shortcuts + +// True if N option disabling '(' autocapture is on. +func (p *parser) useOptionN() bool { + return (p.options & ExplicitCapture) != 0 +} + +// True if I option enabling case-insensitivity is on. +func (p *parser) useOptionI() bool { + return (p.options & IgnoreCase) != 0 +} + +// True if M option altering meaning of $ and ^ is on. +func (p *parser) useOptionM() bool { + return (p.options & Multiline) != 0 +} + +// True if S option altering meaning of . is on. +func (p *parser) useOptionS() bool { + return (p.options & Singleline) != 0 +} + +// True if X option enabling whitespace/comment mode is on. +func (p *parser) useOptionX() bool { + return (p.options & IgnorePatternWhitespace) != 0 +} + +// True if E option enabling ECMAScript behavior on. +func (p *parser) useOptionE() bool { + return (p.options & ECMAScript) != 0 +} + +// true to use RE2 compatibility parsing behavior. +func (p *parser) useRE2() bool { + return (p.options & RE2) != 0 +} + +// True if options stack is empty. 
+func (p *parser) emptyOptionsStack() bool { + return len(p.optionsStack) == 0 +} + +// Finish the current quantifiable (when a quantifier is not found or is not possible) +func (p *parser) addConcatenate() { + // The first (| inside a Testgroup group goes directly to the group + p.concatenation.addChild(p.unit) + p.unit = nil +} + +// Finish the current quantifiable (when a quantifier is found) +func (p *parser) addConcatenate3(lazy bool, min, max int) { + p.concatenation.addChild(p.unit.makeQuantifier(lazy, min, max)) + p.unit = nil +} + +// Sets the current unit to a single char node +func (p *parser) addUnitOne(ch rune) { + if p.useOptionI() { + ch = unicode.ToLower(ch) + } + + p.unit = newRegexNodeCh(ntOne, p.options, ch) +} + +// Sets the current unit to a single inverse-char node +func (p *parser) addUnitNotone(ch rune) { + if p.useOptionI() { + ch = unicode.ToLower(ch) + } + + p.unit = newRegexNodeCh(ntNotone, p.options, ch) +} + +// Sets the current unit to a single set node +func (p *parser) addUnitSet(set *CharSet) { + p.unit = newRegexNodeSet(ntSet, p.options, set) +} + +// Sets the current unit to a subtree +func (p *parser) addUnitNode(node *regexNode) { + p.unit = node +} + +// Sets the current unit to an assertion of the specified type +func (p *parser) addUnitType(t nodeType) { + p.unit = newRegexNode(t, p.options) +} + +// Finish the current group (in response to a ')' or end) +func (p *parser) addGroup() error { + if p.group.t == ntTestgroup || p.group.t == ntTestref { + p.group.addChild(p.concatenation.reverseLeft()) + if (p.group.t == ntTestref && len(p.group.children) > 2) || len(p.group.children) > 3 { + return p.getErr(ErrTooManyAlternates) + } + } else { + p.alternation.addChild(p.concatenation.reverseLeft()) + p.group.addChild(p.alternation) + } + + p.unit = p.group + return nil +} + +// Pops the option stack, but keeps the current options unchanged. 
+func (p *parser) popKeepOptions() { + lastIdx := len(p.optionsStack) - 1 + p.optionsStack = p.optionsStack[:lastIdx] +} + +// Recalls options from the stack. +func (p *parser) popOptions() { + lastIdx := len(p.optionsStack) - 1 + // get the last item on the stack and then remove it by reslicing + p.options = p.optionsStack[lastIdx] + p.optionsStack = p.optionsStack[:lastIdx] +} + +// Saves options on a stack. +func (p *parser) pushOptions() { + p.optionsStack = append(p.optionsStack, p.options) +} + +// Add a string to the last concatenate. +func (p *parser) addToConcatenate(pos, cch int, isReplacement bool) { + var node *regexNode + + if cch == 0 { + return + } + + if cch > 1 { + str := p.pattern[pos : pos+cch] + + if p.useOptionI() && !isReplacement { + // We do the ToLower character by character for consistency. With surrogate chars, doing + // a ToLower on the entire string could actually change the surrogate pair. This is more correct + // linguistically, but since Regex doesn't support surrogates, it's more important to be + // consistent. 
+ for i := 0; i < len(str); i++ { + str[i] = unicode.ToLower(str[i]) + } + } + + node = newRegexNodeStr(ntMulti, p.options, str) + } else { + ch := p.charAt(pos) + + if p.useOptionI() && !isReplacement { + ch = unicode.ToLower(ch) + } + + node = newRegexNodeCh(ntOne, p.options, ch) + } + + p.concatenation.addChild(node) +} + +// Push the parser state (in response to an open paren) +func (p *parser) pushGroup() { + p.group.next = p.stack + p.alternation.next = p.group + p.concatenation.next = p.alternation + p.stack = p.concatenation +} + +// Remember the pushed state (in response to a ')') +func (p *parser) popGroup() error { + p.concatenation = p.stack + p.alternation = p.concatenation.next + p.group = p.alternation.next + p.stack = p.group.next + + // The first () inside a Testgroup group goes directly to the group + if p.group.t == ntTestgroup && len(p.group.children) == 0 { + if p.unit == nil { + return p.getErr(ErrConditionalExpression) + } + + p.group.addChild(p.unit) + p.unit = nil + } + return nil +} + +// True if the group stack is empty. +func (p *parser) emptyStack() bool { + return p.stack == nil +} + +// Start a new round for the parser state (in response to an open paren or string start) +func (p *parser) startGroup(openGroup *regexNode) { + p.group = openGroup + p.alternation = newRegexNode(ntAlternate, p.options) + p.concatenation = newRegexNode(ntConcatenate, p.options) +} + +// Finish the current concatenation (in response to a |) +func (p *parser) addAlternate() { + // The | parts inside a Testgroup group go directly to the group + + if p.group.t == ntTestgroup || p.group.t == ntTestref { + p.group.addChild(p.concatenation.reverseLeft()) + } else { + p.alternation.addChild(p.concatenation.reverseLeft()) + } + + p.concatenation = newRegexNode(ntConcatenate, p.options) +} + +// For categorizing ascii characters. 
+ +const ( + Q byte = 5 // quantifier + S = 4 // ordinary stopper + Z = 3 // ScanBlank stopper + X = 2 // whitespace + E = 1 // should be escaped +) + +var _category = []byte{ + //01 2 3 4 5 6 7 8 9 A B C D E F 0 1 2 3 4 5 6 7 8 9 A B C D E F + 0, 0, 0, 0, 0, 0, 0, 0, 0, X, X, X, X, X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + // ! " # $ % & ' ( ) * + , - . / 0 1 2 3 4 5 6 7 8 9 : ; < = > ? + X, 0, 0, Z, S, 0, 0, 0, S, S, Q, Q, 0, 0, S, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Q, + //@A B C D E F G H I J K L M N O P Q R S T U V W X Y Z [ \ ] ^ _ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, S, S, 0, S, 0, + //'a b c d e f g h i j k l m n o p q r s t u v w x y z { | } ~ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Q, S, 0, 0, 0, +} + +func isSpace(ch rune) bool { + return (ch <= ' ' && _category[ch] == X) +} + +// Returns true for those characters that terminate a string of ordinary chars. +func isSpecial(ch rune) bool { + return (ch <= '|' && _category[ch] >= S) +} + +// Returns true for those characters that terminate a string of ordinary chars. +func isStopperX(ch rune) bool { + return (ch <= '|' && _category[ch] >= X) +} + +// Returns true for those characters that begin a quantifier. 
+func isQuantifier(ch rune) bool { + return (ch <= '{' && _category[ch] >= Q) +} + +func (p *parser) isTrueQuantifier() bool { + nChars := p.charsRight() + if nChars == 0 { + return false + } + + startpos := p.textpos() + ch := p.charAt(startpos) + if ch != '{' { + return ch <= '{' && _category[ch] >= Q + } + + //UGLY: this is ugly -- the original code was ugly too + pos := startpos + for { + nChars-- + if nChars <= 0 { + break + } + pos++ + ch = p.charAt(pos) + if ch < '0' || ch > '9' { + break + } + } + + if nChars == 0 || pos-startpos == 1 { + return false + } + if ch == '}' { + return true + } + if ch != ',' { + return false + } + for { + nChars-- + if nChars <= 0 { + break + } + pos++ + ch = p.charAt(pos) + if ch < '0' || ch > '9' { + break + } + } + + return nChars > 0 && ch == '}' +} diff --git a/vendor/github.com/dlclark/regexp2/syntax/prefix.go b/vendor/github.com/dlclark/regexp2/syntax/prefix.go new file mode 100644 index 0000000000..f671688629 --- /dev/null +++ b/vendor/github.com/dlclark/regexp2/syntax/prefix.go @@ -0,0 +1,896 @@ +package syntax + +import ( + "bytes" + "fmt" + "strconv" + "unicode" + "unicode/utf8" +) + +type Prefix struct { + PrefixStr []rune + PrefixSet CharSet + CaseInsensitive bool +} + +// It takes a RegexTree and computes the set of chars that can start it. +func getFirstCharsPrefix(tree *RegexTree) *Prefix { + s := regexFcd{ + fcStack: make([]regexFc, 32), + intStack: make([]int, 32), + } + fc := s.regexFCFromRegexTree(tree) + + if fc == nil || fc.nullable || fc.cc.IsEmpty() { + return nil + } + fcSet := fc.getFirstChars() + return &Prefix{PrefixSet: fcSet, CaseInsensitive: fc.caseInsensitive} +} + +type regexFcd struct { + intStack []int + intDepth int + fcStack []regexFc + fcDepth int + skipAllChildren bool // don't process any more children at the current level + skipchild bool // don't process the current child. + failed bool +} + +/* + * The main FC computation. 
It does a shortcutted depth-first walk + * through the tree and calls CalculateFC to emits code before + * and after each child of an interior node, and at each leaf. + */ +func (s *regexFcd) regexFCFromRegexTree(tree *RegexTree) *regexFc { + curNode := tree.root + curChild := 0 + + for { + if len(curNode.children) == 0 { + // This is a leaf node + s.calculateFC(curNode.t, curNode, 0) + } else if curChild < len(curNode.children) && !s.skipAllChildren { + // This is an interior node, and we have more children to analyze + s.calculateFC(curNode.t|beforeChild, curNode, curChild) + + if !s.skipchild { + curNode = curNode.children[curChild] + // this stack is how we get a depth first walk of the tree. + s.pushInt(curChild) + curChild = 0 + } else { + curChild++ + s.skipchild = false + } + continue + } + + // This is an interior node where we've finished analyzing all the children, or + // the end of a leaf node. + s.skipAllChildren = false + + if s.intIsEmpty() { + break + } + + curChild = s.popInt() + curNode = curNode.next + + s.calculateFC(curNode.t|afterChild, curNode, curChild) + if s.failed { + return nil + } + + curChild++ + } + + if s.fcIsEmpty() { + return nil + } + + return s.popFC() +} + +// To avoid recursion, we use a simple integer stack. +// This is the push. +func (s *regexFcd) pushInt(I int) { + if s.intDepth >= len(s.intStack) { + expanded := make([]int, s.intDepth*2) + copy(expanded, s.intStack) + s.intStack = expanded + } + + s.intStack[s.intDepth] = I + s.intDepth++ +} + +// True if the stack is empty. +func (s *regexFcd) intIsEmpty() bool { + return s.intDepth == 0 +} + +// This is the pop. +func (s *regexFcd) popInt() int { + s.intDepth-- + return s.intStack[s.intDepth] +} + +// We also use a stack of RegexFC objects. +// This is the push. 
+func (s *regexFcd) pushFC(fc regexFc) { + if s.fcDepth >= len(s.fcStack) { + expanded := make([]regexFc, s.fcDepth*2) + copy(expanded, s.fcStack) + s.fcStack = expanded + } + + s.fcStack[s.fcDepth] = fc + s.fcDepth++ +} + +// True if the stack is empty. +func (s *regexFcd) fcIsEmpty() bool { + return s.fcDepth == 0 +} + +// This is the pop. +func (s *regexFcd) popFC() *regexFc { + s.fcDepth-- + return &s.fcStack[s.fcDepth] +} + +// This is the top. +func (s *regexFcd) topFC() *regexFc { + return &s.fcStack[s.fcDepth-1] +} + +// Called in Beforechild to prevent further processing of the current child +func (s *regexFcd) skipChild() { + s.skipchild = true +} + +// FC computation and shortcut cases for each node type +func (s *regexFcd) calculateFC(nt nodeType, node *regexNode, CurIndex int) { + //fmt.Printf("NodeType: %v, CurIndex: %v, Desc: %v\n", nt, CurIndex, node.description()) + ci := false + rtl := false + + if nt <= ntRef { + if (node.options & IgnoreCase) != 0 { + ci = true + } + if (node.options & RightToLeft) != 0 { + rtl = true + } + } + + switch nt { + case ntConcatenate | beforeChild, ntAlternate | beforeChild, ntTestref | beforeChild, ntLoop | beforeChild, ntLazyloop | beforeChild: + break + + case ntTestgroup | beforeChild: + if CurIndex == 0 { + s.skipChild() + } + break + + case ntEmpty: + s.pushFC(regexFc{nullable: true}) + break + + case ntConcatenate | afterChild: + if CurIndex != 0 { + child := s.popFC() + cumul := s.topFC() + + s.failed = !cumul.addFC(*child, true) + } + + fc := s.topFC() + if !fc.nullable { + s.skipAllChildren = true + } + break + + case ntTestgroup | afterChild: + if CurIndex > 1 { + child := s.popFC() + cumul := s.topFC() + + s.failed = !cumul.addFC(*child, false) + } + break + + case ntAlternate | afterChild, ntTestref | afterChild: + if CurIndex != 0 { + child := s.popFC() + cumul := s.topFC() + + s.failed = !cumul.addFC(*child, false) + } + break + + case ntLoop | afterChild, ntLazyloop | afterChild: + if node.m == 0 { + 
fc := s.topFC() + fc.nullable = true + } + break + + case ntGroup | beforeChild, ntGroup | afterChild, ntCapture | beforeChild, ntCapture | afterChild, ntGreedy | beforeChild, ntGreedy | afterChild: + break + + case ntRequire | beforeChild, ntPrevent | beforeChild: + s.skipChild() + s.pushFC(regexFc{nullable: true}) + break + + case ntRequire | afterChild, ntPrevent | afterChild: + break + + case ntOne, ntNotone: + s.pushFC(newRegexFc(node.ch, nt == ntNotone, false, ci)) + break + + case ntOneloop, ntOnelazy: + s.pushFC(newRegexFc(node.ch, false, node.m == 0, ci)) + break + + case ntNotoneloop, ntNotonelazy: + s.pushFC(newRegexFc(node.ch, true, node.m == 0, ci)) + break + + case ntMulti: + if len(node.str) == 0 { + s.pushFC(regexFc{nullable: true}) + } else if !rtl { + s.pushFC(newRegexFc(node.str[0], false, false, ci)) + } else { + s.pushFC(newRegexFc(node.str[len(node.str)-1], false, false, ci)) + } + break + + case ntSet: + s.pushFC(regexFc{cc: node.set.Copy(), nullable: false, caseInsensitive: ci}) + break + + case ntSetloop, ntSetlazy: + s.pushFC(regexFc{cc: node.set.Copy(), nullable: node.m == 0, caseInsensitive: ci}) + break + + case ntRef: + s.pushFC(regexFc{cc: *AnyClass(), nullable: true, caseInsensitive: false}) + break + + case ntNothing, ntBol, ntEol, ntBoundary, ntNonboundary, ntECMABoundary, ntNonECMABoundary, ntBeginning, ntStart, ntEndZ, ntEnd: + s.pushFC(regexFc{nullable: true}) + break + + default: + panic(fmt.Sprintf("unexpected op code: %v", nt)) + } +} + +type regexFc struct { + cc CharSet + nullable bool + caseInsensitive bool +} + +func newRegexFc(ch rune, not, nullable, caseInsensitive bool) regexFc { + r := regexFc{ + caseInsensitive: caseInsensitive, + nullable: nullable, + } + if not { + if ch > 0 { + r.cc.addRange('\x00', ch-1) + } + if ch < 0xFFFF { + r.cc.addRange(ch+1, utf8.MaxRune) + } + } else { + r.cc.addRange(ch, ch) + } + return r +} + +func (r *regexFc) getFirstChars() CharSet { + if r.caseInsensitive { + r.cc.addLowercase() + 
} + + return r.cc +} + +func (r *regexFc) addFC(fc regexFc, concatenate bool) bool { + if !r.cc.IsMergeable() || !fc.cc.IsMergeable() { + return false + } + + if concatenate { + if !r.nullable { + return true + } + + if !fc.nullable { + r.nullable = false + } + } else { + if fc.nullable { + r.nullable = true + } + } + + r.caseInsensitive = r.caseInsensitive || fc.caseInsensitive + r.cc.addSet(fc.cc) + + return true +} + +// This is a related computation: it takes a RegexTree and computes the +// leading substring if it sees one. It's quite trivial and gives up easily. +func getPrefix(tree *RegexTree) *Prefix { + var concatNode *regexNode + nextChild := 0 + + curNode := tree.root + + for { + switch curNode.t { + case ntConcatenate: + if len(curNode.children) > 0 { + concatNode = curNode + nextChild = 0 + } + + case ntGreedy, ntCapture: + curNode = curNode.children[0] + concatNode = nil + continue + + case ntOneloop, ntOnelazy: + if curNode.m > 0 { + return &Prefix{ + PrefixStr: repeat(curNode.ch, curNode.m), + CaseInsensitive: (curNode.options & IgnoreCase) != 0, + } + } + return nil + + case ntOne: + return &Prefix{ + PrefixStr: []rune{curNode.ch}, + CaseInsensitive: (curNode.options & IgnoreCase) != 0, + } + + case ntMulti: + return &Prefix{ + PrefixStr: curNode.str, + CaseInsensitive: (curNode.options & IgnoreCase) != 0, + } + + case ntBol, ntEol, ntBoundary, ntECMABoundary, ntBeginning, ntStart, + ntEndZ, ntEnd, ntEmpty, ntRequire, ntPrevent: + + default: + return nil + } + + if concatNode == nil || nextChild >= len(concatNode.children) { + return nil + } + + curNode = concatNode.children[nextChild] + nextChild++ + } +} + +// repeat the rune r, c times... 
up to the max of MaxPrefixSize +func repeat(r rune, c int) []rune { + if c > MaxPrefixSize { + c = MaxPrefixSize + } + + ret := make([]rune, c) + + // binary growth using copy for speed + ret[0] = r + bp := 1 + for bp < len(ret) { + copy(ret[bp:], ret[:bp]) + bp *= 2 + } + + return ret +} + +// BmPrefix precomputes the Boyer-Moore +// tables for fast string scanning. These tables allow +// you to scan for the first occurrence of a string within +// a large body of text without examining every character. +// The performance of the heuristic depends on the actual +// string and the text being searched, but usually, the longer +// the string that is being searched for, the fewer characters +// need to be examined. +type BmPrefix struct { + positive []int + negativeASCII []int + negativeUnicode [][]int + pattern []rune + lowASCII rune + highASCII rune + rightToLeft bool + caseInsensitive bool +} + +func newBmPrefix(pattern []rune, caseInsensitive, rightToLeft bool) *BmPrefix { + + b := &BmPrefix{ + rightToLeft: rightToLeft, + caseInsensitive: caseInsensitive, + pattern: pattern, + } + + if caseInsensitive { + for i := 0; i < len(b.pattern); i++ { + // We do the ToLower character by character for consistency. With surrogate chars, doing + // a ToLower on the entire string could actually change the surrogate pair. This is more correct + // linguistically, but since Regex doesn't support surrogates, it's more important to be + // consistent. + + b.pattern[i] = unicode.ToLower(b.pattern[i]) + } + } + + var beforefirst, last, bump int + var scan, match int + + if !rightToLeft { + beforefirst = -1 + last = len(b.pattern) - 1 + bump = 1 + } else { + beforefirst = len(b.pattern) + last = 0 + bump = -1 + } + + // PART I - the good-suffix shift table + // + // compute the positive requirement: + // if char "i" is the first one from the right that doesn't match, + // then we know the matcher can advance by _positive[i]. 
+ // + // This algorithm is a simplified variant of the standard + // Boyer-Moore good suffix calculation. + + b.positive = make([]int, len(b.pattern)) + + examine := last + ch := b.pattern[examine] + b.positive[examine] = bump + examine -= bump + +Outerloop: + for { + // find an internal char (examine) that matches the tail + + for { + if examine == beforefirst { + break Outerloop + } + if b.pattern[examine] == ch { + break + } + examine -= bump + } + + match = last + scan = examine + + // find the length of the match + for { + if scan == beforefirst || b.pattern[match] != b.pattern[scan] { + // at the end of the match, note the difference in _positive + // this is not the length of the match, but the distance from the internal match + // to the tail suffix. + if b.positive[match] == 0 { + b.positive[match] = match - scan + } + + // System.Diagnostics.Debug.WriteLine("Set positive[" + match + "] to " + (match - scan)); + + break + } + + scan -= bump + match -= bump + } + + examine -= bump + } + + match = last - bump + + // scan for the chars for which there are no shifts that yield a different candidate + + // The inside of the if statement used to say + // "_positive[match] = last - beforefirst;" + // This is slightly less aggressive in how much we skip, but at worst it + // should mean a little more work rather than skipping a potential match. + for match != beforefirst { + if b.positive[match] == 0 { + b.positive[match] = bump + } + + match -= bump + } + + // PART II - the bad-character shift table + // + // compute the negative requirement: + // if char "ch" is the reject character when testing position "i", + // we can slide up by _negative[ch]; + // (_negative[ch] = str.Length - 1 - str.LastIndexOf(ch)) + // + // the lookup table is divided into ASCII and Unicode portions; + // only those parts of the Unicode 16-bit code set that actually + // appear in the string are in the table. (Maximum size with + // Unicode is 65K; ASCII only case is 512 bytes.) 
+ + b.negativeASCII = make([]int, 128) + + for i := 0; i < len(b.negativeASCII); i++ { + b.negativeASCII[i] = last - beforefirst + } + + b.lowASCII = 127 + b.highASCII = 0 + + for examine = last; examine != beforefirst; examine -= bump { + ch = b.pattern[examine] + + switch { + case ch < 128: + if b.lowASCII > ch { + b.lowASCII = ch + } + + if b.highASCII < ch { + b.highASCII = ch + } + + if b.negativeASCII[ch] == last-beforefirst { + b.negativeASCII[ch] = last - examine + } + case ch <= 0xffff: + i, j := ch>>8, ch&0xFF + + if b.negativeUnicode == nil { + b.negativeUnicode = make([][]int, 256) + } + + if b.negativeUnicode[i] == nil { + newarray := make([]int, 256) + + for k := 0; k < len(newarray); k++ { + newarray[k] = last - beforefirst + } + + if i == 0 { + copy(newarray, b.negativeASCII) + //TODO: this line needed? + b.negativeASCII = newarray + } + + b.negativeUnicode[i] = newarray + } + + if b.negativeUnicode[i][j] == last-beforefirst { + b.negativeUnicode[i][j] = last - examine + } + default: + // we can't do the filter because this algo doesn't support + // unicode chars >0xffff + return nil + } + } + + return b +} + +func (b *BmPrefix) String() string { + return string(b.pattern) +} + +// Dump returns the contents of the filter as a human readable string +func (b *BmPrefix) Dump(indent string) string { + buf := &bytes.Buffer{} + + fmt.Fprintf(buf, "%sBM Pattern: %s\n%sPositive: ", indent, string(b.pattern), indent) + for i := 0; i < len(b.positive); i++ { + buf.WriteString(strconv.Itoa(b.positive[i])) + buf.WriteRune(' ') + } + buf.WriteRune('\n') + + if b.negativeASCII != nil { + buf.WriteString(indent) + buf.WriteString("Negative table\n") + for i := 0; i < len(b.negativeASCII); i++ { + if b.negativeASCII[i] != len(b.pattern) { + fmt.Fprintf(buf, "%s %s %s\n", indent, Escape(string(rune(i))), strconv.Itoa(b.negativeASCII[i])) + } + } + } + + return buf.String() +} + +// Scan uses the Boyer-Moore algorithm to find the first occurrence +// of the specified 
string within text, beginning at index, and +// constrained within beglimit and endlimit. +// +// The direction and case-sensitivity of the match is determined +// by the arguments to the RegexBoyerMoore constructor. +func (b *BmPrefix) Scan(text []rune, index, beglimit, endlimit int) int { + var ( + defadv, test, test2 int + match, startmatch, endmatch int + bump, advance int + chTest rune + unicodeLookup []int + ) + + if !b.rightToLeft { + defadv = len(b.pattern) + startmatch = len(b.pattern) - 1 + endmatch = 0 + test = index + defadv - 1 + bump = 1 + } else { + defadv = -len(b.pattern) + startmatch = 0 + endmatch = -defadv - 1 + test = index + defadv + bump = -1 + } + + chMatch := b.pattern[startmatch] + + for { + if test >= endlimit || test < beglimit { + return -1 + } + + chTest = text[test] + + if b.caseInsensitive { + chTest = unicode.ToLower(chTest) + } + + if chTest != chMatch { + if chTest < 128 { + advance = b.negativeASCII[chTest] + } else if chTest < 0xffff && len(b.negativeUnicode) > 0 { + unicodeLookup = b.negativeUnicode[chTest>>8] + if len(unicodeLookup) > 0 { + advance = unicodeLookup[chTest&0xFF] + } else { + advance = defadv + } + } else { + advance = defadv + } + + test += advance + } else { // if (chTest == chMatch) + test2 = test + match = startmatch + + for { + if match == endmatch { + if b.rightToLeft { + return test2 + 1 + } else { + return test2 + } + } + + match -= bump + test2 -= bump + + chTest = text[test2] + + if b.caseInsensitive { + chTest = unicode.ToLower(chTest) + } + + if chTest != b.pattern[match] { + advance = b.positive[match] + if chTest < 128 { + test2 = (match - startmatch) + b.negativeASCII[chTest] + } else if chTest < 0xffff && len(b.negativeUnicode) > 0 { + unicodeLookup = b.negativeUnicode[chTest>>8] + if len(unicodeLookup) > 0 { + test2 = (match - startmatch) + unicodeLookup[chTest&0xFF] + } else { + test += advance + break + } + } else { + test += advance + break + } + + if b.rightToLeft { + if test2 < advance { + 
advance = test2 + } + } else if test2 > advance { + advance = test2 + } + + test += advance + break + } + } + } + } +} + +// When a regex is anchored, we can do a quick IsMatch test instead of a Scan +func (b *BmPrefix) IsMatch(text []rune, index, beglimit, endlimit int) bool { + if !b.rightToLeft { + if index < beglimit || endlimit-index < len(b.pattern) { + return false + } + + return b.matchPattern(text, index) + } else { + if index > endlimit || index-beglimit < len(b.pattern) { + return false + } + + return b.matchPattern(text, index-len(b.pattern)) + } +} + +func (b *BmPrefix) matchPattern(text []rune, index int) bool { + if len(text)-index < len(b.pattern) { + return false + } + + if b.caseInsensitive { + for i := 0; i < len(b.pattern); i++ { + //Debug.Assert(textinfo.ToLower(_pattern[i]) == _pattern[i], "pattern should be converted to lower case in constructor!"); + if unicode.ToLower(text[index+i]) != b.pattern[i] { + return false + } + } + return true + } else { + for i := 0; i < len(b.pattern); i++ { + if text[index+i] != b.pattern[i] { + return false + } + } + return true + } +} + +type AnchorLoc int16 + +// where the regex can be pegged +const ( + AnchorBeginning AnchorLoc = 0x0001 + AnchorBol = 0x0002 + AnchorStart = 0x0004 + AnchorEol = 0x0008 + AnchorEndZ = 0x0010 + AnchorEnd = 0x0020 + AnchorBoundary = 0x0040 + AnchorECMABoundary = 0x0080 +) + +func getAnchors(tree *RegexTree) AnchorLoc { + + var concatNode *regexNode + nextChild, result := 0, AnchorLoc(0) + + curNode := tree.root + + for { + switch curNode.t { + case ntConcatenate: + if len(curNode.children) > 0 { + concatNode = curNode + nextChild = 0 + } + + case ntGreedy, ntCapture: + curNode = curNode.children[0] + concatNode = nil + continue + + case ntBol, ntEol, ntBoundary, ntECMABoundary, ntBeginning, + ntStart, ntEndZ, ntEnd: + return result | anchorFromType(curNode.t) + + case ntEmpty, ntRequire, ntPrevent: + + default: + return result + } + + if concatNode == nil || nextChild >= 
len(concatNode.children) { + return result + } + + curNode = concatNode.children[nextChild] + nextChild++ + } +} + +func anchorFromType(t nodeType) AnchorLoc { + switch t { + case ntBol: + return AnchorBol + case ntEol: + return AnchorEol + case ntBoundary: + return AnchorBoundary + case ntECMABoundary: + return AnchorECMABoundary + case ntBeginning: + return AnchorBeginning + case ntStart: + return AnchorStart + case ntEndZ: + return AnchorEndZ + case ntEnd: + return AnchorEnd + default: + return 0 + } +} + +// anchorDescription returns a human-readable description of the anchors +func (anchors AnchorLoc) String() string { + buf := &bytes.Buffer{} + + if 0 != (anchors & AnchorBeginning) { + buf.WriteString(", Beginning") + } + if 0 != (anchors & AnchorStart) { + buf.WriteString(", Start") + } + if 0 != (anchors & AnchorBol) { + buf.WriteString(", Bol") + } + if 0 != (anchors & AnchorBoundary) { + buf.WriteString(", Boundary") + } + if 0 != (anchors & AnchorECMABoundary) { + buf.WriteString(", ECMABoundary") + } + if 0 != (anchors & AnchorEol) { + buf.WriteString(", Eol") + } + if 0 != (anchors & AnchorEnd) { + buf.WriteString(", End") + } + if 0 != (anchors & AnchorEndZ) { + buf.WriteString(", EndZ") + } + + // trim off comma + if buf.Len() >= 2 { + return buf.String()[2:] + } + return "None" +} diff --git a/vendor/github.com/dlclark/regexp2/syntax/replacerdata.go b/vendor/github.com/dlclark/regexp2/syntax/replacerdata.go new file mode 100644 index 0000000000..bcf4d3f257 --- /dev/null +++ b/vendor/github.com/dlclark/regexp2/syntax/replacerdata.go @@ -0,0 +1,87 @@ +package syntax + +import ( + "bytes" + "errors" +) + +type ReplacerData struct { + Rep string + Strings []string + Rules []int +} + +const ( + replaceSpecials = 4 + replaceLeftPortion = -1 + replaceRightPortion = -2 + replaceLastGroup = -3 + replaceWholeString = -4 +) + +//ErrReplacementError is a general error during parsing the replacement text +var ErrReplacementError = errors.New("Replacement pattern 
error.") + +// NewReplacerData will populate a reusable replacer data struct based on the given replacement string +// and the capture group data from a regexp +func NewReplacerData(rep string, caps map[int]int, capsize int, capnames map[string]int, op RegexOptions) (*ReplacerData, error) { + p := parser{ + options: op, + caps: caps, + capsize: capsize, + capnames: capnames, + } + p.setPattern(rep) + concat, err := p.scanReplacement() + if err != nil { + return nil, err + } + + if concat.t != ntConcatenate { + panic(ErrReplacementError) + } + + sb := &bytes.Buffer{} + var ( + strings []string + rules []int + ) + + for _, child := range concat.children { + switch child.t { + case ntMulti: + child.writeStrToBuf(sb) + + case ntOne: + sb.WriteRune(child.ch) + + case ntRef: + if sb.Len() > 0 { + rules = append(rules, len(strings)) + strings = append(strings, sb.String()) + sb.Reset() + } + slot := child.m + + if len(caps) > 0 && slot >= 0 { + slot = caps[slot] + } + + rules = append(rules, -replaceSpecials-1-slot) + + default: + panic(ErrReplacementError) + } + } + + if sb.Len() > 0 { + rules = append(rules, len(strings)) + strings = append(strings, sb.String()) + } + + return &ReplacerData{ + Rep: rep, + Strings: strings, + Rules: rules, + }, nil +} diff --git a/vendor/github.com/dlclark/regexp2/syntax/tree.go b/vendor/github.com/dlclark/regexp2/syntax/tree.go new file mode 100644 index 0000000000..ea28829319 --- /dev/null +++ b/vendor/github.com/dlclark/regexp2/syntax/tree.go @@ -0,0 +1,654 @@ +package syntax + +import ( + "bytes" + "fmt" + "math" + "strconv" +) + +type RegexTree struct { + root *regexNode + caps map[int]int + capnumlist []int + captop int + Capnames map[string]int + Caplist []string + options RegexOptions +} + +// It is built into a parsed tree for a regular expression. 
+ +// Implementation notes: +// +// Since the node tree is a temporary data structure only used +// during compilation of the regexp to integer codes, it's +// designed for clarity and convenience rather than +// space efficiency. +// +// RegexNodes are built into a tree, linked by the n.children list. +// Each node also has a n.parent and n.ichild member indicating +// its parent and which child # it is in its parent's list. +// +// RegexNodes come in as many types as there are constructs in +// a regular expression, for example, "concatenate", "alternate", +// "one", "rept", "group". There are also node types for basic +// peephole optimizations, e.g., "onerep", "notsetrep", etc. +// +// Because perl 5 allows "lookback" groups that scan backwards, +// each node also gets a "direction". Normally the value of +// boolean n.backward = false. +// +// During parsing, top-level nodes are also stacked onto a parse +// stack (a stack of trees). For this purpose we have a n.next +// pointer. [Note that to save a few bytes, we could overload the +// n.parent pointer instead.] +// +// On the parse stack, each tree has a "role" - basically, the +// nonterminal in the grammar that the parser has currently +// assigned to the tree. That code is stored in n.role. +// +// Finally, some of the different kinds of nodes have data. 
+// Two integers (for the looping constructs) are stored in +// n.operands, an an object (either a string or a set) +// is stored in n.data +type regexNode struct { + t nodeType + children []*regexNode + str []rune + set *CharSet + ch rune + m int + n int + options RegexOptions + next *regexNode +} + +type nodeType int32 + +const ( + // The following are leaves, and correspond to primitive operations + + ntOnerep nodeType = 0 // lef,back char,min,max a {n} + ntNotonerep = 1 // lef,back char,min,max .{n} + ntSetrep = 2 // lef,back set,min,max [\d]{n} + ntOneloop = 3 // lef,back char,min,max a {,n} + ntNotoneloop = 4 // lef,back char,min,max .{,n} + ntSetloop = 5 // lef,back set,min,max [\d]{,n} + ntOnelazy = 6 // lef,back char,min,max a {,n}? + ntNotonelazy = 7 // lef,back char,min,max .{,n}? + ntSetlazy = 8 // lef,back set,min,max [\d]{,n}? + ntOne = 9 // lef char a + ntNotone = 10 // lef char [^a] + ntSet = 11 // lef set [a-z\s] \w \s \d + ntMulti = 12 // lef string abcd + ntRef = 13 // lef group \# + ntBol = 14 // ^ + ntEol = 15 // $ + ntBoundary = 16 // \b + ntNonboundary = 17 // \B + ntBeginning = 18 // \A + ntStart = 19 // \G + ntEndZ = 20 // \Z + ntEnd = 21 // \Z + + // Interior nodes do not correspond to primitive operations, but + // control structures compositing other operations + + // Concat and alternate take n children, and can run forward or backwards + + ntNothing = 22 // [] + ntEmpty = 23 // () + ntAlternate = 24 // a|b + ntConcatenate = 25 // ab + ntLoop = 26 // m,x * + ? {,} + ntLazyloop = 27 // m,x *? +? ?? {,}? + ntCapture = 28 // n () + ntGroup = 29 // (?:) + ntRequire = 30 // (?=) (?<=) + ntPrevent = 31 // (?!) (?) (?<) + ntTestref = 33 // (?(n) | ) + ntTestgroup = 34 // (?(...) 
| ) + + ntECMABoundary = 41 // \b + ntNonECMABoundary = 42 // \B +) + +func newRegexNode(t nodeType, opt RegexOptions) *regexNode { + return &regexNode{ + t: t, + options: opt, + } +} + +func newRegexNodeCh(t nodeType, opt RegexOptions, ch rune) *regexNode { + return &regexNode{ + t: t, + options: opt, + ch: ch, + } +} + +func newRegexNodeStr(t nodeType, opt RegexOptions, str []rune) *regexNode { + return &regexNode{ + t: t, + options: opt, + str: str, + } +} + +func newRegexNodeSet(t nodeType, opt RegexOptions, set *CharSet) *regexNode { + return &regexNode{ + t: t, + options: opt, + set: set, + } +} + +func newRegexNodeM(t nodeType, opt RegexOptions, m int) *regexNode { + return &regexNode{ + t: t, + options: opt, + m: m, + } +} +func newRegexNodeMN(t nodeType, opt RegexOptions, m, n int) *regexNode { + return &regexNode{ + t: t, + options: opt, + m: m, + n: n, + } +} + +func (n *regexNode) writeStrToBuf(buf *bytes.Buffer) { + for i := 0; i < len(n.str); i++ { + buf.WriteRune(n.str[i]) + } +} + +func (n *regexNode) addChild(child *regexNode) { + reduced := child.reduce() + n.children = append(n.children, reduced) + reduced.next = n +} + +func (n *regexNode) insertChildren(afterIndex int, nodes []*regexNode) { + newChildren := make([]*regexNode, 0, len(n.children)+len(nodes)) + n.children = append(append(append(newChildren, n.children[:afterIndex]...), nodes...), n.children[afterIndex:]...) +} + +// removes children including the start but not the end index +func (n *regexNode) removeChildren(startIndex, endIndex int) { + n.children = append(n.children[:startIndex], n.children[endIndex:]...) 
+} + +// Pass type as OneLazy or OneLoop +func (n *regexNode) makeRep(t nodeType, min, max int) { + n.t += (t - ntOne) + n.m = min + n.n = max +} + +func (n *regexNode) reduce() *regexNode { + switch n.t { + case ntAlternate: + return n.reduceAlternation() + + case ntConcatenate: + return n.reduceConcatenation() + + case ntLoop, ntLazyloop: + return n.reduceRep() + + case ntGroup: + return n.reduceGroup() + + case ntSet, ntSetloop: + return n.reduceSet() + + default: + return n + } +} + +// Basic optimization. Single-letter alternations can be replaced +// by faster set specifications, and nested alternations with no +// intervening operators can be flattened: +// +// a|b|c|def|g|h -> [a-c]|def|[gh] +// apple|(?:orange|pear)|grape -> apple|orange|pear|grape +func (n *regexNode) reduceAlternation() *regexNode { + if len(n.children) == 0 { + return newRegexNode(ntNothing, n.options) + } + + wasLastSet := false + lastNodeCannotMerge := false + var optionsLast RegexOptions + var i, j int + + for i, j = 0, 0; i < len(n.children); i, j = i+1, j+1 { + at := n.children[i] + + if j < i { + n.children[j] = at + } + + for { + if at.t == ntAlternate { + for k := 0; k < len(at.children); k++ { + at.children[k].next = n + } + n.insertChildren(i+1, at.children) + + j-- + } else if at.t == ntSet || at.t == ntOne { + // Cannot merge sets if L or I options differ, or if either are negated. + optionsAt := at.options & (RightToLeft | IgnoreCase) + + if at.t == ntSet { + if !wasLastSet || optionsLast != optionsAt || lastNodeCannotMerge || !at.set.IsMergeable() { + wasLastSet = true + lastNodeCannotMerge = !at.set.IsMergeable() + optionsLast = optionsAt + break + } + } else if !wasLastSet || optionsLast != optionsAt || lastNodeCannotMerge { + wasLastSet = true + lastNodeCannotMerge = false + optionsLast = optionsAt + break + } + + // The last node was a Set or a One, we're a Set or One and our options are the same. + // Merge the two nodes. 
+ j-- + prev := n.children[j] + + var prevCharClass *CharSet + if prev.t == ntOne { + prevCharClass = &CharSet{} + prevCharClass.addChar(prev.ch) + } else { + prevCharClass = prev.set + } + + if at.t == ntOne { + prevCharClass.addChar(at.ch) + } else { + prevCharClass.addSet(*at.set) + } + + prev.t = ntSet + prev.set = prevCharClass + } else if at.t == ntNothing { + j-- + } else { + wasLastSet = false + lastNodeCannotMerge = false + } + break + } + } + + if j < i { + n.removeChildren(j, i) + } + + return n.stripEnation(ntNothing) +} + +// Basic optimization. Adjacent strings can be concatenated. +// +// (?:abc)(?:def) -> abcdef +func (n *regexNode) reduceConcatenation() *regexNode { + // Eliminate empties and concat adjacent strings/chars + + var optionsLast RegexOptions + var optionsAt RegexOptions + var i, j int + + if len(n.children) == 0 { + return newRegexNode(ntEmpty, n.options) + } + + wasLastString := false + + for i, j = 0, 0; i < len(n.children); i, j = i+1, j+1 { + var at, prev *regexNode + + at = n.children[i] + + if j < i { + n.children[j] = at + } + + if at.t == ntConcatenate && + ((at.options & RightToLeft) == (n.options & RightToLeft)) { + for k := 0; k < len(at.children); k++ { + at.children[k].next = n + } + + //insert at.children at i+1 index in n.children + n.insertChildren(i+1, at.children) + + j-- + } else if at.t == ntMulti || at.t == ntOne { + // Cannot merge strings if L or I options differ + optionsAt = at.options & (RightToLeft | IgnoreCase) + + if !wasLastString || optionsLast != optionsAt { + wasLastString = true + optionsLast = optionsAt + continue + } + + j-- + prev = n.children[j] + + if prev.t == ntOne { + prev.t = ntMulti + prev.str = []rune{prev.ch} + } + + if (optionsAt & RightToLeft) == 0 { + if at.t == ntOne { + prev.str = append(prev.str, at.ch) + } else { + prev.str = append(prev.str, at.str...) 
+ } + } else { + if at.t == ntOne { + // insert at the front by expanding our slice, copying the data over, and then setting the value + prev.str = append(prev.str, 0) + copy(prev.str[1:], prev.str) + prev.str[0] = at.ch + } else { + //insert at the front...this one we'll make a new slice and copy both into it + merge := make([]rune, len(prev.str)+len(at.str)) + copy(merge, at.str) + copy(merge[len(at.str):], prev.str) + prev.str = merge + } + } + } else if at.t == ntEmpty { + j-- + } else { + wasLastString = false + } + } + + if j < i { + // remove indices j through i from the children + n.removeChildren(j, i) + } + + return n.stripEnation(ntEmpty) +} + +// Nested repeaters just get multiplied with each other if they're not +// too lumpy +func (n *regexNode) reduceRep() *regexNode { + + u := n + t := n.t + min := n.m + max := n.n + + for { + if len(u.children) == 0 { + break + } + + child := u.children[0] + + // multiply reps of the same type only + if child.t != t { + childType := child.t + + if !(childType >= ntOneloop && childType <= ntSetloop && t == ntLoop || + childType >= ntOnelazy && childType <= ntSetlazy && t == ntLazyloop) { + break + } + } + + // child can be too lumpy to blur, e.g., (a {100,105}) {3} or (a {2,})? + // [but things like (a {2,})+ are not too lumpy...] + if u.m == 0 && child.m > 1 || child.n < child.m*2 { + break + } + + u = child + if u.m > 0 { + if (math.MaxInt32-1)/u.m < min { + u.m = math.MaxInt32 + } else { + u.m = u.m * min + } + } + if u.n > 0 { + if (math.MaxInt32-1)/u.n < max { + u.n = math.MaxInt32 + } else { + u.n = u.n * max + } + } + } + + if math.MaxInt32 == min { + return newRegexNode(ntNothing, n.options) + } + return u + +} + +// Simple optimization. If a concatenation or alternation has only +// one child strip out the intermediate node. If it has zero children, +// turn it into an empty. 
+func (n *regexNode) stripEnation(emptyType nodeType) *regexNode { + switch len(n.children) { + case 0: + return newRegexNode(emptyType, n.options) + case 1: + return n.children[0] + default: + return n + } +} + +func (n *regexNode) reduceGroup() *regexNode { + u := n + + for u.t == ntGroup { + u = u.children[0] + } + + return u +} + +// Simple optimization. If a set is a singleton, an inverse singleton, +// or empty, it's transformed accordingly. +func (n *regexNode) reduceSet() *regexNode { + // Extract empty-set, one and not-one case as special + + if n.set == nil { + n.t = ntNothing + } else if n.set.IsSingleton() { + n.ch = n.set.SingletonChar() + n.set = nil + n.t += (ntOne - ntSet) + } else if n.set.IsSingletonInverse() { + n.ch = n.set.SingletonChar() + n.set = nil + n.t += (ntNotone - ntSet) + } + + return n +} + +func (n *regexNode) reverseLeft() *regexNode { + if n.options&RightToLeft != 0 && n.t == ntConcatenate && len(n.children) > 0 { + //reverse children order + for left, right := 0, len(n.children)-1; left < right; left, right = left+1, right-1 { + n.children[left], n.children[right] = n.children[right], n.children[left] + } + } + + return n +} + +func (n *regexNode) makeQuantifier(lazy bool, min, max int) *regexNode { + if min == 0 && max == 0 { + return newRegexNode(ntEmpty, n.options) + } + + if min == 1 && max == 1 { + return n + } + + switch n.t { + case ntOne, ntNotone, ntSet: + if lazy { + n.makeRep(Onelazy, min, max) + } else { + n.makeRep(Oneloop, min, max) + } + return n + + default: + var t nodeType + if lazy { + t = ntLazyloop + } else { + t = ntLoop + } + result := newRegexNodeMN(t, n.options, min, max) + result.addChild(n) + return result + } +} + +// debug functions + +var typeStr = []string{ + "Onerep", "Notonerep", "Setrep", + "Oneloop", "Notoneloop", "Setloop", + "Onelazy", "Notonelazy", "Setlazy", + "One", "Notone", "Set", + "Multi", "Ref", + "Bol", "Eol", "Boundary", "Nonboundary", + "Beginning", "Start", "EndZ", "End", + 
"Nothing", "Empty", + "Alternate", "Concatenate", + "Loop", "Lazyloop", + "Capture", "Group", "Require", "Prevent", "Greedy", + "Testref", "Testgroup", + "Unknown", "Unknown", "Unknown", + "Unknown", "Unknown", "Unknown", + "ECMABoundary", "NonECMABoundary", +} + +func (n *regexNode) description() string { + buf := &bytes.Buffer{} + + buf.WriteString(typeStr[n.t]) + + if (n.options & ExplicitCapture) != 0 { + buf.WriteString("-C") + } + if (n.options & IgnoreCase) != 0 { + buf.WriteString("-I") + } + if (n.options & RightToLeft) != 0 { + buf.WriteString("-L") + } + if (n.options & Multiline) != 0 { + buf.WriteString("-M") + } + if (n.options & Singleline) != 0 { + buf.WriteString("-S") + } + if (n.options & IgnorePatternWhitespace) != 0 { + buf.WriteString("-X") + } + if (n.options & ECMAScript) != 0 { + buf.WriteString("-E") + } + + switch n.t { + case ntOneloop, ntNotoneloop, ntOnelazy, ntNotonelazy, ntOne, ntNotone: + buf.WriteString("(Ch = " + CharDescription(n.ch) + ")") + break + case ntCapture: + buf.WriteString("(index = " + strconv.Itoa(n.m) + ", unindex = " + strconv.Itoa(n.n) + ")") + break + case ntRef, ntTestref: + buf.WriteString("(index = " + strconv.Itoa(n.m) + ")") + break + case ntMulti: + fmt.Fprintf(buf, "(String = %s)", string(n.str)) + break + case ntSet, ntSetloop, ntSetlazy: + buf.WriteString("(Set = " + n.set.String() + ")") + break + } + + switch n.t { + case ntOneloop, ntNotoneloop, ntOnelazy, ntNotonelazy, ntSetloop, ntSetlazy, ntLoop, ntLazyloop: + buf.WriteString("(Min = ") + buf.WriteString(strconv.Itoa(n.m)) + buf.WriteString(", Max = ") + if n.n == math.MaxInt32 { + buf.WriteString("inf") + } else { + buf.WriteString(strconv.Itoa(n.n)) + } + buf.WriteString(")") + + break + } + + return buf.String() +} + +var padSpace = []byte(" ") + +func (t *RegexTree) Dump() string { + return t.root.dump() +} + +func (n *regexNode) dump() string { + var stack []int + CurNode := n + CurChild := 0 + + buf := 
bytes.NewBufferString(CurNode.description()) + buf.WriteRune('\n') + + for { + if CurNode.children != nil && CurChild < len(CurNode.children) { + stack = append(stack, CurChild+1) + CurNode = CurNode.children[CurChild] + CurChild = 0 + + Depth := len(stack) + if Depth > 32 { + Depth = 32 + } + buf.Write(padSpace[:Depth]) + buf.WriteString(CurNode.description()) + buf.WriteRune('\n') + } else { + if len(stack) == 0 { + break + } + + CurChild = stack[len(stack)-1] + stack = stack[:len(stack)-1] + CurNode = CurNode.next + } + } + return buf.String() +} diff --git a/vendor/github.com/dlclark/regexp2/syntax/writer.go b/vendor/github.com/dlclark/regexp2/syntax/writer.go new file mode 100644 index 0000000000..a5aa11ca06 --- /dev/null +++ b/vendor/github.com/dlclark/regexp2/syntax/writer.go @@ -0,0 +1,500 @@ +package syntax + +import ( + "bytes" + "fmt" + "math" + "os" +) + +func Write(tree *RegexTree) (*Code, error) { + w := writer{ + intStack: make([]int, 0, 32), + emitted: make([]int, 2), + stringhash: make(map[string]int), + sethash: make(map[string]int), + } + + code, err := w.codeFromTree(tree) + + if tree.options&Debug > 0 && code != nil { + os.Stdout.WriteString(code.Dump()) + os.Stdout.WriteString("\n") + } + + return code, err +} + +type writer struct { + emitted []int + + intStack []int + curpos int + stringhash map[string]int + stringtable [][]rune + sethash map[string]int + settable []*CharSet + counting bool + count int + trackcount int + caps map[int]int +} + +const ( + beforeChild nodeType = 64 + afterChild = 128 + //MaxPrefixSize is the largest number of runes we'll use for a BoyerMoyer prefix + MaxPrefixSize = 50 +) + +// The top level RegexCode generator. It does a depth-first walk +// through the tree and calls EmitFragment to emits code before +// and after each child of an interior node, and at each leaf. +// +// It runs two passes, first to count the size of the generated +// code, and second to generate the code. 
+// +// We should time it against the alternative, which is +// to just generate the code and grow the array as we go. +func (w *writer) codeFromTree(tree *RegexTree) (*Code, error) { + var ( + curNode *regexNode + curChild int + capsize int + ) + // construct sparse capnum mapping if some numbers are unused + + if tree.capnumlist == nil || tree.captop == len(tree.capnumlist) { + capsize = tree.captop + w.caps = nil + } else { + capsize = len(tree.capnumlist) + w.caps = tree.caps + for i := 0; i < len(tree.capnumlist); i++ { + w.caps[tree.capnumlist[i]] = i + } + } + + w.counting = true + + for { + if !w.counting { + w.emitted = make([]int, w.count) + } + + curNode = tree.root + curChild = 0 + + w.emit1(Lazybranch, 0) + + for { + if len(curNode.children) == 0 { + w.emitFragment(curNode.t, curNode, 0) + } else if curChild < len(curNode.children) { + w.emitFragment(curNode.t|beforeChild, curNode, curChild) + + curNode = curNode.children[curChild] + + w.pushInt(curChild) + curChild = 0 + continue + } + + if w.emptyStack() { + break + } + + curChild = w.popInt() + curNode = curNode.next + + w.emitFragment(curNode.t|afterChild, curNode, curChild) + curChild++ + } + + w.patchJump(0, w.curPos()) + w.emit(Stop) + + if !w.counting { + break + } + + w.counting = false + } + + fcPrefix := getFirstCharsPrefix(tree) + prefix := getPrefix(tree) + rtl := (tree.options & RightToLeft) != 0 + + var bmPrefix *BmPrefix + //TODO: benchmark string prefixes + if prefix != nil && len(prefix.PrefixStr) > 0 && MaxPrefixSize > 0 { + if len(prefix.PrefixStr) > MaxPrefixSize { + // limit prefix changes to 10k + prefix.PrefixStr = prefix.PrefixStr[:MaxPrefixSize] + } + bmPrefix = newBmPrefix(prefix.PrefixStr, prefix.CaseInsensitive, rtl) + } else { + bmPrefix = nil + } + + return &Code{ + Codes: w.emitted, + Strings: w.stringtable, + Sets: w.settable, + TrackCount: w.trackcount, + Caps: w.caps, + Capsize: capsize, + FcPrefix: fcPrefix, + BmPrefix: bmPrefix, + Anchors: getAnchors(tree), + 
RightToLeft: rtl, + }, nil +} + +// The main RegexCode generator. It does a depth-first walk +// through the tree and calls EmitFragment to emits code before +// and after each child of an interior node, and at each leaf. +func (w *writer) emitFragment(nodetype nodeType, node *regexNode, curIndex int) error { + bits := InstOp(0) + + if nodetype <= ntRef { + if (node.options & RightToLeft) != 0 { + bits |= Rtl + } + if (node.options & IgnoreCase) != 0 { + bits |= Ci + } + } + ntBits := nodeType(bits) + + switch nodetype { + case ntConcatenate | beforeChild, ntConcatenate | afterChild, ntEmpty: + break + + case ntAlternate | beforeChild: + if curIndex < len(node.children)-1 { + w.pushInt(w.curPos()) + w.emit1(Lazybranch, 0) + } + + case ntAlternate | afterChild: + if curIndex < len(node.children)-1 { + lbPos := w.popInt() + w.pushInt(w.curPos()) + w.emit1(Goto, 0) + w.patchJump(lbPos, w.curPos()) + } else { + for i := 0; i < curIndex; i++ { + w.patchJump(w.popInt(), w.curPos()) + } + } + break + + case ntTestref | beforeChild: + if curIndex == 0 { + w.emit(Setjump) + w.pushInt(w.curPos()) + w.emit1(Lazybranch, 0) + w.emit1(Testref, w.mapCapnum(node.m)) + w.emit(Forejump) + } + + case ntTestref | afterChild: + if curIndex == 0 { + branchpos := w.popInt() + w.pushInt(w.curPos()) + w.emit1(Goto, 0) + w.patchJump(branchpos, w.curPos()) + w.emit(Forejump) + if len(node.children) <= 1 { + w.patchJump(w.popInt(), w.curPos()) + } + } else if curIndex == 1 { + w.patchJump(w.popInt(), w.curPos()) + } + + case ntTestgroup | beforeChild: + if curIndex == 0 { + w.emit(Setjump) + w.emit(Setmark) + w.pushInt(w.curPos()) + w.emit1(Lazybranch, 0) + } + + case ntTestgroup | afterChild: + if curIndex == 0 { + w.emit(Getmark) + w.emit(Forejump) + } else if curIndex == 1 { + Branchpos := w.popInt() + w.pushInt(w.curPos()) + w.emit1(Goto, 0) + w.patchJump(Branchpos, w.curPos()) + w.emit(Getmark) + w.emit(Forejump) + if len(node.children) <= 2 { + w.patchJump(w.popInt(), w.curPos()) + } + 
} else if curIndex == 2 { + w.patchJump(w.popInt(), w.curPos()) + } + + case ntLoop | beforeChild, ntLazyloop | beforeChild: + + if node.n < math.MaxInt32 || node.m > 1 { + if node.m == 0 { + w.emit1(Nullcount, 0) + } else { + w.emit1(Setcount, 1-node.m) + } + } else if node.m == 0 { + w.emit(Nullmark) + } else { + w.emit(Setmark) + } + + if node.m == 0 { + w.pushInt(w.curPos()) + w.emit1(Goto, 0) + } + w.pushInt(w.curPos()) + + case ntLoop | afterChild, ntLazyloop | afterChild: + + startJumpPos := w.curPos() + lazy := (nodetype - (ntLoop | afterChild)) + + if node.n < math.MaxInt32 || node.m > 1 { + if node.n == math.MaxInt32 { + w.emit2(InstOp(Branchcount+lazy), w.popInt(), math.MaxInt32) + } else { + w.emit2(InstOp(Branchcount+lazy), w.popInt(), node.n-node.m) + } + } else { + w.emit1(InstOp(Branchmark+lazy), w.popInt()) + } + + if node.m == 0 { + w.patchJump(w.popInt(), startJumpPos) + } + + case ntGroup | beforeChild, ntGroup | afterChild: + + case ntCapture | beforeChild: + w.emit(Setmark) + + case ntCapture | afterChild: + w.emit2(Capturemark, w.mapCapnum(node.m), w.mapCapnum(node.n)) + + case ntRequire | beforeChild: + // NOTE: the following line causes lookahead/lookbehind to be + // NON-BACKTRACKING. It can be commented out with (*) + w.emit(Setjump) + + w.emit(Setmark) + + case ntRequire | afterChild: + w.emit(Getmark) + + // NOTE: the following line causes lookahead/lookbehind to be + // NON-BACKTRACKING. 
It can be commented out with (*) + w.emit(Forejump) + + case ntPrevent | beforeChild: + w.emit(Setjump) + w.pushInt(w.curPos()) + w.emit1(Lazybranch, 0) + + case ntPrevent | afterChild: + w.emit(Backjump) + w.patchJump(w.popInt(), w.curPos()) + w.emit(Forejump) + + case ntGreedy | beforeChild: + w.emit(Setjump) + + case ntGreedy | afterChild: + w.emit(Forejump) + + case ntOne, ntNotone: + w.emit1(InstOp(node.t|ntBits), int(node.ch)) + + case ntNotoneloop, ntNotonelazy, ntOneloop, ntOnelazy: + if node.m > 0 { + if node.t == ntOneloop || node.t == ntOnelazy { + w.emit2(Onerep|bits, int(node.ch), node.m) + } else { + w.emit2(Notonerep|bits, int(node.ch), node.m) + } + } + if node.n > node.m { + if node.n == math.MaxInt32 { + w.emit2(InstOp(node.t|ntBits), int(node.ch), math.MaxInt32) + } else { + w.emit2(InstOp(node.t|ntBits), int(node.ch), node.n-node.m) + } + } + + case ntSetloop, ntSetlazy: + if node.m > 0 { + w.emit2(Setrep|bits, w.setCode(node.set), node.m) + } + if node.n > node.m { + if node.n == math.MaxInt32 { + w.emit2(InstOp(node.t|ntBits), w.setCode(node.set), math.MaxInt32) + } else { + w.emit2(InstOp(node.t|ntBits), w.setCode(node.set), node.n-node.m) + } + } + + case ntMulti: + w.emit1(InstOp(node.t|ntBits), w.stringCode(node.str)) + + case ntSet: + w.emit1(InstOp(node.t|ntBits), w.setCode(node.set)) + + case ntRef: + w.emit1(InstOp(node.t|ntBits), w.mapCapnum(node.m)) + + case ntNothing, ntBol, ntEol, ntBoundary, ntNonboundary, ntECMABoundary, ntNonECMABoundary, ntBeginning, ntStart, ntEndZ, ntEnd: + w.emit(InstOp(node.t)) + + default: + return fmt.Errorf("unexpected opcode in regular expression generation: %v", nodetype) + } + + return nil +} + +// To avoid recursion, we use a simple integer stack. +// This is the push. +func (w *writer) pushInt(i int) { + w.intStack = append(w.intStack, i) +} + +// Returns true if the stack is empty. +func (w *writer) emptyStack() bool { + return len(w.intStack) == 0 +} + +// This is the pop. 
+func (w *writer) popInt() int { + //get our item + idx := len(w.intStack) - 1 + i := w.intStack[idx] + //trim our slice + w.intStack = w.intStack[:idx] + return i +} + +// Returns the current position in the emitted code. +func (w *writer) curPos() int { + return w.curpos +} + +// Fixes up a jump instruction at the specified offset +// so that it jumps to the specified jumpDest. +func (w *writer) patchJump(offset, jumpDest int) { + w.emitted[offset+1] = jumpDest +} + +// Returns an index in the set table for a charset +// uses a map to eliminate duplicates. +func (w *writer) setCode(set *CharSet) int { + if w.counting { + return 0 + } + + buf := &bytes.Buffer{} + + set.mapHashFill(buf) + hash := buf.String() + i, ok := w.sethash[hash] + if !ok { + i = len(w.sethash) + w.sethash[hash] = i + w.settable = append(w.settable, set) + } + return i +} + +// Returns an index in the string table for a string. +// uses a map to eliminate duplicates. +func (w *writer) stringCode(str []rune) int { + if w.counting { + return 0 + } + + hash := string(str) + i, ok := w.stringhash[hash] + if !ok { + i = len(w.stringhash) + w.stringhash[hash] = i + w.stringtable = append(w.stringtable, str) + } + + return i +} + +// When generating code on a regex that uses a sparse set +// of capture slots, we hash them to a dense set of indices +// for an array of capture slots. Instead of doing the hash +// at match time, it's done at compile time, here. +func (w *writer) mapCapnum(capnum int) int { + if capnum == -1 { + return -1 + } + + if w.caps != nil { + return w.caps[capnum] + } + + return capnum +} + +// Emits a zero-argument operation. Note that the emit +// functions all run in two modes: they can emit code, or +// they can just count the size of the code. +func (w *writer) emit(op InstOp) { + if w.counting { + w.count++ + if opcodeBacktracks(op) { + w.trackcount++ + } + return + } + w.emitted[w.curpos] = int(op) + w.curpos++ +} + +// Emits a one-argument operation. 
+func (w *writer) emit1(op InstOp, opd1 int) { + if w.counting { + w.count += 2 + if opcodeBacktracks(op) { + w.trackcount++ + } + return + } + w.emitted[w.curpos] = int(op) + w.curpos++ + w.emitted[w.curpos] = opd1 + w.curpos++ +} + +// Emits a two-argument operation. +func (w *writer) emit2(op InstOp, opd1, opd2 int) { + if w.counting { + w.count += 3 + if opcodeBacktracks(op) { + w.trackcount++ + } + return + } + w.emitted[w.curpos] = int(op) + w.curpos++ + w.emitted[w.curpos] = opd1 + w.curpos++ + w.emitted[w.curpos] = opd2 + w.curpos++ +} diff --git a/vendor/github.com/dlclark/regexp2/testoutput1 b/vendor/github.com/dlclark/regexp2/testoutput1 new file mode 100644 index 0000000000..fbf63fdf2f --- /dev/null +++ b/vendor/github.com/dlclark/regexp2/testoutput1 @@ -0,0 +1,7061 @@ +# This set of tests is for features that are compatible with all versions of +# Perl >= 5.10, in non-UTF mode. It should run clean for the 8-bit, 16-bit, and +# 32-bit PCRE libraries, and also using the perltest.pl script. + +#forbid_utf +#newline_default lf any anycrlf +#perltest + +/the quick brown fox/ + the quick brown fox + 0: the quick brown fox + What do you know about the quick brown fox? + 0: the quick brown fox +\= Expect no match + The quick brown FOX +No match + What do you know about THE QUICK BROWN FOX? +No match + +/The quick brown fox/i + the quick brown fox + 0: the quick brown fox + The quick brown FOX + 0: The quick brown FOX + What do you know about the quick brown fox? + 0: the quick brown fox + What do you know about THE QUICK BROWN FOX? 
+ 0: THE QUICK BROWN FOX + +/abcd\t\n\r\f\a\e\071\x3b\$\\\?caxyz/ + abcd\t\n\r\f\a\e9;\$\\?caxyz + 0: abcd\x09\x0a\x0d\x0c\x07\x1b9;$\?caxyz + +/a*abc?xyz+pqr{3}ab{2,}xy{4,5}pq{0,6}AB{0,}zz/ + abxyzpqrrrabbxyyyypqAzz + 0: abxyzpqrrrabbxyyyypqAzz + abxyzpqrrrabbxyyyypqAzz + 0: abxyzpqrrrabbxyyyypqAzz + aabxyzpqrrrabbxyyyypqAzz + 0: aabxyzpqrrrabbxyyyypqAzz + aaabxyzpqrrrabbxyyyypqAzz + 0: aaabxyzpqrrrabbxyyyypqAzz + aaaabxyzpqrrrabbxyyyypqAzz + 0: aaaabxyzpqrrrabbxyyyypqAzz + abcxyzpqrrrabbxyyyypqAzz + 0: abcxyzpqrrrabbxyyyypqAzz + aabcxyzpqrrrabbxyyyypqAzz + 0: aabcxyzpqrrrabbxyyyypqAzz + aaabcxyzpqrrrabbxyyyypAzz + 0: aaabcxyzpqrrrabbxyyyypAzz + aaabcxyzpqrrrabbxyyyypqAzz + 0: aaabcxyzpqrrrabbxyyyypqAzz + aaabcxyzpqrrrabbxyyyypqqAzz + 0: aaabcxyzpqrrrabbxyyyypqqAzz + aaabcxyzpqrrrabbxyyyypqqqAzz + 0: aaabcxyzpqrrrabbxyyyypqqqAzz + aaabcxyzpqrrrabbxyyyypqqqqAzz + 0: aaabcxyzpqrrrabbxyyyypqqqqAzz + aaabcxyzpqrrrabbxyyyypqqqqqAzz + 0: aaabcxyzpqrrrabbxyyyypqqqqqAzz + aaabcxyzpqrrrabbxyyyypqqqqqqAzz + 0: aaabcxyzpqrrrabbxyyyypqqqqqqAzz + aaaabcxyzpqrrrabbxyyyypqAzz + 0: aaaabcxyzpqrrrabbxyyyypqAzz + abxyzzpqrrrabbxyyyypqAzz + 0: abxyzzpqrrrabbxyyyypqAzz + aabxyzzzpqrrrabbxyyyypqAzz + 0: aabxyzzzpqrrrabbxyyyypqAzz + aaabxyzzzzpqrrrabbxyyyypqAzz + 0: aaabxyzzzzpqrrrabbxyyyypqAzz + aaaabxyzzzzpqrrrabbxyyyypqAzz + 0: aaaabxyzzzzpqrrrabbxyyyypqAzz + abcxyzzpqrrrabbxyyyypqAzz + 0: abcxyzzpqrrrabbxyyyypqAzz + aabcxyzzzpqrrrabbxyyyypqAzz + 0: aabcxyzzzpqrrrabbxyyyypqAzz + aaabcxyzzzzpqrrrabbxyyyypqAzz + 0: aaabcxyzzzzpqrrrabbxyyyypqAzz + aaaabcxyzzzzpqrrrabbxyyyypqAzz + 0: aaaabcxyzzzzpqrrrabbxyyyypqAzz + aaaabcxyzzzzpqrrrabbbxyyyypqAzz + 0: aaaabcxyzzzzpqrrrabbbxyyyypqAzz + aaaabcxyzzzzpqrrrabbbxyyyyypqAzz + 0: aaaabcxyzzzzpqrrrabbbxyyyyypqAzz + aaabcxyzpqrrrabbxyyyypABzz + 0: aaabcxyzpqrrrabbxyyyypABzz + aaabcxyzpqrrrabbxyyyypABBzz + 0: aaabcxyzpqrrrabbxyyyypABBzz + >>>aaabxyzpqrrrabbxyyyypqAzz + 0: aaabxyzpqrrrabbxyyyypqAzz + >aaaabxyzpqrrrabbxyyyypqAzz + 0: 
aaaabxyzpqrrrabbxyyyypqAzz + >>>>abcxyzpqrrrabbxyyyypqAzz + 0: abcxyzpqrrrabbxyyyypqAzz +\= Expect no match + abxyzpqrrabbxyyyypqAzz +No match + abxyzpqrrrrabbxyyyypqAzz +No match + abxyzpqrrrabxyyyypqAzz +No match + aaaabcxyzzzzpqrrrabbbxyyyyyypqAzz +No match + aaaabcxyzzzzpqrrrabbbxyyypqAzz +No match + aaabcxyzpqrrrabbxyyyypqqqqqqqAzz +No match + +/^(abc){1,2}zz/ + abczz + 0: abczz + 1: abc + abcabczz + 0: abcabczz + 1: abc +\= Expect no match + zz +No match + abcabcabczz +No match + >>abczz +No match + +/^(b+?|a){1,2}?c/ + bc + 0: bc + 1: b + bbc + 0: bbc + 1: b + bbbc + 0: bbbc + 1: bb + bac + 0: bac + 1: a + bbac + 0: bbac + 1: a + aac + 0: aac + 1: a + abbbbbbbbbbbc + 0: abbbbbbbbbbbc + 1: bbbbbbbbbbb + bbbbbbbbbbbac + 0: bbbbbbbbbbbac + 1: a +\= Expect no match + aaac +No match + abbbbbbbbbbbac +No match + +/^(b+|a){1,2}c/ + bc + 0: bc + 1: b + bbc + 0: bbc + 1: bb + bbbc + 0: bbbc + 1: bbb + bac + 0: bac + 1: a + bbac + 0: bbac + 1: a + aac + 0: aac + 1: a + abbbbbbbbbbbc + 0: abbbbbbbbbbbc + 1: bbbbbbbbbbb + bbbbbbbbbbbac + 0: bbbbbbbbbbbac + 1: a +\= Expect no match + aaac +No match + abbbbbbbbbbbac +No match + +/^(b+|a){1,2}?bc/ + bbc + 0: bbc + 1: b + +/^(b*|ba){1,2}?bc/ + babc + 0: babc + 1: ba + bbabc + 0: bbabc + 1: ba + bababc + 0: bababc + 1: ba +\= Expect no match + bababbc +No match + babababc +No match + +/^(ba|b*){1,2}?bc/ + babc + 0: babc + 1: ba + bbabc + 0: bbabc + 1: ba + bababc + 0: bababc + 1: ba +\= Expect no match + bababbc +No match + babababc +No match + +#/^\ca\cA\c[;\c:/ +# \x01\x01\e;z +# 0: \x01\x01\x1b;z + +/^[ab\]cde]/ + athing + 0: a + bthing + 0: b + ]thing + 0: ] + cthing + 0: c + dthing + 0: d + ething + 0: e +\= Expect no match + fthing +No match + [thing +No match + \\thing +No match + +/^[]cde]/ + ]thing + 0: ] + cthing + 0: c + dthing + 0: d + ething + 0: e +\= Expect no match + athing +No match + fthing +No match + +/^[^ab\]cde]/ + fthing + 0: f + [thing + 0: [ + \\thing + 0: \ +\= Expect no match + athing +No match + 
bthing +No match + ]thing +No match + cthing +No match + dthing +No match + ething +No match + +/^[^]cde]/ + athing + 0: a + fthing + 0: f +\= Expect no match + ]thing +No match + cthing +No match + dthing +No match + ething +No match + +# DLC - I don't get this one +#/^\/ +#  +# 0: \x81 + +#updated to handle 16-bits utf8 +/^ÿ/ + ÿ + 0: \xc3\xbf + +/^[0-9]+$/ + 0 + 0: 0 + 1 + 0: 1 + 2 + 0: 2 + 3 + 0: 3 + 4 + 0: 4 + 5 + 0: 5 + 6 + 0: 6 + 7 + 0: 7 + 8 + 0: 8 + 9 + 0: 9 + 10 + 0: 10 + 100 + 0: 100 +\= Expect no match + abc +No match + +/^.*nter/ + enter + 0: enter + inter + 0: inter + uponter + 0: uponter + +/^xxx[0-9]+$/ + xxx0 + 0: xxx0 + xxx1234 + 0: xxx1234 +\= Expect no match + xxx +No match + +/^.+[0-9][0-9][0-9]$/ + x123 + 0: x123 + x1234 + 0: x1234 + xx123 + 0: xx123 + 123456 + 0: 123456 +\= Expect no match + 123 +No match + +/^.+?[0-9][0-9][0-9]$/ + x123 + 0: x123 + x1234 + 0: x1234 + xx123 + 0: xx123 + 123456 + 0: 123456 +\= Expect no match + 123 +No match + +/^([^!]+)!(.+)=apquxz\.ixr\.zzz\.ac\.uk$/ + abc!pqr=apquxz.ixr.zzz.ac.uk + 0: abc!pqr=apquxz.ixr.zzz.ac.uk + 1: abc + 2: pqr +\= Expect no match + !pqr=apquxz.ixr.zzz.ac.uk +No match + abc!=apquxz.ixr.zzz.ac.uk +No match + abc!pqr=apquxz:ixr.zzz.ac.uk +No match + abc!pqr=apquxz.ixr.zzz.ac.ukk +No match + +/:/ + Well, we need a colon: somewhere + 0: : +\= Expect no match + Fail without a colon +No match + +/([\da-f:]+)$/i + 0abc + 0: 0abc + 1: 0abc + abc + 0: abc + 1: abc + fed + 0: fed + 1: fed + E + 0: E + 1: E + :: + 0: :: + 1: :: + 5f03:12C0::932e + 0: 5f03:12C0::932e + 1: 5f03:12C0::932e + fed def + 0: def + 1: def + Any old stuff + 0: ff + 1: ff +\= Expect no match + 0zzz +No match + gzzz +No match + fed\x20 +No match + Any old rubbish +No match + +/^.*\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})$/ + .1.2.3 + 0: .1.2.3 + 1: 1 + 2: 2 + 3: 3 + A.12.123.0 + 0: A.12.123.0 + 1: 12 + 2: 123 + 3: 0 +\= Expect no match + .1.2.3333 +No match + 1.2.3 +No match + 1234.2.3 +No match + 
+/^(\d+)\s+IN\s+SOA\s+(\S+)\s+(\S+)\s*\(\s*$/ + 1 IN SOA non-sp1 non-sp2( + 0: 1 IN SOA non-sp1 non-sp2( + 1: 1 + 2: non-sp1 + 3: non-sp2 + 1 IN SOA non-sp1 non-sp2 ( + 0: 1 IN SOA non-sp1 non-sp2 ( + 1: 1 + 2: non-sp1 + 3: non-sp2 +\= Expect no match + 1IN SOA non-sp1 non-sp2( +No match + +/^[a-zA-Z\d][a-zA-Z\d\-]*(\.[a-zA-Z\d][a-zA-z\d\-]*)*\.$/ + a. + 0: a. + Z. + 0: Z. + 2. + 0: 2. + ab-c.pq-r. + 0: ab-c.pq-r. + 1: .pq-r + sxk.zzz.ac.uk. + 0: sxk.zzz.ac.uk. + 1: .uk + x-.y-. + 0: x-.y-. + 1: .y- +\= Expect no match + -abc.peq. +No match + +/^\*\.[a-z]([a-z\-\d]*[a-z\d]+)?(\.[a-z]([a-z\-\d]*[a-z\d]+)?)*$/ + *.a + 0: *.a + *.b0-a + 0: *.b0-a + 1: 0-a + *.c3-b.c + 0: *.c3-b.c + 1: 3-b + 2: .c + *.c-a.b-c + 0: *.c-a.b-c + 1: -a + 2: .b-c + 3: -c +\= Expect no match + *.0 +No match + *.a- +No match + *.a-b.c- +No match + *.c-a.0-c +No match + +/^(?=ab(de))(abd)(e)/ + abde + 0: abde + 1: de + 2: abd + 3: e + +/^(?!(ab)de|x)(abd)(f)/ + abdf + 0: abdf + 1: + 2: abd + 3: f + +/^(?=(ab(cd)))(ab)/ + abcd + 0: ab + 1: abcd + 2: cd + 3: ab + +/^[\da-f](\.[\da-f])*$/i + a.b.c.d + 0: a.b.c.d + 1: .d + A.B.C.D + 0: A.B.C.D + 1: .D + a.b.c.1.2.3.C + 0: a.b.c.1.2.3.C + 1: .C + +/^\".*\"\s*(;.*)?$/ + \"1234\" + 0: "1234" + \"abcd\" ; + 0: "abcd" ; + 1: ; + \"\" ; rhubarb + 0: "" ; rhubarb + 1: ; rhubarb +\= Expect no match + \"1234\" : things +No match + +/^$/ + \ + 0: +\= Expect no match + A non-empty line +No match + +/ ^ a (?# begins with a) b\sc (?# then b c) $ (?# then end)/x + ab c + 0: ab c +\= Expect no match + abc +No match + ab cde +No match + +/(?x) ^ a (?# begins with a) b\sc (?# then b c) $ (?# then end)/ + ab c + 0: ab c +\= Expect no match + abc +No match + ab cde +No match + +/^ a\ b[c ]d $/x + a bcd + 0: a bcd + a b d + 0: a b d +\= Expect no match + abcd +No match + ab d +No match + +/^(a(b(c)))(d(e(f)))(h(i(j)))(k(l(m)))$/ + abcdefhijklm + 0: abcdefhijklm + 1: abc + 2: bc + 3: c + 4: def + 5: ef + 6: f + 7: hij + 8: ij + 9: j +10: klm +11: lm +12: m + 
+/^(?:a(b(c)))(?:d(e(f)))(?:h(i(j)))(?:k(l(m)))$/ + abcdefhijklm + 0: abcdefhijklm + 1: bc + 2: c + 3: ef + 4: f + 5: ij + 6: j + 7: lm + 8: m + +#/^[\w][\W][\s][\S][\d][\D][\b][\n][\c]][\022]/ +# a+ Z0+\x08\n\x1d\x12 +# 0: a+ Z0+\x08\x0a\x1d\x12 + +/^[.^$|()*+?{,}]+/ + .^\$(*+)|{?,?} + 0: .^$(*+)|{?,?} + +/^a*\w/ + z + 0: z + az + 0: az + aaaz + 0: aaaz + a + 0: a + aa + 0: aa + aaaa + 0: aaaa + a+ + 0: a + aa+ + 0: aa + +/^a*?\w/ + z + 0: z + az + 0: a + aaaz + 0: a + a + 0: a + aa + 0: a + aaaa + 0: a + a+ + 0: a + aa+ + 0: a + +/^a+\w/ + az + 0: az + aaaz + 0: aaaz + aa + 0: aa + aaaa + 0: aaaa + aa+ + 0: aa + +/^a+?\w/ + az + 0: az + aaaz + 0: aa + aa + 0: aa + aaaa + 0: aa + aa+ + 0: aa + +/^\d{8}\w{2,}/ + 1234567890 + 0: 1234567890 + 12345678ab + 0: 12345678ab + 12345678__ + 0: 12345678__ +\= Expect no match + 1234567 +No match + +/^[aeiou\d]{4,5}$/ + uoie + 0: uoie + 1234 + 0: 1234 + 12345 + 0: 12345 + aaaaa + 0: aaaaa +\= Expect no match + 123456 +No match + +/^[aeiou\d]{4,5}?/ + uoie + 0: uoie + 1234 + 0: 1234 + 12345 + 0: 1234 + aaaaa + 0: aaaa + 123456 + 0: 1234 + +/\A(abc|def)=(\1){2,3}\Z/ + abc=abcabc + 0: abc=abcabc + 1: abc + 2: abc + def=defdefdef + 0: def=defdefdef + 1: def + 2: def +\= Expect no match + abc=defdef +No match + +/^(a)(b)(c)(d)(e)(f)(g)(h)(i)(j)(k)\11*(\3\4)\1(?#)2$/ + abcdefghijkcda2 + 0: abcdefghijkcda2 + 1: a + 2: b + 3: c + 4: d + 5: e + 6: f + 7: g + 8: h + 9: i +10: j +11: k +12: cd + abcdefghijkkkkcda2 + 0: abcdefghijkkkkcda2 + 1: a + 2: b + 3: c + 4: d + 5: e + 6: f + 7: g + 8: h + 9: i +10: j +11: k +12: cd + +/(cat(a(ract|tonic)|erpillar)) \1()2(3)/ + cataract cataract23 + 0: cataract cataract23 + 1: cataract + 2: aract + 3: ract + 4: + 5: 3 + catatonic catatonic23 + 0: catatonic catatonic23 + 1: catatonic + 2: atonic + 3: tonic + 4: + 5: 3 + caterpillar caterpillar23 + 0: caterpillar caterpillar23 + 1: caterpillar + 2: erpillar + 3: + 4: + 5: 3 + + +/^From +([^ ]+) +[a-zA-Z][a-zA-Z][a-zA-Z] +[a-zA-Z][a-zA-Z][a-zA-Z] 
+[0-9]?[0-9] +[0-9][0-9]:[0-9][0-9]/ + From abcd Mon Sep 01 12:33:02 1997 + 0: From abcd Mon Sep 01 12:33 + 1: abcd + +/^From\s+\S+\s+([a-zA-Z]{3}\s+){2}\d{1,2}\s+\d\d:\d\d/ + From abcd Mon Sep 01 12:33:02 1997 + 0: From abcd Mon Sep 01 12:33 + 1: Sep + From abcd Mon Sep 1 12:33:02 1997 + 0: From abcd Mon Sep 1 12:33 + 1: Sep +\= Expect no match + From abcd Sep 01 12:33:02 1997 +No match + +/^12.34/s + 12\n34 + 0: 12\x0a34 + 12\r34 + 0: 12\x0d34 + +/\w+(?=\t)/ + the quick brown\t fox + 0: brown + +/foo(?!bar)(.*)/ + foobar is foolish see? + 0: foolish see? + 1: lish see? + +/(?:(?!foo)...|^.{0,2})bar(.*)/ + foobar crowbar etc + 0: rowbar etc + 1: etc + barrel + 0: barrel + 1: rel + 2barrel + 0: 2barrel + 1: rel + A barrel + 0: A barrel + 1: rel + +/^(\D*)(?=\d)(?!123)/ + abc456 + 0: abc + 1: abc +\= Expect no match + abc123 +No match + +/^1234(?# test newlines + inside)/ + 1234 + 0: 1234 + +/^1234 #comment in extended re + /x + 1234 + 0: 1234 + +/#rhubarb + abcd/x + abcd + 0: abcd + +/^abcd#rhubarb/x + abcd + 0: abcd + +/^(a)\1{2,3}(.)/ + aaab + 0: aaab + 1: a + 2: b + aaaab + 0: aaaab + 1: a + 2: b + aaaaab + 0: aaaaa + 1: a + 2: a + aaaaaab + 0: aaaaa + 1: a + 2: a + +/(?!^)abc/ + the abc + 0: abc +\= Expect no match + abc +No match + +/(?=^)abc/ + abc + 0: abc +\= Expect no match + the abc +No match + +/^[ab]{1,3}(ab*|b)/ + aabbbbb + 0: aabb + 1: b + +/^[ab]{1,3}?(ab*|b)/ + aabbbbb + 0: aabbbbb + 1: abbbbb + +/^[ab]{1,3}?(ab*?|b)/ + aabbbbb + 0: aa + 1: a + +/^[ab]{1,3}(ab*?|b)/ + aabbbbb + 0: aabb + 1: b + +/ (?: [\040\t] | \( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) )* # optional leading comment +(?: (?: +[^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... +(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +| +" (?: # opening quote... 
+[^\\\x80-\xff\n\015"] # Anything except backslash and quote +| # or +\\ [^\x80-\xff] # Escaped something (something != CR) +)* " # closing quote +) # initial word +(?: (?: [\040\t] | \( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) )* \. (?: [\040\t] | \( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) )* (?: +[^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... +(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +| +" (?: # opening quote... +[^\\\x80-\xff\n\015"] # Anything except backslash and quote +| # or +\\ [^\x80-\xff] # Escaped something (something != CR) +)* " # closing quote +) )* # further okay, if led by a period +(?: [\040\t] | \( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) )* @ (?: [\040\t] | \( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) )* (?: +[^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... +(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +| \[ # [ +(?: [^\\\x80-\xff\n\015\[\]] | \\ [^\x80-\xff] )* # stuff +\] # ] +) # initial subdomain +(?: # +(?: [\040\t] | \( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) )* \. # if led by a period... +(?: [\040\t] | \( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) )* (?: +[^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... 
+(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +| \[ # [ +(?: [^\\\x80-\xff\n\015\[\]] | \\ [^\x80-\xff] )* # stuff +\] # ] +) # ...further okay +)* +# address +| # or +(?: +[^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... +(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +| +" (?: # opening quote... +[^\\\x80-\xff\n\015"] # Anything except backslash and quote +| # or +\\ [^\x80-\xff] # Escaped something (something != CR) +)* " # closing quote +) # one word, optionally followed by.... +(?: +[^()<>@,;:".\\\[\]\x80-\xff\000-\010\012-\037] | # atom and space parts, or... +\( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) | # comments, or... + +" (?: # opening quote... +[^\\\x80-\xff\n\015"] # Anything except backslash and quote +| # or +\\ [^\x80-\xff] # Escaped something (something != CR) +)* " # closing quote +# quoted strings +)* +< (?: [\040\t] | \( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) )* # leading < +(?: @ (?: [\040\t] | \( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) )* (?: +[^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... +(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +| \[ # [ +(?: [^\\\x80-\xff\n\015\[\]] | \\ [^\x80-\xff] )* # stuff +\] # ] +) # initial subdomain +(?: # +(?: [\040\t] | \( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) )* \. # if led by a period... 
+(?: [\040\t] | \( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) )* (?: +[^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... +(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +| \[ # [ +(?: [^\\\x80-\xff\n\015\[\]] | \\ [^\x80-\xff] )* # stuff +\] # ] +) # ...further okay +)* + +(?: (?: [\040\t] | \( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) )* , (?: [\040\t] | \( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) )* @ (?: [\040\t] | \( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) )* (?: +[^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... +(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +| \[ # [ +(?: [^\\\x80-\xff\n\015\[\]] | \\ [^\x80-\xff] )* # stuff +\] # ] +) # initial subdomain +(?: # +(?: [\040\t] | \( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) )* \. # if led by a period... +(?: [\040\t] | \( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) )* (?: +[^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... +(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +| \[ # [ +(?: [^\\\x80-\xff\n\015\[\]] | \\ [^\x80-\xff] )* # stuff +\] # ] +) # ...further okay +)* +)* # further okay, if led by comma +: # closing colon +(?: [\040\t] | \( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) )* )? # optional route +(?: +[^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... 
+(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +| +" (?: # opening quote... +[^\\\x80-\xff\n\015"] # Anything except backslash and quote +| # or +\\ [^\x80-\xff] # Escaped something (something != CR) +)* " # closing quote +) # initial word +(?: (?: [\040\t] | \( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) )* \. (?: [\040\t] | \( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) )* (?: +[^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... +(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +| +" (?: # opening quote... +[^\\\x80-\xff\n\015"] # Anything except backslash and quote +| # or +\\ [^\x80-\xff] # Escaped something (something != CR) +)* " # closing quote +) )* # further okay, if led by a period +(?: [\040\t] | \( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) )* @ (?: [\040\t] | \( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) )* (?: +[^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... +(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +| \[ # [ +(?: [^\\\x80-\xff\n\015\[\]] | \\ [^\x80-\xff] )* # stuff +\] # ] +) # initial subdomain +(?: # +(?: [\040\t] | \( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) )* \. # if led by a period... +(?: [\040\t] | \( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) )* (?: +[^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... 
+(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +| \[ # [ +(?: [^\\\x80-\xff\n\015\[\]] | \\ [^\x80-\xff] )* # stuff +\] # ] +) # ...further okay +)* +# address spec +(?: [\040\t] | \( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) )* > # trailing > +# name and address +) (?: [\040\t] | \( +(?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] | \( (?: [^\\\x80-\xff\n\015()] | \\ [^\x80-\xff] )* \) )* +\) )* # optional trailing comment +/x + Alan Other + 0: Alan Other + + 0: user@dom.ain + user\@dom.ain + 0: user@dom.ain + \"A. Other\" (a comment) + 0: "A. Other" (a comment) + A. Other (a comment) + 0: Other (a comment) + \"/s=user/ou=host/o=place/prmd=uu.yy/admd= /c=gb/\"\@x400-re.lay + 0: "/s=user/ou=host/o=place/prmd=uu.yy/admd= /c=gb/"@x400-re.lay + A missing angle @,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... +(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +# Atom +| # or +" # " +[^\\\x80-\xff\n\015"] * # normal +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015"] * )* # ( special normal* )* +" # " +# Quoted string +) +[\040\t]* # Nab whitespace. +(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +[\040\t]* )* # If comment found, allow more spaces. +(?: +\. +[\040\t]* # Nab whitespace. +(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +[\040\t]* )* # If comment found, allow more spaces. 
+(?: +[^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... +(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +# Atom +| # or +" # " +[^\\\x80-\xff\n\015"] * # normal +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015"] * )* # ( special normal* )* +" # " +# Quoted string +) +[\040\t]* # Nab whitespace. +(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +[\040\t]* )* # If comment found, allow more spaces. +# additional words +)* +@ +[\040\t]* # Nab whitespace. +(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +[\040\t]* )* # If comment found, allow more spaces. +(?: +[^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... +(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +| +\[ # [ +(?: [^\\\x80-\xff\n\015\[\]] | \\ [^\x80-\xff] )* # stuff +\] # ] +) +[\040\t]* # Nab whitespace. +(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +[\040\t]* )* # If comment found, allow more spaces. +# optional trailing comments +(?: +\. +[\040\t]* # Nab whitespace. 
+(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +[\040\t]* )* # If comment found, allow more spaces. +(?: +[^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... +(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +| +\[ # [ +(?: [^\\\x80-\xff\n\015\[\]] | \\ [^\x80-\xff] )* # stuff +\] # ] +) +[\040\t]* # Nab whitespace. +(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +[\040\t]* )* # If comment found, allow more spaces. +# optional trailing comments +)* +# address +| # or +(?: +[^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... +(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +# Atom +| # or +" # " +[^\\\x80-\xff\n\015"] * # normal +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015"] * )* # ( special normal* )* +" # " +# Quoted string +) +# leading word +[^()<>@,;:".\\\[\]\x80-\xff\000-\010\012-\037] * # "normal" atoms and or spaces +(?: +(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +| +" # " +[^\\\x80-\xff\n\015"] * # normal +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015"] * )* # ( special normal* )* +" # " +) # "special" comment or quoted string +[^()<>@,;:".\\\[\]\x80-\xff\000-\010\012-\037] * # more "normal" +)* +< +[\040\t]* # Nab whitespace. 
+(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +[\040\t]* )* # If comment found, allow more spaces. +# < +(?: +@ +[\040\t]* # Nab whitespace. +(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +[\040\t]* )* # If comment found, allow more spaces. +(?: +[^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... +(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +| +\[ # [ +(?: [^\\\x80-\xff\n\015\[\]] | \\ [^\x80-\xff] )* # stuff +\] # ] +) +[\040\t]* # Nab whitespace. +(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +[\040\t]* )* # If comment found, allow more spaces. +# optional trailing comments +(?: +\. +[\040\t]* # Nab whitespace. +(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +[\040\t]* )* # If comment found, allow more spaces. +(?: +[^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... 
+(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +| +\[ # [ +(?: [^\\\x80-\xff\n\015\[\]] | \\ [^\x80-\xff] )* # stuff +\] # ] +) +[\040\t]* # Nab whitespace. +(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +[\040\t]* )* # If comment found, allow more spaces. +# optional trailing comments +)* +(?: , +[\040\t]* # Nab whitespace. +(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +[\040\t]* )* # If comment found, allow more spaces. +@ +[\040\t]* # Nab whitespace. +(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +[\040\t]* )* # If comment found, allow more spaces. +(?: +[^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... +(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +| +\[ # [ +(?: [^\\\x80-\xff\n\015\[\]] | \\ [^\x80-\xff] )* # stuff +\] # ] +) +[\040\t]* # Nab whitespace. +(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +[\040\t]* )* # If comment found, allow more spaces. +# optional trailing comments +(?: +\. +[\040\t]* # Nab whitespace. 
+(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +[\040\t]* )* # If comment found, allow more spaces. +(?: +[^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... +(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +| +\[ # [ +(?: [^\\\x80-\xff\n\015\[\]] | \\ [^\x80-\xff] )* # stuff +\] # ] +) +[\040\t]* # Nab whitespace. +(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +[\040\t]* )* # If comment found, allow more spaces. +# optional trailing comments +)* +)* # additional domains +: +[\040\t]* # Nab whitespace. +(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +[\040\t]* )* # If comment found, allow more spaces. +# optional trailing comments +)? # optional route +(?: +[^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... +(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +# Atom +| # or +" # " +[^\\\x80-\xff\n\015"] * # normal +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015"] * )* # ( special normal* )* +" # " +# Quoted string +) +[\040\t]* # Nab whitespace. 
+(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +[\040\t]* )* # If comment found, allow more spaces. +(?: +\. +[\040\t]* # Nab whitespace. +(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +[\040\t]* )* # If comment found, allow more spaces. +(?: +[^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... +(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +# Atom +| # or +" # " +[^\\\x80-\xff\n\015"] * # normal +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015"] * )* # ( special normal* )* +" # " +# Quoted string +) +[\040\t]* # Nab whitespace. +(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +[\040\t]* )* # If comment found, allow more spaces. +# additional words +)* +@ +[\040\t]* # Nab whitespace. +(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +[\040\t]* )* # If comment found, allow more spaces. +(?: +[^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... 
+(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +| +\[ # [ +(?: [^\\\x80-\xff\n\015\[\]] | \\ [^\x80-\xff] )* # stuff +\] # ] +) +[\040\t]* # Nab whitespace. +(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +[\040\t]* )* # If comment found, allow more spaces. +# optional trailing comments +(?: +\. +[\040\t]* # Nab whitespace. +(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +[\040\t]* )* # If comment found, allow more spaces. +(?: +[^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]+ # some number of atom characters... +(?![^(\040)<>@,;:".\\\[\]\000-\037\x80-\xff]) # ..not followed by something that could be part of an atom +| +\[ # [ +(?: [^\\\x80-\xff\n\015\[\]] | \\ [^\x80-\xff] )* # stuff +\] # ] +) +[\040\t]* # Nab whitespace. +(?: +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: # ( +(?: \\ [^\x80-\xff] | +\( # ( +[^\\\x80-\xff\n\015()] * # normal* +(?: \\ [^\x80-\xff] [^\\\x80-\xff\n\015()] * )* # (special normal*)* +\) # ) +) # special +[^\\\x80-\xff\n\015()] * # normal* +)* # )* +\) # ) +[\040\t]* )* # If comment found, allow more spaces. +# optional trailing comments +)* +# address spec +> # > +# name and address +) +/x + Alan Other + 0: Alan Other + + 0: user@dom.ain + user\@dom.ain + 0: user@dom.ain + \"A. Other\" (a comment) + 0: "A. Other" + A. 
Other (a comment) + 0: Other + \"/s=user/ou=host/o=place/prmd=uu.yy/admd= /c=gb/\"\@x400-re.lay + 0: "/s=user/ou=host/o=place/prmd=uu.yy/admd= /c=gb/"@x400-re.lay + A missing angle ?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~\x7f + +/P[^*]TAIRE[^*]{1,6}?LL/ + xxxxxxxxxxxPSTAIREISLLxxxxxxxxx + 0: PSTAIREISLL + +/P[^*]TAIRE[^*]{1,}?LL/ + xxxxxxxxxxxPSTAIREISLLxxxxxxxxx + 0: PSTAIREISLL + +/(\.\d\d[1-9]?)\d+/ + 1.230003938 + 0: .230003938 + 1: .23 + 1.875000282 + 0: .875000282 + 1: .875 + 1.235 + 0: .235 + 1: .23 + +/(\.\d\d((?=0)|\d(?=\d)))/ + 1.230003938 + 0: .23 + 1: .23 + 2: + 1.875000282 + 0: .875 + 1: .875 + 2: 5 +\= Expect no match + 1.235 +No match + +/\b(foo)\s+(\w+)/i + Food is on the foo table + 0: foo table + 1: foo + 2: table + +/foo(.*)bar/ + The food is under the bar in the barn. + 0: food is under the bar in the bar + 1: d is under the bar in the + +/foo(.*?)bar/ + The food is under the bar in the barn. + 0: food is under the bar + 1: d is under the + +/(.*)(\d*)/ + I have 2 numbers: 53147 + 0: I have 2 numbers: 53147 + 1: I have 2 numbers: 53147 + 2: + +/(.*)(\d+)/ + I have 2 numbers: 53147 + 0: I have 2 numbers: 53147 + 1: I have 2 numbers: 5314 + 2: 7 + +/(.*?)(\d*)/ + I have 2 numbers: 53147 + 0: + 1: + 2: + +/(.*?)(\d+)/ + I have 2 numbers: 53147 + 0: I have 2 + 1: I have + 2: 2 + +/(.*)(\d+)$/ + I have 2 numbers: 53147 + 0: I have 2 numbers: 53147 + 1: I have 2 numbers: 5314 + 2: 7 + +/(.*?)(\d+)$/ + I have 2 numbers: 53147 + 0: I have 2 numbers: 53147 + 1: I have 2 numbers: + 2: 53147 + +/(.*)\b(\d+)$/ + I have 2 numbers: 53147 + 0: I have 2 numbers: 53147 + 1: I have 2 numbers: + 2: 53147 + +/(.*\D)(\d+)$/ + I have 2 numbers: 53147 + 0: I have 2 numbers: 53147 + 1: I have 2 numbers: + 2: 53147 + +/^\D*(?!123)/ + ABC123 + 0: AB + +/^(\D*)(?=\d)(?!123)/ + ABC445 + 0: ABC + 1: ABC +\= Expect no match + ABC123 +No match + +/^[W-]46]/ + W46]789 + 0: W46] + -46]789 + 0: -46] +\= Expect no match + Wall +No match + Zebra +No match + 
42 +No match + [abcd] +No match + ]abcd[ +No match + +/^[W-\]46]/ + W46]789 + 0: W + Wall + 0: W + Zebra + 0: Z + Xylophone + 0: X + 42 + 0: 4 + [abcd] + 0: [ + ]abcd[ + 0: ] + \\backslash + 0: \ +\= Expect no match + -46]789 +No match + well +No match + +/\d\d\/\d\d\/\d\d\d\d/ + 01/01/2000 + 0: 01/01/2000 + +/word (?:[a-zA-Z0-9]+ ){0,10}otherword/ + word cat dog elephant mussel cow horse canary baboon snake shark otherword + 0: word cat dog elephant mussel cow horse canary baboon snake shark otherword +\= Expect no match + word cat dog elephant mussel cow horse canary baboon snake shark +No match + +/word (?:[a-zA-Z0-9]+ ){0,300}otherword/ +\= Expect no match + word cat dog elephant mussel cow horse canary baboon snake shark the quick brown fox and the lazy dog and several other words getting close to thirty by now I hope +No match + +/^(a){0,0}/ + bcd + 0: + abc + 0: + aab + 0: + +/^(a){0,1}/ + bcd + 0: + abc + 0: a + 1: a + aab + 0: a + 1: a + +/^(a){0,2}/ + bcd + 0: + abc + 0: a + 1: a + aab + 0: aa + 1: a + +/^(a){0,3}/ + bcd + 0: + abc + 0: a + 1: a + aab + 0: aa + 1: a + aaa + 0: aaa + 1: a + +/^(a){0,}/ + bcd + 0: + abc + 0: a + 1: a + aab + 0: aa + 1: a + aaa + 0: aaa + 1: a + aaaaaaaa + 0: aaaaaaaa + 1: a + +/^(a){1,1}/ + abc + 0: a + 1: a + aab + 0: a + 1: a +\= Expect no match + bcd +No match + +/^(a){1,2}/ + abc + 0: a + 1: a + aab + 0: aa + 1: a +\= Expect no match + bcd +No match + +/^(a){1,3}/ + abc + 0: a + 1: a + aab + 0: aa + 1: a + aaa + 0: aaa + 1: a +\= Expect no match + bcd +No match + +/^(a){1,}/ + abc + 0: a + 1: a + aab + 0: aa + 1: a + aaa + 0: aaa + 1: a + aaaaaaaa + 0: aaaaaaaa + 1: a +\= Expect no match + bcd +No match + +/.*\.gif/ + borfle\nbib.gif\nno + 0: bib.gif + +/.{0,}\.gif/ + borfle\nbib.gif\nno + 0: bib.gif + +/.*\.gif/m + borfle\nbib.gif\nno + 0: bib.gif + +/.*\.gif/s + borfle\nbib.gif\nno + 0: borfle\x0abib.gif + +/.*\.gif/ms + borfle\nbib.gif\nno + 0: borfle\x0abib.gif + +/.*$/ + borfle\nbib.gif\nno + 0: no + +/.*$/m + 
borfle\nbib.gif\nno + 0: borfle + +/.*$/s + borfle\nbib.gif\nno + 0: borfle\x0abib.gif\x0ano + +/.*$/ms + borfle\nbib.gif\nno + 0: borfle\x0abib.gif\x0ano + +/.*$/ + borfle\nbib.gif\nno\n + 0: no + +/.*$/m + borfle\nbib.gif\nno\n + 0: borfle + +/.*$/s + borfle\nbib.gif\nno\n + 0: borfle\x0abib.gif\x0ano\x0a + +/.*$/ms + borfle\nbib.gif\nno\n + 0: borfle\x0abib.gif\x0ano\x0a + +/(.*X|^B)/ + abcde\n1234Xyz + 0: 1234X + 1: 1234X + BarFoo + 0: B + 1: B +\= Expect no match + abcde\nBar +No match + +/(.*X|^B)/m + abcde\n1234Xyz + 0: 1234X + 1: 1234X + BarFoo + 0: B + 1: B + abcde\nBar + 0: B + 1: B + +/(.*X|^B)/s + abcde\n1234Xyz + 0: abcde\x0a1234X + 1: abcde\x0a1234X + BarFoo + 0: B + 1: B +\= Expect no match + abcde\nBar +No match + +/(.*X|^B)/ms + abcde\n1234Xyz + 0: abcde\x0a1234X + 1: abcde\x0a1234X + BarFoo + 0: B + 1: B + abcde\nBar + 0: B + 1: B + +/(?s)(.*X|^B)/ + abcde\n1234Xyz + 0: abcde\x0a1234X + 1: abcde\x0a1234X + BarFoo + 0: B + 1: B +\= Expect no match + abcde\nBar +No match + +/(?s:.*X|^B)/ + abcde\n1234Xyz + 0: abcde\x0a1234X + BarFoo + 0: B +\= Expect no match + abcde\nBar +No match + +/^.*B/ +\= Expect no match + abc\nB +No match + +/(?s)^.*B/ + abc\nB + 0: abc\x0aB + +/(?m)^.*B/ + abc\nB + 0: B + +/(?ms)^.*B/ + abc\nB + 0: abc\x0aB + +/(?ms)^B/ + abc\nB + 0: B + +/(?s)B$/ + B\n + 0: B + +/^[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]/ + 123456654321 + 0: 123456654321 + +/^\d\d\d\d\d\d\d\d\d\d\d\d/ + 123456654321 + 0: 123456654321 + +/^[\d][\d][\d][\d][\d][\d][\d][\d][\d][\d][\d][\d]/ + 123456654321 + 0: 123456654321 + +/^[abc]{12}/ + abcabcabcabc + 0: abcabcabcabc + +/^[a-c]{12}/ + abcabcabcabc + 0: abcabcabcabc + +/^(a|b|c){12}/ + abcabcabcabc + 0: abcabcabcabc + 1: c + +/^[abcdefghijklmnopqrstuvwxy0123456789]/ + n + 0: n +\= Expect no match + z +No match + +/abcde{0,0}/ + abcd + 0: abcd +\= Expect no match + abce +No match + +/ab[cd]{0,0}e/ + abe + 0: abe +\= Expect no match + abcde +No match + +/ab(c){0,0}d/ + abd + 0: abd +\= 
Expect no match + abcd +No match + +/a(b*)/ + a + 0: a + 1: + ab + 0: ab + 1: b + abbbb + 0: abbbb + 1: bbbb +\= Expect no match + bbbbb +No match + +/ab\d{0}e/ + abe + 0: abe +\= Expect no match + ab1e +No match + +/"([^\\"]+|\\.)*"/ + the \"quick\" brown fox + 0: "quick" + 1: quick + \"the \\\"quick\\\" brown fox\" + 0: "the \"quick\" brown fox" + 1: brown fox + +/]{0,})>]{0,})>([\d]{0,}\.)(.*)((
([\w\W\s\d][^<>]{0,})|[\s]{0,}))<\/a><\/TD>]{0,})>([\w\W\s\d][^<>]{0,})<\/TD>]{0,})>([\w\W\s\d][^<>]{0,})<\/TD><\/TR>/is + 43.Word Processor
(N-1286)
Lega lstaff.comCA - Statewide + 0: 43.Word Processor
(N-1286)
Lega lstaff.comCA - Statewide + 1: BGCOLOR='#DBE9E9' + 2: align=left valign=top + 3: 43. + 4: Word Processor
(N-1286) + 5: + 6: + 7: + 8: align=left valign=top + 9: Lega lstaff.com +10: align=left valign=top +11: CA - Statewide + +/a[^a]b/ + acb + 0: acb + a\nb + 0: a\x0ab + +/a.b/ + acb + 0: acb +\= Expect no match + a\nb +No match + +/a[^a]b/s + acb + 0: acb + a\nb + 0: a\x0ab + +/a.b/s + acb + 0: acb + a\nb + 0: a\x0ab + +/^(b+?|a){1,2}?c/ + bac + 0: bac + 1: a + bbac + 0: bbac + 1: a + bbbac + 0: bbbac + 1: a + bbbbac + 0: bbbbac + 1: a + bbbbbac + 0: bbbbbac + 1: a + +/^(b+|a){1,2}?c/ + bac + 0: bac + 1: a + bbac + 0: bbac + 1: a + bbbac + 0: bbbac + 1: a + bbbbac + 0: bbbbac + 1: a + bbbbbac + 0: bbbbbac + 1: a + +/(?!\A)x/m + a\bx\n + 0: x + a\nx\n + 0: x +\= Expect no match + x\nb\n +No match + +/(A|B)*?CD/ + CD + 0: CD + +/(A|B)*CD/ + CD + 0: CD + +/(AB)*?\1/ + ABABAB + 0: ABAB + 1: AB + +/(AB)*\1/ + ABABAB + 0: ABABAB + 1: AB + +/(?.*/)foo" + /this/is/a/very/long/line/in/deed/with/very/many/slashes/in/and/foo + 0: /this/is/a/very/long/line/in/deed/with/very/many/slashes/in/and/foo +\= Expect no match + /this/is/a/very/long/line/in/deed/with/very/many/slashes/in/it/you/see/ +No match + +/(?>(\.\d\d[1-9]?))\d+/ + 1.230003938 + 0: .230003938 + 1: .23 + 1.875000282 + 0: .875000282 + 1: .875 +\= Expect no match + 1.235 +No match + +/^((?>\w+)|(?>\s+))*$/ + now is the time for all good men to come to the aid of the party + 0: now is the time for all good men to come to the aid of the party + 1: party +\= Expect no match + this is not a line with only words and spaces! 
+No match + +/(\d+)(\w)/ + 12345a + 0: 12345a + 1: 12345 + 2: a + 12345+ + 0: 12345 + 1: 1234 + 2: 5 + +/((?>\d+))(\w)/ + 12345a + 0: 12345a + 1: 12345 + 2: a +\= Expect no match + 12345+ +No match + +/(?>a+)b/ + aaab + 0: aaab + +/((?>a+)b)/ + aaab + 0: aaab + 1: aaab + +/(?>(a+))b/ + aaab + 0: aaab + 1: aaa + +/(?>b)+/ + aaabbbccc + 0: bbb + +/(?>a+|b+|c+)*c/ + aaabbbbccccd + 0: aaabbbbc + +/((?>[^()]+)|\([^()]*\))+/ + ((abc(ade)ufh()()x + 0: abc(ade)ufh()()x + 1: x + +/\(((?>[^()]+)|\([^()]+\))+\)/ + (abc) + 0: (abc) + 1: abc + (abc(def)xyz) + 0: (abc(def)xyz) + 1: xyz +\= Expect no match + ((()aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +No match + +/a(?-i)b/i + ab + 0: ab + Ab + 0: Ab +\= Expect no match + aB +No match + AB +No match + +/(a (?x)b c)d e/ + a bcd e + 0: a bcd e + 1: a bc +\= Expect no match + a b cd e +No match + abcd e +No match + a bcde +No match + +/(a b(?x)c d (?-x)e f)/ + a bcde f + 0: a bcde f + 1: a bcde f +\= Expect no match + abcdef +No match + +/(a(?i)b)c/ + abc + 0: abc + 1: ab + aBc + 0: aBc + 1: aB +\= Expect no match + abC +No match + aBC +No match + Abc +No match + ABc +No match + ABC +No match + AbC +No match + +/a(?i:b)c/ + abc + 0: abc + aBc + 0: aBc +\= Expect no match + ABC +No match + abC +No match + aBC +No match + +/a(?i:b)*c/ + aBc + 0: aBc + aBBc + 0: aBBc +\= Expect no match + aBC +No match + aBBC +No match + +/a(?=b(?i)c)\w\wd/ + abcd + 0: abcd + abCd + 0: abCd +\= Expect no match + aBCd +No match + abcD +No match + +/(?s-i:more.*than).*million/i + more than million + 0: more than million + more than MILLION + 0: more than MILLION + more \n than Million + 0: more \x0a than Million +\= Expect no match + MORE THAN MILLION +No match + more \n than \n million +No match + +/(?:(?s-i)more.*than).*million/i + more than million + 0: more than million + more than MILLION + 0: more than MILLION + more \n than Million + 0: more \x0a than Million +\= Expect no match + MORE THAN MILLION +No match + more \n than \n million +No 
match + +/(?>a(?i)b+)+c/ + abc + 0: abc + aBbc + 0: aBbc + aBBc + 0: aBBc +\= Expect no match + Abc +No match + abAb +No match + abbC +No match + +/(?=a(?i)b)\w\wc/ + abc + 0: abc + aBc + 0: aBc +\= Expect no match + Ab +No match + abC +No match + aBC +No match + +/(?<=a(?i)b)(\w\w)c/ + abxxc + 0: xxc + 1: xx + aBxxc + 0: xxc + 1: xx +\= Expect no match + Abxxc +No match + ABxxc +No match + abxxC +No match + +/(?:(a)|b)(?(1)A|B)/ + aA + 0: aA + 1: a + bB + 0: bB +\= Expect no match + aB +No match + bA +No match + +/^(a)?(?(1)a|b)+$/ + aa + 0: aa + 1: a + b + 0: b + bb + 0: bb +\= Expect no match + ab +No match + +# Perl gets this next one wrong if the pattern ends with $; in that case it +# fails to match "12". + +/^(?(?=abc)\w{3}:|\d\d)/ + abc: + 0: abc: + 12 + 0: 12 + 123 + 0: 12 +\= Expect no match + xyz +No match + +/^(?(?!abc)\d\d|\w{3}:)$/ + abc: + 0: abc: + 12 + 0: 12 +\= Expect no match + 123 +No match + xyz +No match + +/(?(?<=foo)bar|cat)/ + foobar + 0: bar + cat + 0: cat + fcat + 0: cat + focat + 0: cat +\= Expect no match + foocat +No match + +/(?(?a*)*/ + a + 0: a + aa + 0: aa + aaaa + 0: aaaa + +/(abc|)+/ + abc + 0: abc + 1: + abcabc + 0: abcabc + 1: + abcabcabc + 0: abcabcabc + 1: + xyz + 0: + 1: + +/([a]*)*/ + a + 0: a + 1: + aaaaa + 0: aaaaa + 1: + +/([ab]*)*/ + a + 0: a + 1: + b + 0: b + 1: + ababab + 0: ababab + 1: + aaaabcde + 0: aaaab + 1: + bbbb + 0: bbbb + 1: + +/([^a]*)*/ + b + 0: b + 1: + bbbb + 0: bbbb + 1: + aaa + 0: + 1: + +/([^ab]*)*/ + cccc + 0: cccc + 1: + abab + 0: + 1: + +/([a]*?)*/ + a + 0: + 1: + aaaa + 0: + 1: + +/([ab]*?)*/ + a + 0: + 1: + b + 0: + 1: + abab + 0: + 1: + baba + 0: + 1: + +/([^a]*?)*/ + b + 0: + 1: + bbbb + 0: + 1: + aaa + 0: + 1: + +/([^ab]*?)*/ + c + 0: + 1: + cccc + 0: + 1: + baba + 0: + 1: + +/(?>a*)*/ + a + 0: a + aaabcde + 0: aaa + +/((?>a*))*/ + aaaaa + 0: aaaaa + 1: + aabbaa + 0: aa + 1: + +/((?>a*?))*/ + aaaaa + 0: + 1: + aabbaa + 0: + 1: + +/(?(?=[^a-z]+[a-z]) \d{2}-[a-z]{3}-\d{2} | \d{2}-\d{2}-\d{2} ) 
/x + 12-sep-98 + 0: 12-sep-98 + 12-09-98 + 0: 12-09-98 +\= Expect no match + sep-12-98 +No match + +/(?<=(foo))bar\1/ + foobarfoo + 0: barfoo + 1: foo + foobarfootling + 0: barfoo + 1: foo +\= Expect no match + foobar +No match + barfoo +No match + +/(?i:saturday|sunday)/ + saturday + 0: saturday + sunday + 0: sunday + Saturday + 0: Saturday + Sunday + 0: Sunday + SATURDAY + 0: SATURDAY + SUNDAY + 0: SUNDAY + SunDay + 0: SunDay + +/(a(?i)bc|BB)x/ + abcx + 0: abcx + 1: abc + aBCx + 0: aBCx + 1: aBC + bbx + 0: bbx + 1: bb + BBx + 0: BBx + 1: BB +\= Expect no match + abcX +No match + aBCX +No match + bbX +No match + BBX +No match + +/^([ab](?i)[cd]|[ef])/ + ac + 0: ac + 1: ac + aC + 0: aC + 1: aC + bD + 0: bD + 1: bD + elephant + 0: e + 1: e + Europe + 0: E + 1: E + frog + 0: f + 1: f + France + 0: F + 1: F +\= Expect no match + Africa +No match + +/^(ab|a(?i)[b-c](?m-i)d|x(?i)y|z)/ + ab + 0: ab + 1: ab + aBd + 0: aBd + 1: aBd + xy + 0: xy + 1: xy + xY + 0: xY + 1: xY + zebra + 0: z + 1: z + Zambesi + 0: Z + 1: Z +\= Expect no match + aCD +No match + XY +No match + +/(?<=foo\n)^bar/m + foo\nbar + 0: bar +\= Expect no match + bar +No match + baz\nbar +No match + +/(?<=(?]&/ + <&OUT + 0: <& + +/^(a\1?){4}$/ + aaaaaaaaaa + 0: aaaaaaaaaa + 1: aaaa +\= Expect no match + AB +No match + aaaaaaaaa +No match + aaaaaaaaaaa +No match + +/^(a(?(1)\1)){4}$/ + aaaaaaaaaa + 0: aaaaaaaaaa + 1: aaaa +\= Expect no match + aaaaaaaaa +No match + aaaaaaaaaaa +No match + +/(?:(f)(o)(o)|(b)(a)(r))*/ + foobar + 0: foobar + 1: f + 2: o + 3: o + 4: b + 5: a + 6: r + +/(?<=a)b/ + ab + 0: b +\= Expect no match + cb +No match + b +No match + +/(? 
+ 2: abcd + xy:z:::abcd + 0: xy:z:::abcd + 1: xy:z::: + 2: abcd + +/^[^bcd]*(c+)/ + aexycd + 0: aexyc + 1: c + +/(a*)b+/ + caab + 0: aab + 1: aa + +/([\w:]+::)?(\w+)$/ + abcd + 0: abcd + 1: + 2: abcd + xy:z:::abcd + 0: xy:z:::abcd + 1: xy:z::: + 2: abcd +\= Expect no match + abcd: +No match + abcd: +No match + +/^[^bcd]*(c+)/ + aexycd + 0: aexyc + 1: c + +/(>a+)ab/ + +/(?>a+)b/ + aaab + 0: aaab + +/([[:]+)/ + a:[b]: + 0: :[ + 1: :[ + +/([[=]+)/ + a=[b]= + 0: =[ + 1: =[ + +/([[.]+)/ + a.[b]. + 0: .[ + 1: .[ + +/((?>a+)b)/ + aaab + 0: aaab + 1: aaab + +/(?>(a+))b/ + aaab + 0: aaab + 1: aaa + +/((?>[^()]+)|\([^()]*\))+/ + ((abc(ade)ufh()()x + 0: abc(ade)ufh()()x + 1: x + +/a\Z/ +\= Expect no match + aaab +No match + a\nb\n +No match + +/b\Z/ + a\nb\n + 0: b + +/b\z/ + +/b\Z/ + a\nb + 0: b + +/b\z/ + a\nb + 0: b + +/^(?>(?(1)\.|())[^\W_](?>[a-z0-9-]*[^\W_])?)+$/ + a + 0: a + 1: + abc + 0: abc + 1: + a-b + 0: a-b + 1: + 0-9 + 0: 0-9 + 1: + a.b + 0: a.b + 1: + 5.6.7 + 0: 5.6.7 + 1: + the.quick.brown.fox + 0: the.quick.brown.fox + 1: + a100.b200.300c + 0: a100.b200.300c + 1: + 12-ab.1245 + 0: 12-ab.1245 + 1: +\= Expect no match + \ +No match + .a +No match + -a +No match + a- +No match + a. +No match + a_b +No match + a.- +No match + a.. +No match + ab..bc +No match + the.quick.brown.fox- +No match + the.quick.brown.fox. 
+No match + the.quick.brown.fox_ +No match + the.quick.brown.fox+ +No match + +/(?>.*)(?<=(abcd|wxyz))/ + alphabetabcd + 0: alphabetabcd + 1: abcd + endingwxyz + 0: endingwxyz + 1: wxyz +\= Expect no match + a rather long string that doesn't end with one of them +No match + +/word (?>(?:(?!otherword)[a-zA-Z0-9]+ ){0,30})otherword/ + word cat dog elephant mussel cow horse canary baboon snake shark otherword + 0: word cat dog elephant mussel cow horse canary baboon snake shark otherword +\= Expect no match + word cat dog elephant mussel cow horse canary baboon snake shark +No match + +/word (?>[a-zA-Z0-9]+ ){0,30}otherword/ +\= Expect no match + word cat dog elephant mussel cow horse canary baboon snake shark the quick brown fox and the lazy dog and several other words getting close to thirty by now I hope +No match + +/(?<=\d{3}(?!999))foo/ + 999foo + 0: foo + 123999foo + 0: foo +\= Expect no match + 123abcfoo +No match + +/(?<=(?!...999)\d{3})foo/ + 999foo + 0: foo + 123999foo + 0: foo +\= Expect no match + 123abcfoo +No match + +/(?<=\d{3}(?!999)...)foo/ + 123abcfoo + 0: foo + 123456foo + 0: foo +\= Expect no match + 123999foo +No match + +/(?<=\d{3}...)(? + 2: + 3: abcd +
+ 2: + 3: abcd + \s*)=(?>\s*) # find + 2: + 3: abcd + Z)+|A)*/ + ZABCDEFG + 0: ZA + 1: A + +/((?>)+|A)*/ + ZABCDEFG + 0: + 1: + +/^[\d-a]/ + abcde + 0: a + -things + 0: - + 0digit + 0: 0 +\= Expect no match + bcdef +No match + +/[\s]+/ + > \x09\x0a\x0c\x0d\x0b< + 0: \x09\x0a\x0c\x0d\x0b + +/\s+/ + > \x09\x0a\x0c\x0d\x0b< + 0: \x09\x0a\x0c\x0d\x0b + +/a b/x + ab + 0: ab + +/(?!\A)x/m + a\nxb\n + 0: x + +/(?!^)x/m +\= Expect no match + a\nxb\n +No match + +#/abc\Qabc\Eabc/ +# abcabcabc +# 0: abcabcabc + +#/abc\Q(*+|\Eabc/ +# abc(*+|abc +# 0: abc(*+|abc + +#/ abc\Q abc\Eabc/x +# abc abcabc +# 0: abc abcabc +#\= Expect no match +# abcabcabc +#No match + +#/abc#comment +# \Q#not comment +# literal\E/x +# abc#not comment\n literal +# 0: abc#not comment\x0a literal + +#/abc#comment +# \Q#not comment +# literal/x +# abc#not comment\n literal +# 0: abc#not comment\x0a literal + +#/abc#comment +# \Q#not comment +# literal\E #more comment +# /x +# abc#not comment\n literal +# 0: abc#not comment\x0a literal + +#/abc#comment +# \Q#not comment +# literal\E #more comment/x +# abc#not comment\n literal +# 0: abc#not comment\x0a literal + +#/\Qabc\$xyz\E/ +# abc\\\$xyz +# 0: abc\$xyz + +#/\Qabc\E\$\Qxyz\E/ +# abc\$xyz +# 0: abc$xyz + +/\Gabc/ + abc + 0: abc +\= Expect no match + xyzabc +No match + +/a(?x: b c )d/ + XabcdY + 0: abcd +\= Expect no match + Xa b c d Y +No match + +/((?x)x y z | a b c)/ + XabcY + 0: abc + 1: abc + AxyzB + 0: xyz + 1: xyz + +/(?i)AB(?-i)C/ + XabCY + 0: abC +\= Expect no match + XabcY +No match + +/((?i)AB(?-i)C|D)E/ + abCE + 0: abCE + 1: abC + DE + 0: DE + 1: D +\= Expect no match + abcE +No match + abCe +No match + dE +No match + De +No match + +/(.*)\d+\1/ + abc123abc + 0: abc123abc + 1: abc + abc123bc + 0: bc123bc + 1: bc + +/(.*)\d+\1/s + abc123abc + 0: abc123abc + 1: abc + abc123bc + 0: bc123bc + 1: bc + +/((.*))\d+\1/ + abc123abc + 0: abc123abc + 1: abc + 2: abc + abc123bc + 0: bc123bc + 1: bc + 2: bc + +# This tests for an IPv6 address in the form 
where it can have up to +# eight components, one and only one of which is empty. This must be +# an internal component. + +/^(?!:) # colon disallowed at start + (?: # start of item + (?: [0-9a-f]{1,4} | # 1-4 hex digits or + (?(1)0 | () ) ) # if null previously matched, fail; else null + : # followed by colon + ){1,7} # end item; 1-7 of them required + [0-9a-f]{1,4} $ # final hex number at end of string + (?(1)|.) # check that there was an empty component + /ix + a123::a123 + 0: a123::a123 + 1: + a123:b342::abcd + 0: a123:b342::abcd + 1: + a123:b342::324e:abcd + 0: a123:b342::324e:abcd + 1: + a123:ddde:b342::324e:abcd + 0: a123:ddde:b342::324e:abcd + 1: + a123:ddde:b342::324e:dcba:abcd + 0: a123:ddde:b342::324e:dcba:abcd + 1: + a123:ddde:9999:b342::324e:dcba:abcd + 0: a123:ddde:9999:b342::324e:dcba:abcd + 1: +\= Expect no match + 1:2:3:4:5:6:7:8 +No match + a123:bce:ddde:9999:b342::324e:dcba:abcd +No match + a123::9999:b342::324e:dcba:abcd +No match + abcde:2:3:4:5:6:7:8 +No match + ::1 +No match + abcd:fee0:123:: +No match + :1 +No match + 1: +No match + +#/[z\Qa-d]\E]/ +# z +# 0: z +# a +# 0: a +# - +# 0: - +# d +# 0: d +# ] +# 0: ] +#\= Expect no match +# b +#No match + +#TODO: PCRE has an optimization to make this workable, .NET does not +#/(a+)*b/ +#\= Expect no match +# aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +#No match + +# All these had to be updated because we understand unicode +# and this looks like it's expecting single byte matches + +# .NET generates \xe4...not sure what's up, might just be different code pages +/(?i)reg(?:ul(?:[aä]|ae)r|ex)/ + REGular + 0: REGular + regulaer + 0: regulaer + Regex + 0: Regex + regulär + 0: regul\xc3\xa4r + +#/Åæåä[à-ÿÀ-ß]+/ +# Åæåäà +# 0: \xc5\xe6\xe5\xe4\xe0 +# Åæåäÿ +# 0: \xc5\xe6\xe5\xe4\xff +# ÅæåäÀ +# 0: \xc5\xe6\xe5\xe4\xc0 +# Åæåäß +# 0: \xc5\xe6\xe5\xe4\xdf + +/(?<=Z)X./ + \x84XAZXB + 0: XB + +/ab cd (?x) de fg/ + ab cd defg + 0: ab cd defg + +/ab cd(?x) de fg/ + ab cddefg + 0: 
ab cddefg +\= Expect no match + abcddefg +No match + +/(? + 2: + D + 0: D + 1: + 2: + +# this is really long with debug -- removing for now +#/(a|)*\d/ +# aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa4 +# 0: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa4 +# 1: +#\= Expect no match +# aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +#No match + +/(?>a|)*\d/ + aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa4 + 0: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa4 +\= Expect no match + aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +No match + +/(?:a|)*\d/ + aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa4 + 0: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa4 +\= Expect no match + aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +No match + +/^(?s)(?>.*)(? + 2: a + +/(?>(a))b|(a)c/ + ac + 0: ac + 1: + 2: a + +/(?=(a))ab|(a)c/ + ac + 0: ac + 1: + 2: a + +/((?>(a))b|(a)c)/ + ac + 0: ac + 1: ac + 2: + 3: a + +/(?=(?>(a))b|(a)c)(..)/ + ac + 0: ac + 1: + 2: a + 3: ac + +/(?>(?>(a))b|(a)c)/ + ac + 0: ac + 1: + 2: a + +/((?>(a+)b)+(aabab))/ + aaaabaaabaabab + 0: aaaabaaabaabab + 1: aaaabaaabaabab + 2: aaa + 3: aabab + +/(?>a+|ab)+?c/ +\= Expect no match + aabc +No match + +/(?>a+|ab)+c/ +\= Expect no match + aabc +No match + +/(?:a+|ab)+c/ + aabc + 0: aabc + +/^(?:a|ab)+c/ + aaaabc + 0: aaaabc + +/(?=abc){0}xyz/ + xyz + 0: xyz + +/(?=abc){1}xyz/ +\= Expect no match + xyz +No match + +/(?=(a))?./ + ab + 0: a + 1: a + bc + 0: b + +/(?=(a))??./ + ab + 0: a + bc + 0: b + +/^(?!a){0}\w+/ + aaaaa + 0: aaaaa + +/(?<=(abc))?xyz/ + abcxyz + 0: xyz + 1: abc + pqrxyz + 0: xyz + +/^[g]+/ + ggg<<>> + 0: ggg<<>> +\= Expect no match + \\ga +No match + +/^[ga]+/ + gggagagaxyz + 0: gggagaga + +/[:a]xxx[b:]/ + :xxx: + 0: :xxx: + +/(?<=a{2})b/i + xaabc + 0: b +\= Expect no match + xabc +No match + +/(? 
+# 4: +# 5: c +# 6: d +# 7: Y + +#/^X(?7)(a)(?|(b|(?|(r)|(t))(s))|(q))(c)(d)(Y)/ +# XYabcdY +# 0: XYabcdY +# 1: a +# 2: b +# 3: +# 4: +# 5: c +# 6: d +# 7: Y + +/(?'abc'\w+):\k{2}/ + a:aaxyz + 0: a:aa + 1: a + ab:ababxyz + 0: ab:abab + 1: ab +\= Expect no match + a:axyz +No match + ab:abxyz +No match + +/^(?a)? (?(ab)b|c) (?(ab)d|e)/x + abd + 0: abd + 1: a + ce + 0: ce + +# .NET has more consistent grouping numbers with these dupe groups for the two options +/(?:a(? (?')|(?")) |b(? (?')|(?")) ) (?(quote)[a-z]+|[0-9]+)/x,dupnames + a\"aaaaa + 0: a"aaaaa + 1: " + 2: + 3: " + b\"aaaaa + 0: b"aaaaa + 1: " + 2: + 3: " +\= Expect no match + b\"11111 +No match + +#/(?P(?P0)(?P>L1)|(?P>L2))/ +# 0 +# 0: 0 +# 1: 0 +# 00 +# 0: 00 +# 1: 00 +# 2: 0 +# 0000 +# 0: 0000 +# 1: 0000 +# 2: 0 + +#/(?P(?P0)|(?P>L2)(?P>L1))/ +# 0 +# 0: 0 +# 1: 0 +# 2: 0 +# 00 +# 0: 0 +# 1: 0 +# 2: 0 +# 0000 +# 0: 0 +# 1: 0 +# 2: 0 + +# Check the use of names for failure + +# Check opening parens in comment when seeking forward reference. 
+ +#/(?P(?P=abn)xxx|)+/ +# xxx +# 0: +# 1: + +#Posses +/^(a)?(\w)/ + aaaaX + 0: aa + 1: a + 2: a + YZ + 0: Y + 1: + 2: Y + +#Posses +/^(?:a)?(\w)/ + aaaaX + 0: aa + 1: a + YZ + 0: Y + 1: Y + +/\A.*?(a|bc)/ + ba + 0: ba + 1: a + +/\A.*?(?:a|bc|d)/ + ba + 0: ba + +# -------------------------- + +/(another)?(\1?)test/ + hello world test + 0: test + 1: + 2: + +/(another)?(\1+)test/ +\= Expect no match + hello world test +No match + +/((?:a?)*)*c/ + aac + 0: aac + 1: + +/((?>a?)*)*c/ + aac + 0: aac + 1: + +/(?>.*?a)(?<=ba)/ + aba + 0: ba + +/(?:.*?a)(?<=ba)/ + aba + 0: aba + +/(?>.*?a)b/s + aab + 0: ab + +/(?>.*?a)b/ + aab + 0: ab + +/(?>^a)b/s +\= Expect no match + aab +No match + +/(?>.*?)(?<=(abcd)|(wxyz))/ + alphabetabcd + 0: + 1: abcd + endingwxyz + 0: + 1: + 2: wxyz + +/(?>.*)(?<=(abcd)|(wxyz))/ + alphabetabcd + 0: alphabetabcd + 1: abcd + endingwxyz + 0: endingwxyz + 1: + 2: wxyz + +"(?>.*)foo" +\= Expect no match + abcdfooxyz +No match + +"(?>.*?)foo" + abcdfooxyz + 0: foo + +# Tests that try to figure out how Perl works. My hypothesis is that the first +# verb that is backtracked onto is the one that acts. This seems to be the case +# almost all the time, but there is one exception that is perhaps a bug. + +/a(?=bc).|abd/ + abd + 0: abd + abc + 0: ab + +/a(?>bc)d|abd/ + abceabd + 0: abd + +# These tests were formerly in test 2, but changes in PCRE and Perl have +# made them compatible. + +/^(a)?(?(1)a|b)+$/ +\= Expect no match + a +No match + +# ---- + +/^\d*\w{4}/ + 1234 + 0: 1234 +\= Expect no match + 123 +No match + +/^[^b]*\w{4}/ + aaaa + 0: aaaa +\= Expect no match + aaa +No match + +/^[^b]*\w{4}/i + aaaa + 0: aaaa +\= Expect no match + aaa +No match + +/^a*\w{4}/ + aaaa + 0: aaaa +\= Expect no match + aaa +No match + +/^a*\w{4}/i + aaaa + 0: aaaa +\= Expect no match + aaa +No match + +/(?:(?foo)|(?bar))\k/dupnames + foofoo + 0: foofoo + 1: foo + barbar + 0: barbar + 1: bar + +# A notable difference between PCRE and .NET. 
According to +# the PCRE docs: +# If you make a subroutine call to a non-unique named +# subpattern, the one that corresponds to the first +# occurrence of the name is used. In the absence of +# duplicate numbers (see the previous section) this is +# the one with the lowest number. +# .NET takes the most recently captured number according to MSDN: +# A backreference refers to the most recent definition of +# a group (the definition most immediately to the left, +# when matching left to right). When a group makes multiple +# captures, a backreference refers to the most recent capture. + +#/(?A)(?:(?foo)|(?bar))\k/dupnames +# AfooA +# 0: AfooA +# 1: A +# 2: foo +# AbarA +# 0: AbarA +# 1: A +# 2: +# 3: bar +#\= Expect no match +# Afoofoo +#No match +# Abarbar +#No match + +/^(\d+)\s+IN\s+SOA\s+(\S+)\s+(\S+)\s*\(\s*$/ + 1 IN SOA non-sp1 non-sp2( + 0: 1 IN SOA non-sp1 non-sp2( + 1: 1 + 2: non-sp1 + 3: non-sp2 + +# TODO: .NET's group number ordering here in the second example is a bit odd +/^ (?:(?A)|(?'B'B)(?A)) (?(A)x) (?(B)y)$/x,dupnames + Ax + 0: Ax + 1: A + BAxy + 0: BAxy + 1: A + 2: B + +/ ^ a + b $ /x + aaaab + 0: aaaab + +/ ^ a + #comment + b $ /x + aaaab + 0: aaaab + +/ ^ a + #comment + #comment + b $ /x + aaaab + 0: aaaab + +/ ^ (?> a + ) b $ /x + aaaab + 0: aaaab + +/ ^ ( a + ) + \w $ /x + aaaab + 0: aaaab + 1: aaaa + +/(?:x|(?:(xx|yy)+|x|x|x|x|x)|a|a|a)bc/ +\= Expect no match + acb +No match + +#Posses +#/\A(?:[^\"]+|\"(?:[^\"]*|\"\")*\")+/ +# NON QUOTED \"QUOT\"\"ED\" AFTER \"NOT MATCHED +# 0: NON QUOTED "QUOT""ED" AFTER + +#Posses +#/\A(?:[^\"]+|\"(?:[^\"]+|\"\")*\")+/ +# NON QUOTED \"QUOT\"\"ED\" AFTER \"NOT MATCHED +# 0: NON QUOTED "QUOT""ED" AFTER + +#Posses +#/\A(?:[^\"]+|\"(?:[^\"]+|\"\")+\")+/ +# NON QUOTED \"QUOT\"\"ED\" AFTER \"NOT MATCHED +# 0: NON QUOTED "QUOT""ED" AFTER + +#Posses +#/\A([^\"1]+|[\"2]([^\"3]*|[\"4][\"5])*[\"6])+/ +# NON QUOTED \"QUOT\"\"ED\" AFTER \"NOT MATCHED +# 0: NON QUOTED "QUOT""ED" AFTER +# 1: AFTER +# 2: + 
+/^\w+(?>\s*)(?<=\w)/ + test test + 0: tes + +#/(?Pa)?(?Pb)?(?()c|d)*l/ +# acl +# 0: acl +# 1: a +# bdl +# 0: bdl +# 1: +# 2: b +# adl +# 0: dl +# bcl +# 0: l + +/\sabc/ + \x0babc + 0: \x0babc + +#/[\Qa]\E]+/ +# aa]] +# 0: aa]] + +#/[\Q]a\E]+/ +# aa]] +# 0: aa]] + +/A((((((((a))))))))\8B/ + AaaB + 0: AaaB + 1: a + 2: a + 3: a + 4: a + 5: a + 6: a + 7: a + 8: a + +/A(((((((((a)))))))))\9B/ + AaaB + 0: AaaB + 1: a + 2: a + 3: a + 4: a + 5: a + 6: a + 7: a + 8: a + 9: a + +/(|ab)*?d/ + abd + 0: abd + 1: ab + xyd + 0: d + +/(\2|a)(\1)/ + aaa + 0: aa + 1: a + 2: a + +/(\2)(\1)/ + +"Z*(|d*){216}" + +/((((((((((((x))))))))))))\12/ + xx + 0: xx + 1: x + 2: x + 3: x + 4: x + 5: x + 6: x + 7: x + 8: x + 9: x +10: x +11: x +12: x + +#"(?|(\k'Pm')|(?'Pm'))" +# abcd +# 0: +# 1: + +#/(?|(aaa)|(b))\g{1}/ +# aaaaaa +# 0: aaaaaa +# 1: aaa +# bb +# 0: bb +# 1: b + +#/(?|(aaa)|(b))(?1)/ +# aaaaaa +# 0: aaaaaa +# 1: aaa +# baaa +# 0: baaa +# 1: b +#\= Expect no match +# bb +#No match + +#/(?|(aaa)|(b))/ +# xaaa +# 0: aaa +# 1: aaa +# xbc +# 0: b +# 1: b + +#/(?|(?'a'aaa)|(?'a'b))\k'a'/ +# aaaaaa +# 0: aaaaaa +# 1: aaa +# bb +# 0: bb +# 1: b + +#/(?|(?'a'aaa)|(?'a'b))(?'a'cccc)\k'a'/dupnames +# aaaccccaaa +# 0: aaaccccaaa +# 1: aaa +# 2: cccc +# bccccb +# 0: bccccb +# 1: b +# 2: cccc + +# End of testinput1 diff --git a/vendor/github.com/dop251/goja/.gitignore b/vendor/github.com/dop251/goja/.gitignore new file mode 100644 index 0000000000..22b26fd46d --- /dev/null +++ b/vendor/github.com/dop251/goja/.gitignore @@ -0,0 +1,3 @@ +.idea +*.iml +testdata/test262 diff --git a/vendor/github.com/dop251/goja/.tc39_test262_checkout.sh b/vendor/github.com/dop251/goja/.tc39_test262_checkout.sh new file mode 100644 index 0000000000..68f4d1c035 --- /dev/null +++ b/vendor/github.com/dop251/goja/.tc39_test262_checkout.sh @@ -0,0 +1,9 @@ +#!/bin/sh +sha=ddfe24afe3043388827aa220ef623b8540958bbd # this is just the commit it was last tested with +mkdir -p testdata/test262 +cd testdata/test262 +git init 
+git remote add origin https://github.com/tc39/test262.git +git fetch origin --depth=1 "${sha}" +git reset --hard "${sha}" +cd - diff --git a/vendor/github.com/dop251/goja/LICENSE b/vendor/github.com/dop251/goja/LICENSE new file mode 100644 index 0000000000..09c000454f --- /dev/null +++ b/vendor/github.com/dop251/goja/LICENSE @@ -0,0 +1,15 @@ +Copyright (c) 2016 Dmitry Panov + +Copyright (c) 2012 Robert Krimen + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/dop251/goja/README.md b/vendor/github.com/dop251/goja/README.md new file mode 100644 index 0000000000..f1c59a910e --- /dev/null +++ b/vendor/github.com/dop251/goja/README.md @@ -0,0 +1,313 @@ +goja +==== + +ECMAScript 5.1(+) implementation in Go. + +[![GoDoc](https://godoc.org/github.com/dop251/goja?status.svg)](https://godoc.org/github.com/dop251/goja) + +Goja is an implementation of ECMAScript 5.1 in pure Go with emphasis on standard compliance and +performance. 
+ +This project was largely inspired by [otto](https://github.com/robertkrimen/otto). + +Minimum required Go version is 1.14. + +Features +-------- + + * Full ECMAScript 5.1 support (including regex and strict mode). + * Passes nearly all [tc39 tests](https://github.com/tc39/test262) tagged with es5id. The goal is to pass all of them. + Note, the current working commit is https://github.com/tc39/test262/commit/ddfe24afe3043388827aa220ef623b8540958bbd. + The next commit removed most of the es5id tags which made it impossible to distinguish which tests to run. + * Capable of running Babel, Typescript compiler and pretty much anything written in ES5. + * Sourcemaps. + * Some ES6 functionality, still work in progress, see https://github.com/dop251/goja/milestone/1?closed=1 + +Known incompatibilities and caveats +----------------------------------- + +### WeakMap +WeakMap is implemented by embedding references to the values into the keys. This means that as long +as the key is reachable all values associated with it in any weak maps also remain reachable and therefore +cannot be garbage collected even if they are not otherwise referenced, even after the WeakMap is gone. +The reference to the value is dropped either when the key is explicitly removed from the WeakMap or when the +key becomes unreachable. + +To illustrate this: + +```javascript +var m = new WeakMap(); +var key = {}; +var value = {/* a very large object */}; +m.set(key, value); +value = undefined; +m = undefined; // The value does NOT become garbage-collectable at this point +key = undefined; // Now it does +// m.delete(key); // This would work too +``` + +The reason for it is the limitation of the Go runtime. At the time of writing (version 1.15) having a finalizer +set on an object which is part of a reference cycle makes the whole cycle non-garbage-collectable. The solution +above is the only reasonable way I can think of without involving finalizers. 
This is the third attempt +(see https://github.com/dop251/goja/issues/250 and https://github.com/dop251/goja/issues/199 for more details). + +Note, this does not have any effect on the application logic, but may cause a higher-than-expected memory usage. + +FAQ +--- + +### How fast is it? + +Although it's faster than many scripting language implementations in Go I have seen +(for example it's 6-7 times faster than otto on average) it is not a +replacement for V8 or SpiderMonkey or any other general-purpose JavaScript engine. +You can find some benchmarks [here](https://github.com/dop251/goja/issues/2). + +### Why would I want to use it over a V8 wrapper? + +It greatly depends on your usage scenario. If most of the work is done in javascript +(for example crypto or any other heavy calculations) you are definitely better off with V8. + +If you need a scripting language that drives an engine written in Go so that +you need to make frequent calls between Go and javascript passing complex data structures +then the cgo overhead may outweigh the benefits of having a faster javascript engine. + +Because it's written in pure Go there are no cgo dependencies, it's very easy to build and it +should run on any platform supported by Go. + +It gives you a much better control over execution environment so can be useful for research. + +### Is it goroutine-safe? + +No. An instance of goja.Runtime can only be used by a single goroutine +at a time. You can create as many instances of Runtime as you like but +it's not possible to pass object values between runtimes. + +### Where is setTimeout()? + +setTimeout() assumes concurrent execution of code which requires an execution +environment, for example an event loop similar to nodejs or a browser. +There is a [separate project](https://github.com/dop251/goja_nodejs) aimed at providing some NodeJS functionality, +and it includes an event loop. + +### Can you implement (feature X from ES6 or higher)? 
+ +I will be adding features in their dependency order and as quickly as time permits. Please do not ask +for ETAs. Features that are open in the [milestone](https://github.com/dop251/goja/milestone/1) are either in progress +or will be worked on next. + +The ongoing work is done in separate feature branches which are merged into master when appropriate. +Every commit in these branches represents a relatively stable state (i.e. it compiles and passes all enabled tc39 tests), +however because the version of tc39 tests I use is quite old, it may be not as well tested as the ES5.1 functionality. Because there are (usually) no major breaking changes between ECMAScript revisions +it should not break your existing code. You are encouraged to give it a try and report any bugs found. Please do not submit fixes though without discussing it first, as the code could be changed in the meantime. + +### How do I contribute? + +Before submitting a pull request please make sure that: + +- You followed ECMA standard as close as possible. If adding a new feature make sure you've read the specification, +do not just base it on a couple of examples that work fine. +- Your change does not have a significant negative impact on performance (unless it's a bugfix and it's unavoidable) +- It passes all relevant tc39 tests. + +Current Status +-------------- + + * There should be no breaking changes in the API, however it may be extended. + * Some of the AnnexB functionality is missing. + +Basic Example +------------- + +Run JavaScript and get the result value. + +```go +vm := goja.New() +v, err := vm.RunString("2 + 2") +if err != nil { + panic(err) +} +if num := v.Export().(int64); num != 4 { + panic(num) +} +``` + +Passing Values to JS +-------------------- +Any Go value can be passed to JS using Runtime.ToValue() method. See the method's [documentation](https://godoc.org/github.com/dop251/goja#Runtime.ToValue) for more details. 
+ +Exporting Values from JS +------------------------ +A JS value can be exported into its default Go representation using Value.Export() method. + +Alternatively it can be exported into a specific Go variable using [Runtime.ExportTo()](https://godoc.org/github.com/dop251/goja#Runtime.ExportTo) method. + +Within a single export operation the same Object will be represented by the same Go value (either the same map, slice or +a pointer to the same struct). This includes circular objects and makes it possible to export them. + +Calling JS functions from Go +---------------------------- +There are 2 approaches: + +- Using [AssertFunction()](https://godoc.org/github.com/dop251/goja#AssertFunction): +```go +vm := New() +_, err := vm.RunString(` +function sum(a, b) { + return a+b; +} +`) +if err != nil { + panic(err) +} +sum, ok := AssertFunction(vm.Get("sum")) +if !ok { + panic("Not a function") +} + +res, err := sum(Undefined(), vm.ToValue(40), vm.ToValue(2)) +if err != nil { + panic(err) +} +fmt.Println(res) +// Output: 42 +``` +- Using [Runtime.ExportTo()](https://godoc.org/github.com/dop251/goja#Runtime.ExportTo): +```go +const SCRIPT = ` +function f(param) { + return +param + 2; +} +` + +vm := New() +_, err := vm.RunString(SCRIPT) +if err != nil { + panic(err) +} + +var fn func(string) string +err = vm.ExportTo(vm.Get("f"), &fn) +if err != nil { + panic(err) +} + +fmt.Println(fn("40")) // note, _this_ value in the function will be undefined. +// Output: 42 +``` + +The first one is more low level and allows specifying _this_ value, whereas the second one makes the function look like +a normal Go function. + +Mapping struct field and method names +------------------------------------- +By default, the names are passed through as is which means they are capitalised. 
This does not match +the standard JavaScript naming convention, so if you need to make your JS code look more natural or if you are +dealing with a 3rd party library, you can use a [FieldNameMapper](https://godoc.org/github.com/dop251/goja#FieldNameMapper): + +```go +vm := New() +vm.SetFieldNameMapper(TagFieldNameMapper("json", true)) +type S struct { + Field int `json:"field"` +} +vm.Set("s", S{Field: 42}) +res, _ := vm.RunString(`s.field`) // without the mapper it would have been s.Field +fmt.Println(res.Export()) +// Output: 42 +``` + +There are two standard mappers: [TagFieldNameMapper](https://godoc.org/github.com/dop251/goja#TagFieldNameMapper) and +[UncapFieldNameMapper](https://godoc.org/github.com/dop251/goja#UncapFieldNameMapper), or you can use your own implementation. + +Native Constructors +------------------- + +In order to implement a constructor function in Go use `func (goja.ConstructorCall) *goja.Object`. +See [Runtime.ToValue()](https://godoc.org/github.com/dop251/goja#Runtime.ToValue) documentation for more details. + +Regular Expressions +------------------- + +Goja uses the embedded Go regexp library where possible, otherwise it falls back to [regexp2](https://github.com/dlclark/regexp2). + +Exceptions +---------- + +Any exception thrown in JavaScript is returned as an error of type *Exception. 
It is possible to extract the value thrown +by using the Value() method: + +```go +vm := New() +_, err := vm.RunString(` + +throw("Test"); + +`) + +if jserr, ok := err.(*Exception); ok { + if jserr.Value().Export() != "Test" { + panic("wrong value") + } +} else { + panic("wrong type") +} +``` + +If a native Go function panics with a Value, it is thrown as a Javascript exception (and therefore can be caught): + +```go +var vm *Runtime + +func Test() { + panic(vm.ToValue("Error")) +} + +vm = New() +vm.Set("Test", Test) +_, err := vm.RunString(` + +try { + Test(); +} catch(e) { + if (e !== "Error") { + throw e; + } +} + +`) + +if err != nil { + panic(err) +} +``` + +Interrupting +------------ + +```go +func TestInterrupt(t *testing.T) { + const SCRIPT = ` + var i = 0; + for (;;) { + i++; + } + ` + + vm := New() + time.AfterFunc(200 * time.Millisecond, func() { + vm.Interrupt("halt") + }) + + _, err := vm.RunString(SCRIPT) + if err == nil { + t.Fatal("Err is nil") + } + // err is of type *InterruptError and its Value() method returns whatever has been passed to vm.Interrupt() +} +``` + +NodeJS Compatibility +-------------------- + +There is a [separate project](https://github.com/dop251/goja_nodejs) aimed at providing some of the NodeJS functionality. 
diff --git a/vendor/github.com/dop251/goja/array.go b/vendor/github.com/dop251/goja/array.go new file mode 100644 index 0000000000..0a30b3ae33 --- /dev/null +++ b/vendor/github.com/dop251/goja/array.go @@ -0,0 +1,523 @@ +package goja + +import ( + "math" + "math/bits" + "reflect" + "strconv" + + "github.com/dop251/goja/unistring" +) + +type arrayIterObject struct { + baseObject + obj *Object + nextIdx int64 + kind iterationKind +} + +func (ai *arrayIterObject) next() Value { + if ai.obj == nil { + return ai.val.runtime.createIterResultObject(_undefined, true) + } + l := toLength(ai.obj.self.getStr("length", nil)) + index := ai.nextIdx + if index >= l { + ai.obj = nil + return ai.val.runtime.createIterResultObject(_undefined, true) + } + ai.nextIdx++ + idxVal := valueInt(index) + if ai.kind == iterationKindKey { + return ai.val.runtime.createIterResultObject(idxVal, false) + } + elementValue := ai.obj.self.getIdx(idxVal, nil) + var result Value + if ai.kind == iterationKindValue { + result = elementValue + } else { + result = ai.val.runtime.newArrayValues([]Value{idxVal, elementValue}) + } + return ai.val.runtime.createIterResultObject(result, false) +} + +func (r *Runtime) createArrayIterator(iterObj *Object, kind iterationKind) Value { + o := &Object{runtime: r} + + ai := &arrayIterObject{ + obj: iterObj, + kind: kind, + } + ai.class = classArrayIterator + ai.val = o + ai.extensible = true + o.self = ai + ai.prototype = r.global.ArrayIteratorPrototype + ai.init() + + return o +} + +type arrayObject struct { + baseObject + values []Value + length uint32 + objCount int + propValueCount int + lengthProp valueProperty +} + +func (a *arrayObject) init() { + a.baseObject.init() + a.lengthProp.writable = true + + a._put("length", &a.lengthProp) +} + +func (a *arrayObject) _setLengthInt(l int64, throw bool) bool { + if l >= 0 && l <= math.MaxUint32 { + l := uint32(l) + ret := true + if l <= a.length { + if a.propValueCount > 0 { + // Slow path + for i := len(a.values) - 
1; i >= int(l); i-- { + if prop, ok := a.values[i].(*valueProperty); ok { + if !prop.configurable { + l = uint32(i) + 1 + ret = false + break + } + a.propValueCount-- + } + } + } + } + if l <= uint32(len(a.values)) { + if l >= 16 && l < uint32(cap(a.values))>>2 { + ar := make([]Value, l) + copy(ar, a.values) + a.values = ar + } else { + ar := a.values[l:len(a.values)] + for i := range ar { + ar[i] = nil + } + a.values = a.values[:l] + } + } + a.length = l + if !ret { + a.val.runtime.typeErrorResult(throw, "Cannot redefine property: length") + } + return ret + } + panic(a.val.runtime.newError(a.val.runtime.global.RangeError, "Invalid array length")) +} + +func (a *arrayObject) setLengthInt(l int64, throw bool) bool { + if l == int64(a.length) { + return true + } + if !a.lengthProp.writable { + a.val.runtime.typeErrorResult(throw, "length is not writable") + return false + } + return a._setLengthInt(l, throw) +} + +func (a *arrayObject) setLength(v Value, throw bool) bool { + l, ok := toIntIgnoreNegZero(v) + if ok && l == int64(a.length) { + return true + } + if !a.lengthProp.writable { + a.val.runtime.typeErrorResult(throw, "length is not writable") + return false + } + if ok { + return a._setLengthInt(l, throw) + } + panic(a.val.runtime.newError(a.val.runtime.global.RangeError, "Invalid array length")) +} + +func (a *arrayObject) getIdx(idx valueInt, receiver Value) Value { + prop := a.getOwnPropIdx(idx) + if prop == nil { + if a.prototype != nil { + if receiver == nil { + return a.prototype.self.getIdx(idx, a.val) + } + return a.prototype.self.getIdx(idx, receiver) + } + } + if prop, ok := prop.(*valueProperty); ok { + if receiver == nil { + return prop.get(a.val) + } + return prop.get(receiver) + } + return prop +} + +func (a *arrayObject) getOwnPropStr(name unistring.String) Value { + if len(a.values) > 0 { + if i := strToArrayIdx(name); i != math.MaxUint32 { + if i < uint32(len(a.values)) { + return a.values[i] + } + } + } + if name == "length" { + return 
a.getLengthProp() + } + return a.baseObject.getOwnPropStr(name) +} + +func (a *arrayObject) getOwnPropIdx(idx valueInt) Value { + if i := toIdx(idx); i != math.MaxUint32 { + if i < uint32(len(a.values)) { + return a.values[i] + } + return nil + } + + return a.baseObject.getOwnPropStr(idx.string()) +} + +func (a *arrayObject) sortLen() int64 { + return int64(len(a.values)) +} + +func (a *arrayObject) sortGet(i int64) Value { + v := a.values[i] + if p, ok := v.(*valueProperty); ok { + v = p.get(a.val) + } + return v +} + +func (a *arrayObject) swap(i, j int64) { + a.values[i], a.values[j] = a.values[j], a.values[i] +} + +func (a *arrayObject) getStr(name unistring.String, receiver Value) Value { + return a.getStrWithOwnProp(a.getOwnPropStr(name), name, receiver) +} + +func (a *arrayObject) getLengthProp() Value { + a.lengthProp.value = intToValue(int64(a.length)) + return &a.lengthProp +} + +func (a *arrayObject) setOwnIdx(idx valueInt, val Value, throw bool) bool { + if i := toIdx(idx); i != math.MaxUint32 { + return a._setOwnIdx(i, val, throw) + } else { + return a.baseObject.setOwnStr(idx.string(), val, throw) + } +} + +func (a *arrayObject) _setOwnIdx(idx uint32, val Value, throw bool) bool { + var prop Value + if idx < uint32(len(a.values)) { + prop = a.values[idx] + } + + if prop == nil { + if proto := a.prototype; proto != nil { + // we know it's foreign because prototype loops are not allowed + if res, ok := proto.self.setForeignIdx(valueInt(idx), val, a.val, throw); ok { + return res + } + } + // new property + if !a.extensible { + a.val.runtime.typeErrorResult(throw, "Cannot add property %d, object is not extensible", idx) + return false + } else { + if idx >= a.length { + if !a.setLengthInt(int64(idx)+1, throw) { + return false + } + } + if idx >= uint32(len(a.values)) { + if !a.expand(idx) { + a.val.self.(*sparseArrayObject).add(idx, val) + return true + } + } + a.objCount++ + } + } else { + if prop, ok := prop.(*valueProperty); ok { + if 
!prop.isWritable() { + a.val.runtime.typeErrorResult(throw) + return false + } + prop.set(a.val, val) + return true + } + } + a.values[idx] = val + return true +} + +func (a *arrayObject) setOwnStr(name unistring.String, val Value, throw bool) bool { + if idx := strToArrayIdx(name); idx != math.MaxUint32 { + return a._setOwnIdx(idx, val, throw) + } else { + if name == "length" { + return a.setLength(val, throw) + } else { + return a.baseObject.setOwnStr(name, val, throw) + } + } +} + +func (a *arrayObject) setForeignIdx(idx valueInt, val, receiver Value, throw bool) (bool, bool) { + return a._setForeignIdx(idx, a.getOwnPropIdx(idx), val, receiver, throw) +} + +func (a *arrayObject) setForeignStr(name unistring.String, val, receiver Value, throw bool) (bool, bool) { + return a._setForeignStr(name, a.getOwnPropStr(name), val, receiver, throw) +} + +type arrayPropIter struct { + a *arrayObject + limit int + idx int +} + +func (i *arrayPropIter) next() (propIterItem, iterNextFunc) { + for i.idx < len(i.a.values) && i.idx < i.limit { + name := unistring.String(strconv.Itoa(i.idx)) + prop := i.a.values[i.idx] + i.idx++ + if prop != nil { + return propIterItem{name: name, value: prop}, i.next + } + } + + return i.a.baseObject.enumerateOwnKeys()() +} + +func (a *arrayObject) enumerateOwnKeys() iterNextFunc { + return (&arrayPropIter{ + a: a, + limit: len(a.values), + }).next +} + +func (a *arrayObject) ownKeys(all bool, accum []Value) []Value { + for i, prop := range a.values { + name := strconv.Itoa(i) + if prop != nil { + if !all { + if prop, ok := prop.(*valueProperty); ok && !prop.enumerable { + continue + } + } + accum = append(accum, asciiString(name)) + } + } + return a.baseObject.ownKeys(all, accum) +} + +func (a *arrayObject) hasOwnPropertyStr(name unistring.String) bool { + if idx := strToArrayIdx(name); idx != math.MaxUint32 { + return idx < uint32(len(a.values)) && a.values[idx] != nil + } else { + return a.baseObject.hasOwnPropertyStr(name) + } +} + +func (a 
*arrayObject) hasOwnPropertyIdx(idx valueInt) bool { + if idx := toIdx(idx); idx != math.MaxUint32 { + return idx < uint32(len(a.values)) && a.values[idx] != nil + } + return a.baseObject.hasOwnPropertyStr(idx.string()) +} + +func (a *arrayObject) expand(idx uint32) bool { + targetLen := idx + 1 + if targetLen > uint32(len(a.values)) { + if targetLen < uint32(cap(a.values)) { + a.values = a.values[:targetLen] + } else { + if idx > 4096 && (a.objCount == 0 || idx/uint32(a.objCount) > 10) { + //log.Println("Switching standard->sparse") + sa := &sparseArrayObject{ + baseObject: a.baseObject, + length: a.length, + propValueCount: a.propValueCount, + } + sa.setValues(a.values, a.objCount+1) + sa.val.self = sa + sa.lengthProp.writable = a.lengthProp.writable + sa._put("length", &sa.lengthProp) + return false + } else { + if bits.UintSize == 32 { + if targetLen >= math.MaxInt32 { + panic(a.val.runtime.NewTypeError("Array index overflows int")) + } + } + tl := int(targetLen) + newValues := make([]Value, tl, growCap(tl, len(a.values), cap(a.values))) + copy(newValues, a.values) + a.values = newValues + } + } + } + return true +} + +func (r *Runtime) defineArrayLength(prop *valueProperty, descr PropertyDescriptor, setter func(Value, bool) bool, throw bool) bool { + ret := true + + if descr.Configurable == FLAG_TRUE || descr.Enumerable == FLAG_TRUE || descr.Getter != nil || descr.Setter != nil { + ret = false + goto Reject + } + + if newLen := descr.Value; newLen != nil { + ret = setter(newLen, false) + } else { + ret = true + } + + if descr.Writable != FLAG_NOT_SET { + w := descr.Writable.Bool() + if prop.writable { + prop.writable = w + } else { + if w { + ret = false + goto Reject + } + } + } + +Reject: + if !ret { + r.typeErrorResult(throw, "Cannot redefine property: length") + } + + return ret +} + +func (a *arrayObject) _defineIdxProperty(idx uint32, desc PropertyDescriptor, throw bool) bool { + var existing Value + if idx < uint32(len(a.values)) { + existing = 
a.values[idx] + } + prop, ok := a.baseObject._defineOwnProperty(unistring.String(strconv.FormatUint(uint64(idx), 10)), existing, desc, throw) + if ok { + if idx >= a.length { + if !a.setLengthInt(int64(idx)+1, throw) { + return false + } + } + if a.expand(idx) { + a.values[idx] = prop + a.objCount++ + if _, ok := prop.(*valueProperty); ok { + a.propValueCount++ + } + } else { + a.val.self.(*sparseArrayObject).add(idx, prop) + } + } + return ok +} + +func (a *arrayObject) defineOwnPropertyStr(name unistring.String, descr PropertyDescriptor, throw bool) bool { + if idx := strToArrayIdx(name); idx != math.MaxUint32 { + return a._defineIdxProperty(idx, descr, throw) + } + if name == "length" { + return a.val.runtime.defineArrayLength(&a.lengthProp, descr, a.setLength, throw) + } + return a.baseObject.defineOwnPropertyStr(name, descr, throw) +} + +func (a *arrayObject) defineOwnPropertyIdx(idx valueInt, descr PropertyDescriptor, throw bool) bool { + if idx := toIdx(idx); idx != math.MaxUint32 { + return a._defineIdxProperty(idx, descr, throw) + } + return a.baseObject.defineOwnPropertyStr(idx.string(), descr, throw) +} + +func (a *arrayObject) _deleteIdxProp(idx uint32, throw bool) bool { + if idx < uint32(len(a.values)) { + if v := a.values[idx]; v != nil { + if p, ok := v.(*valueProperty); ok { + if !p.configurable { + a.val.runtime.typeErrorResult(throw, "Cannot delete property '%d' of %s", idx, a.val.toString()) + return false + } + a.propValueCount-- + } + a.values[idx] = nil + a.objCount-- + } + } + return true +} + +func (a *arrayObject) deleteStr(name unistring.String, throw bool) bool { + if idx := strToArrayIdx(name); idx != math.MaxUint32 { + return a._deleteIdxProp(idx, throw) + } + return a.baseObject.deleteStr(name, throw) +} + +func (a *arrayObject) deleteIdx(idx valueInt, throw bool) bool { + if idx := toIdx(idx); idx != math.MaxUint32 { + return a._deleteIdxProp(idx, throw) + } + return a.baseObject.deleteStr(idx.string(), throw) +} + +func (a 
*arrayObject) export(ctx *objectExportCtx) interface{} { + if v, exists := ctx.get(a); exists { + return v + } + arr := make([]interface{}, a.length) + ctx.put(a, arr) + if a.propValueCount == 0 && a.length == uint32(len(a.values)) && uint32(a.objCount) == a.length { + for i, v := range a.values { + if v != nil { + arr[i] = exportValue(v, ctx) + } + } + } else { + for i := uint32(0); i < a.length; i++ { + v := a.getIdx(valueInt(i), nil) + if v != nil { + arr[i] = exportValue(v, ctx) + } + } + } + return arr +} + +func (a *arrayObject) exportType() reflect.Type { + return reflectTypeArray +} + +func (a *arrayObject) setValuesFromSparse(items []sparseArrayItem, newMaxIdx int) { + a.values = make([]Value, newMaxIdx+1) + for _, item := range items { + a.values[item.idx] = item.value + } + a.objCount = len(items) +} + +func toIdx(v valueInt) uint32 { + if v >= 0 && v < math.MaxUint32 { + return uint32(v) + } + return math.MaxUint32 +} diff --git a/vendor/github.com/dop251/goja/array_sparse.go b/vendor/github.com/dop251/goja/array_sparse.go new file mode 100644 index 0000000000..dfb24d01df --- /dev/null +++ b/vendor/github.com/dop251/goja/array_sparse.go @@ -0,0 +1,467 @@ +package goja + +import ( + "math" + "math/bits" + "reflect" + "sort" + "strconv" + + "github.com/dop251/goja/unistring" +) + +type sparseArrayItem struct { + idx uint32 + value Value +} + +type sparseArrayObject struct { + baseObject + items []sparseArrayItem + length uint32 + propValueCount int + lengthProp valueProperty +} + +func (a *sparseArrayObject) findIdx(idx uint32) int { + return sort.Search(len(a.items), func(i int) bool { + return a.items[i].idx >= idx + }) +} + +func (a *sparseArrayObject) _setLengthInt(l int64, throw bool) bool { + if l >= 0 && l <= math.MaxUint32 { + ret := true + l := uint32(l) + if l <= a.length { + if a.propValueCount > 0 { + // Slow path + for i := len(a.items) - 1; i >= 0; i-- { + item := a.items[i] + if item.idx <= l { + break + } + if prop, ok := 
item.value.(*valueProperty); ok { + if !prop.configurable { + l = item.idx + 1 + ret = false + break + } + a.propValueCount-- + } + } + } + } + + idx := a.findIdx(l) + + aa := a.items[idx:] + for i := range aa { + aa[i].value = nil + } + a.items = a.items[:idx] + a.length = l + if !ret { + a.val.runtime.typeErrorResult(throw, "Cannot redefine property: length") + } + return ret + } + panic(a.val.runtime.newError(a.val.runtime.global.RangeError, "Invalid array length")) +} + +func (a *sparseArrayObject) setLengthInt(l int64, throw bool) bool { + if l == int64(a.length) { + return true + } + if !a.lengthProp.writable { + a.val.runtime.typeErrorResult(throw, "length is not writable") + return false + } + return a._setLengthInt(l, throw) +} + +func (a *sparseArrayObject) setLength(v Value, throw bool) bool { + l, ok := toIntIgnoreNegZero(v) + if ok && l == int64(a.length) { + return true + } + if !a.lengthProp.writable { + a.val.runtime.typeErrorResult(throw, "length is not writable") + return false + } + if ok { + return a._setLengthInt(l, throw) + } + panic(a.val.runtime.newError(a.val.runtime.global.RangeError, "Invalid array length")) +} + +func (a *sparseArrayObject) _getIdx(idx uint32) Value { + i := a.findIdx(idx) + if i < len(a.items) && a.items[i].idx == idx { + return a.items[i].value + } + + return nil +} + +func (a *sparseArrayObject) getStr(name unistring.String, receiver Value) Value { + return a.getStrWithOwnProp(a.getOwnPropStr(name), name, receiver) +} + +func (a *sparseArrayObject) getIdx(idx valueInt, receiver Value) Value { + prop := a.getOwnPropIdx(idx) + if prop == nil { + if a.prototype != nil { + if receiver == nil { + return a.prototype.self.getIdx(idx, a.val) + } + return a.prototype.self.getIdx(idx, receiver) + } + } + if prop, ok := prop.(*valueProperty); ok { + if receiver == nil { + return prop.get(a.val) + } + return prop.get(receiver) + } + return prop +} + +func (a *sparseArrayObject) getLengthProp() Value { + a.lengthProp.value = 
intToValue(int64(a.length)) + return &a.lengthProp +} + +func (a *sparseArrayObject) getOwnPropStr(name unistring.String) Value { + if idx := strToArrayIdx(name); idx != math.MaxUint32 { + return a._getIdx(idx) + } + if name == "length" { + return a.getLengthProp() + } + return a.baseObject.getOwnPropStr(name) +} + +func (a *sparseArrayObject) getOwnPropIdx(idx valueInt) Value { + if idx := toIdx(idx); idx != math.MaxUint32 { + return a._getIdx(idx) + } + return a.baseObject.getOwnPropStr(idx.string()) +} + +func (a *sparseArrayObject) add(idx uint32, val Value) { + i := a.findIdx(idx) + a.items = append(a.items, sparseArrayItem{}) + copy(a.items[i+1:], a.items[i:]) + a.items[i] = sparseArrayItem{ + idx: idx, + value: val, + } +} + +func (a *sparseArrayObject) _setOwnIdx(idx uint32, val Value, throw bool) bool { + var prop Value + i := a.findIdx(idx) + if i < len(a.items) && a.items[i].idx == idx { + prop = a.items[i].value + } + + if prop == nil { + if proto := a.prototype; proto != nil { + // we know it's foreign because prototype loops are not allowed + if res, ok := proto.self.setForeignIdx(valueInt(idx), val, a.val, throw); ok { + return res + } + } + + // new property + if !a.extensible { + a.val.runtime.typeErrorResult(throw, "Cannot add property %d, object is not extensible", idx) + return false + } + + if idx >= a.length { + if !a.setLengthInt(int64(idx)+1, throw) { + return false + } + } + + if a.expand(idx) { + a.items = append(a.items, sparseArrayItem{}) + copy(a.items[i+1:], a.items[i:]) + a.items[i] = sparseArrayItem{ + idx: idx, + value: val, + } + } else { + ar := a.val.self.(*arrayObject) + ar.values[idx] = val + ar.objCount++ + return true + } + } else { + if prop, ok := prop.(*valueProperty); ok { + if !prop.isWritable() { + a.val.runtime.typeErrorResult(throw) + return false + } + prop.set(a.val, val) + } else { + a.items[i].value = val + } + } + return true +} + +func (a *sparseArrayObject) setOwnStr(name unistring.String, val Value, throw 
bool) bool { + if idx := strToArrayIdx(name); idx != math.MaxUint32 { + return a._setOwnIdx(idx, val, throw) + } else { + if name == "length" { + return a.setLength(val, throw) + } else { + return a.baseObject.setOwnStr(name, val, throw) + } + } +} + +func (a *sparseArrayObject) setOwnIdx(idx valueInt, val Value, throw bool) bool { + if idx := toIdx(idx); idx != math.MaxUint32 { + return a._setOwnIdx(idx, val, throw) + } + + return a.baseObject.setOwnStr(idx.string(), val, throw) +} + +func (a *sparseArrayObject) setForeignStr(name unistring.String, val, receiver Value, throw bool) (bool, bool) { + return a._setForeignStr(name, a.getOwnPropStr(name), val, receiver, throw) +} + +func (a *sparseArrayObject) setForeignIdx(name valueInt, val, receiver Value, throw bool) (bool, bool) { + return a._setForeignIdx(name, a.getOwnPropIdx(name), val, receiver, throw) +} + +type sparseArrayPropIter struct { + a *sparseArrayObject + idx int +} + +func (i *sparseArrayPropIter) next() (propIterItem, iterNextFunc) { + for i.idx < len(i.a.items) { + name := unistring.String(strconv.Itoa(int(i.a.items[i.idx].idx))) + prop := i.a.items[i.idx].value + i.idx++ + if prop != nil { + return propIterItem{name: name, value: prop}, i.next + } + } + + return i.a.baseObject.enumerateOwnKeys()() +} + +func (a *sparseArrayObject) enumerateOwnKeys() iterNextFunc { + return (&sparseArrayPropIter{ + a: a, + }).next +} + +func (a *sparseArrayObject) ownKeys(all bool, accum []Value) []Value { + if all { + for _, item := range a.items { + accum = append(accum, asciiString(strconv.FormatUint(uint64(item.idx), 10))) + } + } else { + for _, item := range a.items { + if prop, ok := item.value.(*valueProperty); ok && !prop.enumerable { + continue + } + accum = append(accum, asciiString(strconv.FormatUint(uint64(item.idx), 10))) + } + } + + return a.baseObject.ownKeys(all, accum) +} + +func (a *sparseArrayObject) setValues(values []Value, objCount int) { + a.items = make([]sparseArrayItem, 0, objCount) + 
for i, val := range values { + if val != nil { + a.items = append(a.items, sparseArrayItem{ + idx: uint32(i), + value: val, + }) + } + } +} + +func (a *sparseArrayObject) hasOwnPropertyStr(name unistring.String) bool { + if idx := strToArrayIdx(name); idx != math.MaxUint32 { + i := a.findIdx(idx) + return i < len(a.items) && a.items[i].idx == idx + } else { + return a.baseObject.hasOwnPropertyStr(name) + } +} + +func (a *sparseArrayObject) hasOwnPropertyIdx(idx valueInt) bool { + if idx := toIdx(idx); idx != math.MaxUint32 { + i := a.findIdx(idx) + return i < len(a.items) && a.items[i].idx == idx + } + + return a.baseObject.hasOwnPropertyStr(idx.string()) +} + +func (a *sparseArrayObject) expand(idx uint32) bool { + if l := len(a.items); l >= 1024 { + if ii := a.items[l-1].idx; ii > idx { + idx = ii + } + if (bits.UintSize == 64 || idx < math.MaxInt32) && int(idx)>>3 < l { + //log.Println("Switching sparse->standard") + ar := &arrayObject{ + baseObject: a.baseObject, + length: a.length, + propValueCount: a.propValueCount, + } + ar.setValuesFromSparse(a.items, int(idx)) + ar.val.self = ar + ar.lengthProp.writable = a.lengthProp.writable + a._put("length", &ar.lengthProp) + return false + } + } + return true +} + +func (a *sparseArrayObject) _defineIdxProperty(idx uint32, desc PropertyDescriptor, throw bool) bool { + var existing Value + i := a.findIdx(idx) + if i < len(a.items) && a.items[i].idx == idx { + existing = a.items[i].value + } + prop, ok := a.baseObject._defineOwnProperty(unistring.String(strconv.FormatUint(uint64(idx), 10)), existing, desc, throw) + if ok { + if idx >= a.length { + if !a.setLengthInt(int64(idx)+1, throw) { + return false + } + } + if i >= len(a.items) || a.items[i].idx != idx { + if a.expand(idx) { + a.items = append(a.items, sparseArrayItem{}) + copy(a.items[i+1:], a.items[i:]) + a.items[i] = sparseArrayItem{ + idx: idx, + value: prop, + } + if idx >= a.length { + a.length = idx + 1 + } + } else { + a.val.self.(*arrayObject).values[idx] 
= prop + } + } else { + a.items[i].value = prop + } + if _, ok := prop.(*valueProperty); ok { + a.propValueCount++ + } + } + return ok +} + +func (a *sparseArrayObject) defineOwnPropertyStr(name unistring.String, descr PropertyDescriptor, throw bool) bool { + if idx := strToArrayIdx(name); idx != math.MaxUint32 { + return a._defineIdxProperty(idx, descr, throw) + } + if name == "length" { + return a.val.runtime.defineArrayLength(&a.lengthProp, descr, a.setLength, throw) + } + return a.baseObject.defineOwnPropertyStr(name, descr, throw) +} + +func (a *sparseArrayObject) defineOwnPropertyIdx(idx valueInt, descr PropertyDescriptor, throw bool) bool { + if idx := toIdx(idx); idx != math.MaxUint32 { + return a._defineIdxProperty(idx, descr, throw) + } + return a.baseObject.defineOwnPropertyStr(idx.string(), descr, throw) +} + +func (a *sparseArrayObject) _deleteIdxProp(idx uint32, throw bool) bool { + i := a.findIdx(idx) + if i < len(a.items) && a.items[i].idx == idx { + if p, ok := a.items[i].value.(*valueProperty); ok { + if !p.configurable { + a.val.runtime.typeErrorResult(throw, "Cannot delete property '%d' of %s", idx, a.val.toString()) + return false + } + a.propValueCount-- + } + copy(a.items[i:], a.items[i+1:]) + a.items[len(a.items)-1].value = nil + a.items = a.items[:len(a.items)-1] + } + return true +} + +func (a *sparseArrayObject) deleteStr(name unistring.String, throw bool) bool { + if idx := strToArrayIdx(name); idx != math.MaxUint32 { + return a._deleteIdxProp(idx, throw) + } + return a.baseObject.deleteStr(name, throw) +} + +func (a *sparseArrayObject) deleteIdx(idx valueInt, throw bool) bool { + if idx := toIdx(idx); idx != math.MaxUint32 { + return a._deleteIdxProp(idx, throw) + } + return a.baseObject.deleteStr(idx.string(), throw) +} + +func (a *sparseArrayObject) sortLen() int64 { + if len(a.items) > 0 { + return int64(a.items[len(a.items)-1].idx) + 1 + } + + return 0 +} + +func (a *sparseArrayObject) export(ctx *objectExportCtx) interface{} { + if 
v, exists := ctx.get(a); exists { + return v + } + arr := make([]interface{}, a.length) + ctx.put(a, arr) + var prevIdx uint32 + for _, item := range a.items { + idx := item.idx + for i := prevIdx; i < idx; i++ { + if a.prototype != nil { + if v := a.prototype.self.getIdx(valueInt(i), nil); v != nil { + arr[i] = exportValue(v, ctx) + } + } + } + v := item.value + if v != nil { + if prop, ok := v.(*valueProperty); ok { + v = prop.get(a.val) + } + arr[idx] = exportValue(v, ctx) + } + prevIdx = idx + 1 + } + for i := prevIdx; i < a.length; i++ { + if a.prototype != nil { + if v := a.prototype.self.getIdx(valueInt(i), nil); v != nil { + arr[i] = exportValue(v, ctx) + } + } + } + return arr +} + +func (a *sparseArrayObject) exportType() reflect.Type { + return reflectTypeArray +} diff --git a/vendor/github.com/dop251/goja/ast/README.markdown b/vendor/github.com/dop251/goja/ast/README.markdown new file mode 100644 index 0000000000..aba088e261 --- /dev/null +++ b/vendor/github.com/dop251/goja/ast/README.markdown @@ -0,0 +1,1068 @@ +# ast +-- + import "github.com/dop251/goja/ast" + +Package ast declares types representing a JavaScript AST. + + +### Warning + +The parser and AST interfaces are still works-in-progress (particularly where +node types are concerned) and may change in the future. 
+ +## Usage + +#### type ArrayLiteral + +```go +type ArrayLiteral struct { + LeftBracket file.Idx + RightBracket file.Idx + Value []Expression +} +``` + + +#### func (*ArrayLiteral) Idx0 + +```go +func (self *ArrayLiteral) Idx0() file.Idx +``` + +#### func (*ArrayLiteral) Idx1 + +```go +func (self *ArrayLiteral) Idx1() file.Idx +``` + +#### type AssignExpression + +```go +type AssignExpression struct { + Operator token.Token + Left Expression + Right Expression +} +``` + + +#### func (*AssignExpression) Idx0 + +```go +func (self *AssignExpression) Idx0() file.Idx +``` + +#### func (*AssignExpression) Idx1 + +```go +func (self *AssignExpression) Idx1() file.Idx +``` + +#### type BadExpression + +```go +type BadExpression struct { + From file.Idx + To file.Idx +} +``` + + +#### func (*BadExpression) Idx0 + +```go +func (self *BadExpression) Idx0() file.Idx +``` + +#### func (*BadExpression) Idx1 + +```go +func (self *BadExpression) Idx1() file.Idx +``` + +#### type BadStatement + +```go +type BadStatement struct { + From file.Idx + To file.Idx +} +``` + + +#### func (*BadStatement) Idx0 + +```go +func (self *BadStatement) Idx0() file.Idx +``` + +#### func (*BadStatement) Idx1 + +```go +func (self *BadStatement) Idx1() file.Idx +``` + +#### type BinaryExpression + +```go +type BinaryExpression struct { + Operator token.Token + Left Expression + Right Expression + Comparison bool +} +``` + + +#### func (*BinaryExpression) Idx0 + +```go +func (self *BinaryExpression) Idx0() file.Idx +``` + +#### func (*BinaryExpression) Idx1 + +```go +func (self *BinaryExpression) Idx1() file.Idx +``` + +#### type BlockStatement + +```go +type BlockStatement struct { + LeftBrace file.Idx + List []Statement + RightBrace file.Idx +} +``` + + +#### func (*BlockStatement) Idx0 + +```go +func (self *BlockStatement) Idx0() file.Idx +``` + +#### func (*BlockStatement) Idx1 + +```go +func (self *BlockStatement) Idx1() file.Idx +``` + +#### type BooleanLiteral + +```go +type BooleanLiteral 
struct { + Idx file.Idx + Literal string + Value bool +} +``` + + +#### func (*BooleanLiteral) Idx0 + +```go +func (self *BooleanLiteral) Idx0() file.Idx +``` + +#### func (*BooleanLiteral) Idx1 + +```go +func (self *BooleanLiteral) Idx1() file.Idx +``` + +#### type BracketExpression + +```go +type BracketExpression struct { + Left Expression + Member Expression + LeftBracket file.Idx + RightBracket file.Idx +} +``` + + +#### func (*BracketExpression) Idx0 + +```go +func (self *BracketExpression) Idx0() file.Idx +``` + +#### func (*BracketExpression) Idx1 + +```go +func (self *BracketExpression) Idx1() file.Idx +``` + +#### type BranchStatement + +```go +type BranchStatement struct { + Idx file.Idx + Token token.Token + Label *Identifier +} +``` + + +#### func (*BranchStatement) Idx0 + +```go +func (self *BranchStatement) Idx0() file.Idx +``` + +#### func (*BranchStatement) Idx1 + +```go +func (self *BranchStatement) Idx1() file.Idx +``` + +#### type CallExpression + +```go +type CallExpression struct { + Callee Expression + LeftParenthesis file.Idx + ArgumentList []Expression + RightParenthesis file.Idx +} +``` + + +#### func (*CallExpression) Idx0 + +```go +func (self *CallExpression) Idx0() file.Idx +``` + +#### func (*CallExpression) Idx1 + +```go +func (self *CallExpression) Idx1() file.Idx +``` + +#### type CaseStatement + +```go +type CaseStatement struct { + Case file.Idx + Test Expression + Consequent []Statement +} +``` + + +#### func (*CaseStatement) Idx0 + +```go +func (self *CaseStatement) Idx0() file.Idx +``` + +#### func (*CaseStatement) Idx1 + +```go +func (self *CaseStatement) Idx1() file.Idx +``` + +#### type CatchStatement + +```go +type CatchStatement struct { + Catch file.Idx + Parameter *Identifier + Body Statement +} +``` + + +#### func (*CatchStatement) Idx0 + +```go +func (self *CatchStatement) Idx0() file.Idx +``` + +#### func (*CatchStatement) Idx1 + +```go +func (self *CatchStatement) Idx1() file.Idx +``` + +#### type 
ConditionalExpression + +```go +type ConditionalExpression struct { + Test Expression + Consequent Expression + Alternate Expression +} +``` + + +#### func (*ConditionalExpression) Idx0 + +```go +func (self *ConditionalExpression) Idx0() file.Idx +``` + +#### func (*ConditionalExpression) Idx1 + +```go +func (self *ConditionalExpression) Idx1() file.Idx +``` + +#### type DebuggerStatement + +```go +type DebuggerStatement struct { + Debugger file.Idx +} +``` + + +#### func (*DebuggerStatement) Idx0 + +```go +func (self *DebuggerStatement) Idx0() file.Idx +``` + +#### func (*DebuggerStatement) Idx1 + +```go +func (self *DebuggerStatement) Idx1() file.Idx +``` + +#### type Declaration + +```go +type Declaration interface { + // contains filtered or unexported methods +} +``` + +All declaration nodes implement the Declaration interface. + +#### type DoWhileStatement + +```go +type DoWhileStatement struct { + Do file.Idx + Test Expression + Body Statement +} +``` + + +#### func (*DoWhileStatement) Idx0 + +```go +func (self *DoWhileStatement) Idx0() file.Idx +``` + +#### func (*DoWhileStatement) Idx1 + +```go +func (self *DoWhileStatement) Idx1() file.Idx +``` + +#### type DotExpression + +```go +type DotExpression struct { + Left Expression + Identifier Identifier +} +``` + + +#### func (*DotExpression) Idx0 + +```go +func (self *DotExpression) Idx0() file.Idx +``` + +#### func (*DotExpression) Idx1 + +```go +func (self *DotExpression) Idx1() file.Idx +``` + +#### type EmptyStatement + +```go +type EmptyStatement struct { + Semicolon file.Idx +} +``` + + +#### func (*EmptyStatement) Idx0 + +```go +func (self *EmptyStatement) Idx0() file.Idx +``` + +#### func (*EmptyStatement) Idx1 + +```go +func (self *EmptyStatement) Idx1() file.Idx +``` + +#### type Expression + +```go +type Expression interface { + Node + // contains filtered or unexported methods +} +``` + +All expression nodes implement the Expression interface. 
+ +#### type ExpressionStatement + +```go +type ExpressionStatement struct { + Expression Expression +} +``` + + +#### func (*ExpressionStatement) Idx0 + +```go +func (self *ExpressionStatement) Idx0() file.Idx +``` + +#### func (*ExpressionStatement) Idx1 + +```go +func (self *ExpressionStatement) Idx1() file.Idx +``` + +#### type ForInStatement + +```go +type ForInStatement struct { + For file.Idx + Into Expression + Source Expression + Body Statement +} +``` + + +#### func (*ForInStatement) Idx0 + +```go +func (self *ForInStatement) Idx0() file.Idx +``` + +#### func (*ForInStatement) Idx1 + +```go +func (self *ForInStatement) Idx1() file.Idx +``` + +#### type ForStatement + +```go +type ForStatement struct { + For file.Idx + Initializer Expression + Update Expression + Test Expression + Body Statement +} +``` + + +#### func (*ForStatement) Idx0 + +```go +func (self *ForStatement) Idx0() file.Idx +``` + +#### func (*ForStatement) Idx1 + +```go +func (self *ForStatement) Idx1() file.Idx +``` + +#### type FunctionDeclaration + +```go +type FunctionDeclaration struct { + Function *FunctionLiteral +} +``` + + +#### type FunctionLiteral + +```go +type FunctionLiteral struct { + Function file.Idx + Name *Identifier + ParameterList *ParameterList + Body Statement + Source string + + DeclarationList []Declaration +} +``` + + +#### func (*FunctionLiteral) Idx0 + +```go +func (self *FunctionLiteral) Idx0() file.Idx +``` + +#### func (*FunctionLiteral) Idx1 + +```go +func (self *FunctionLiteral) Idx1() file.Idx +``` + +#### type Identifier + +```go +type Identifier struct { + Name string + Idx file.Idx +} +``` + + +#### func (*Identifier) Idx0 + +```go +func (self *Identifier) Idx0() file.Idx +``` + +#### func (*Identifier) Idx1 + +```go +func (self *Identifier) Idx1() file.Idx +``` + +#### type IfStatement + +```go +type IfStatement struct { + If file.Idx + Test Expression + Consequent Statement + Alternate Statement +} +``` + + +#### func (*IfStatement) Idx0 + +```go 
+func (self *IfStatement) Idx0() file.Idx +``` + +#### func (*IfStatement) Idx1 + +```go +func (self *IfStatement) Idx1() file.Idx +``` + +#### type LabelledStatement + +```go +type LabelledStatement struct { + Label *Identifier + Colon file.Idx + Statement Statement +} +``` + + +#### func (*LabelledStatement) Idx0 + +```go +func (self *LabelledStatement) Idx0() file.Idx +``` + +#### func (*LabelledStatement) Idx1 + +```go +func (self *LabelledStatement) Idx1() file.Idx +``` + +#### type NewExpression + +```go +type NewExpression struct { + New file.Idx + Callee Expression + LeftParenthesis file.Idx + ArgumentList []Expression + RightParenthesis file.Idx +} +``` + + +#### func (*NewExpression) Idx0 + +```go +func (self *NewExpression) Idx0() file.Idx +``` + +#### func (*NewExpression) Idx1 + +```go +func (self *NewExpression) Idx1() file.Idx +``` + +#### type Node + +```go +type Node interface { + Idx0() file.Idx // The index of the first character belonging to the node + Idx1() file.Idx // The index of the first character immediately after the node +} +``` + +All nodes implement the Node interface. 
+ +#### type NullLiteral + +```go +type NullLiteral struct { + Idx file.Idx + Literal string +} +``` + + +#### func (*NullLiteral) Idx0 + +```go +func (self *NullLiteral) Idx0() file.Idx +``` + +#### func (*NullLiteral) Idx1 + +```go +func (self *NullLiteral) Idx1() file.Idx +``` + +#### type NumberLiteral + +```go +type NumberLiteral struct { + Idx file.Idx + Literal string + Value interface{} +} +``` + + +#### func (*NumberLiteral) Idx0 + +```go +func (self *NumberLiteral) Idx0() file.Idx +``` + +#### func (*NumberLiteral) Idx1 + +```go +func (self *NumberLiteral) Idx1() file.Idx +``` + +#### type ObjectLiteral + +```go +type ObjectLiteral struct { + LeftBrace file.Idx + RightBrace file.Idx + Value []Property +} +``` + + +#### func (*ObjectLiteral) Idx0 + +```go +func (self *ObjectLiteral) Idx0() file.Idx +``` + +#### func (*ObjectLiteral) Idx1 + +```go +func (self *ObjectLiteral) Idx1() file.Idx +``` + +#### type ParameterList + +```go +type ParameterList struct { + Opening file.Idx + List []*Identifier + Closing file.Idx +} +``` + + +#### type Program + +```go +type Program struct { + Body []Statement + + DeclarationList []Declaration + + File *file.File +} +``` + + +#### func (*Program) Idx0 + +```go +func (self *Program) Idx0() file.Idx +``` + +#### func (*Program) Idx1 + +```go +func (self *Program) Idx1() file.Idx +``` + +#### type Property + +```go +type Property struct { + Key string + Kind string + Value Expression +} +``` + + +#### type RegExpLiteral + +```go +type RegExpLiteral struct { + Idx file.Idx + Literal string + Pattern string + Flags string + Value string +} +``` + + +#### func (*RegExpLiteral) Idx0 + +```go +func (self *RegExpLiteral) Idx0() file.Idx +``` + +#### func (*RegExpLiteral) Idx1 + +```go +func (self *RegExpLiteral) Idx1() file.Idx +``` + +#### type ReturnStatement + +```go +type ReturnStatement struct { + Return file.Idx + Argument Expression +} +``` + + +#### func (*ReturnStatement) Idx0 + +```go +func (self *ReturnStatement) 
Idx0() file.Idx +``` + +#### func (*ReturnStatement) Idx1 + +```go +func (self *ReturnStatement) Idx1() file.Idx +``` + +#### type SequenceExpression + +```go +type SequenceExpression struct { + Sequence []Expression +} +``` + + +#### func (*SequenceExpression) Idx0 + +```go +func (self *SequenceExpression) Idx0() file.Idx +``` + +#### func (*SequenceExpression) Idx1 + +```go +func (self *SequenceExpression) Idx1() file.Idx +``` + +#### type Statement + +```go +type Statement interface { + Node + // contains filtered or unexported methods +} +``` + +All statement nodes implement the Statement interface. + +#### type StringLiteral + +```go +type StringLiteral struct { + Idx file.Idx + Literal string + Value string +} +``` + + +#### func (*StringLiteral) Idx0 + +```go +func (self *StringLiteral) Idx0() file.Idx +``` + +#### func (*StringLiteral) Idx1 + +```go +func (self *StringLiteral) Idx1() file.Idx +``` + +#### type SwitchStatement + +```go +type SwitchStatement struct { + Switch file.Idx + Discriminant Expression + Default int + Body []*CaseStatement +} +``` + + +#### func (*SwitchStatement) Idx0 + +```go +func (self *SwitchStatement) Idx0() file.Idx +``` + +#### func (*SwitchStatement) Idx1 + +```go +func (self *SwitchStatement) Idx1() file.Idx +``` + +#### type ThisExpression + +```go +type ThisExpression struct { + Idx file.Idx +} +``` + + +#### func (*ThisExpression) Idx0 + +```go +func (self *ThisExpression) Idx0() file.Idx +``` + +#### func (*ThisExpression) Idx1 + +```go +func (self *ThisExpression) Idx1() file.Idx +``` + +#### type ThrowStatement + +```go +type ThrowStatement struct { + Throw file.Idx + Argument Expression +} +``` + + +#### func (*ThrowStatement) Idx0 + +```go +func (self *ThrowStatement) Idx0() file.Idx +``` + +#### func (*ThrowStatement) Idx1 + +```go +func (self *ThrowStatement) Idx1() file.Idx +``` + +#### type TryStatement + +```go +type TryStatement struct { + Try file.Idx + Body Statement + Catch *CatchStatement + Finally 
Statement +} +``` + + +#### func (*TryStatement) Idx0 + +```go +func (self *TryStatement) Idx0() file.Idx +``` + +#### func (*TryStatement) Idx1 + +```go +func (self *TryStatement) Idx1() file.Idx +``` + +#### type UnaryExpression + +```go +type UnaryExpression struct { + Operator token.Token + Idx file.Idx // If a prefix operation + Operand Expression + Postfix bool +} +``` + + +#### func (*UnaryExpression) Idx0 + +```go +func (self *UnaryExpression) Idx0() file.Idx +``` + +#### func (*UnaryExpression) Idx1 + +```go +func (self *UnaryExpression) Idx1() file.Idx +``` + +#### type VariableDeclaration + +```go +type VariableDeclaration struct { + Var file.Idx + List []*VariableExpression +} +``` + + +#### type VariableExpression + +```go +type VariableExpression struct { + Name string + Idx file.Idx + Initializer Expression +} +``` + + +#### func (*VariableExpression) Idx0 + +```go +func (self *VariableExpression) Idx0() file.Idx +``` + +#### func (*VariableExpression) Idx1 + +```go +func (self *VariableExpression) Idx1() file.Idx +``` + +#### type VariableStatement + +```go +type VariableStatement struct { + Var file.Idx + List []Expression +} +``` + + +#### func (*VariableStatement) Idx0 + +```go +func (self *VariableStatement) Idx0() file.Idx +``` + +#### func (*VariableStatement) Idx1 + +```go +func (self *VariableStatement) Idx1() file.Idx +``` + +#### type WhileStatement + +```go +type WhileStatement struct { + While file.Idx + Test Expression + Body Statement +} +``` + + +#### func (*WhileStatement) Idx0 + +```go +func (self *WhileStatement) Idx0() file.Idx +``` + +#### func (*WhileStatement) Idx1 + +```go +func (self *WhileStatement) Idx1() file.Idx +``` + +#### type WithStatement + +```go +type WithStatement struct { + With file.Idx + Object Expression + Body Statement +} +``` + + +#### func (*WithStatement) Idx0 + +```go +func (self *WithStatement) Idx0() file.Idx +``` + +#### func (*WithStatement) Idx1 + +```go +func (self *WithStatement) Idx1() file.Idx 
+``` + +-- +**godocdown** http://github.com/robertkrimen/godocdown diff --git a/vendor/github.com/dop251/goja/ast/node.go b/vendor/github.com/dop251/goja/ast/node.go new file mode 100644 index 0000000000..9ae2836cc4 --- /dev/null +++ b/vendor/github.com/dop251/goja/ast/node.go @@ -0,0 +1,658 @@ +/* +Package ast declares types representing a JavaScript AST. + +Warning + +The parser and AST interfaces are still works-in-progress (particularly where +node types are concerned) and may change in the future. + +*/ +package ast + +import ( + "github.com/dop251/goja/file" + "github.com/dop251/goja/token" + "github.com/dop251/goja/unistring" +) + +type PropertyKind string + +const ( + PropertyKindValue PropertyKind = "value" + PropertyKindGet PropertyKind = "get" + PropertyKindSet PropertyKind = "set" + PropertyKindMethod PropertyKind = "method" +) + +// All nodes implement the Node interface. +type Node interface { + Idx0() file.Idx // The index of the first character belonging to the node + Idx1() file.Idx // The index of the first character immediately after the node +} + +// ========== // +// Expression // +// ========== // + +type ( + // All expression nodes implement the Expression interface. 
+ Expression interface { + Node + _expressionNode() + } + + BindingTarget interface { + Expression + _bindingTarget() + } + + Binding struct { + Target BindingTarget + Initializer Expression + } + + Pattern interface { + BindingTarget + _pattern() + } + + ArrayLiteral struct { + LeftBracket file.Idx + RightBracket file.Idx + Value []Expression + } + + ArrayPattern struct { + LeftBracket file.Idx + RightBracket file.Idx + Elements []Expression + Rest Expression + } + + AssignExpression struct { + Operator token.Token + Left Expression + Right Expression + } + + BadExpression struct { + From file.Idx + To file.Idx + } + + BinaryExpression struct { + Operator token.Token + Left Expression + Right Expression + Comparison bool + } + + BooleanLiteral struct { + Idx file.Idx + Literal string + Value bool + } + + BracketExpression struct { + Left Expression + Member Expression + LeftBracket file.Idx + RightBracket file.Idx + } + + CallExpression struct { + Callee Expression + LeftParenthesis file.Idx + ArgumentList []Expression + RightParenthesis file.Idx + } + + ConditionalExpression struct { + Test Expression + Consequent Expression + Alternate Expression + } + + DotExpression struct { + Left Expression + Identifier Identifier + } + + FunctionLiteral struct { + Function file.Idx + Name *Identifier + ParameterList *ParameterList + Body *BlockStatement + Source string + + DeclarationList []*VariableDeclaration + } + + Identifier struct { + Name unistring.String + Idx file.Idx + } + + NewExpression struct { + New file.Idx + Callee Expression + LeftParenthesis file.Idx + ArgumentList []Expression + RightParenthesis file.Idx + } + + NullLiteral struct { + Idx file.Idx + Literal string + } + + NumberLiteral struct { + Idx file.Idx + Literal string + Value interface{} + } + + ObjectLiteral struct { + LeftBrace file.Idx + RightBrace file.Idx + Value []Property + } + + ObjectPattern struct { + LeftBrace file.Idx + RightBrace file.Idx + Properties []Property + Rest Expression + } 
+ + ParameterList struct { + Opening file.Idx + List []*Binding + Rest Expression + Closing file.Idx + } + + Property interface { + Expression + _property() + } + + PropertyShort struct { + Name Identifier + Initializer Expression + } + + PropertyKeyed struct { + Key Expression + Kind PropertyKind + Value Expression + } + + SpreadElement struct { + Expression + } + + RegExpLiteral struct { + Idx file.Idx + Literal string + Pattern string + Flags string + } + + SequenceExpression struct { + Sequence []Expression + } + + StringLiteral struct { + Idx file.Idx + Literal string + Value unistring.String + } + + ThisExpression struct { + Idx file.Idx + } + + UnaryExpression struct { + Operator token.Token + Idx file.Idx // If a prefix operation + Operand Expression + Postfix bool + } + + MetaProperty struct { + Meta, Property *Identifier + Idx file.Idx + } +) + +// _expressionNode + +func (*ArrayLiteral) _expressionNode() {} +func (*AssignExpression) _expressionNode() {} +func (*BadExpression) _expressionNode() {} +func (*BinaryExpression) _expressionNode() {} +func (*BooleanLiteral) _expressionNode() {} +func (*BracketExpression) _expressionNode() {} +func (*CallExpression) _expressionNode() {} +func (*ConditionalExpression) _expressionNode() {} +func (*DotExpression) _expressionNode() {} +func (*FunctionLiteral) _expressionNode() {} +func (*Identifier) _expressionNode() {} +func (*NewExpression) _expressionNode() {} +func (*NullLiteral) _expressionNode() {} +func (*NumberLiteral) _expressionNode() {} +func (*ObjectLiteral) _expressionNode() {} +func (*RegExpLiteral) _expressionNode() {} +func (*SequenceExpression) _expressionNode() {} +func (*StringLiteral) _expressionNode() {} +func (*ThisExpression) _expressionNode() {} +func (*UnaryExpression) _expressionNode() {} +func (*MetaProperty) _expressionNode() {} +func (*ObjectPattern) _expressionNode() {} +func (*ArrayPattern) _expressionNode() {} +func (*Binding) _expressionNode() {} + +func (*PropertyShort) 
_expressionNode() {} +func (*PropertyKeyed) _expressionNode() {} + +// ========= // +// Statement // +// ========= // + +type ( + // All statement nodes implement the Statement interface. + Statement interface { + Node + _statementNode() + } + + BadStatement struct { + From file.Idx + To file.Idx + } + + BlockStatement struct { + LeftBrace file.Idx + List []Statement + RightBrace file.Idx + } + + BranchStatement struct { + Idx file.Idx + Token token.Token + Label *Identifier + } + + CaseStatement struct { + Case file.Idx + Test Expression + Consequent []Statement + } + + CatchStatement struct { + Catch file.Idx + Parameter BindingTarget + Body *BlockStatement + } + + DebuggerStatement struct { + Debugger file.Idx + } + + DoWhileStatement struct { + Do file.Idx + Test Expression + Body Statement + } + + EmptyStatement struct { + Semicolon file.Idx + } + + ExpressionStatement struct { + Expression Expression + } + + ForInStatement struct { + For file.Idx + Into ForInto + Source Expression + Body Statement + } + + ForOfStatement struct { + For file.Idx + Into ForInto + Source Expression + Body Statement + } + + ForStatement struct { + For file.Idx + Initializer ForLoopInitializer + Update Expression + Test Expression + Body Statement + } + + IfStatement struct { + If file.Idx + Test Expression + Consequent Statement + Alternate Statement + } + + LabelledStatement struct { + Label *Identifier + Colon file.Idx + Statement Statement + } + + ReturnStatement struct { + Return file.Idx + Argument Expression + } + + SwitchStatement struct { + Switch file.Idx + Discriminant Expression + Default int + Body []*CaseStatement + } + + ThrowStatement struct { + Throw file.Idx + Argument Expression + } + + TryStatement struct { + Try file.Idx + Body *BlockStatement + Catch *CatchStatement + Finally *BlockStatement + } + + VariableStatement struct { + Var file.Idx + List []*Binding + } + + LexicalDeclaration struct { + Idx file.Idx + Token token.Token + List []*Binding + } + + 
WhileStatement struct { + While file.Idx + Test Expression + Body Statement + } + + WithStatement struct { + With file.Idx + Object Expression + Body Statement + } + + FunctionDeclaration struct { + Function *FunctionLiteral + } +) + +// _statementNode + +func (*BadStatement) _statementNode() {} +func (*BlockStatement) _statementNode() {} +func (*BranchStatement) _statementNode() {} +func (*CaseStatement) _statementNode() {} +func (*CatchStatement) _statementNode() {} +func (*DebuggerStatement) _statementNode() {} +func (*DoWhileStatement) _statementNode() {} +func (*EmptyStatement) _statementNode() {} +func (*ExpressionStatement) _statementNode() {} +func (*ForInStatement) _statementNode() {} +func (*ForOfStatement) _statementNode() {} +func (*ForStatement) _statementNode() {} +func (*IfStatement) _statementNode() {} +func (*LabelledStatement) _statementNode() {} +func (*ReturnStatement) _statementNode() {} +func (*SwitchStatement) _statementNode() {} +func (*ThrowStatement) _statementNode() {} +func (*TryStatement) _statementNode() {} +func (*VariableStatement) _statementNode() {} +func (*WhileStatement) _statementNode() {} +func (*WithStatement) _statementNode() {} +func (*LexicalDeclaration) _statementNode() {} +func (*FunctionDeclaration) _statementNode() {} + +// =========== // +// Declaration // +// =========== // + +type ( + VariableDeclaration struct { + Var file.Idx + List []*Binding + } +) + +type ( + ForLoopInitializer interface { + _forLoopInitializer() + } + + ForLoopInitializerExpression struct { + Expression Expression + } + + ForLoopInitializerVarDeclList struct { + Var file.Idx + List []*Binding + } + + ForLoopInitializerLexicalDecl struct { + LexicalDeclaration LexicalDeclaration + } + + ForInto interface { + _forInto() + } + + ForIntoVar struct { + Binding *Binding + } + + ForDeclaration struct { + Idx file.Idx + IsConst bool + Target BindingTarget + } + + ForIntoExpression struct { + Expression Expression + } +) + +func 
(*ForLoopInitializerExpression) _forLoopInitializer() {} +func (*ForLoopInitializerVarDeclList) _forLoopInitializer() {} +func (*ForLoopInitializerLexicalDecl) _forLoopInitializer() {} + +func (*ForIntoVar) _forInto() {} +func (*ForDeclaration) _forInto() {} +func (*ForIntoExpression) _forInto() {} + +func (*ArrayPattern) _pattern() {} +func (*ArrayPattern) _bindingTarget() {} + +func (*ObjectPattern) _pattern() {} +func (*ObjectPattern) _bindingTarget() {} + +func (*BadExpression) _bindingTarget() {} + +func (*PropertyShort) _property() {} +func (*PropertyKeyed) _property() {} +func (*SpreadElement) _property() {} + +func (*Identifier) _bindingTarget() {} + +// ==== // +// Node // +// ==== // + +type Program struct { + Body []Statement + + DeclarationList []*VariableDeclaration + + File *file.File +} + +// ==== // +// Idx0 // +// ==== // + +func (self *ArrayLiteral) Idx0() file.Idx { return self.LeftBracket } +func (self *ArrayPattern) Idx0() file.Idx { return self.LeftBracket } +func (self *ObjectPattern) Idx0() file.Idx { return self.LeftBrace } +func (self *AssignExpression) Idx0() file.Idx { return self.Left.Idx0() } +func (self *BadExpression) Idx0() file.Idx { return self.From } +func (self *BinaryExpression) Idx0() file.Idx { return self.Left.Idx0() } +func (self *BooleanLiteral) Idx0() file.Idx { return self.Idx } +func (self *BracketExpression) Idx0() file.Idx { return self.Left.Idx0() } +func (self *CallExpression) Idx0() file.Idx { return self.Callee.Idx0() } +func (self *ConditionalExpression) Idx0() file.Idx { return self.Test.Idx0() } +func (self *DotExpression) Idx0() file.Idx { return self.Left.Idx0() } +func (self *FunctionLiteral) Idx0() file.Idx { return self.Function } +func (self *Identifier) Idx0() file.Idx { return self.Idx } +func (self *NewExpression) Idx0() file.Idx { return self.New } +func (self *NullLiteral) Idx0() file.Idx { return self.Idx } +func (self *NumberLiteral) Idx0() file.Idx { return self.Idx } +func (self *ObjectLiteral) 
Idx0() file.Idx { return self.LeftBrace } +func (self *RegExpLiteral) Idx0() file.Idx { return self.Idx } +func (self *SequenceExpression) Idx0() file.Idx { return self.Sequence[0].Idx0() } +func (self *StringLiteral) Idx0() file.Idx { return self.Idx } +func (self *ThisExpression) Idx0() file.Idx { return self.Idx } +func (self *UnaryExpression) Idx0() file.Idx { return self.Idx } +func (self *MetaProperty) Idx0() file.Idx { return self.Idx } + +func (self *BadStatement) Idx0() file.Idx { return self.From } +func (self *BlockStatement) Idx0() file.Idx { return self.LeftBrace } +func (self *BranchStatement) Idx0() file.Idx { return self.Idx } +func (self *CaseStatement) Idx0() file.Idx { return self.Case } +func (self *CatchStatement) Idx0() file.Idx { return self.Catch } +func (self *DebuggerStatement) Idx0() file.Idx { return self.Debugger } +func (self *DoWhileStatement) Idx0() file.Idx { return self.Do } +func (self *EmptyStatement) Idx0() file.Idx { return self.Semicolon } +func (self *ExpressionStatement) Idx0() file.Idx { return self.Expression.Idx0() } +func (self *ForInStatement) Idx0() file.Idx { return self.For } +func (self *ForOfStatement) Idx0() file.Idx { return self.For } +func (self *ForStatement) Idx0() file.Idx { return self.For } +func (self *IfStatement) Idx0() file.Idx { return self.If } +func (self *LabelledStatement) Idx0() file.Idx { return self.Label.Idx0() } +func (self *Program) Idx0() file.Idx { return self.Body[0].Idx0() } +func (self *ReturnStatement) Idx0() file.Idx { return self.Return } +func (self *SwitchStatement) Idx0() file.Idx { return self.Switch } +func (self *ThrowStatement) Idx0() file.Idx { return self.Throw } +func (self *TryStatement) Idx0() file.Idx { return self.Try } +func (self *VariableStatement) Idx0() file.Idx { return self.Var } +func (self *WhileStatement) Idx0() file.Idx { return self.While } +func (self *WithStatement) Idx0() file.Idx { return self.With } +func (self *LexicalDeclaration) Idx0() file.Idx { 
return self.Idx } +func (self *FunctionDeclaration) Idx0() file.Idx { return self.Function.Idx0() } +func (self *Binding) Idx0() file.Idx { return self.Target.Idx0() } + +func (self *ForLoopInitializerVarDeclList) Idx0() file.Idx { return self.List[0].Idx0() } +func (self *PropertyShort) Idx0() file.Idx { return self.Name.Idx } +func (self *PropertyKeyed) Idx0() file.Idx { return self.Key.Idx0() } + +// ==== // +// Idx1 // +// ==== // + +func (self *ArrayLiteral) Idx1() file.Idx { return self.RightBracket + 1 } +func (self *ArrayPattern) Idx1() file.Idx { return self.RightBracket + 1 } +func (self *AssignExpression) Idx1() file.Idx { return self.Right.Idx1() } +func (self *BadExpression) Idx1() file.Idx { return self.To } +func (self *BinaryExpression) Idx1() file.Idx { return self.Right.Idx1() } +func (self *BooleanLiteral) Idx1() file.Idx { return file.Idx(int(self.Idx) + len(self.Literal)) } +func (self *BracketExpression) Idx1() file.Idx { return self.RightBracket + 1 } +func (self *CallExpression) Idx1() file.Idx { return self.RightParenthesis + 1 } +func (self *ConditionalExpression) Idx1() file.Idx { return self.Test.Idx1() } +func (self *DotExpression) Idx1() file.Idx { return self.Identifier.Idx1() } +func (self *FunctionLiteral) Idx1() file.Idx { return self.Body.Idx1() } +func (self *Identifier) Idx1() file.Idx { return file.Idx(int(self.Idx) + len(self.Name)) } +func (self *NewExpression) Idx1() file.Idx { return self.RightParenthesis + 1 } +func (self *NullLiteral) Idx1() file.Idx { return file.Idx(int(self.Idx) + 4) } // "null" +func (self *NumberLiteral) Idx1() file.Idx { return file.Idx(int(self.Idx) + len(self.Literal)) } +func (self *ObjectLiteral) Idx1() file.Idx { return self.RightBrace + 1 } +func (self *ObjectPattern) Idx1() file.Idx { return self.RightBrace + 1 } +func (self *RegExpLiteral) Idx1() file.Idx { return file.Idx(int(self.Idx) + len(self.Literal)) } +func (self *SequenceExpression) Idx1() file.Idx { return 
self.Sequence[len(self.Sequence)-1].Idx1() } +func (self *StringLiteral) Idx1() file.Idx { return file.Idx(int(self.Idx) + len(self.Literal)) } +func (self *ThisExpression) Idx1() file.Idx { return self.Idx + 4 } +func (self *UnaryExpression) Idx1() file.Idx { + if self.Postfix { + return self.Operand.Idx1() + 2 // ++ -- + } + return self.Operand.Idx1() +} +func (self *MetaProperty) Idx1() file.Idx { + return self.Property.Idx1() +} + +func (self *BadStatement) Idx1() file.Idx { return self.To } +func (self *BlockStatement) Idx1() file.Idx { return self.RightBrace + 1 } +func (self *BranchStatement) Idx1() file.Idx { return self.Idx } +func (self *CaseStatement) Idx1() file.Idx { return self.Consequent[len(self.Consequent)-1].Idx1() } +func (self *CatchStatement) Idx1() file.Idx { return self.Body.Idx1() } +func (self *DebuggerStatement) Idx1() file.Idx { return self.Debugger + 8 } +func (self *DoWhileStatement) Idx1() file.Idx { return self.Test.Idx1() } +func (self *EmptyStatement) Idx1() file.Idx { return self.Semicolon + 1 } +func (self *ExpressionStatement) Idx1() file.Idx { return self.Expression.Idx1() } +func (self *ForInStatement) Idx1() file.Idx { return self.Body.Idx1() } +func (self *ForOfStatement) Idx1() file.Idx { return self.Body.Idx1() } +func (self *ForStatement) Idx1() file.Idx { return self.Body.Idx1() } +func (self *IfStatement) Idx1() file.Idx { + if self.Alternate != nil { + return self.Alternate.Idx1() + } + return self.Consequent.Idx1() +} +func (self *LabelledStatement) Idx1() file.Idx { return self.Colon + 1 } +func (self *Program) Idx1() file.Idx { return self.Body[len(self.Body)-1].Idx1() } +func (self *ReturnStatement) Idx1() file.Idx { return self.Return + 6 } +func (self *SwitchStatement) Idx1() file.Idx { return self.Body[len(self.Body)-1].Idx1() } +func (self *ThrowStatement) Idx1() file.Idx { return self.Argument.Idx1() } +func (self *TryStatement) Idx1() file.Idx { + if self.Finally != nil { + return self.Finally.Idx1() + } + if 
self.Catch != nil { + return self.Catch.Idx1() + } + return self.Body.Idx1() +} +func (self *VariableStatement) Idx1() file.Idx { return self.List[len(self.List)-1].Idx1() } +func (self *WhileStatement) Idx1() file.Idx { return self.Body.Idx1() } +func (self *WithStatement) Idx1() file.Idx { return self.Body.Idx1() } +func (self *LexicalDeclaration) Idx1() file.Idx { return self.List[len(self.List)-1].Idx1() } +func (self *FunctionDeclaration) Idx1() file.Idx { return self.Function.Idx1() } +func (self *Binding) Idx1() file.Idx { + if self.Initializer != nil { + return self.Initializer.Idx1() + } + return self.Target.Idx1() +} + +func (self *ForLoopInitializerVarDeclList) Idx1() file.Idx { return self.List[len(self.List)-1].Idx1() } + +func (self *PropertyShort) Idx1() file.Idx { + if self.Initializer != nil { + return self.Initializer.Idx1() + } + return self.Name.Idx1() +} + +func (self *PropertyKeyed) Idx1() file.Idx { return self.Value.Idx1() } diff --git a/vendor/github.com/dop251/goja/builtin_array.go b/vendor/github.com/dop251/goja/builtin_array.go new file mode 100644 index 0000000000..a90aabd403 --- /dev/null +++ b/vendor/github.com/dop251/goja/builtin_array.go @@ -0,0 +1,1435 @@ +package goja + +import ( + "math" + "sort" +) + +func (r *Runtime) newArray(prototype *Object) (a *arrayObject) { + v := &Object{runtime: r} + + a = &arrayObject{} + a.class = classArray + a.val = v + a.extensible = true + v.self = a + a.prototype = prototype + a.init() + return +} + +func (r *Runtime) newArrayObject() *arrayObject { + return r.newArray(r.global.ArrayPrototype) +} + +func setArrayValues(a *arrayObject, values []Value) *arrayObject { + a.values = values + a.length = uint32(len(values)) + a.objCount = len(values) + return a +} + +func setArrayLength(a *arrayObject, l int64) *arrayObject { + a.setOwnStr("length", intToValue(l), true) + return a +} + +func arraySpeciesCreate(obj *Object, size int64) *Object { + if isArray(obj) { + v := obj.self.getStr("constructor", 
nil) + if constructObj, ok := v.(*Object); ok { + v = constructObj.self.getSym(SymSpecies, nil) + if v == _null { + v = nil + } + } + + if v != nil && v != _undefined { + constructObj, _ := v.(*Object) + if constructObj != nil { + if constructor := constructObj.self.assertConstructor(); constructor != nil { + return constructor([]Value{intToValue(size)}, constructObj) + } + } + panic(obj.runtime.NewTypeError("Species is not a constructor")) + } + } + return obj.runtime.newArrayLength(size) +} + +func max(a, b int64) int64 { + if a > b { + return a + } + return b +} + +func min(a, b int64) int64 { + if a < b { + return a + } + return b +} + +func relToIdx(rel, l int64) int64 { + if rel >= 0 { + return min(rel, l) + } + return max(l+rel, 0) +} + +func (r *Runtime) newArrayValues(values []Value) *Object { + return setArrayValues(r.newArrayObject(), values).val +} + +func (r *Runtime) newArrayLength(l int64) *Object { + return setArrayLength(r.newArrayObject(), l).val +} + +func (r *Runtime) builtin_newArray(args []Value, proto *Object) *Object { + l := len(args) + if l == 1 { + if al, ok := args[0].(valueInt); ok { + return setArrayLength(r.newArray(proto), int64(al)).val + } else if f, ok := args[0].(valueFloat); ok { + al := int64(f) + if float64(al) == float64(f) { + return r.newArrayLength(al) + } else { + panic(r.newError(r.global.RangeError, "Invalid array length")) + } + } + return setArrayValues(r.newArray(proto), []Value{args[0]}).val + } else { + argsCopy := make([]Value, l) + copy(argsCopy, args) + return setArrayValues(r.newArray(proto), argsCopy).val + } +} + +func (r *Runtime) generic_push(obj *Object, call FunctionCall) Value { + l := toLength(obj.self.getStr("length", nil)) + nl := l + int64(len(call.Arguments)) + if nl >= maxInt { + r.typeErrorResult(true, "Invalid array length") + panic("unreachable") + } + for i, arg := range call.Arguments { + obj.self.setOwnIdx(valueInt(l+int64(i)), arg, true) + } + n := valueInt(nl) + obj.self.setOwnStr("length", 
n, true) + return n +} + +func (r *Runtime) arrayproto_push(call FunctionCall) Value { + obj := call.This.ToObject(r) + return r.generic_push(obj, call) +} + +func (r *Runtime) arrayproto_pop_generic(obj *Object) Value { + l := toLength(obj.self.getStr("length", nil)) + if l == 0 { + obj.self.setOwnStr("length", intToValue(0), true) + return _undefined + } + idx := valueInt(l - 1) + val := obj.self.getIdx(idx, nil) + obj.self.deleteIdx(idx, true) + obj.self.setOwnStr("length", idx, true) + return val +} + +func (r *Runtime) arrayproto_pop(call FunctionCall) Value { + obj := call.This.ToObject(r) + if a, ok := obj.self.(*arrayObject); ok { + l := a.length + if l > 0 { + var val Value + l-- + if l < uint32(len(a.values)) { + val = a.values[l] + } + if val == nil { + // optimisation bail-out + return r.arrayproto_pop_generic(obj) + } + if _, ok := val.(*valueProperty); ok { + // optimisation bail-out + return r.arrayproto_pop_generic(obj) + } + //a._setLengthInt(l, false) + a.values[l] = nil + a.values = a.values[:l] + a.length = l + return val + } + return _undefined + } else { + return r.arrayproto_pop_generic(obj) + } +} + +func (r *Runtime) arrayproto_join(call FunctionCall) Value { + o := call.This.ToObject(r) + l := int(toLength(o.self.getStr("length", nil))) + var sep valueString + if s := call.Argument(0); s != _undefined { + sep = s.toString() + } else { + sep = asciiString(",") + } + if l == 0 { + return stringEmpty + } + + var buf valueStringBuilder + + element0 := o.self.getIdx(valueInt(0), nil) + if element0 != nil && element0 != _undefined && element0 != _null { + buf.WriteString(element0.toString()) + } + + for i := 1; i < l; i++ { + buf.WriteString(sep) + element := o.self.getIdx(valueInt(int64(i)), nil) + if element != nil && element != _undefined && element != _null { + buf.WriteString(element.toString()) + } + } + + return buf.String() +} + +func (r *Runtime) arrayproto_toString(call FunctionCall) Value { + array := call.This.ToObject(r) + f := 
array.self.getStr("join", nil) + if fObj, ok := f.(*Object); ok { + if fcall, ok := fObj.self.assertCallable(); ok { + return fcall(FunctionCall{ + This: array, + }) + } + } + return r.objectproto_toString(FunctionCall{ + This: array, + }) +} + +func (r *Runtime) writeItemLocaleString(item Value, buf *valueStringBuilder) { + if item != nil && item != _undefined && item != _null { + if f, ok := r.getVStr(item, "toLocaleString").(*Object); ok { + if c, ok := f.self.assertCallable(); ok { + strVal := c(FunctionCall{ + This: item, + }) + buf.WriteString(strVal.toString()) + return + } + } + r.typeErrorResult(true, "Property 'toLocaleString' of object %s is not a function", item) + } +} + +func (r *Runtime) arrayproto_toLocaleString(call FunctionCall) Value { + array := call.This.ToObject(r) + var buf valueStringBuilder + if a := r.checkStdArrayObj(array); a != nil { + for i, item := range a.values { + if i > 0 { + buf.WriteRune(',') + } + r.writeItemLocaleString(item, &buf) + } + } else { + length := toLength(array.self.getStr("length", nil)) + for i := int64(0); i < length; i++ { + if i > 0 { + buf.WriteRune(',') + } + item := array.self.getIdx(valueInt(i), nil) + r.writeItemLocaleString(item, &buf) + } + } + + return buf.String() +} + +func isConcatSpreadable(obj *Object) bool { + spreadable := obj.self.getSym(SymIsConcatSpreadable, nil) + if spreadable != nil && spreadable != _undefined { + return spreadable.ToBoolean() + } + return isArray(obj) +} + +func (r *Runtime) arrayproto_concat_append(a *Object, item Value) { + aLength := toLength(a.self.getStr("length", nil)) + if obj, ok := item.(*Object); ok && isConcatSpreadable(obj) { + length := toLength(obj.self.getStr("length", nil)) + if aLength+length >= maxInt { + panic(r.NewTypeError("Invalid array length")) + } + for i := int64(0); i < length; i++ { + v := obj.self.getIdx(valueInt(i), nil) + if v != nil { + createDataPropertyOrThrow(a, intToValue(aLength), v) + } + aLength++ + } + } else { + 
createDataPropertyOrThrow(a, intToValue(aLength), item) + aLength++ + } + a.self.setOwnStr("length", intToValue(aLength), true) +} + +func (r *Runtime) arrayproto_concat(call FunctionCall) Value { + obj := call.This.ToObject(r) + a := arraySpeciesCreate(obj, 0) + r.arrayproto_concat_append(a, call.This.ToObject(r)) + for _, item := range call.Arguments { + r.arrayproto_concat_append(a, item) + } + return a +} + +func (r *Runtime) arrayproto_slice(call FunctionCall) Value { + o := call.This.ToObject(r) + length := toLength(o.self.getStr("length", nil)) + start := relToIdx(call.Argument(0).ToInteger(), length) + var end int64 + if endArg := call.Argument(1); endArg != _undefined { + end = endArg.ToInteger() + } else { + end = length + } + end = relToIdx(end, length) + + count := end - start + if count < 0 { + count = 0 + } + + a := arraySpeciesCreate(o, count) + if src := r.checkStdArrayObj(o); src != nil { + if dst, ok := a.self.(*arrayObject); ok { + values := make([]Value, count) + copy(values, src.values[start:]) + setArrayValues(dst, values) + return a + } + } + + n := int64(0) + for start < end { + p := o.self.getIdx(valueInt(start), nil) + if p != nil { + createDataPropertyOrThrow(a, valueInt(n), p) + } + start++ + n++ + } + return a +} + +func (r *Runtime) arrayproto_sort(call FunctionCall) Value { + o := call.This.ToObject(r) + + var compareFn func(FunctionCall) Value + arg := call.Argument(0) + if arg != _undefined { + if arg, ok := call.Argument(0).(*Object); ok { + compareFn, _ = arg.self.assertCallable() + } + if compareFn == nil { + panic(r.NewTypeError("The comparison function must be either a function or undefined")) + } + } + + ctx := arraySortCtx{ + obj: o.self, + compare: compareFn, + } + + sort.Stable(&ctx) + return o +} + +func (r *Runtime) arrayproto_splice(call FunctionCall) Value { + o := call.This.ToObject(r) + length := toLength(o.self.getStr("length", nil)) + actualStart := relToIdx(call.Argument(0).ToInteger(), length) + var 
actualDeleteCount int64 + switch len(call.Arguments) { + case 0: + case 1: + actualDeleteCount = length - actualStart + default: + actualDeleteCount = min(max(call.Argument(1).ToInteger(), 0), length-actualStart) + } + a := arraySpeciesCreate(o, actualDeleteCount) + itemCount := max(int64(len(call.Arguments)-2), 0) + newLength := length - actualDeleteCount + itemCount + if src := r.checkStdArrayObj(o); src != nil { + if dst, ok := a.self.(*arrayObject); ok { + values := make([]Value, actualDeleteCount) + copy(values, src.values[actualStart:]) + setArrayValues(dst, values) + } else { + for k := int64(0); k < actualDeleteCount; k++ { + createDataPropertyOrThrow(a, intToValue(k), src.values[k+actualStart]) + } + a.self.setOwnStr("length", intToValue(actualDeleteCount), true) + } + var values []Value + if itemCount < actualDeleteCount { + values = src.values + copy(values[actualStart+itemCount:], values[actualStart+actualDeleteCount:]) + tail := values[newLength:] + for k := range tail { + tail[k] = nil + } + values = values[:newLength] + } else if itemCount > actualDeleteCount { + if int64(cap(src.values)) >= newLength { + values = src.values[:newLength] + copy(values[actualStart+itemCount:], values[actualStart+actualDeleteCount:length]) + } else { + values = make([]Value, newLength) + copy(values, src.values[:actualStart]) + copy(values[actualStart+itemCount:], src.values[actualStart+actualDeleteCount:]) + } + } else { + values = src.values + } + if itemCount > 0 { + copy(values[actualStart:], call.Arguments[2:]) + } + src.values = values + src.objCount = len(values) + } else { + for k := int64(0); k < actualDeleteCount; k++ { + from := valueInt(k + actualStart) + if o.self.hasPropertyIdx(from) { + createDataPropertyOrThrow(a, valueInt(k), o.self.getIdx(from, nil)) + } + } + + if itemCount < actualDeleteCount { + for k := actualStart; k < length-actualDeleteCount; k++ { + from := valueInt(k + actualDeleteCount) + to := valueInt(k + itemCount) + if 
o.self.hasPropertyIdx(from) { + o.self.setOwnIdx(to, o.self.getIdx(from, nil), true) + } else { + o.self.deleteIdx(to, true) + } + } + + for k := length; k > length-actualDeleteCount+itemCount; k-- { + o.self.deleteIdx(valueInt(k-1), true) + } + } else if itemCount > actualDeleteCount { + for k := length - actualDeleteCount; k > actualStart; k-- { + from := valueInt(k + actualDeleteCount - 1) + to := valueInt(k + itemCount - 1) + if o.self.hasPropertyIdx(from) { + o.self.setOwnIdx(to, o.self.getIdx(from, nil), true) + } else { + o.self.deleteIdx(to, true) + } + } + } + + if itemCount > 0 { + for i, item := range call.Arguments[2:] { + o.self.setOwnIdx(valueInt(actualStart+int64(i)), item, true) + } + } + } + + o.self.setOwnStr("length", intToValue(newLength), true) + + return a +} + +func (r *Runtime) arrayproto_unshift(call FunctionCall) Value { + o := call.This.ToObject(r) + length := toLength(o.self.getStr("length", nil)) + argCount := int64(len(call.Arguments)) + newLen := intToValue(length + argCount) + newSize := length + argCount + if arr := r.checkStdArrayObj(o); arr != nil && newSize < math.MaxUint32 { + if int64(cap(arr.values)) >= newSize { + arr.values = arr.values[:newSize] + copy(arr.values[argCount:], arr.values[:length]) + } else { + values := make([]Value, newSize) + copy(values[argCount:], arr.values) + arr.values = values + } + copy(arr.values, call.Arguments) + arr.objCount = int(arr.length) + } else { + for k := length - 1; k >= 0; k-- { + from := valueInt(k) + to := valueInt(k + argCount) + if o.self.hasPropertyIdx(from) { + o.self.setOwnIdx(to, o.self.getIdx(from, nil), true) + } else { + o.self.deleteIdx(to, true) + } + } + + for k, arg := range call.Arguments { + o.self.setOwnIdx(valueInt(int64(k)), arg, true) + } + } + + o.self.setOwnStr("length", newLen, true) + return newLen +} + +func (r *Runtime) arrayproto_indexOf(call FunctionCall) Value { + o := call.This.ToObject(r) + length := toLength(o.self.getStr("length", nil)) + if length == 
0 { + return intToValue(-1) + } + + n := call.Argument(1).ToInteger() + if n >= length { + return intToValue(-1) + } + + if n < 0 { + n = max(length+n, 0) + } + + searchElement := call.Argument(0) + + if arr := r.checkStdArrayObj(o); arr != nil { + for i, val := range arr.values[n:] { + if searchElement.StrictEquals(val) { + return intToValue(n + int64(i)) + } + } + return intToValue(-1) + } + + for ; n < length; n++ { + idx := valueInt(n) + if o.self.hasPropertyIdx(idx) { + if val := o.self.getIdx(idx, nil); val != nil { + if searchElement.StrictEquals(val) { + return idx + } + } + } + } + + return intToValue(-1) +} + +func (r *Runtime) arrayproto_includes(call FunctionCall) Value { + o := call.This.ToObject(r) + length := toLength(o.self.getStr("length", nil)) + if length == 0 { + return valueFalse + } + + n := call.Argument(1).ToInteger() + if n >= length { + return valueFalse + } + + if n < 0 { + n = max(length+n, 0) + } + + searchElement := call.Argument(0) + if searchElement == _negativeZero { + searchElement = _positiveZero + } + + if arr := r.checkStdArrayObj(o); arr != nil { + for _, val := range arr.values[n:] { + if searchElement.SameAs(val) { + return valueTrue + } + } + return valueFalse + } + + for ; n < length; n++ { + idx := valueInt(n) + val := nilSafe(o.self.getIdx(idx, nil)) + if searchElement.SameAs(val) { + return valueTrue + } + } + + return valueFalse +} + +func (r *Runtime) arrayproto_lastIndexOf(call FunctionCall) Value { + o := call.This.ToObject(r) + length := toLength(o.self.getStr("length", nil)) + if length == 0 { + return intToValue(-1) + } + + var fromIndex int64 + + if len(call.Arguments) < 2 { + fromIndex = length - 1 + } else { + fromIndex = call.Argument(1).ToInteger() + if fromIndex >= 0 { + fromIndex = min(fromIndex, length-1) + } else { + fromIndex += length + } + } + + searchElement := call.Argument(0) + + if arr := r.checkStdArrayObj(o); arr != nil { + vals := arr.values + for k := fromIndex; k >= 0; k-- { + if v := vals[k]; 
v != nil && searchElement.StrictEquals(v) { + return intToValue(k) + } + } + return intToValue(-1) + } + + for k := fromIndex; k >= 0; k-- { + idx := valueInt(k) + if o.self.hasPropertyIdx(idx) { + if val := o.self.getIdx(idx, nil); val != nil { + if searchElement.StrictEquals(val) { + return idx + } + } + } + } + + return intToValue(-1) +} + +func (r *Runtime) arrayproto_every(call FunctionCall) Value { + o := call.This.ToObject(r) + length := toLength(o.self.getStr("length", nil)) + callbackFn := r.toCallable(call.Argument(0)) + fc := FunctionCall{ + This: call.Argument(1), + Arguments: []Value{nil, nil, o}, + } + for k := int64(0); k < length; k++ { + idx := valueInt(k) + if val := o.self.getIdx(idx, nil); val != nil { + fc.Arguments[0] = val + fc.Arguments[1] = idx + if !callbackFn(fc).ToBoolean() { + return valueFalse + } + } + } + return valueTrue +} + +func (r *Runtime) arrayproto_some(call FunctionCall) Value { + o := call.This.ToObject(r) + length := toLength(o.self.getStr("length", nil)) + callbackFn := r.toCallable(call.Argument(0)) + fc := FunctionCall{ + This: call.Argument(1), + Arguments: []Value{nil, nil, o}, + } + for k := int64(0); k < length; k++ { + idx := valueInt(k) + if val := o.self.getIdx(idx, nil); val != nil { + fc.Arguments[0] = val + fc.Arguments[1] = idx + if callbackFn(fc).ToBoolean() { + return valueTrue + } + } + } + return valueFalse +} + +func (r *Runtime) arrayproto_forEach(call FunctionCall) Value { + o := call.This.ToObject(r) + length := toLength(o.self.getStr("length", nil)) + callbackFn := r.toCallable(call.Argument(0)) + fc := FunctionCall{ + This: call.Argument(1), + Arguments: []Value{nil, nil, o}, + } + for k := int64(0); k < length; k++ { + idx := valueInt(k) + if val := o.self.getIdx(idx, nil); val != nil { + fc.Arguments[0] = val + fc.Arguments[1] = idx + callbackFn(fc) + } + } + return _undefined +} + +func (r *Runtime) arrayproto_map(call FunctionCall) Value { + o := call.This.ToObject(r) + length := 
toLength(o.self.getStr("length", nil)) + callbackFn := r.toCallable(call.Argument(0)) + fc := FunctionCall{ + This: call.Argument(1), + Arguments: []Value{nil, nil, o}, + } + a := arraySpeciesCreate(o, length) + if _, stdSrc := o.self.(*arrayObject); stdSrc { + if arr, ok := a.self.(*arrayObject); ok { + values := make([]Value, length) + for k := int64(0); k < length; k++ { + idx := valueInt(k) + if val := o.self.getIdx(idx, nil); val != nil { + fc.Arguments[0] = val + fc.Arguments[1] = idx + values[k] = callbackFn(fc) + } + } + setArrayValues(arr, values) + return a + } + } + for k := int64(0); k < length; k++ { + idx := valueInt(k) + if val := o.self.getIdx(idx, nil); val != nil { + fc.Arguments[0] = val + fc.Arguments[1] = idx + createDataPropertyOrThrow(a, idx, callbackFn(fc)) + } + } + return a +} + +func (r *Runtime) arrayproto_filter(call FunctionCall) Value { + o := call.This.ToObject(r) + length := toLength(o.self.getStr("length", nil)) + callbackFn := call.Argument(0).ToObject(r) + if callbackFn, ok := callbackFn.self.assertCallable(); ok { + a := arraySpeciesCreate(o, 0) + fc := FunctionCall{ + This: call.Argument(1), + Arguments: []Value{nil, nil, o}, + } + if _, stdSrc := o.self.(*arrayObject); stdSrc { + if arr := r.checkStdArrayObj(a); arr != nil { + var values []Value + for k := int64(0); k < length; k++ { + idx := valueInt(k) + if val := o.self.getIdx(idx, nil); val != nil { + fc.Arguments[0] = val + fc.Arguments[1] = idx + if callbackFn(fc).ToBoolean() { + values = append(values, val) + } + } + } + setArrayValues(arr, values) + return a + } + } + + to := int64(0) + for k := int64(0); k < length; k++ { + idx := valueInt(k) + if val := o.self.getIdx(idx, nil); val != nil { + fc.Arguments[0] = val + fc.Arguments[1] = idx + if callbackFn(fc).ToBoolean() { + createDataPropertyOrThrow(a, intToValue(to), val) + to++ + } + } + } + return a + } else { + r.typeErrorResult(true, "%s is not a function", call.Argument(0)) + } + panic("unreachable") +} + +func 
(r *Runtime) arrayproto_reduce(call FunctionCall) Value { + o := call.This.ToObject(r) + length := toLength(o.self.getStr("length", nil)) + callbackFn := call.Argument(0).ToObject(r) + if callbackFn, ok := callbackFn.self.assertCallable(); ok { + fc := FunctionCall{ + This: _undefined, + Arguments: []Value{nil, nil, nil, o}, + } + + var k int64 + + if len(call.Arguments) >= 2 { + fc.Arguments[0] = call.Argument(1) + } else { + for ; k < length; k++ { + idx := valueInt(k) + if val := o.self.getIdx(idx, nil); val != nil { + fc.Arguments[0] = val + break + } + } + if fc.Arguments[0] == nil { + r.typeErrorResult(true, "No initial value") + panic("unreachable") + } + k++ + } + + for ; k < length; k++ { + idx := valueInt(k) + if val := o.self.getIdx(idx, nil); val != nil { + fc.Arguments[1] = val + fc.Arguments[2] = idx + fc.Arguments[0] = callbackFn(fc) + } + } + return fc.Arguments[0] + } else { + r.typeErrorResult(true, "%s is not a function", call.Argument(0)) + } + panic("unreachable") +} + +func (r *Runtime) arrayproto_reduceRight(call FunctionCall) Value { + o := call.This.ToObject(r) + length := toLength(o.self.getStr("length", nil)) + callbackFn := call.Argument(0).ToObject(r) + if callbackFn, ok := callbackFn.self.assertCallable(); ok { + fc := FunctionCall{ + This: _undefined, + Arguments: []Value{nil, nil, nil, o}, + } + + k := length - 1 + + if len(call.Arguments) >= 2 { + fc.Arguments[0] = call.Argument(1) + } else { + for ; k >= 0; k-- { + idx := valueInt(k) + if val := o.self.getIdx(idx, nil); val != nil { + fc.Arguments[0] = val + break + } + } + if fc.Arguments[0] == nil { + r.typeErrorResult(true, "No initial value") + panic("unreachable") + } + k-- + } + + for ; k >= 0; k-- { + idx := valueInt(k) + if val := o.self.getIdx(idx, nil); val != nil { + fc.Arguments[1] = val + fc.Arguments[2] = idx + fc.Arguments[0] = callbackFn(fc) + } + } + return fc.Arguments[0] + } else { + r.typeErrorResult(true, "%s is not a function", call.Argument(0)) + } + 
panic("unreachable") +} + +func arrayproto_reverse_generic_step(o *Object, lower, upper int64) { + lowerP := valueInt(lower) + upperP := valueInt(upper) + lowerValue := o.self.getIdx(lowerP, nil) + upperValue := o.self.getIdx(upperP, nil) + if lowerValue != nil && upperValue != nil { + o.self.setOwnIdx(lowerP, upperValue, true) + o.self.setOwnIdx(upperP, lowerValue, true) + } else if lowerValue == nil && upperValue != nil { + o.self.setOwnIdx(lowerP, upperValue, true) + o.self.deleteIdx(upperP, true) + } else if lowerValue != nil && upperValue == nil { + o.self.deleteIdx(lowerP, true) + o.self.setOwnIdx(upperP, lowerValue, true) + } +} + +func (r *Runtime) arrayproto_reverse_generic(o *Object, start int64) { + l := toLength(o.self.getStr("length", nil)) + middle := l / 2 + for lower := start; lower != middle; lower++ { + arrayproto_reverse_generic_step(o, lower, l-lower-1) + } +} + +func (r *Runtime) arrayproto_reverse(call FunctionCall) Value { + o := call.This.ToObject(r) + if a := r.checkStdArrayObj(o); a != nil { + l := len(a.values) + middle := l / 2 + for lower := 0; lower != middle; lower++ { + upper := l - lower - 1 + a.values[lower], a.values[upper] = a.values[upper], a.values[lower] + } + //TODO: go arrays + } else { + r.arrayproto_reverse_generic(o, 0) + } + return o +} + +func (r *Runtime) arrayproto_shift(call FunctionCall) Value { + o := call.This.ToObject(r) + if a := r.checkStdArrayObj(o); a != nil { + if len(a.values) == 0 { + return _undefined + } + first := a.values[0] + copy(a.values, a.values[1:]) + a.values[len(a.values)-1] = nil + a.values = a.values[:len(a.values)-1] + a.length-- + return first + } + length := toLength(o.self.getStr("length", nil)) + if length == 0 { + o.self.setOwnStr("length", intToValue(0), true) + return _undefined + } + first := o.self.getIdx(valueInt(0), nil) + for i := int64(1); i < length; i++ { + idxFrom := valueInt(i) + idxTo := valueInt(i - 1) + if o.self.hasPropertyIdx(idxFrom) { + o.self.setOwnIdx(idxTo, 
nilSafe(o.self.getIdx(idxFrom, nil)), true) + } else { + o.self.deleteIdx(idxTo, true) + } + } + + lv := valueInt(length - 1) + o.self.deleteIdx(lv, true) + o.self.setOwnStr("length", lv, true) + + return first +} + +func (r *Runtime) arrayproto_values(call FunctionCall) Value { + return r.createArrayIterator(call.This.ToObject(r), iterationKindValue) +} + +func (r *Runtime) arrayproto_keys(call FunctionCall) Value { + return r.createArrayIterator(call.This.ToObject(r), iterationKindKey) +} + +func (r *Runtime) arrayproto_copyWithin(call FunctionCall) Value { + o := call.This.ToObject(r) + l := toLength(o.self.getStr("length", nil)) + var relEnd, dir int64 + to := relToIdx(call.Argument(0).ToInteger(), l) + from := relToIdx(call.Argument(1).ToInteger(), l) + if end := call.Argument(2); end != _undefined { + relEnd = end.ToInteger() + } else { + relEnd = l + } + final := relToIdx(relEnd, l) + count := min(final-from, l-to) + if arr := r.checkStdArrayObj(o); arr != nil { + if count > 0 { + copy(arr.values[to:to+count], arr.values[from:from+count]) + } + return o + } + if from < to && to < from+count { + dir = -1 + from = from + count - 1 + to = to + count - 1 + } else { + dir = 1 + } + for count > 0 { + if o.self.hasPropertyIdx(valueInt(from)) { + o.self.setOwnIdx(valueInt(to), o.self.getIdx(valueInt(from), nil), true) + } else { + o.self.deleteIdx(valueInt(to), true) + } + from += dir + to += dir + count-- + } + + return o +} + +func (r *Runtime) arrayproto_entries(call FunctionCall) Value { + return r.createArrayIterator(call.This.ToObject(r), iterationKindKeyValue) +} + +func (r *Runtime) arrayproto_fill(call FunctionCall) Value { + o := call.This.ToObject(r) + l := toLength(o.self.getStr("length", nil)) + k := relToIdx(call.Argument(1).ToInteger(), l) + var relEnd int64 + if endArg := call.Argument(2); endArg != _undefined { + relEnd = endArg.ToInteger() + } else { + relEnd = l + } + final := relToIdx(relEnd, l) + value := call.Argument(0) + if arr := 
r.checkStdArrayObj(o); arr != nil { + for ; k < final; k++ { + arr.values[k] = value + } + } else { + for ; k < final; k++ { + o.self.setOwnIdx(valueInt(k), value, true) + } + } + return o +} + +func (r *Runtime) arrayproto_find(call FunctionCall) Value { + o := call.This.ToObject(r) + l := toLength(o.self.getStr("length", nil)) + predicate := r.toCallable(call.Argument(0)) + fc := FunctionCall{ + This: call.Argument(1), + Arguments: []Value{nil, nil, o}, + } + for k := int64(0); k < l; k++ { + idx := valueInt(k) + kValue := o.self.getIdx(idx, nil) + fc.Arguments[0], fc.Arguments[1] = kValue, idx + if predicate(fc).ToBoolean() { + return kValue + } + } + + return _undefined +} + +func (r *Runtime) arrayproto_findIndex(call FunctionCall) Value { + o := call.This.ToObject(r) + l := toLength(o.self.getStr("length", nil)) + predicate := r.toCallable(call.Argument(0)) + fc := FunctionCall{ + This: call.Argument(1), + Arguments: []Value{nil, nil, o}, + } + for k := int64(0); k < l; k++ { + idx := valueInt(k) + kValue := o.self.getIdx(idx, nil) + fc.Arguments[0], fc.Arguments[1] = kValue, idx + if predicate(fc).ToBoolean() { + return idx + } + } + + return intToValue(-1) +} + +func (r *Runtime) arrayproto_flat(call FunctionCall) Value { + o := call.This.ToObject(r) + l := toLength(o.self.getStr("length", nil)) + depthNum := int64(1) + if len(call.Arguments) > 0 { + depthNum = call.Argument(0).ToInteger() + } + a := arraySpeciesCreate(o, 0) + r.flattenIntoArray(a, o, l, 0, depthNum, nil, nil) + return a +} + +func (r *Runtime) flattenIntoArray(target, source *Object, sourceLen, start, depth int64, mapperFunction func(FunctionCall) Value, thisArg Value) int64 { + targetIndex, sourceIndex := start, int64(0) + for sourceIndex < sourceLen { + p := intToValue(sourceIndex) + if source.hasProperty(p.toString()) { + element := source.get(p, source) + if mapperFunction != nil { + element = mapperFunction(FunctionCall{ + This: thisArg, + Arguments: []Value{element, p, source}, + }) 
+ } + var elementArray *Object + if depth > 0 { + if elementObj, ok := element.(*Object); ok && isArray(elementObj) { + elementArray = elementObj + } + } + if elementArray != nil { + elementLen := toLength(elementArray.self.getStr("length", nil)) + targetIndex = r.flattenIntoArray(target, elementArray, elementLen, targetIndex, depth-1, nil, nil) + } else { + if targetIndex >= maxInt-1 { + panic(r.NewTypeError("Invalid array length")) + } + createDataPropertyOrThrow(target, intToValue(targetIndex), element) + targetIndex++ + } + } + sourceIndex++ + } + return targetIndex +} + +func (r *Runtime) arrayproto_flatMap(call FunctionCall) Value { + o := call.This.ToObject(r) + l := toLength(o.self.getStr("length", nil)) + callbackFn := r.toCallable(call.Argument(0)) + thisArg := Undefined() + if len(call.Arguments) > 1 { + thisArg = call.Argument(1) + } + a := arraySpeciesCreate(o, 0) + r.flattenIntoArray(a, o, l, 0, 1, callbackFn, thisArg) + return a +} + +func (r *Runtime) checkStdArrayObj(obj *Object) *arrayObject { + if arr, ok := obj.self.(*arrayObject); ok && + arr.propValueCount == 0 && + arr.length == uint32(len(arr.values)) && + uint32(arr.objCount) == arr.length { + + return arr + } + + return nil +} + +func (r *Runtime) checkStdArray(v Value) *arrayObject { + if obj, ok := v.(*Object); ok { + return r.checkStdArrayObj(obj) + } + + return nil +} + +func (r *Runtime) checkStdArrayIter(v Value) *arrayObject { + if arr := r.checkStdArray(v); arr != nil && + arr.getSym(SymIterator, nil) == r.global.arrayValues { + + return arr + } + + return nil +} + +func (r *Runtime) array_from(call FunctionCall) Value { + var mapFn func(FunctionCall) Value + if mapFnArg := call.Argument(1); mapFnArg != _undefined { + if mapFnObj, ok := mapFnArg.(*Object); ok { + if fn, ok := mapFnObj.self.assertCallable(); ok { + mapFn = fn + } + } + if mapFn == nil { + panic(r.NewTypeError("%s is not a function", mapFnArg)) + } + } + t := call.Argument(2) + items := call.Argument(0) + if mapFn == 
nil && call.This == r.global.Array { // mapFn may mutate the array + if arr := r.checkStdArrayIter(items); arr != nil { + items := make([]Value, len(arr.values)) + copy(items, arr.values) + return r.newArrayValues(items) + } + } + + var ctor func(args []Value, newTarget *Object) *Object + if call.This != r.global.Array { + if o, ok := call.This.(*Object); ok { + if c := o.self.assertConstructor(); c != nil { + ctor = c + } + } + } + var arr *Object + if usingIterator := toMethod(r.getV(items, SymIterator)); usingIterator != nil { + if ctor != nil { + arr = ctor([]Value{}, nil) + } else { + arr = r.newArrayValues(nil) + } + iter := r.getIterator(items, usingIterator) + if mapFn == nil { + if a := r.checkStdArrayObj(arr); a != nil { + var values []Value + r.iterate(iter, func(val Value) { + values = append(values, val) + }) + setArrayValues(a, values) + return arr + } + } + k := int64(0) + r.iterate(iter, func(val Value) { + if mapFn != nil { + val = mapFn(FunctionCall{This: t, Arguments: []Value{val, intToValue(k)}}) + } + createDataPropertyOrThrow(arr, intToValue(k), val) + k++ + }) + arr.self.setOwnStr("length", intToValue(k), true) + } else { + arrayLike := items.ToObject(r) + l := toLength(arrayLike.self.getStr("length", nil)) + if ctor != nil { + arr = ctor([]Value{intToValue(l)}, nil) + } else { + arr = r.newArrayValues(nil) + } + if mapFn == nil { + if a := r.checkStdArrayObj(arr); a != nil { + values := make([]Value, l) + for k := int64(0); k < l; k++ { + values[k] = nilSafe(arrayLike.self.getIdx(valueInt(k), nil)) + } + setArrayValues(a, values) + return arr + } + } + for k := int64(0); k < l; k++ { + idx := valueInt(k) + item := arrayLike.self.getIdx(idx, nil) + if mapFn != nil { + item = mapFn(FunctionCall{This: t, Arguments: []Value{item, idx}}) + } else { + item = nilSafe(item) + } + createDataPropertyOrThrow(arr, idx, item) + } + arr.self.setOwnStr("length", intToValue(l), true) + } + + return arr +} + +func (r *Runtime) array_isArray(call 
FunctionCall) Value { + if o, ok := call.Argument(0).(*Object); ok { + if isArray(o) { + return valueTrue + } + } + return valueFalse +} + +func (r *Runtime) array_of(call FunctionCall) Value { + var ctor func(args []Value, newTarget *Object) *Object + if call.This != r.global.Array { + if o, ok := call.This.(*Object); ok { + if c := o.self.assertConstructor(); c != nil { + ctor = c + } + } + } + if ctor == nil { + values := make([]Value, len(call.Arguments)) + copy(values, call.Arguments) + return r.newArrayValues(values) + } + l := intToValue(int64(len(call.Arguments))) + arr := ctor([]Value{l}, nil) + for i, val := range call.Arguments { + createDataPropertyOrThrow(arr, intToValue(int64(i)), val) + } + arr.self.setOwnStr("length", l, true) + return arr +} + +func (r *Runtime) arrayIterProto_next(call FunctionCall) Value { + thisObj := r.toObject(call.This) + if iter, ok := thisObj.self.(*arrayIterObject); ok { + return iter.next() + } + panic(r.NewTypeError("Method Array Iterator.prototype.next called on incompatible receiver %s", thisObj.String())) +} + +func (r *Runtime) createArrayProto(val *Object) objectImpl { + o := &arrayObject{ + baseObject: baseObject{ + class: classArray, + val: val, + extensible: true, + prototype: r.global.ObjectPrototype, + }, + } + o.init() + + o._putProp("constructor", r.global.Array, true, false, true) + o._putProp("concat", r.newNativeFunc(r.arrayproto_concat, nil, "concat", nil, 1), true, false, true) + o._putProp("copyWithin", r.newNativeFunc(r.arrayproto_copyWithin, nil, "copyWithin", nil, 2), true, false, true) + o._putProp("entries", r.newNativeFunc(r.arrayproto_entries, nil, "entries", nil, 0), true, false, true) + o._putProp("every", r.newNativeFunc(r.arrayproto_every, nil, "every", nil, 1), true, false, true) + o._putProp("fill", r.newNativeFunc(r.arrayproto_fill, nil, "fill", nil, 1), true, false, true) + o._putProp("filter", r.newNativeFunc(r.arrayproto_filter, nil, "filter", nil, 1), true, false, true) + 
o._putProp("find", r.newNativeFunc(r.arrayproto_find, nil, "find", nil, 1), true, false, true) + o._putProp("findIndex", r.newNativeFunc(r.arrayproto_findIndex, nil, "findIndex", nil, 1), true, false, true) + o._putProp("flat", r.newNativeFunc(r.arrayproto_flat, nil, "flat", nil, 0), true, false, true) + o._putProp("flatMap", r.newNativeFunc(r.arrayproto_flatMap, nil, "flatMap", nil, 1), true, false, true) + o._putProp("forEach", r.newNativeFunc(r.arrayproto_forEach, nil, "forEach", nil, 1), true, false, true) + o._putProp("includes", r.newNativeFunc(r.arrayproto_includes, nil, "includes", nil, 1), true, false, true) + o._putProp("indexOf", r.newNativeFunc(r.arrayproto_indexOf, nil, "indexOf", nil, 1), true, false, true) + o._putProp("join", r.newNativeFunc(r.arrayproto_join, nil, "join", nil, 1), true, false, true) + o._putProp("keys", r.newNativeFunc(r.arrayproto_keys, nil, "keys", nil, 0), true, false, true) + o._putProp("lastIndexOf", r.newNativeFunc(r.arrayproto_lastIndexOf, nil, "lastIndexOf", nil, 1), true, false, true) + o._putProp("map", r.newNativeFunc(r.arrayproto_map, nil, "map", nil, 1), true, false, true) + o._putProp("pop", r.newNativeFunc(r.arrayproto_pop, nil, "pop", nil, 0), true, false, true) + o._putProp("push", r.newNativeFunc(r.arrayproto_push, nil, "push", nil, 1), true, false, true) + o._putProp("reduce", r.newNativeFunc(r.arrayproto_reduce, nil, "reduce", nil, 1), true, false, true) + o._putProp("reduceRight", r.newNativeFunc(r.arrayproto_reduceRight, nil, "reduceRight", nil, 1), true, false, true) + o._putProp("reverse", r.newNativeFunc(r.arrayproto_reverse, nil, "reverse", nil, 0), true, false, true) + o._putProp("shift", r.newNativeFunc(r.arrayproto_shift, nil, "shift", nil, 0), true, false, true) + o._putProp("slice", r.newNativeFunc(r.arrayproto_slice, nil, "slice", nil, 2), true, false, true) + o._putProp("some", r.newNativeFunc(r.arrayproto_some, nil, "some", nil, 1), true, false, true) + o._putProp("sort", 
r.newNativeFunc(r.arrayproto_sort, nil, "sort", nil, 1), true, false, true) + o._putProp("splice", r.newNativeFunc(r.arrayproto_splice, nil, "splice", nil, 2), true, false, true) + o._putProp("toLocaleString", r.newNativeFunc(r.arrayproto_toLocaleString, nil, "toLocaleString", nil, 0), true, false, true) + o._putProp("toString", r.global.arrayToString, true, false, true) + o._putProp("unshift", r.newNativeFunc(r.arrayproto_unshift, nil, "unshift", nil, 1), true, false, true) + o._putProp("values", r.global.arrayValues, true, false, true) + + o._putSym(SymIterator, valueProp(r.global.arrayValues, true, false, true)) + + bl := r.newBaseObject(nil, classObject) + bl.setOwnStr("copyWithin", valueTrue, true) + bl.setOwnStr("entries", valueTrue, true) + bl.setOwnStr("fill", valueTrue, true) + bl.setOwnStr("find", valueTrue, true) + bl.setOwnStr("findIndex", valueTrue, true) + bl.setOwnStr("flat", valueTrue, true) + bl.setOwnStr("flatMap", valueTrue, true) + bl.setOwnStr("includes", valueTrue, true) + bl.setOwnStr("keys", valueTrue, true) + bl.setOwnStr("values", valueTrue, true) + o._putSym(SymUnscopables, valueProp(bl.val, false, false, true)) + + return o +} + +func (r *Runtime) createArray(val *Object) objectImpl { + o := r.newNativeFuncConstructObj(val, r.builtin_newArray, "Array", r.global.ArrayPrototype, 1) + o._putProp("from", r.newNativeFunc(r.array_from, nil, "from", nil, 1), true, false, true) + o._putProp("isArray", r.newNativeFunc(r.array_isArray, nil, "isArray", nil, 1), true, false, true) + o._putProp("of", r.newNativeFunc(r.array_of, nil, "of", nil, 0), true, false, true) + o._putSym(SymSpecies, &valueProperty{ + getterFunc: r.newNativeFunc(r.returnThis, nil, "get [Symbol.species]", nil, 0), + accessor: true, + configurable: true, + }) + + return o +} + +func (r *Runtime) createArrayIterProto(val *Object) objectImpl { + o := newBaseObjectObj(val, r.global.IteratorPrototype, classObject) + + o._putProp("next", r.newNativeFunc(r.arrayIterProto_next, nil, 
"next", nil, 0), true, false, true) + o._putSym(SymToStringTag, valueProp(asciiString(classArrayIterator), false, false, true)) + + return o +} + +func (r *Runtime) initArray() { + r.global.arrayValues = r.newNativeFunc(r.arrayproto_values, nil, "values", nil, 0) + r.global.arrayToString = r.newNativeFunc(r.arrayproto_toString, nil, "toString", nil, 0) + + r.global.ArrayIteratorPrototype = r.newLazyObject(r.createArrayIterProto) + //r.global.ArrayPrototype = r.newArray(r.global.ObjectPrototype).val + //o := r.global.ArrayPrototype.self + r.global.ArrayPrototype = r.newLazyObject(r.createArrayProto) + + //r.global.Array = r.newNativeFuncConstruct(r.builtin_newArray, "Array", r.global.ArrayPrototype, 1) + //o = r.global.Array.self + //o._putProp("isArray", r.newNativeFunc(r.array_isArray, nil, "isArray", nil, 1), true, false, true) + r.global.Array = r.newLazyObject(r.createArray) + + r.addToGlobal("Array", r.global.Array) +} + +type sortable interface { + sortLen() int64 + sortGet(int64) Value + swap(int64, int64) +} + +type arraySortCtx struct { + obj sortable + compare func(FunctionCall) Value +} + +func (a *arraySortCtx) sortCompare(x, y Value) int { + if x == nil && y == nil { + return 0 + } + + if x == nil { + return 1 + } + + if y == nil { + return -1 + } + + if x == _undefined && y == _undefined { + return 0 + } + + if x == _undefined { + return 1 + } + + if y == _undefined { + return -1 + } + + if a.compare != nil { + f := a.compare(FunctionCall{ + This: _undefined, + Arguments: []Value{x, y}, + }).ToFloat() + if f > 0 { + return 1 + } + if f < 0 { + return -1 + } + if math.Signbit(f) { + return -1 + } + return 0 + } + return x.toString().compareTo(y.toString()) +} + +// sort.Interface + +func (a *arraySortCtx) Len() int { + return int(a.obj.sortLen()) +} + +func (a *arraySortCtx) Less(j, k int) bool { + return a.sortCompare(a.obj.sortGet(int64(j)), a.obj.sortGet(int64(k))) < 0 +} + +func (a *arraySortCtx) Swap(j, k int) { + a.obj.swap(int64(j), int64(k)) +} 
diff --git a/vendor/github.com/dop251/goja/builtin_boolean.go b/vendor/github.com/dop251/goja/builtin_boolean.go new file mode 100644 index 0000000000..df8d18cff8 --- /dev/null +++ b/vendor/github.com/dop251/goja/builtin_boolean.go @@ -0,0 +1,50 @@ +package goja + +func (r *Runtime) booleanproto_toString(call FunctionCall) Value { + var b bool + switch o := call.This.(type) { + case valueBool: + b = bool(o) + goto success + case *Object: + if p, ok := o.self.(*primitiveValueObject); ok { + if b1, ok := p.pValue.(valueBool); ok { + b = bool(b1) + goto success + } + } + } + r.typeErrorResult(true, "Method Boolean.prototype.toString is called on incompatible receiver") + +success: + if b { + return stringTrue + } + return stringFalse +} + +func (r *Runtime) booleanproto_valueOf(call FunctionCall) Value { + switch o := call.This.(type) { + case valueBool: + return o + case *Object: + if p, ok := o.self.(*primitiveValueObject); ok { + if b, ok := p.pValue.(valueBool); ok { + return b + } + } + } + + r.typeErrorResult(true, "Method Boolean.prototype.valueOf is called on incompatible receiver") + return nil +} + +func (r *Runtime) initBoolean() { + r.global.BooleanPrototype = r.newPrimitiveObject(valueFalse, r.global.ObjectPrototype, classBoolean) + o := r.global.BooleanPrototype.self + o._putProp("toString", r.newNativeFunc(r.booleanproto_toString, nil, "toString", nil, 0), true, false, true) + o._putProp("valueOf", r.newNativeFunc(r.booleanproto_valueOf, nil, "valueOf", nil, 0), true, false, true) + + r.global.Boolean = r.newNativeFunc(r.builtin_Boolean, r.builtin_newBoolean, "Boolean", r.global.BooleanPrototype, 1) + r.addToGlobal("Boolean", r.global.Boolean) +} diff --git a/vendor/github.com/dop251/goja/builtin_date.go b/vendor/github.com/dop251/goja/builtin_date.go new file mode 100644 index 0000000000..61ff1d64f8 --- /dev/null +++ b/vendor/github.com/dop251/goja/builtin_date.go @@ -0,0 +1,1014 @@ +package goja + +import ( + "fmt" + "math" + "time" +) + +func (r 
*Runtime) makeDate(args []Value, utc bool) (t time.Time, valid bool) { + switch { + case len(args) >= 2: + t = time.Date(1970, time.January, 1, 0, 0, 0, 0, time.Local) + t, valid = _dateSetYear(t, FunctionCall{Arguments: args}, 0, utc) + case len(args) == 0: + t = r.now() + valid = true + default: // one argument + if o, ok := args[0].(*Object); ok { + if d, ok := o.self.(*dateObject); ok { + t = d.time() + valid = true + } + } + if !valid { + pv := toPrimitive(args[0]) + if val, ok := pv.(valueString); ok { + return dateParse(val.String()) + } + pv = pv.ToNumber() + var n int64 + if i, ok := pv.(valueInt); ok { + n = int64(i) + } else if f, ok := pv.(valueFloat); ok { + f := float64(f) + if math.IsNaN(f) || math.IsInf(f, 0) { + return + } + if math.Abs(f) > maxTime { + return + } + n = int64(f) + } else { + n = pv.ToInteger() + } + t = timeFromMsec(n) + valid = true + } + } + if valid { + msec := t.Unix()*1000 + int64(t.Nanosecond()/1e6) + if msec < 0 { + msec = -msec + } + if msec > maxTime { + valid = false + } + } + return +} + +func (r *Runtime) newDateTime(args []Value, proto *Object) *Object { + t, isSet := r.makeDate(args, false) + return r.newDateObject(t, isSet, proto) +} + +func (r *Runtime) builtin_newDate(args []Value, proto *Object) *Object { + return r.newDateTime(args, proto) +} + +func (r *Runtime) builtin_date(FunctionCall) Value { + return asciiString(dateFormat(r.now())) +} + +func (r *Runtime) date_parse(call FunctionCall) Value { + t, set := dateParse(call.Argument(0).toString().String()) + if set { + return intToValue(timeToMsec(t)) + } + return _NaN +} + +func (r *Runtime) date_UTC(call FunctionCall) Value { + var args []Value + if len(call.Arguments) < 2 { + args = []Value{call.Argument(0), _positiveZero} + } else { + args = call.Arguments + } + t, valid := r.makeDate(args, true) + if !valid { + return _NaN + } + return intToValue(timeToMsec(t)) +} + +func (r *Runtime) date_now(FunctionCall) Value { + return intToValue(timeToMsec(r.now())) 
+} + +func (r *Runtime) dateproto_toString(call FunctionCall) Value { + obj := r.toObject(call.This) + if d, ok := obj.self.(*dateObject); ok { + if d.isSet() { + return asciiString(d.time().Format(dateTimeLayout)) + } else { + return stringInvalidDate + } + } + panic(r.NewTypeError("Method Date.prototype.toString is called on incompatible receiver")) +} + +func (r *Runtime) dateproto_toUTCString(call FunctionCall) Value { + obj := r.toObject(call.This) + if d, ok := obj.self.(*dateObject); ok { + if d.isSet() { + return asciiString(d.timeUTC().Format(utcDateTimeLayout)) + } else { + return stringInvalidDate + } + } + panic(r.NewTypeError("Method Date.prototype.toUTCString is called on incompatible receiver")) +} + +func (r *Runtime) dateproto_toISOString(call FunctionCall) Value { + obj := r.toObject(call.This) + if d, ok := obj.self.(*dateObject); ok { + if d.isSet() { + utc := d.timeUTC() + year := utc.Year() + if year >= -9999 && year <= 9999 { + return asciiString(utc.Format(isoDateTimeLayout)) + } + // extended year + return asciiString(fmt.Sprintf("%+06d-", year) + utc.Format(isoDateTimeLayout[5:])) + } else { + panic(r.newError(r.global.RangeError, "Invalid time value")) + } + } + panic(r.NewTypeError("Method Date.prototype.toISOString is called on incompatible receiver")) +} + +func (r *Runtime) dateproto_toJSON(call FunctionCall) Value { + obj := call.This.ToObject(r) + tv := obj.toPrimitiveNumber() + if f, ok := tv.(valueFloat); ok { + f := float64(f) + if math.IsNaN(f) || math.IsInf(f, 0) { + return _null + } + } + + if toISO, ok := obj.self.getStr("toISOString", nil).(*Object); ok { + if toISO, ok := toISO.self.assertCallable(); ok { + return toISO(FunctionCall{ + This: obj, + }) + } + } + + panic(r.NewTypeError("toISOString is not a function")) +} + +func (r *Runtime) dateproto_toPrimitive(call FunctionCall) Value { + o := r.toObject(call.This) + arg := call.Argument(0) + + if asciiString("string").StrictEquals(arg) || 
asciiString("default").StrictEquals(arg) { + return o.self.toPrimitiveString() + } + if asciiString("number").StrictEquals(arg) { + return o.self.toPrimitiveNumber() + } + panic(r.NewTypeError("Invalid hint: %s", arg)) +} + +func (r *Runtime) dateproto_toDateString(call FunctionCall) Value { + obj := r.toObject(call.This) + if d, ok := obj.self.(*dateObject); ok { + if d.isSet() { + return asciiString(d.time().Format(dateLayout)) + } else { + return stringInvalidDate + } + } + panic(r.NewTypeError("Method Date.prototype.toDateString is called on incompatible receiver")) +} + +func (r *Runtime) dateproto_toTimeString(call FunctionCall) Value { + obj := r.toObject(call.This) + if d, ok := obj.self.(*dateObject); ok { + if d.isSet() { + return asciiString(d.time().Format(timeLayout)) + } else { + return stringInvalidDate + } + } + panic(r.NewTypeError("Method Date.prototype.toTimeString is called on incompatible receiver")) +} + +func (r *Runtime) dateproto_toLocaleString(call FunctionCall) Value { + obj := r.toObject(call.This) + if d, ok := obj.self.(*dateObject); ok { + if d.isSet() { + return asciiString(d.time().Format(datetimeLayout_en_GB)) + } else { + return stringInvalidDate + } + } + panic(r.NewTypeError("Method Date.prototype.toLocaleString is called on incompatible receiver")) +} + +func (r *Runtime) dateproto_toLocaleDateString(call FunctionCall) Value { + obj := r.toObject(call.This) + if d, ok := obj.self.(*dateObject); ok { + if d.isSet() { + return asciiString(d.time().Format(dateLayout_en_GB)) + } else { + return stringInvalidDate + } + } + panic(r.NewTypeError("Method Date.prototype.toLocaleDateString is called on incompatible receiver")) +} + +func (r *Runtime) dateproto_toLocaleTimeString(call FunctionCall) Value { + obj := r.toObject(call.This) + if d, ok := obj.self.(*dateObject); ok { + if d.isSet() { + return asciiString(d.time().Format(timeLayout_en_GB)) + } else { + return stringInvalidDate + } + } + panic(r.NewTypeError("Method 
Date.prototype.toLocaleTimeString is called on incompatible receiver")) +} + +func (r *Runtime) dateproto_valueOf(call FunctionCall) Value { + obj := r.toObject(call.This) + if d, ok := obj.self.(*dateObject); ok { + if d.isSet() { + return intToValue(d.msec) + } else { + return _NaN + } + } + panic(r.NewTypeError("Method Date.prototype.valueOf is called on incompatible receiver")) +} + +func (r *Runtime) dateproto_getTime(call FunctionCall) Value { + obj := r.toObject(call.This) + if d, ok := obj.self.(*dateObject); ok { + if d.isSet() { + return intToValue(d.msec) + } else { + return _NaN + } + } + panic(r.NewTypeError("Method Date.prototype.getTime is called on incompatible receiver")) +} + +func (r *Runtime) dateproto_getFullYear(call FunctionCall) Value { + obj := r.toObject(call.This) + if d, ok := obj.self.(*dateObject); ok { + if d.isSet() { + return intToValue(int64(d.time().Year())) + } else { + return _NaN + } + } + panic(r.NewTypeError("Method Date.prototype.getFullYear is called on incompatible receiver")) +} + +func (r *Runtime) dateproto_getUTCFullYear(call FunctionCall) Value { + obj := r.toObject(call.This) + if d, ok := obj.self.(*dateObject); ok { + if d.isSet() { + return intToValue(int64(d.timeUTC().Year())) + } else { + return _NaN + } + } + panic(r.NewTypeError("Method Date.prototype.getUTCFullYear is called on incompatible receiver")) +} + +func (r *Runtime) dateproto_getMonth(call FunctionCall) Value { + obj := r.toObject(call.This) + if d, ok := obj.self.(*dateObject); ok { + if d.isSet() { + return intToValue(int64(d.time().Month()) - 1) + } else { + return _NaN + } + } + panic(r.NewTypeError("Method Date.prototype.getMonth is called on incompatible receiver")) +} + +func (r *Runtime) dateproto_getUTCMonth(call FunctionCall) Value { + obj := r.toObject(call.This) + if d, ok := obj.self.(*dateObject); ok { + if d.isSet() { + return intToValue(int64(d.timeUTC().Month()) - 1) + } else { + return _NaN + } + } + panic(r.NewTypeError("Method 
Date.prototype.getUTCMonth is called on incompatible receiver")) +} + +func (r *Runtime) dateproto_getHours(call FunctionCall) Value { + obj := r.toObject(call.This) + if d, ok := obj.self.(*dateObject); ok { + if d.isSet() { + return intToValue(int64(d.time().Hour())) + } else { + return _NaN + } + } + panic(r.NewTypeError("Method Date.prototype.getHours is called on incompatible receiver")) +} + +func (r *Runtime) dateproto_getUTCHours(call FunctionCall) Value { + obj := r.toObject(call.This) + if d, ok := obj.self.(*dateObject); ok { + if d.isSet() { + return intToValue(int64(d.timeUTC().Hour())) + } else { + return _NaN + } + } + panic(r.NewTypeError("Method Date.prototype.getUTCHours is called on incompatible receiver")) +} + +func (r *Runtime) dateproto_getDate(call FunctionCall) Value { + obj := r.toObject(call.This) + if d, ok := obj.self.(*dateObject); ok { + if d.isSet() { + return intToValue(int64(d.time().Day())) + } else { + return _NaN + } + } + panic(r.NewTypeError("Method Date.prototype.getDate is called on incompatible receiver")) +} + +func (r *Runtime) dateproto_getUTCDate(call FunctionCall) Value { + obj := r.toObject(call.This) + if d, ok := obj.self.(*dateObject); ok { + if d.isSet() { + return intToValue(int64(d.timeUTC().Day())) + } else { + return _NaN + } + } + panic(r.NewTypeError("Method Date.prototype.getUTCDate is called on incompatible receiver")) +} + +func (r *Runtime) dateproto_getDay(call FunctionCall) Value { + obj := r.toObject(call.This) + if d, ok := obj.self.(*dateObject); ok { + if d.isSet() { + return intToValue(int64(d.time().Weekday())) + } else { + return _NaN + } + } + panic(r.NewTypeError("Method Date.prototype.getDay is called on incompatible receiver")) +} + +func (r *Runtime) dateproto_getUTCDay(call FunctionCall) Value { + obj := r.toObject(call.This) + if d, ok := obj.self.(*dateObject); ok { + if d.isSet() { + return intToValue(int64(d.timeUTC().Weekday())) + } else { + return _NaN + } + } + 
panic(r.NewTypeError("Method Date.prototype.getUTCDay is called on incompatible receiver")) +} + +func (r *Runtime) dateproto_getMinutes(call FunctionCall) Value { + obj := r.toObject(call.This) + if d, ok := obj.self.(*dateObject); ok { + if d.isSet() { + return intToValue(int64(d.time().Minute())) + } else { + return _NaN + } + } + panic(r.NewTypeError("Method Date.prototype.getMinutes is called on incompatible receiver")) +} + +func (r *Runtime) dateproto_getUTCMinutes(call FunctionCall) Value { + obj := r.toObject(call.This) + if d, ok := obj.self.(*dateObject); ok { + if d.isSet() { + return intToValue(int64(d.timeUTC().Minute())) + } else { + return _NaN + } + } + panic(r.NewTypeError("Method Date.prototype.getUTCMinutes is called on incompatible receiver")) +} + +func (r *Runtime) dateproto_getSeconds(call FunctionCall) Value { + obj := r.toObject(call.This) + if d, ok := obj.self.(*dateObject); ok { + if d.isSet() { + return intToValue(int64(d.time().Second())) + } else { + return _NaN + } + } + panic(r.NewTypeError("Method Date.prototype.getSeconds is called on incompatible receiver")) +} + +func (r *Runtime) dateproto_getUTCSeconds(call FunctionCall) Value { + obj := r.toObject(call.This) + if d, ok := obj.self.(*dateObject); ok { + if d.isSet() { + return intToValue(int64(d.timeUTC().Second())) + } else { + return _NaN + } + } + panic(r.NewTypeError("Method Date.prototype.getUTCSeconds is called on incompatible receiver")) +} + +func (r *Runtime) dateproto_getMilliseconds(call FunctionCall) Value { + obj := r.toObject(call.This) + if d, ok := obj.self.(*dateObject); ok { + if d.isSet() { + return intToValue(int64(d.time().Nanosecond() / 1e6)) + } else { + return _NaN + } + } + panic(r.NewTypeError("Method Date.prototype.getMilliseconds is called on incompatible receiver")) +} + +func (r *Runtime) dateproto_getUTCMilliseconds(call FunctionCall) Value { + obj := r.toObject(call.This) + if d, ok := obj.self.(*dateObject); ok { + if d.isSet() { + return 
intToValue(int64(d.timeUTC().Nanosecond() / 1e6)) + } else { + return _NaN + } + } + panic(r.NewTypeError("Method Date.prototype.getUTCMilliseconds is called on incompatible receiver")) +} + +func (r *Runtime) dateproto_getTimezoneOffset(call FunctionCall) Value { + obj := r.toObject(call.This) + if d, ok := obj.self.(*dateObject); ok { + if d.isSet() { + _, offset := d.time().Zone() + return floatToValue(float64(-offset) / 60) + } else { + return _NaN + } + } + panic(r.NewTypeError("Method Date.prototype.getTimezoneOffset is called on incompatible receiver")) +} + +func (r *Runtime) dateproto_setTime(call FunctionCall) Value { + obj := r.toObject(call.This) + if d, ok := obj.self.(*dateObject); ok { + n := call.Argument(0).ToNumber() + if IsNaN(n) { + d.unset() + return _NaN + } + return d.setTimeMs(n.ToInteger()) + } + panic(r.NewTypeError("Method Date.prototype.setTime is called on incompatible receiver")) +} + +// _norm returns nhi, nlo such that +// hi * base + lo == nhi * base + nlo +// 0 <= nlo < base +func _norm(hi, lo, base int64) (nhi, nlo int64, ok bool) { + if lo < 0 { + if hi == math.MinInt64 && lo <= -base { + // underflow + ok = false + return + } + n := (-lo-1)/base + 1 + hi -= n + lo += n * base + } + if lo >= base { + if hi == math.MaxInt64 { + // overflow + ok = false + return + } + n := lo / base + hi += n + lo -= n * base + } + return hi, lo, true +} + +func mkTime(year, m, day, hour, min, sec, nsec int64, loc *time.Location) (t time.Time, ok bool) { + year, m, ok = _norm(year, m, 12) + if !ok { + return + } + + // Normalise nsec, sec, min, hour, overflowing into day. 
+ sec, nsec, ok = _norm(sec, nsec, 1e9) + if !ok { + return + } + min, sec, ok = _norm(min, sec, 60) + if !ok { + return + } + hour, min, ok = _norm(hour, min, 60) + if !ok { + return + } + day, hour, ok = _norm(day, hour, 24) + if !ok { + return + } + if year > math.MaxInt32 || year < math.MinInt32 || + day > math.MaxInt32 || day < math.MinInt32 || + m >= math.MaxInt32 || m < math.MinInt32-1 { + return time.Time{}, false + } + month := time.Month(m) + 1 + return time.Date(int(year), month, int(day), int(hour), int(min), int(sec), int(nsec), loc), true +} + +func _intArg(call FunctionCall, argNum int) (int64, bool) { + n := call.Argument(argNum).ToNumber() + if IsNaN(n) { + return 0, false + } + return n.ToInteger(), true +} + +func _dateSetYear(t time.Time, call FunctionCall, argNum int, utc bool) (time.Time, bool) { + var year int64 + if argNum == 0 || argNum > 0 && argNum < len(call.Arguments) { + var ok bool + year, ok = _intArg(call, argNum) + if !ok { + return time.Time{}, false + } + if year >= 0 && year <= 99 { + year += 1900 + } + } else { + year = int64(t.Year()) + } + + return _dateSetMonth(year, t, call, argNum+1, utc) +} + +func _dateSetFullYear(t time.Time, call FunctionCall, argNum int, utc bool) (time.Time, bool) { + var year int64 + if argNum == 0 || argNum > 0 && argNum < len(call.Arguments) { + var ok bool + year, ok = _intArg(call, argNum) + if !ok { + return time.Time{}, false + } + } else { + year = int64(t.Year()) + } + return _dateSetMonth(year, t, call, argNum+1, utc) +} + +func _dateSetMonth(year int64, t time.Time, call FunctionCall, argNum int, utc bool) (time.Time, bool) { + var mon int64 + if argNum == 0 || argNum > 0 && argNum < len(call.Arguments) { + var ok bool + mon, ok = _intArg(call, argNum) + if !ok { + return time.Time{}, false + } + } else { + mon = int64(t.Month()) - 1 + } + + return _dateSetDay(year, mon, t, call, argNum+1, utc) +} + +func _dateSetDay(year, mon int64, t time.Time, call FunctionCall, argNum int, utc bool) 
(time.Time, bool) { + var day int64 + if argNum == 0 || argNum > 0 && argNum < len(call.Arguments) { + var ok bool + day, ok = _intArg(call, argNum) + if !ok { + return time.Time{}, false + } + } else { + day = int64(t.Day()) + } + + return _dateSetHours(year, mon, day, t, call, argNum+1, utc) +} + +func _dateSetHours(year, mon, day int64, t time.Time, call FunctionCall, argNum int, utc bool) (time.Time, bool) { + var hours int64 + if argNum == 0 || argNum > 0 && argNum < len(call.Arguments) { + var ok bool + hours, ok = _intArg(call, argNum) + if !ok { + return time.Time{}, false + } + } else { + hours = int64(t.Hour()) + } + return _dateSetMinutes(year, mon, day, hours, t, call, argNum+1, utc) +} + +func _dateSetMinutes(year, mon, day, hours int64, t time.Time, call FunctionCall, argNum int, utc bool) (time.Time, bool) { + var min int64 + if argNum == 0 || argNum > 0 && argNum < len(call.Arguments) { + var ok bool + min, ok = _intArg(call, argNum) + if !ok { + return time.Time{}, false + } + } else { + min = int64(t.Minute()) + } + return _dateSetSeconds(year, mon, day, hours, min, t, call, argNum+1, utc) +} + +func _dateSetSeconds(year, mon, day, hours, min int64, t time.Time, call FunctionCall, argNum int, utc bool) (time.Time, bool) { + var sec int64 + if argNum == 0 || argNum > 0 && argNum < len(call.Arguments) { + var ok bool + sec, ok = _intArg(call, argNum) + if !ok { + return time.Time{}, false + } + } else { + sec = int64(t.Second()) + } + return _dateSetMilliseconds(year, mon, day, hours, min, sec, t, call, argNum+1, utc) +} + +func _dateSetMilliseconds(year, mon, day, hours, min, sec int64, t time.Time, call FunctionCall, argNum int, utc bool) (time.Time, bool) { + var msec int64 + if argNum == 0 || argNum > 0 && argNum < len(call.Arguments) { + var ok bool + msec, ok = _intArg(call, argNum) + if !ok { + return time.Time{}, false + } + } else { + msec = int64(t.Nanosecond() / 1e6) + } + var ok bool + sec, msec, ok = _norm(sec, msec, 1e3) + if !ok { + 
return time.Time{}, false + } + + var loc *time.Location + if utc { + loc = time.UTC + } else { + loc = time.Local + } + r, ok := mkTime(year, mon, day, hours, min, sec, msec*1e6, loc) + if !ok { + return time.Time{}, false + } + if utc { + return r.In(time.Local), true + } + return r, true +} + +func (r *Runtime) dateproto_setMilliseconds(call FunctionCall) Value { + obj := r.toObject(call.This) + if d, ok := obj.self.(*dateObject); ok { + if d.isSet() { + n := call.Argument(0).ToNumber() + if IsNaN(n) { + d.unset() + return _NaN + } + msec := n.ToInteger() + sec := d.msec / 1e3 + var ok bool + sec, msec, ok = _norm(sec, msec, 1e3) + if !ok { + d.unset() + return _NaN + } + return d.setTimeMs(sec*1e3 + msec) + } else { + return _NaN + } + } + panic(r.NewTypeError("Method Date.prototype.setMilliseconds is called on incompatible receiver")) +} + +func (r *Runtime) dateproto_setUTCMilliseconds(call FunctionCall) Value { + obj := r.toObject(call.This) + if d, ok := obj.self.(*dateObject); ok { + if d.isSet() { + n := call.Argument(0).ToNumber() + if IsNaN(n) { + d.unset() + return _NaN + } + msec := n.ToInteger() + sec := d.msec / 1e3 + var ok bool + sec, msec, ok = _norm(sec, msec, 1e3) + if !ok { + d.unset() + return _NaN + } + return d.setTimeMs(sec*1e3 + msec) + } else { + return _NaN + } + } + panic(r.NewTypeError("Method Date.prototype.setUTCMilliseconds is called on incompatible receiver")) +} + +func (r *Runtime) dateproto_setSeconds(call FunctionCall) Value { + obj := r.toObject(call.This) + if d, ok := obj.self.(*dateObject); ok { + if d.isSet() { + t, ok := _dateSetFullYear(d.time(), call, -5, false) + if !ok { + d.unset() + return _NaN + } + return d.setTimeMs(timeToMsec(t)) + } else { + return _NaN + } + } + panic(r.NewTypeError("Method Date.prototype.setSeconds is called on incompatible receiver")) +} + +func (r *Runtime) dateproto_setUTCSeconds(call FunctionCall) Value { + obj := r.toObject(call.This) + if d, ok := obj.self.(*dateObject); ok { + if 
d.isSet() { + t, ok := _dateSetFullYear(d.timeUTC(), call, -5, true) + if !ok { + d.unset() + return _NaN + } + return d.setTimeMs(timeToMsec(t)) + } else { + return _NaN + } + } + panic(r.NewTypeError("Method Date.prototype.setUTCSeconds is called on incompatible receiver")) +} + +func (r *Runtime) dateproto_setMinutes(call FunctionCall) Value { + obj := r.toObject(call.This) + if d, ok := obj.self.(*dateObject); ok { + if d.isSet() { + t, ok := _dateSetFullYear(d.time(), call, -4, false) + if !ok { + d.unset() + return _NaN + } + return d.setTimeMs(timeToMsec(t)) + } else { + return _NaN + } + } + panic(r.NewTypeError("Method Date.prototype.setMinutes is called on incompatible receiver")) +} + +func (r *Runtime) dateproto_setUTCMinutes(call FunctionCall) Value { + obj := r.toObject(call.This) + if d, ok := obj.self.(*dateObject); ok { + if d.isSet() { + t, ok := _dateSetFullYear(d.timeUTC(), call, -4, true) + if !ok { + d.unset() + return _NaN + } + return d.setTimeMs(timeToMsec(t)) + } else { + return _NaN + } + } + panic(r.NewTypeError("Method Date.prototype.setUTCMinutes is called on incompatible receiver")) +} + +func (r *Runtime) dateproto_setHours(call FunctionCall) Value { + obj := r.toObject(call.This) + if d, ok := obj.self.(*dateObject); ok { + if d.isSet() { + t, ok := _dateSetFullYear(d.time(), call, -3, false) + if !ok { + d.unset() + return _NaN + } + return d.setTimeMs(timeToMsec(t)) + } else { + return _NaN + } + } + panic(r.NewTypeError("Method Date.prototype.setHours is called on incompatible receiver")) +} + +func (r *Runtime) dateproto_setUTCHours(call FunctionCall) Value { + obj := r.toObject(call.This) + if d, ok := obj.self.(*dateObject); ok { + if d.isSet() { + t, ok := _dateSetFullYear(d.timeUTC(), call, -3, true) + if !ok { + d.unset() + return _NaN + } + return d.setTimeMs(timeToMsec(t)) + } else { + return _NaN + } + } + panic(r.NewTypeError("Method Date.prototype.setUTCHours is called on incompatible receiver")) +} + +func (r 
*Runtime) dateproto_setDate(call FunctionCall) Value { + obj := r.toObject(call.This) + if d, ok := obj.self.(*dateObject); ok { + if d.isSet() { + t, ok := _dateSetFullYear(d.time(), limitCallArgs(call, 1), -2, false) + if !ok { + d.unset() + return _NaN + } + return d.setTimeMs(timeToMsec(t)) + } else { + return _NaN + } + } + panic(r.NewTypeError("Method Date.prototype.setDate is called on incompatible receiver")) +} + +func (r *Runtime) dateproto_setUTCDate(call FunctionCall) Value { + obj := r.toObject(call.This) + if d, ok := obj.self.(*dateObject); ok { + if d.isSet() { + t, ok := _dateSetFullYear(d.timeUTC(), limitCallArgs(call, 1), -2, true) + if !ok { + d.unset() + return _NaN + } + return d.setTimeMs(timeToMsec(t)) + } else { + return _NaN + } + } + panic(r.NewTypeError("Method Date.prototype.setUTCDate is called on incompatible receiver")) +} + +func (r *Runtime) dateproto_setMonth(call FunctionCall) Value { + obj := r.toObject(call.This) + if d, ok := obj.self.(*dateObject); ok { + if d.isSet() { + t, ok := _dateSetFullYear(d.time(), limitCallArgs(call, 2), -1, false) + if !ok { + d.unset() + return _NaN + } + return d.setTimeMs(timeToMsec(t)) + } else { + return _NaN + } + } + panic(r.NewTypeError("Method Date.prototype.setMonth is called on incompatible receiver")) +} + +func (r *Runtime) dateproto_setUTCMonth(call FunctionCall) Value { + obj := r.toObject(call.This) + if d, ok := obj.self.(*dateObject); ok { + if d.isSet() { + t, ok := _dateSetFullYear(d.timeUTC(), limitCallArgs(call, 2), -1, true) + if !ok { + d.unset() + return _NaN + } + return d.setTimeMs(timeToMsec(t)) + } else { + return _NaN + } + } + panic(r.NewTypeError("Method Date.prototype.setUTCMonth is called on incompatible receiver")) +} + +func (r *Runtime) dateproto_setFullYear(call FunctionCall) Value { + obj := r.toObject(call.This) + if d, ok := obj.self.(*dateObject); ok { + var t time.Time + if d.isSet() { + t = d.time() + } else { + t = time.Date(1970, time.January, 1, 0, 0, 
0, 0, time.Local) + } + t, ok := _dateSetFullYear(t, limitCallArgs(call, 3), 0, false) + if !ok { + d.unset() + return _NaN + } + return d.setTimeMs(timeToMsec(t)) + } + panic(r.NewTypeError("Method Date.prototype.setFullYear is called on incompatible receiver")) +} + +func (r *Runtime) dateproto_setUTCFullYear(call FunctionCall) Value { + obj := r.toObject(call.This) + if d, ok := obj.self.(*dateObject); ok { + var t time.Time + if d.isSet() { + t = d.timeUTC() + } else { + t = time.Date(1970, time.January, 1, 0, 0, 0, 0, time.UTC) + } + t, ok := _dateSetFullYear(t, limitCallArgs(call, 3), 0, true) + if !ok { + d.unset() + return _NaN + } + return d.setTimeMs(timeToMsec(t)) + } + panic(r.NewTypeError("Method Date.prototype.setUTCFullYear is called on incompatible receiver")) +} + +func (r *Runtime) createDateProto(val *Object) objectImpl { + o := &baseObject{ + class: classObject, + val: val, + extensible: true, + prototype: r.global.ObjectPrototype, + } + o.init() + + o._putProp("constructor", r.global.Date, true, false, true) + o._putProp("toString", r.newNativeFunc(r.dateproto_toString, nil, "toString", nil, 0), true, false, true) + o._putProp("toDateString", r.newNativeFunc(r.dateproto_toDateString, nil, "toDateString", nil, 0), true, false, true) + o._putProp("toTimeString", r.newNativeFunc(r.dateproto_toTimeString, nil, "toTimeString", nil, 0), true, false, true) + o._putProp("toLocaleString", r.newNativeFunc(r.dateproto_toLocaleString, nil, "toLocaleString", nil, 0), true, false, true) + o._putProp("toLocaleDateString", r.newNativeFunc(r.dateproto_toLocaleDateString, nil, "toLocaleDateString", nil, 0), true, false, true) + o._putProp("toLocaleTimeString", r.newNativeFunc(r.dateproto_toLocaleTimeString, nil, "toLocaleTimeString", nil, 0), true, false, true) + o._putProp("valueOf", r.newNativeFunc(r.dateproto_valueOf, nil, "valueOf", nil, 0), true, false, true) + o._putProp("getTime", r.newNativeFunc(r.dateproto_getTime, nil, "getTime", nil, 0), true, false, 
true) + o._putProp("getFullYear", r.newNativeFunc(r.dateproto_getFullYear, nil, "getFullYear", nil, 0), true, false, true) + o._putProp("getUTCFullYear", r.newNativeFunc(r.dateproto_getUTCFullYear, nil, "getUTCFullYear", nil, 0), true, false, true) + o._putProp("getMonth", r.newNativeFunc(r.dateproto_getMonth, nil, "getMonth", nil, 0), true, false, true) + o._putProp("getUTCMonth", r.newNativeFunc(r.dateproto_getUTCMonth, nil, "getUTCMonth", nil, 0), true, false, true) + o._putProp("getDate", r.newNativeFunc(r.dateproto_getDate, nil, "getDate", nil, 0), true, false, true) + o._putProp("getUTCDate", r.newNativeFunc(r.dateproto_getUTCDate, nil, "getUTCDate", nil, 0), true, false, true) + o._putProp("getDay", r.newNativeFunc(r.dateproto_getDay, nil, "getDay", nil, 0), true, false, true) + o._putProp("getUTCDay", r.newNativeFunc(r.dateproto_getUTCDay, nil, "getUTCDay", nil, 0), true, false, true) + o._putProp("getHours", r.newNativeFunc(r.dateproto_getHours, nil, "getHours", nil, 0), true, false, true) + o._putProp("getUTCHours", r.newNativeFunc(r.dateproto_getUTCHours, nil, "getUTCHours", nil, 0), true, false, true) + o._putProp("getMinutes", r.newNativeFunc(r.dateproto_getMinutes, nil, "getMinutes", nil, 0), true, false, true) + o._putProp("getUTCMinutes", r.newNativeFunc(r.dateproto_getUTCMinutes, nil, "getUTCMinutes", nil, 0), true, false, true) + o._putProp("getSeconds", r.newNativeFunc(r.dateproto_getSeconds, nil, "getSeconds", nil, 0), true, false, true) + o._putProp("getUTCSeconds", r.newNativeFunc(r.dateproto_getUTCSeconds, nil, "getUTCSeconds", nil, 0), true, false, true) + o._putProp("getMilliseconds", r.newNativeFunc(r.dateproto_getMilliseconds, nil, "getMilliseconds", nil, 0), true, false, true) + o._putProp("getUTCMilliseconds", r.newNativeFunc(r.dateproto_getUTCMilliseconds, nil, "getUTCMilliseconds", nil, 0), true, false, true) + o._putProp("getTimezoneOffset", r.newNativeFunc(r.dateproto_getTimezoneOffset, nil, "getTimezoneOffset", nil, 0), true, 
false, true) + o._putProp("setTime", r.newNativeFunc(r.dateproto_setTime, nil, "setTime", nil, 1), true, false, true) + o._putProp("setMilliseconds", r.newNativeFunc(r.dateproto_setMilliseconds, nil, "setMilliseconds", nil, 1), true, false, true) + o._putProp("setUTCMilliseconds", r.newNativeFunc(r.dateproto_setUTCMilliseconds, nil, "setUTCMilliseconds", nil, 1), true, false, true) + o._putProp("setSeconds", r.newNativeFunc(r.dateproto_setSeconds, nil, "setSeconds", nil, 2), true, false, true) + o._putProp("setUTCSeconds", r.newNativeFunc(r.dateproto_setUTCSeconds, nil, "setUTCSeconds", nil, 2), true, false, true) + o._putProp("setMinutes", r.newNativeFunc(r.dateproto_setMinutes, nil, "setMinutes", nil, 3), true, false, true) + o._putProp("setUTCMinutes", r.newNativeFunc(r.dateproto_setUTCMinutes, nil, "setUTCMinutes", nil, 3), true, false, true) + o._putProp("setHours", r.newNativeFunc(r.dateproto_setHours, nil, "setHours", nil, 4), true, false, true) + o._putProp("setUTCHours", r.newNativeFunc(r.dateproto_setUTCHours, nil, "setUTCHours", nil, 4), true, false, true) + o._putProp("setDate", r.newNativeFunc(r.dateproto_setDate, nil, "setDate", nil, 1), true, false, true) + o._putProp("setUTCDate", r.newNativeFunc(r.dateproto_setUTCDate, nil, "setUTCDate", nil, 1), true, false, true) + o._putProp("setMonth", r.newNativeFunc(r.dateproto_setMonth, nil, "setMonth", nil, 2), true, false, true) + o._putProp("setUTCMonth", r.newNativeFunc(r.dateproto_setUTCMonth, nil, "setUTCMonth", nil, 2), true, false, true) + o._putProp("setFullYear", r.newNativeFunc(r.dateproto_setFullYear, nil, "setFullYear", nil, 3), true, false, true) + o._putProp("setUTCFullYear", r.newNativeFunc(r.dateproto_setUTCFullYear, nil, "setUTCFullYear", nil, 3), true, false, true) + o._putProp("toUTCString", r.newNativeFunc(r.dateproto_toUTCString, nil, "toUTCString", nil, 0), true, false, true) + o._putProp("toISOString", r.newNativeFunc(r.dateproto_toISOString, nil, "toISOString", nil, 0), true, false, 
true) + o._putProp("toJSON", r.newNativeFunc(r.dateproto_toJSON, nil, "toJSON", nil, 1), true, false, true) + + o._putSym(SymToPrimitive, valueProp(r.newNativeFunc(r.dateproto_toPrimitive, nil, "[Symbol.toPrimitive]", nil, 1), false, false, true)) + + return o +} + +func (r *Runtime) createDate(val *Object) objectImpl { + o := r.newNativeFuncObj(val, r.builtin_date, r.builtin_newDate, "Date", r.global.DatePrototype, 7) + + o._putProp("parse", r.newNativeFunc(r.date_parse, nil, "parse", nil, 1), true, false, true) + o._putProp("UTC", r.newNativeFunc(r.date_UTC, nil, "UTC", nil, 7), true, false, true) + o._putProp("now", r.newNativeFunc(r.date_now, nil, "now", nil, 0), true, false, true) + + return o +} + +func (r *Runtime) initDate() { + r.global.DatePrototype = r.newLazyObject(r.createDateProto) + + r.global.Date = r.newLazyObject(r.createDate) + r.addToGlobal("Date", r.global.Date) +} diff --git a/vendor/github.com/dop251/goja/builtin_error.go b/vendor/github.com/dop251/goja/builtin_error.go new file mode 100644 index 0000000000..5880b88dde --- /dev/null +++ b/vendor/github.com/dop251/goja/builtin_error.go @@ -0,0 +1,56 @@ +package goja + +func (r *Runtime) createErrorPrototype(name valueString) *Object { + o := r.newBaseObject(r.global.ErrorPrototype, classObject) + o._putProp("message", stringEmpty, true, false, true) + o._putProp("name", name, true, false, true) + return o.val +} + +func (r *Runtime) initErrors() { + r.global.ErrorPrototype = r.NewObject() + o := r.global.ErrorPrototype.self + o._putProp("message", stringEmpty, true, false, true) + o._putProp("name", stringError, true, false, true) + o._putProp("toString", r.newNativeFunc(r.error_toString, nil, "toString", nil, 0), true, false, true) + + r.global.Error = r.newNativeFuncConstruct(r.builtin_Error, "Error", r.global.ErrorPrototype, 1) + r.addToGlobal("Error", r.global.Error) + + r.global.TypeErrorPrototype = r.createErrorPrototype(stringTypeError) + + r.global.TypeError = 
r.newNativeFuncConstructProto(r.builtin_Error, "TypeError", r.global.TypeErrorPrototype, r.global.Error, 1) + r.addToGlobal("TypeError", r.global.TypeError) + + r.global.ReferenceErrorPrototype = r.createErrorPrototype(stringReferenceError) + + r.global.ReferenceError = r.newNativeFuncConstructProto(r.builtin_Error, "ReferenceError", r.global.ReferenceErrorPrototype, r.global.Error, 1) + r.addToGlobal("ReferenceError", r.global.ReferenceError) + + r.global.SyntaxErrorPrototype = r.createErrorPrototype(stringSyntaxError) + + r.global.SyntaxError = r.newNativeFuncConstructProto(r.builtin_Error, "SyntaxError", r.global.SyntaxErrorPrototype, r.global.Error, 1) + r.addToGlobal("SyntaxError", r.global.SyntaxError) + + r.global.RangeErrorPrototype = r.createErrorPrototype(stringRangeError) + + r.global.RangeError = r.newNativeFuncConstructProto(r.builtin_Error, "RangeError", r.global.RangeErrorPrototype, r.global.Error, 1) + r.addToGlobal("RangeError", r.global.RangeError) + + r.global.EvalErrorPrototype = r.createErrorPrototype(stringEvalError) + o = r.global.EvalErrorPrototype.self + o._putProp("name", stringEvalError, true, false, true) + + r.global.EvalError = r.newNativeFuncConstructProto(r.builtin_Error, "EvalError", r.global.EvalErrorPrototype, r.global.Error, 1) + r.addToGlobal("EvalError", r.global.EvalError) + + r.global.URIErrorPrototype = r.createErrorPrototype(stringURIError) + + r.global.URIError = r.newNativeFuncConstructProto(r.builtin_Error, "URIError", r.global.URIErrorPrototype, r.global.Error, 1) + r.addToGlobal("URIError", r.global.URIError) + + r.global.GoErrorPrototype = r.createErrorPrototype(stringGoError) + + r.global.GoError = r.newNativeFuncConstructProto(r.builtin_Error, "GoError", r.global.GoErrorPrototype, r.global.Error, 1) + r.addToGlobal("GoError", r.global.GoError) +} diff --git a/vendor/github.com/dop251/goja/builtin_function.go b/vendor/github.com/dop251/goja/builtin_function.go new file mode 100644 index 0000000000..968993d94d --- 
/dev/null +++ b/vendor/github.com/dop251/goja/builtin_function.go @@ -0,0 +1,195 @@ +package goja + +import ( + "fmt" +) + +func (r *Runtime) builtin_Function(args []Value, proto *Object) *Object { + var sb valueStringBuilder + sb.WriteString(asciiString("(function anonymous(")) + if len(args) > 1 { + ar := args[:len(args)-1] + for i, arg := range ar { + sb.WriteString(arg.toString()) + if i < len(ar)-1 { + sb.WriteRune(',') + } + } + } + sb.WriteString(asciiString("){")) + if len(args) > 0 { + sb.WriteString(args[len(args)-1].toString()) + } + sb.WriteString(asciiString("})")) + + ret := r.toObject(r.eval(sb.String(), false, false, _undefined)) + ret.self.setProto(proto, true) + return ret +} + +func (r *Runtime) functionproto_toString(call FunctionCall) Value { + obj := r.toObject(call.This) +repeat: + switch f := obj.self.(type) { + case *funcObject: + return newStringValue(f.src) + case *nativeFuncObject: + return newStringValue(fmt.Sprintf("function %s() { [native code] }", nilSafe(f.getStr("name", nil)).toString())) + case *boundFuncObject: + return newStringValue(fmt.Sprintf("function %s() { [native code] }", nilSafe(f.getStr("name", nil)).toString())) + case *lazyObject: + obj.self = f.create(obj) + goto repeat + case *proxyObject: + var name string + repeat2: + switch c := f.target.self.(type) { + case *funcObject: + name = c.src + case *nativeFuncObject: + name = nilSafe(f.getStr("name", nil)).toString().String() + case *boundFuncObject: + name = nilSafe(f.getStr("name", nil)).toString().String() + case *lazyObject: + f.target.self = c.create(obj) + goto repeat2 + default: + name = f.target.String() + } + return newStringValue(fmt.Sprintf("function proxy() { [%s] }", name)) + } + + r.typeErrorResult(true, "Object is not a function") + return nil +} + +func (r *Runtime) functionproto_hasInstance(call FunctionCall) Value { + if o, ok := call.This.(*Object); ok { + if _, ok = o.self.assertCallable(); ok { + return 
r.toBoolean(o.self.hasInstance(call.Argument(0))) + } + } + + return valueFalse +} + +func (r *Runtime) createListFromArrayLike(a Value) []Value { + o := r.toObject(a) + if arr := r.checkStdArrayObj(o); arr != nil { + return arr.values + } + l := toLength(o.self.getStr("length", nil)) + res := make([]Value, 0, l) + for k := int64(0); k < l; k++ { + res = append(res, o.self.getIdx(valueInt(k), nil)) + } + return res +} + +func (r *Runtime) functionproto_apply(call FunctionCall) Value { + var args []Value + if len(call.Arguments) >= 2 { + args = r.createListFromArrayLike(call.Arguments[1]) + } + + f := r.toCallable(call.This) + return f(FunctionCall{ + This: call.Argument(0), + Arguments: args, + }) +} + +func (r *Runtime) functionproto_call(call FunctionCall) Value { + var args []Value + if len(call.Arguments) > 0 { + args = call.Arguments[1:] + } + + f := r.toCallable(call.This) + return f(FunctionCall{ + This: call.Argument(0), + Arguments: args, + }) +} + +func (r *Runtime) boundCallable(target func(FunctionCall) Value, boundArgs []Value) func(FunctionCall) Value { + var this Value + var args []Value + if len(boundArgs) > 0 { + this = boundArgs[0] + args = make([]Value, len(boundArgs)-1) + copy(args, boundArgs[1:]) + } else { + this = _undefined + } + return func(call FunctionCall) Value { + a := append(args, call.Arguments...) + return target(FunctionCall{ + This: this, + Arguments: a, + }) + } +} + +func (r *Runtime) boundConstruct(target func([]Value, *Object) *Object, boundArgs []Value) func([]Value, *Object) *Object { + if target == nil { + return nil + } + var args []Value + if len(boundArgs) > 1 { + args = make([]Value, len(boundArgs)-1) + copy(args, boundArgs[1:]) + } + return func(fargs []Value, newTarget *Object) *Object { + a := append(args, fargs...) 
+ copy(a, args) + return target(a, newTarget) + } +} + +func (r *Runtime) functionproto_bind(call FunctionCall) Value { + obj := r.toObject(call.This) + + fcall := r.toCallable(call.This) + construct := obj.self.assertConstructor() + + l := int(toUint32(obj.self.getStr("length", nil))) + l -= len(call.Arguments) - 1 + if l < 0 { + l = 0 + } + + name := obj.self.getStr("name", nil) + nameStr := stringBound_ + if s, ok := name.(valueString); ok { + nameStr = nameStr.concat(s) + } + + v := &Object{runtime: r} + + ff := r.newNativeFuncObj(v, r.boundCallable(fcall, call.Arguments), r.boundConstruct(construct, call.Arguments), nameStr.string(), nil, l) + v.self = &boundFuncObject{ + nativeFuncObject: *ff, + wrapped: obj, + } + + //ret := r.newNativeFunc(r.boundCallable(f, call.Arguments), nil, "", nil, l) + //o := ret.self + //o.putStr("caller", r.global.throwerProperty, false) + //o.putStr("arguments", r.global.throwerProperty, false) + return v +} + +func (r *Runtime) initFunction() { + o := r.global.FunctionPrototype.self.(*nativeFuncObject) + o.prototype = r.global.ObjectPrototype + o._putProp("name", stringEmpty, false, false, true) + o._putProp("apply", r.newNativeFunc(r.functionproto_apply, nil, "apply", nil, 2), true, false, true) + o._putProp("bind", r.newNativeFunc(r.functionproto_bind, nil, "bind", nil, 1), true, false, true) + o._putProp("call", r.newNativeFunc(r.functionproto_call, nil, "call", nil, 1), true, false, true) + o._putProp("toString", r.newNativeFunc(r.functionproto_toString, nil, "toString", nil, 0), true, false, true) + o._putSym(SymHasInstance, valueProp(r.newNativeFunc(r.functionproto_hasInstance, nil, "[Symbol.hasInstance]", nil, 1), false, false, false)) + + r.global.Function = r.newNativeFuncConstruct(r.builtin_Function, "Function", r.global.FunctionPrototype, 1) + r.addToGlobal("Function", r.global.Function) +} diff --git a/vendor/github.com/dop251/goja/builtin_global.go b/vendor/github.com/dop251/goja/builtin_global.go new file mode 
100644 index 0000000000..2fb20400c1 --- /dev/null +++ b/vendor/github.com/dop251/goja/builtin_global.go @@ -0,0 +1,516 @@ +package goja + +import ( + "errors" + "github.com/dop251/goja/unistring" + "io" + "math" + "regexp" + "strconv" + "strings" + "unicode/utf8" +) + +const hexUpper = "0123456789ABCDEF" + +var ( + parseFloatRegexp = regexp.MustCompile(`^([+-]?(?:Infinity|[0-9]*\.?[0-9]*(?:[eE][+-]?[0-9]+)?))`) +) + +func (r *Runtime) builtin_isNaN(call FunctionCall) Value { + if math.IsNaN(call.Argument(0).ToFloat()) { + return valueTrue + } else { + return valueFalse + } +} + +func (r *Runtime) builtin_parseInt(call FunctionCall) Value { + str := call.Argument(0).toString().toTrimmedUTF8() + radix := int(toInt32(call.Argument(1))) + v, _ := parseInt(str, radix) + return v +} + +func (r *Runtime) builtin_parseFloat(call FunctionCall) Value { + m := parseFloatRegexp.FindStringSubmatch(call.Argument(0).toString().toTrimmedUTF8()) + if len(m) == 2 { + if s := m[1]; s != "" && s != "+" && s != "-" { + switch s { + case "+", "-": + case "Infinity", "+Infinity": + return _positiveInf + case "-Infinity": + return _negativeInf + default: + f, err := strconv.ParseFloat(s, 64) + if err == nil || isRangeErr(err) { + return floatToValue(f) + } + } + } + } + return _NaN +} + +func (r *Runtime) builtin_isFinite(call FunctionCall) Value { + f := call.Argument(0).ToFloat() + if math.IsNaN(f) || math.IsInf(f, 0) { + return valueFalse + } + return valueTrue +} + +func (r *Runtime) _encode(uriString valueString, unescaped *[256]bool) valueString { + reader := uriString.reader(0) + utf8Buf := make([]byte, utf8.UTFMax) + needed := false + l := 0 + for { + rn, _, err := reader.ReadRune() + if err != nil { + if err != io.EOF { + panic(r.newError(r.global.URIError, "Malformed URI")) + } + break + } + + if rn >= utf8.RuneSelf { + needed = true + l += utf8.EncodeRune(utf8Buf, rn) * 3 + } else if !unescaped[rn] { + needed = true + l += 3 + } else { + l++ + } + } + + if !needed { + return 
uriString + } + + buf := make([]byte, l) + i := 0 + reader = uriString.reader(0) + for { + rn, _, err := reader.ReadRune() + if err == io.EOF { + break + } + + if rn >= utf8.RuneSelf { + n := utf8.EncodeRune(utf8Buf, rn) + for _, b := range utf8Buf[:n] { + buf[i] = '%' + buf[i+1] = hexUpper[b>>4] + buf[i+2] = hexUpper[b&15] + i += 3 + } + } else if !unescaped[rn] { + buf[i] = '%' + buf[i+1] = hexUpper[rn>>4] + buf[i+2] = hexUpper[rn&15] + i += 3 + } else { + buf[i] = byte(rn) + i++ + } + } + return asciiString(buf) +} + +func (r *Runtime) _decode(sv valueString, reservedSet *[256]bool) valueString { + s := sv.String() + hexCount := 0 + for i := 0; i < len(s); { + switch s[i] { + case '%': + if i+2 >= len(s) || !ishex(s[i+1]) || !ishex(s[i+2]) { + panic(r.newError(r.global.URIError, "Malformed URI")) + } + c := unhex(s[i+1])<<4 | unhex(s[i+2]) + if !reservedSet[c] { + hexCount++ + } + i += 3 + default: + i++ + } + } + + if hexCount == 0 { + return sv + } + + t := make([]byte, len(s)-hexCount*2) + j := 0 + isUnicode := false + for i := 0; i < len(s); { + ch := s[i] + switch ch { + case '%': + c := unhex(s[i+1])<<4 | unhex(s[i+2]) + if reservedSet[c] { + t[j] = s[i] + t[j+1] = s[i+1] + t[j+2] = s[i+2] + j += 3 + } else { + t[j] = c + if c >= utf8.RuneSelf { + isUnicode = true + } + j++ + } + i += 3 + default: + if ch >= utf8.RuneSelf { + isUnicode = true + } + t[j] = ch + j++ + i++ + } + } + + if !isUnicode { + return asciiString(t) + } + + us := make([]rune, 0, len(s)) + for len(t) > 0 { + rn, size := utf8.DecodeRune(t) + if rn == utf8.RuneError { + if size != 3 || t[0] != 0xef || t[1] != 0xbf || t[2] != 0xbd { + panic(r.newError(r.global.URIError, "Malformed URI")) + } + } + us = append(us, rn) + t = t[size:] + } + return unicodeStringFromRunes(us) +} + +func ishex(c byte) bool { + switch { + case '0' <= c && c <= '9': + return true + case 'a' <= c && c <= 'f': + return true + case 'A' <= c && c <= 'F': + return true + } + return false +} + +func unhex(c byte) byte 
{ + switch { + case '0' <= c && c <= '9': + return c - '0' + case 'a' <= c && c <= 'f': + return c - 'a' + 10 + case 'A' <= c && c <= 'F': + return c - 'A' + 10 + } + return 0 +} + +func (r *Runtime) builtin_decodeURI(call FunctionCall) Value { + uriString := call.Argument(0).toString() + return r._decode(uriString, &uriReservedHash) +} + +func (r *Runtime) builtin_decodeURIComponent(call FunctionCall) Value { + uriString := call.Argument(0).toString() + return r._decode(uriString, &emptyEscapeSet) +} + +func (r *Runtime) builtin_encodeURI(call FunctionCall) Value { + uriString := call.Argument(0).toString() + return r._encode(uriString, &uriReservedUnescapedHash) +} + +func (r *Runtime) builtin_encodeURIComponent(call FunctionCall) Value { + uriString := call.Argument(0).toString() + return r._encode(uriString, &uriUnescaped) +} + +func (r *Runtime) builtin_escape(call FunctionCall) Value { + s := call.Argument(0).toString() + var sb strings.Builder + l := s.length() + for i := 0; i < l; i++ { + r := uint16(s.charAt(i)) + if r >= 'A' && r <= 'Z' || r >= 'a' && r <= 'z' || r >= '0' && r <= '9' || + r == '@' || r == '*' || r == '_' || r == '+' || r == '-' || r == '.' 
|| r == '/' { + sb.WriteByte(byte(r)) + } else if r <= 0xff { + sb.WriteByte('%') + sb.WriteByte(hexUpper[r>>4]) + sb.WriteByte(hexUpper[r&0xf]) + } else { + sb.WriteString("%u") + sb.WriteByte(hexUpper[r>>12]) + sb.WriteByte(hexUpper[(r>>8)&0xf]) + sb.WriteByte(hexUpper[(r>>4)&0xf]) + sb.WriteByte(hexUpper[r&0xf]) + } + } + return asciiString(sb.String()) +} + +func (r *Runtime) builtin_unescape(call FunctionCall) Value { + s := call.Argument(0).toString() + l := s.length() + _, unicode := s.(unicodeString) + var asciiBuf []byte + var unicodeBuf []uint16 + if unicode { + unicodeBuf = make([]uint16, 1, l+1) + unicodeBuf[0] = unistring.BOM + } else { + asciiBuf = make([]byte, 0, l) + } + for i := 0; i < l; { + r := s.charAt(i) + if r == '%' { + if i <= l-6 && s.charAt(i+1) == 'u' { + c0 := s.charAt(i + 2) + c1 := s.charAt(i + 3) + c2 := s.charAt(i + 4) + c3 := s.charAt(i + 5) + if c0 <= 0xff && ishex(byte(c0)) && + c1 <= 0xff && ishex(byte(c1)) && + c2 <= 0xff && ishex(byte(c2)) && + c3 <= 0xff && ishex(byte(c3)) { + r = rune(unhex(byte(c0)))<<12 | + rune(unhex(byte(c1)))<<8 | + rune(unhex(byte(c2)))<<4 | + rune(unhex(byte(c3))) + i += 5 + goto out + } + } + if i <= l-3 { + c0 := s.charAt(i + 1) + c1 := s.charAt(i + 2) + if c0 <= 0xff && ishex(byte(c0)) && + c1 <= 0xff && ishex(byte(c1)) { + r = rune(unhex(byte(c0))<<4 | unhex(byte(c1))) + i += 2 + } + } + } + out: + if r >= utf8.RuneSelf && !unicode { + unicodeBuf = make([]uint16, 1, l+1) + unicodeBuf[0] = unistring.BOM + for _, b := range asciiBuf { + unicodeBuf = append(unicodeBuf, uint16(b)) + } + asciiBuf = nil + unicode = true + } + if unicode { + unicodeBuf = append(unicodeBuf, uint16(r)) + } else { + asciiBuf = append(asciiBuf, byte(r)) + } + i++ + } + if unicode { + return unicodeString(unicodeBuf) + } + + return asciiString(asciiBuf) +} + +func (r *Runtime) initGlobalObject() { + o := r.globalObject.self + o._putProp("globalThis", r.globalObject, true, false, true) + o._putProp("NaN", _NaN, false, false, 
false) + o._putProp("undefined", _undefined, false, false, false) + o._putProp("Infinity", _positiveInf, false, false, false) + + o._putProp("isNaN", r.newNativeFunc(r.builtin_isNaN, nil, "isNaN", nil, 1), true, false, true) + o._putProp("parseInt", r.newNativeFunc(r.builtin_parseInt, nil, "parseInt", nil, 2), true, false, true) + o._putProp("parseFloat", r.newNativeFunc(r.builtin_parseFloat, nil, "parseFloat", nil, 1), true, false, true) + o._putProp("isFinite", r.newNativeFunc(r.builtin_isFinite, nil, "isFinite", nil, 1), true, false, true) + o._putProp("decodeURI", r.newNativeFunc(r.builtin_decodeURI, nil, "decodeURI", nil, 1), true, false, true) + o._putProp("decodeURIComponent", r.newNativeFunc(r.builtin_decodeURIComponent, nil, "decodeURIComponent", nil, 1), true, false, true) + o._putProp("encodeURI", r.newNativeFunc(r.builtin_encodeURI, nil, "encodeURI", nil, 1), true, false, true) + o._putProp("encodeURIComponent", r.newNativeFunc(r.builtin_encodeURIComponent, nil, "encodeURIComponent", nil, 1), true, false, true) + o._putProp("escape", r.newNativeFunc(r.builtin_escape, nil, "escape", nil, 1), true, false, true) + o._putProp("unescape", r.newNativeFunc(r.builtin_unescape, nil, "unescape", nil, 1), true, false, true) + + o._putSym(SymToStringTag, valueProp(asciiString(classGlobal), false, false, true)) + + // TODO: Annex B + +} + +func digitVal(d byte) int { + var v byte + switch { + case '0' <= d && d <= '9': + v = d - '0' + case 'a' <= d && d <= 'z': + v = d - 'a' + 10 + case 'A' <= d && d <= 'Z': + v = d - 'A' + 10 + default: + return 36 + } + return int(v) +} + +// ECMAScript compatible version of strconv.ParseInt +func parseInt(s string, base int) (Value, error) { + var n int64 + var err error + var cutoff, maxVal int64 + var sign bool + i := 0 + + if len(s) < 1 { + err = strconv.ErrSyntax + goto Error + } + + switch s[0] { + case '-': + sign = true + s = s[1:] + case '+': + s = s[1:] + } + + if len(s) < 1 { + err = strconv.ErrSyntax + goto Error + } + 
+ // Look for hex prefix. + if s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X') { + if base == 0 || base == 16 { + base = 16 + s = s[2:] + } + } + + switch { + case len(s) < 1: + err = strconv.ErrSyntax + goto Error + + case 2 <= base && base <= 36: + // valid base; nothing to do + + case base == 0: + // Look for hex prefix. + switch { + case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'): + if len(s) < 3 { + err = strconv.ErrSyntax + goto Error + } + base = 16 + s = s[2:] + default: + base = 10 + } + + default: + err = errors.New("invalid base " + strconv.Itoa(base)) + goto Error + } + + // Cutoff is the smallest number such that cutoff*base > maxInt64. + // Use compile-time constants for common cases. + switch base { + case 10: + cutoff = math.MaxInt64/10 + 1 + case 16: + cutoff = math.MaxInt64/16 + 1 + default: + cutoff = math.MaxInt64/int64(base) + 1 + } + + maxVal = math.MaxInt64 + for ; i < len(s); i++ { + if n >= cutoff { + // n*base overflows + return parseLargeInt(float64(n), s[i:], base, sign) + } + v := digitVal(s[i]) + if v >= base { + break + } + n *= int64(base) + + n1 := n + int64(v) + if n1 < n || n1 > maxVal { + // n+v overflows + return parseLargeInt(float64(n)+float64(v), s[i+1:], base, sign) + } + n = n1 + } + + if i == 0 { + err = strconv.ErrSyntax + goto Error + } + + if sign { + n = -n + } + return intToValue(n), nil + +Error: + return _NaN, err +} + +func parseLargeInt(n float64, s string, base int, sign bool) (Value, error) { + i := 0 + b := float64(base) + for ; i < len(s); i++ { + v := digitVal(s[i]) + if v >= base { + break + } + n = n*b + float64(v) + } + if sign { + n = -n + } + // We know it can't be represented as int, so use valueFloat instead of floatToValue + return valueFloat(n), nil +} + +var ( + uriUnescaped [256]bool + uriReserved [256]bool + uriReservedHash [256]bool + uriReservedUnescapedHash [256]bool + emptyEscapeSet [256]bool +) + +func init() { + for _, c := range 
"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_.!~*'()" { + uriUnescaped[c] = true + } + + for _, c := range ";/?:@&=+$," { + uriReserved[c] = true + } + + for i := 0; i < 256; i++ { + if uriUnescaped[i] || uriReserved[i] { + uriReservedUnescapedHash[i] = true + } + uriReservedHash[i] = uriReserved[i] + } + uriReservedUnescapedHash['#'] = true + uriReservedHash['#'] = true +} diff --git a/vendor/github.com/dop251/goja/builtin_json.go b/vendor/github.com/dop251/goja/builtin_json.go new file mode 100644 index 0000000000..dcdb6cc910 --- /dev/null +++ b/vendor/github.com/dop251/goja/builtin_json.go @@ -0,0 +1,511 @@ +package goja + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "math" + "strings" + "unicode/utf16" + + "github.com/dop251/goja/unistring" +) + +const hex = "0123456789abcdef" + +func (r *Runtime) builtinJSON_parse(call FunctionCall) Value { + d := json.NewDecoder(bytes.NewBufferString(call.Argument(0).toString().String())) + + value, err := r.builtinJSON_decodeValue(d) + if err != nil { + panic(r.newError(r.global.SyntaxError, err.Error())) + } + + if tok, err := d.Token(); err != io.EOF { + panic(r.newError(r.global.SyntaxError, "Unexpected token at the end: %v", tok)) + } + + var reviver func(FunctionCall) Value + + if arg1 := call.Argument(1); arg1 != _undefined { + reviver, _ = arg1.ToObject(r).self.assertCallable() + } + + if reviver != nil { + root := r.NewObject() + root.self.setOwnStr("", value, false) + return r.builtinJSON_reviveWalk(reviver, root, stringEmpty) + } + + return value +} + +func (r *Runtime) builtinJSON_decodeToken(d *json.Decoder, tok json.Token) (Value, error) { + switch tok := tok.(type) { + case json.Delim: + switch tok { + case '{': + return r.builtinJSON_decodeObject(d) + case '[': + return r.builtinJSON_decodeArray(d) + } + case nil: + return _null, nil + case string: + return newStringValue(tok), nil + case float64: + return floatToValue(tok), nil + case bool: + if tok { + return valueTrue, nil + } 
+ return valueFalse, nil + } + return nil, fmt.Errorf("Unexpected token (%T): %v", tok, tok) +} + +func (r *Runtime) builtinJSON_decodeValue(d *json.Decoder) (Value, error) { + tok, err := d.Token() + if err != nil { + return nil, err + } + return r.builtinJSON_decodeToken(d, tok) +} + +func (r *Runtime) builtinJSON_decodeObject(d *json.Decoder) (*Object, error) { + object := r.NewObject() + for { + key, end, err := r.builtinJSON_decodeObjectKey(d) + if err != nil { + return nil, err + } + if end { + break + } + value, err := r.builtinJSON_decodeValue(d) + if err != nil { + return nil, err + } + + object.self._putProp(unistring.NewFromString(key), value, true, true, true) + } + return object, nil +} + +func (r *Runtime) builtinJSON_decodeObjectKey(d *json.Decoder) (string, bool, error) { + tok, err := d.Token() + if err != nil { + return "", false, err + } + switch tok := tok.(type) { + case json.Delim: + if tok == '}' { + return "", true, nil + } + case string: + return tok, false, nil + } + + return "", false, fmt.Errorf("Unexpected token (%T): %v", tok, tok) +} + +func (r *Runtime) builtinJSON_decodeArray(d *json.Decoder) (*Object, error) { + var arrayValue []Value + for { + tok, err := d.Token() + if err != nil { + return nil, err + } + if delim, ok := tok.(json.Delim); ok { + if delim == ']' { + break + } + } + value, err := r.builtinJSON_decodeToken(d, tok) + if err != nil { + return nil, err + } + arrayValue = append(arrayValue, value) + } + return r.newArrayValues(arrayValue), nil +} + +func (r *Runtime) builtinJSON_reviveWalk(reviver func(FunctionCall) Value, holder *Object, name Value) Value { + value := holder.get(name, nil) + if value == nil { + value = _undefined + } + + if object, ok := value.(*Object); ok { + if isArray(object) { + length := object.self.getStr("length", nil).ToInteger() + for index := int64(0); index < length; index++ { + name := intToValue(index) + value := r.builtinJSON_reviveWalk(reviver, object, name) + if value == _undefined { + 
object.delete(name, false) + } else { + object.setOwn(name, value, false) + } + } + } else { + iter := &enumerableIter{ + wrapped: object.self.enumerateOwnKeys(), + } + for item, next := iter.next(); next != nil; item, next = next() { + value := r.builtinJSON_reviveWalk(reviver, object, stringValueFromRaw(item.name)) + if value == _undefined { + object.self.deleteStr(item.name, false) + } else { + object.self.setOwnStr(item.name, value, false) + } + } + } + } + return reviver(FunctionCall{ + This: holder, + Arguments: []Value{name, value}, + }) +} + +type _builtinJSON_stringifyContext struct { + r *Runtime + stack []*Object + propertyList []Value + replacerFunction func(FunctionCall) Value + gap, indent string + buf bytes.Buffer +} + +func (r *Runtime) builtinJSON_stringify(call FunctionCall) Value { + ctx := _builtinJSON_stringifyContext{ + r: r, + } + + replacer, _ := call.Argument(1).(*Object) + if replacer != nil { + if isArray(replacer) { + length := replacer.self.getStr("length", nil).ToInteger() + seen := map[string]bool{} + propertyList := make([]Value, length) + length = 0 + for index := range propertyList { + var name string + value := replacer.self.getIdx(valueInt(int64(index)), nil) + switch v := value.(type) { + case valueFloat, valueInt, valueString: + name = value.String() + case *Object: + switch v.self.className() { + case classNumber, classString: + name = value.String() + } + } + if seen[name] { + continue + } + seen[name] = true + length += 1 + propertyList[index] = newStringValue(name) + } + ctx.propertyList = propertyList[0:length] + } else if c, ok := replacer.self.assertCallable(); ok { + ctx.replacerFunction = c + } + } + if spaceValue := call.Argument(2); spaceValue != _undefined { + if o, ok := spaceValue.(*Object); ok { + switch o := o.self.(type) { + case *primitiveValueObject: + spaceValue = o.pValue + case *stringObject: + spaceValue = o.value + } + } + isNum := false + var num int64 + if i, ok := spaceValue.(valueInt); ok { + num = 
int64(i) + isNum = true + } else if f, ok := spaceValue.(valueFloat); ok { + num = int64(f) + isNum = true + } + if isNum { + if num > 0 { + if num > 10 { + num = 10 + } + ctx.gap = strings.Repeat(" ", int(num)) + } + } else { + if s, ok := spaceValue.(valueString); ok { + str := s.String() + if len(str) > 10 { + ctx.gap = str[:10] + } else { + ctx.gap = str + } + } + } + } + + if ctx.do(call.Argument(0)) { + return newStringValue(ctx.buf.String()) + } + return _undefined +} + +func (ctx *_builtinJSON_stringifyContext) do(v Value) bool { + holder := ctx.r.NewObject() + holder.self.setOwnStr("", v, false) + return ctx.str(stringEmpty, holder) +} + +func (ctx *_builtinJSON_stringifyContext) str(key Value, holder *Object) bool { + value := holder.get(key, nil) + if value == nil { + value = _undefined + } + + if object, ok := value.(*Object); ok { + if toJSON, ok := object.self.getStr("toJSON", nil).(*Object); ok { + if c, ok := toJSON.self.assertCallable(); ok { + value = c(FunctionCall{ + This: value, + Arguments: []Value{key}, + }) + } + } + } + + if ctx.replacerFunction != nil { + value = ctx.replacerFunction(FunctionCall{ + This: holder, + Arguments: []Value{key, value}, + }) + } + + if o, ok := value.(*Object); ok { + switch o1 := o.self.(type) { + case *primitiveValueObject: + value = o1.pValue + case *stringObject: + value = o1.value + case *objectGoReflect: + if o1.toJson != nil { + value = ctx.r.ToValue(o1.toJson()) + } else if v, ok := o1.origValue.Interface().(json.Marshaler); ok { + b, err := v.MarshalJSON() + if err != nil { + panic(err) + } + ctx.buf.Write(b) + return true + } else { + switch o1.className() { + case classNumber: + value = o1.toPrimitiveNumber() + case classString: + value = o1.toPrimitiveString() + case classBoolean: + if o.ToInteger() != 0 { + value = valueTrue + } else { + value = valueFalse + } + } + } + } + } + + switch value1 := value.(type) { + case valueBool: + if value1 { + ctx.buf.WriteString("true") + } else { + 
ctx.buf.WriteString("false") + } + case valueString: + ctx.quote(value1) + case valueInt: + ctx.buf.WriteString(value.String()) + case valueFloat: + if !math.IsNaN(float64(value1)) && !math.IsInf(float64(value1), 0) { + ctx.buf.WriteString(value.String()) + } else { + ctx.buf.WriteString("null") + } + case valueNull: + ctx.buf.WriteString("null") + case *Object: + for _, object := range ctx.stack { + if value1 == object { + ctx.r.typeErrorResult(true, "Converting circular structure to JSON") + } + } + ctx.stack = append(ctx.stack, value1) + defer func() { ctx.stack = ctx.stack[:len(ctx.stack)-1] }() + if _, ok := value1.self.assertCallable(); !ok { + if isArray(value1) { + ctx.ja(value1) + } else { + ctx.jo(value1) + } + } else { + return false + } + default: + return false + } + return true +} + +func (ctx *_builtinJSON_stringifyContext) ja(array *Object) { + var stepback string + if ctx.gap != "" { + stepback = ctx.indent + ctx.indent += ctx.gap + } + length := array.self.getStr("length", nil).ToInteger() + if length == 0 { + ctx.buf.WriteString("[]") + return + } + + ctx.buf.WriteByte('[') + var separator string + if ctx.gap != "" { + ctx.buf.WriteByte('\n') + ctx.buf.WriteString(ctx.indent) + separator = ",\n" + ctx.indent + } else { + separator = "," + } + + for i := int64(0); i < length; i++ { + if !ctx.str(intToValue(i), array) { + ctx.buf.WriteString("null") + } + if i < length-1 { + ctx.buf.WriteString(separator) + } + } + if ctx.gap != "" { + ctx.buf.WriteByte('\n') + ctx.buf.WriteString(stepback) + ctx.indent = stepback + } + ctx.buf.WriteByte(']') +} + +func (ctx *_builtinJSON_stringifyContext) jo(object *Object) { + var stepback string + if ctx.gap != "" { + stepback = ctx.indent + ctx.indent += ctx.gap + } + + ctx.buf.WriteByte('{') + mark := ctx.buf.Len() + var separator string + if ctx.gap != "" { + ctx.buf.WriteByte('\n') + ctx.buf.WriteString(ctx.indent) + separator = ",\n" + ctx.indent + } else { + separator = "," + } + + var props []Value + if 
ctx.propertyList == nil { + props = object.self.ownKeys(false, nil) + } else { + props = ctx.propertyList + } + + empty := true + for _, name := range props { + off := ctx.buf.Len() + if !empty { + ctx.buf.WriteString(separator) + } + ctx.quote(name.toString()) + if ctx.gap != "" { + ctx.buf.WriteString(": ") + } else { + ctx.buf.WriteByte(':') + } + if ctx.str(name, object) { + if empty { + empty = false + } + } else { + ctx.buf.Truncate(off) + } + } + + if empty { + ctx.buf.Truncate(mark) + } else { + if ctx.gap != "" { + ctx.buf.WriteByte('\n') + ctx.buf.WriteString(stepback) + ctx.indent = stepback + } + } + ctx.buf.WriteByte('}') +} + +func (ctx *_builtinJSON_stringifyContext) quote(str valueString) { + ctx.buf.WriteByte('"') + reader := &lenientUtf16Decoder{utf16Reader: str.utf16Reader(0)} + for { + r, _, err := reader.ReadRune() + if err != nil { + break + } + switch r { + case '"', '\\': + ctx.buf.WriteByte('\\') + ctx.buf.WriteByte(byte(r)) + case 0x08: + ctx.buf.WriteString(`\b`) + case 0x09: + ctx.buf.WriteString(`\t`) + case 0x0A: + ctx.buf.WriteString(`\n`) + case 0x0C: + ctx.buf.WriteString(`\f`) + case 0x0D: + ctx.buf.WriteString(`\r`) + default: + if r < 0x20 { + ctx.buf.WriteString(`\u00`) + ctx.buf.WriteByte(hex[r>>4]) + ctx.buf.WriteByte(hex[r&0xF]) + } else { + if utf16.IsSurrogate(r) { + ctx.buf.WriteString(`\u`) + ctx.buf.WriteByte(hex[r>>12]) + ctx.buf.WriteByte(hex[(r>>8)&0xF]) + ctx.buf.WriteByte(hex[(r>>4)&0xF]) + ctx.buf.WriteByte(hex[r&0xF]) + } else { + ctx.buf.WriteRune(r) + } + } + } + } + ctx.buf.WriteByte('"') +} + +func (r *Runtime) initJSON() { + JSON := r.newBaseObject(r.global.ObjectPrototype, "JSON") + JSON._putProp("parse", r.newNativeFunc(r.builtinJSON_parse, nil, "parse", nil, 2), true, false, true) + JSON._putProp("stringify", r.newNativeFunc(r.builtinJSON_stringify, nil, "stringify", nil, 3), true, false, true) + JSON._putSym(SymToStringTag, valueProp(asciiString(classJSON), false, false, true)) + + r.addToGlobal("JSON", 
JSON.val) +} diff --git a/vendor/github.com/dop251/goja/builtin_map.go b/vendor/github.com/dop251/goja/builtin_map.go new file mode 100644 index 0000000000..097dbff6bc --- /dev/null +++ b/vendor/github.com/dop251/goja/builtin_map.go @@ -0,0 +1,271 @@ +package goja + +type mapObject struct { + baseObject + m *orderedMap +} + +type mapIterObject struct { + baseObject + iter *orderedMapIter + kind iterationKind +} + +func (o *mapIterObject) next() Value { + if o.iter == nil { + return o.val.runtime.createIterResultObject(_undefined, true) + } + + entry := o.iter.next() + if entry == nil { + o.iter = nil + return o.val.runtime.createIterResultObject(_undefined, true) + } + + var result Value + switch o.kind { + case iterationKindKey: + result = entry.key + case iterationKindValue: + result = entry.value + default: + result = o.val.runtime.newArrayValues([]Value{entry.key, entry.value}) + } + + return o.val.runtime.createIterResultObject(result, false) +} + +func (mo *mapObject) init() { + mo.baseObject.init() + mo.m = newOrderedMap(mo.val.runtime.getHash()) +} + +func (r *Runtime) mapProto_clear(call FunctionCall) Value { + thisObj := r.toObject(call.This) + mo, ok := thisObj.self.(*mapObject) + if !ok { + panic(r.NewTypeError("Method Map.prototype.clear called on incompatible receiver %s", thisObj.String())) + } + + mo.m.clear() + + return _undefined +} + +func (r *Runtime) mapProto_delete(call FunctionCall) Value { + thisObj := r.toObject(call.This) + mo, ok := thisObj.self.(*mapObject) + if !ok { + panic(r.NewTypeError("Method Map.prototype.delete called on incompatible receiver %s", thisObj.String())) + } + + return r.toBoolean(mo.m.remove(call.Argument(0))) +} + +func (r *Runtime) mapProto_get(call FunctionCall) Value { + thisObj := r.toObject(call.This) + mo, ok := thisObj.self.(*mapObject) + if !ok { + panic(r.NewTypeError("Method Map.prototype.get called on incompatible receiver %s", thisObj.String())) + } + + return nilSafe(mo.m.get(call.Argument(0))) +} + 
+func (r *Runtime) mapProto_has(call FunctionCall) Value { + thisObj := r.toObject(call.This) + mo, ok := thisObj.self.(*mapObject) + if !ok { + panic(r.NewTypeError("Method Map.prototype.has called on incompatible receiver %s", thisObj.String())) + } + if mo.m.has(call.Argument(0)) { + return valueTrue + } + return valueFalse +} + +func (r *Runtime) mapProto_set(call FunctionCall) Value { + thisObj := r.toObject(call.This) + mo, ok := thisObj.self.(*mapObject) + if !ok { + panic(r.NewTypeError("Method Map.prototype.set called on incompatible receiver %s", thisObj.String())) + } + mo.m.set(call.Argument(0), call.Argument(1)) + return call.This +} + +func (r *Runtime) mapProto_entries(call FunctionCall) Value { + return r.createMapIterator(call.This, iterationKindKeyValue) +} + +func (r *Runtime) mapProto_forEach(call FunctionCall) Value { + thisObj := r.toObject(call.This) + mo, ok := thisObj.self.(*mapObject) + if !ok { + panic(r.NewTypeError("Method Map.prototype.forEach called on incompatible receiver %s", thisObj.String())) + } + callbackFn, ok := r.toObject(call.Argument(0)).self.assertCallable() + if !ok { + panic(r.NewTypeError("object is not a function %s")) + } + t := call.Argument(1) + iter := mo.m.newIter() + for { + entry := iter.next() + if entry == nil { + break + } + callbackFn(FunctionCall{This: t, Arguments: []Value{entry.value, entry.key, thisObj}}) + } + + return _undefined +} + +func (r *Runtime) mapProto_keys(call FunctionCall) Value { + return r.createMapIterator(call.This, iterationKindKey) +} + +func (r *Runtime) mapProto_values(call FunctionCall) Value { + return r.createMapIterator(call.This, iterationKindValue) +} + +func (r *Runtime) mapProto_getSize(call FunctionCall) Value { + thisObj := r.toObject(call.This) + mo, ok := thisObj.self.(*mapObject) + if !ok { + panic(r.NewTypeError("Method get Map.prototype.size called on incompatible receiver %s", thisObj.String())) + } + return intToValue(int64(mo.m.size)) +} + +func (r *Runtime) 
builtin_newMap(args []Value, newTarget *Object) *Object { + if newTarget == nil { + panic(r.needNew("Map")) + } + proto := r.getPrototypeFromCtor(newTarget, r.global.Map, r.global.MapPrototype) + o := &Object{runtime: r} + + mo := &mapObject{} + mo.class = classMap + mo.val = o + mo.extensible = true + o.self = mo + mo.prototype = proto + mo.init() + if len(args) > 0 { + if arg := args[0]; arg != nil && arg != _undefined && arg != _null { + adder := mo.getStr("set", nil) + iter := r.getIterator(arg, nil) + i0 := valueInt(0) + i1 := valueInt(1) + if adder == r.global.mapAdder { + r.iterate(iter, func(item Value) { + itemObj := r.toObject(item) + k := nilSafe(itemObj.self.getIdx(i0, nil)) + v := nilSafe(itemObj.self.getIdx(i1, nil)) + mo.m.set(k, v) + }) + } else { + adderFn := toMethod(adder) + if adderFn == nil { + panic(r.NewTypeError("Map.set in missing")) + } + r.iterate(iter, func(item Value) { + itemObj := r.toObject(item) + k := itemObj.self.getIdx(i0, nil) + v := itemObj.self.getIdx(i1, nil) + adderFn(FunctionCall{This: o, Arguments: []Value{k, v}}) + }) + } + } + } + return o +} + +func (r *Runtime) createMapIterator(mapValue Value, kind iterationKind) Value { + obj := r.toObject(mapValue) + mapObj, ok := obj.self.(*mapObject) + if !ok { + panic(r.NewTypeError("Object is not a Map")) + } + + o := &Object{runtime: r} + + mi := &mapIterObject{ + iter: mapObj.m.newIter(), + kind: kind, + } + mi.class = classMapIterator + mi.val = o + mi.extensible = true + o.self = mi + mi.prototype = r.global.MapIteratorPrototype + mi.init() + + return o +} + +func (r *Runtime) mapIterProto_next(call FunctionCall) Value { + thisObj := r.toObject(call.This) + if iter, ok := thisObj.self.(*mapIterObject); ok { + return iter.next() + } + panic(r.NewTypeError("Method Map Iterator.prototype.next called on incompatible receiver %s", thisObj.String())) +} + +func (r *Runtime) createMapProto(val *Object) objectImpl { + o := newBaseObjectObj(val, r.global.ObjectPrototype, classObject) 
+ + o._putProp("constructor", r.global.Map, true, false, true) + o._putProp("clear", r.newNativeFunc(r.mapProto_clear, nil, "clear", nil, 0), true, false, true) + r.global.mapAdder = r.newNativeFunc(r.mapProto_set, nil, "set", nil, 2) + o._putProp("set", r.global.mapAdder, true, false, true) + o._putProp("delete", r.newNativeFunc(r.mapProto_delete, nil, "delete", nil, 1), true, false, true) + o._putProp("forEach", r.newNativeFunc(r.mapProto_forEach, nil, "forEach", nil, 1), true, false, true) + o._putProp("has", r.newNativeFunc(r.mapProto_has, nil, "has", nil, 1), true, false, true) + o._putProp("get", r.newNativeFunc(r.mapProto_get, nil, "get", nil, 1), true, false, true) + o.setOwnStr("size", &valueProperty{ + getterFunc: r.newNativeFunc(r.mapProto_getSize, nil, "get size", nil, 0), + accessor: true, + writable: true, + configurable: true, + }, true) + o._putProp("keys", r.newNativeFunc(r.mapProto_keys, nil, "keys", nil, 0), true, false, true) + o._putProp("values", r.newNativeFunc(r.mapProto_values, nil, "values", nil, 0), true, false, true) + + entriesFunc := r.newNativeFunc(r.mapProto_entries, nil, "entries", nil, 0) + o._putProp("entries", entriesFunc, true, false, true) + o._putSym(SymIterator, valueProp(entriesFunc, true, false, true)) + o._putSym(SymToStringTag, valueProp(asciiString(classMap), false, false, true)) + + return o +} + +func (r *Runtime) createMap(val *Object) objectImpl { + o := r.newNativeConstructOnly(val, r.builtin_newMap, r.global.MapPrototype, "Map", 0) + o._putSym(SymSpecies, &valueProperty{ + getterFunc: r.newNativeFunc(r.returnThis, nil, "get [Symbol.species]", nil, 0), + accessor: true, + configurable: true, + }) + + return o +} + +func (r *Runtime) createMapIterProto(val *Object) objectImpl { + o := newBaseObjectObj(val, r.global.IteratorPrototype, classObject) + + o._putProp("next", r.newNativeFunc(r.mapIterProto_next, nil, "next", nil, 0), true, false, true) + o._putSym(SymToStringTag, valueProp(asciiString(classMapIterator), 
false, false, true)) + + return o +} + +func (r *Runtime) initMap() { + r.global.MapIteratorPrototype = r.newLazyObject(r.createMapIterProto) + + r.global.MapPrototype = r.newLazyObject(r.createMapProto) + r.global.Map = r.newLazyObject(r.createMap) + + r.addToGlobal("Map", r.global.Map) +} diff --git a/vendor/github.com/dop251/goja/builtin_math.go b/vendor/github.com/dop251/goja/builtin_math.go new file mode 100644 index 0000000000..11439c0f08 --- /dev/null +++ b/vendor/github.com/dop251/goja/builtin_math.go @@ -0,0 +1,328 @@ +package goja + +import ( + "math" + "math/bits" +) + +func (r *Runtime) math_abs(call FunctionCall) Value { + return floatToValue(math.Abs(call.Argument(0).ToFloat())) +} + +func (r *Runtime) math_acos(call FunctionCall) Value { + return floatToValue(math.Acos(call.Argument(0).ToFloat())) +} + +func (r *Runtime) math_acosh(call FunctionCall) Value { + return floatToValue(math.Acosh(call.Argument(0).ToFloat())) +} + +func (r *Runtime) math_asin(call FunctionCall) Value { + return floatToValue(math.Asin(call.Argument(0).ToFloat())) +} + +func (r *Runtime) math_asinh(call FunctionCall) Value { + return floatToValue(math.Asinh(call.Argument(0).ToFloat())) +} + +func (r *Runtime) math_atan(call FunctionCall) Value { + return floatToValue(math.Atan(call.Argument(0).ToFloat())) +} + +func (r *Runtime) math_atanh(call FunctionCall) Value { + return floatToValue(math.Atanh(call.Argument(0).ToFloat())) +} + +func (r *Runtime) math_atan2(call FunctionCall) Value { + y := call.Argument(0).ToFloat() + x := call.Argument(1).ToFloat() + + return floatToValue(math.Atan2(y, x)) +} + +func (r *Runtime) math_cbrt(call FunctionCall) Value { + return floatToValue(math.Cbrt(call.Argument(0).ToFloat())) +} + +func (r *Runtime) math_ceil(call FunctionCall) Value { + return floatToValue(math.Ceil(call.Argument(0).ToFloat())) +} + +func (r *Runtime) math_clz32(call FunctionCall) Value { + return intToValue(int64(bits.LeadingZeros32(toUint32(call.Argument(0))))) +} + 
+func (r *Runtime) math_cos(call FunctionCall) Value { + return floatToValue(math.Cos(call.Argument(0).ToFloat())) +} + +func (r *Runtime) math_cosh(call FunctionCall) Value { + return floatToValue(math.Cosh(call.Argument(0).ToFloat())) +} + +func (r *Runtime) math_exp(call FunctionCall) Value { + return floatToValue(math.Exp(call.Argument(0).ToFloat())) +} + +func (r *Runtime) math_expm1(call FunctionCall) Value { + return floatToValue(math.Expm1(call.Argument(0).ToFloat())) +} + +func (r *Runtime) math_floor(call FunctionCall) Value { + return floatToValue(math.Floor(call.Argument(0).ToFloat())) +} + +func (r *Runtime) math_fround(call FunctionCall) Value { + return floatToValue(float64(float32(call.Argument(0).ToFloat()))) +} + +func (r *Runtime) math_hypot(call FunctionCall) Value { + var max float64 + var hasNaN bool + absValues := make([]float64, 0, len(call.Arguments)) + for _, v := range call.Arguments { + arg := nilSafe(v).ToFloat() + if math.IsNaN(arg) { + hasNaN = true + } else { + abs := math.Abs(arg) + if abs > max { + max = abs + } + absValues = append(absValues, abs) + } + } + if math.IsInf(max, 1) { + return _positiveInf + } + if hasNaN { + return _NaN + } + if max == 0 { + return _positiveZero + } + + // Kahan summation to avoid rounding errors. + // Normalize the numbers to the largest one to avoid overflow. 
+ var sum, compensation float64 + for _, n := range absValues { + n /= max + summand := n*n - compensation + preliminary := sum + summand + compensation = (preliminary - sum) - summand + sum = preliminary + } + return floatToValue(math.Sqrt(sum) * max) +} + +func (r *Runtime) math_imul(call FunctionCall) Value { + x := toUint32(call.Argument(0)) + y := toUint32(call.Argument(1)) + return intToValue(int64(int32(x * y))) +} + +func (r *Runtime) math_log(call FunctionCall) Value { + return floatToValue(math.Log(call.Argument(0).ToFloat())) +} + +func (r *Runtime) math_log1p(call FunctionCall) Value { + return floatToValue(math.Log1p(call.Argument(0).ToFloat())) +} + +func (r *Runtime) math_log10(call FunctionCall) Value { + return floatToValue(math.Log10(call.Argument(0).ToFloat())) +} + +func (r *Runtime) math_log2(call FunctionCall) Value { + return floatToValue(math.Log2(call.Argument(0).ToFloat())) +} + +func (r *Runtime) math_max(call FunctionCall) Value { + if len(call.Arguments) == 0 { + return _negativeInf + } + + result := call.Arguments[0].ToFloat() + if math.IsNaN(result) { + return _NaN + } + for _, arg := range call.Arguments[1:] { + f := arg.ToFloat() + if math.IsNaN(f) { + return _NaN + } + result = math.Max(result, f) + } + return floatToValue(result) +} + +func (r *Runtime) math_min(call FunctionCall) Value { + if len(call.Arguments) == 0 { + return _positiveInf + } + + result := call.Arguments[0].ToFloat() + if math.IsNaN(result) { + return _NaN + } + for _, arg := range call.Arguments[1:] { + f := arg.ToFloat() + if math.IsNaN(f) { + return _NaN + } + result = math.Min(result, f) + } + return floatToValue(result) +} + +func (r *Runtime) math_pow(call FunctionCall) Value { + x := call.Argument(0) + y := call.Argument(1) + if x, ok := x.(valueInt); ok { + if y, ok := y.(valueInt); ok && y >= 0 && y < 64 { + if y == 0 { + return intToValue(1) + } + if x == 0 { + return intToValue(0) + } + ip := ipow(int64(x), int64(y)) + if ip != 0 { + return 
intToValue(ip) + } + } + } + + return floatToValue(math.Pow(x.ToFloat(), y.ToFloat())) +} + +func (r *Runtime) math_random(call FunctionCall) Value { + return floatToValue(r.rand()) +} + +func (r *Runtime) math_round(call FunctionCall) Value { + f := call.Argument(0).ToFloat() + if math.IsNaN(f) { + return _NaN + } + + if f == 0 && math.Signbit(f) { + return _negativeZero + } + + t := math.Trunc(f) + + if f >= 0 { + if f-t >= 0.5 { + return floatToValue(t + 1) + } + } else { + if t-f > 0.5 { + return floatToValue(t - 1) + } + } + + return floatToValue(t) +} + +func (r *Runtime) math_sign(call FunctionCall) Value { + arg := call.Argument(0) + num := arg.ToFloat() + if math.IsNaN(num) || num == 0 { // this will match -0 too + return arg + } + if num > 0 { + return intToValue(1) + } + return intToValue(-1) +} + +func (r *Runtime) math_sin(call FunctionCall) Value { + return floatToValue(math.Sin(call.Argument(0).ToFloat())) +} + +func (r *Runtime) math_sinh(call FunctionCall) Value { + return floatToValue(math.Sinh(call.Argument(0).ToFloat())) +} + +func (r *Runtime) math_sqrt(call FunctionCall) Value { + return floatToValue(math.Sqrt(call.Argument(0).ToFloat())) +} + +func (r *Runtime) math_tan(call FunctionCall) Value { + return floatToValue(math.Tan(call.Argument(0).ToFloat())) +} + +func (r *Runtime) math_tanh(call FunctionCall) Value { + return floatToValue(math.Tanh(call.Argument(0).ToFloat())) +} + +func (r *Runtime) math_trunc(call FunctionCall) Value { + arg := call.Argument(0) + if i, ok := arg.(valueInt); ok { + return i + } + return floatToValue(math.Trunc(arg.ToFloat())) +} + +func (r *Runtime) createMath(val *Object) objectImpl { + m := &baseObject{ + class: classMath, + val: val, + extensible: true, + prototype: r.global.ObjectPrototype, + } + m.init() + + m._putProp("E", valueFloat(math.E), false, false, false) + m._putProp("LN10", valueFloat(math.Ln10), false, false, false) + m._putProp("LN2", valueFloat(math.Ln2), false, false, false) + 
m._putProp("LOG10E", valueFloat(math.Log10E), false, false, false) + m._putProp("LOG2E", valueFloat(math.Log2E), false, false, false) + m._putProp("PI", valueFloat(math.Pi), false, false, false) + m._putProp("SQRT1_2", valueFloat(sqrt1_2), false, false, false) + m._putProp("SQRT2", valueFloat(math.Sqrt2), false, false, false) + m._putSym(SymToStringTag, valueProp(asciiString(classMath), false, false, true)) + + m._putProp("abs", r.newNativeFunc(r.math_abs, nil, "abs", nil, 1), true, false, true) + m._putProp("acos", r.newNativeFunc(r.math_acos, nil, "acos", nil, 1), true, false, true) + m._putProp("acosh", r.newNativeFunc(r.math_acosh, nil, "acosh", nil, 1), true, false, true) + m._putProp("asin", r.newNativeFunc(r.math_asin, nil, "asin", nil, 1), true, false, true) + m._putProp("asinh", r.newNativeFunc(r.math_asinh, nil, "asinh", nil, 1), true, false, true) + m._putProp("atan", r.newNativeFunc(r.math_atan, nil, "atan", nil, 1), true, false, true) + m._putProp("atanh", r.newNativeFunc(r.math_atanh, nil, "atanh", nil, 1), true, false, true) + m._putProp("atan2", r.newNativeFunc(r.math_atan2, nil, "atan2", nil, 2), true, false, true) + m._putProp("cbrt", r.newNativeFunc(r.math_cbrt, nil, "cbrt", nil, 1), true, false, true) + m._putProp("ceil", r.newNativeFunc(r.math_ceil, nil, "ceil", nil, 1), true, false, true) + m._putProp("clz32", r.newNativeFunc(r.math_clz32, nil, "clz32", nil, 1), true, false, true) + m._putProp("cos", r.newNativeFunc(r.math_cos, nil, "cos", nil, 1), true, false, true) + m._putProp("cosh", r.newNativeFunc(r.math_cosh, nil, "cosh", nil, 1), true, false, true) + m._putProp("exp", r.newNativeFunc(r.math_exp, nil, "exp", nil, 1), true, false, true) + m._putProp("expm1", r.newNativeFunc(r.math_expm1, nil, "expm1", nil, 1), true, false, true) + m._putProp("floor", r.newNativeFunc(r.math_floor, nil, "floor", nil, 1), true, false, true) + m._putProp("fround", r.newNativeFunc(r.math_fround, nil, "fround", nil, 1), true, false, true) + m._putProp("hypot", 
r.newNativeFunc(r.math_hypot, nil, "hypot", nil, 2), true, false, true) + m._putProp("imul", r.newNativeFunc(r.math_imul, nil, "imul", nil, 2), true, false, true) + m._putProp("log", r.newNativeFunc(r.math_log, nil, "log", nil, 1), true, false, true) + m._putProp("log1p", r.newNativeFunc(r.math_log1p, nil, "log1p", nil, 1), true, false, true) + m._putProp("log10", r.newNativeFunc(r.math_log10, nil, "log10", nil, 1), true, false, true) + m._putProp("log2", r.newNativeFunc(r.math_log2, nil, "log2", nil, 1), true, false, true) + m._putProp("max", r.newNativeFunc(r.math_max, nil, "max", nil, 2), true, false, true) + m._putProp("min", r.newNativeFunc(r.math_min, nil, "min", nil, 2), true, false, true) + m._putProp("pow", r.newNativeFunc(r.math_pow, nil, "pow", nil, 2), true, false, true) + m._putProp("random", r.newNativeFunc(r.math_random, nil, "random", nil, 0), true, false, true) + m._putProp("round", r.newNativeFunc(r.math_round, nil, "round", nil, 1), true, false, true) + m._putProp("sign", r.newNativeFunc(r.math_sign, nil, "sign", nil, 1), true, false, true) + m._putProp("sin", r.newNativeFunc(r.math_sin, nil, "sin", nil, 1), true, false, true) + m._putProp("sinh", r.newNativeFunc(r.math_sinh, nil, "sinh", nil, 1), true, false, true) + m._putProp("sqrt", r.newNativeFunc(r.math_sqrt, nil, "sqrt", nil, 1), true, false, true) + m._putProp("tan", r.newNativeFunc(r.math_tan, nil, "tan", nil, 1), true, false, true) + m._putProp("tanh", r.newNativeFunc(r.math_tanh, nil, "tanh", nil, 1), true, false, true) + m._putProp("trunc", r.newNativeFunc(r.math_trunc, nil, "trunc", nil, 1), true, false, true) + + return m +} + +func (r *Runtime) initMath() { + r.addToGlobal("Math", r.newLazyObject(r.createMath)) +} diff --git a/vendor/github.com/dop251/goja/builtin_number.go b/vendor/github.com/dop251/goja/builtin_number.go new file mode 100644 index 0000000000..163e16e01a --- /dev/null +++ b/vendor/github.com/dop251/goja/builtin_number.go @@ -0,0 +1,210 @@ +package goja + +import ( 
+ "math" + + "github.com/dop251/goja/ftoa" +) + +func (r *Runtime) numberproto_valueOf(call FunctionCall) Value { + this := call.This + if !isNumber(this) { + r.typeErrorResult(true, "Value is not a number") + } + switch t := this.(type) { + case valueInt, valueFloat: + return this + case *Object: + if v, ok := t.self.(*primitiveValueObject); ok { + return v.pValue + } + } + + panic(r.NewTypeError("Number.prototype.valueOf is not generic")) +} + +func isNumber(v Value) bool { + switch t := v.(type) { + case valueFloat, valueInt: + return true + case *Object: + switch t := t.self.(type) { + case *primitiveValueObject: + return isNumber(t.pValue) + } + } + return false +} + +func (r *Runtime) numberproto_toString(call FunctionCall) Value { + if !isNumber(call.This) { + r.typeErrorResult(true, "Value is not a number") + } + var radix int + if arg := call.Argument(0); arg != _undefined { + radix = int(arg.ToInteger()) + } else { + radix = 10 + } + + if radix < 2 || radix > 36 { + panic(r.newError(r.global.RangeError, "toString() radix argument must be between 2 and 36")) + } + + num := call.This.ToFloat() + + if math.IsNaN(num) { + return stringNaN + } + + if math.IsInf(num, 1) { + return stringInfinity + } + + if math.IsInf(num, -1) { + return stringNegInfinity + } + + if radix == 10 { + return asciiString(fToStr(num, ftoa.ModeStandard, 0)) + } + + return asciiString(ftoa.FToBaseStr(num, radix)) +} + +func (r *Runtime) numberproto_toFixed(call FunctionCall) Value { + num := r.toNumber(call.This).ToFloat() + prec := call.Argument(0).ToInteger() + + if prec < 0 || prec > 100 { + panic(r.newError(r.global.RangeError, "toFixed() precision must be between 0 and 100")) + } + if math.IsNaN(num) { + return stringNaN + } + return asciiString(fToStr(num, ftoa.ModeFixed, int(prec))) +} + +func (r *Runtime) numberproto_toExponential(call FunctionCall) Value { + num := r.toNumber(call.This).ToFloat() + precVal := call.Argument(0) + var prec int64 + if precVal == _undefined { + 
return asciiString(fToStr(num, ftoa.ModeStandardExponential, 0)) + } else { + prec = precVal.ToInteger() + } + + if math.IsNaN(num) { + return stringNaN + } + if math.IsInf(num, 1) { + return stringInfinity + } + if math.IsInf(num, -1) { + return stringNegInfinity + } + + if prec < 0 || prec > 100 { + panic(r.newError(r.global.RangeError, "toExponential() precision must be between 0 and 100")) + } + + return asciiString(fToStr(num, ftoa.ModeExponential, int(prec+1))) +} + +func (r *Runtime) numberproto_toPrecision(call FunctionCall) Value { + numVal := r.toNumber(call.This) + precVal := call.Argument(0) + if precVal == _undefined { + return numVal.toString() + } + num := numVal.ToFloat() + prec := precVal.ToInteger() + + if math.IsNaN(num) { + return stringNaN + } + if math.IsInf(num, 1) { + return stringInfinity + } + if math.IsInf(num, -1) { + return stringNegInfinity + } + if prec < 1 || prec > 100 { + panic(r.newError(r.global.RangeError, "toPrecision() precision must be between 1 and 100")) + } + + return asciiString(fToStr(num, ftoa.ModePrecision, int(prec))) +} + +func (r *Runtime) number_isFinite(call FunctionCall) Value { + switch arg := call.Argument(0).(type) { + case valueInt: + return valueTrue + case valueFloat: + f := float64(arg) + return r.toBoolean(!math.IsInf(f, 0) && !math.IsNaN(f)) + default: + return valueFalse + } +} + +func (r *Runtime) number_isInteger(call FunctionCall) Value { + switch arg := call.Argument(0).(type) { + case valueInt: + return valueTrue + case valueFloat: + f := float64(arg) + return r.toBoolean(!math.IsNaN(f) && !math.IsInf(f, 0) && math.Floor(f) == f) + default: + return valueFalse + } +} + +func (r *Runtime) number_isNaN(call FunctionCall) Value { + if f, ok := call.Argument(0).(valueFloat); ok && math.IsNaN(float64(f)) { + return valueTrue + } + return valueFalse +} + +func (r *Runtime) number_isSafeInteger(call FunctionCall) Value { + arg := call.Argument(0) + if i, ok := arg.(valueInt); ok && i >= -(maxInt-1) && i 
<= maxInt-1 { + return valueTrue + } + if arg == _negativeZero { + return valueTrue + } + return valueFalse +} + +func (r *Runtime) initNumber() { + r.global.NumberPrototype = r.newPrimitiveObject(valueInt(0), r.global.ObjectPrototype, classNumber) + o := r.global.NumberPrototype.self + o._putProp("toExponential", r.newNativeFunc(r.numberproto_toExponential, nil, "toExponential", nil, 1), true, false, true) + o._putProp("toFixed", r.newNativeFunc(r.numberproto_toFixed, nil, "toFixed", nil, 1), true, false, true) + o._putProp("toLocaleString", r.newNativeFunc(r.numberproto_toString, nil, "toLocaleString", nil, 0), true, false, true) + o._putProp("toPrecision", r.newNativeFunc(r.numberproto_toPrecision, nil, "toPrecision", nil, 1), true, false, true) + o._putProp("toString", r.newNativeFunc(r.numberproto_toString, nil, "toString", nil, 1), true, false, true) + o._putProp("valueOf", r.newNativeFunc(r.numberproto_valueOf, nil, "valueOf", nil, 0), true, false, true) + + r.global.Number = r.newNativeFunc(r.builtin_Number, r.builtin_newNumber, "Number", r.global.NumberPrototype, 1) + o = r.global.Number.self + o._putProp("EPSILON", _epsilon, false, false, false) + o._putProp("isFinite", r.newNativeFunc(r.number_isFinite, nil, "isFinite", nil, 1), true, false, true) + o._putProp("isInteger", r.newNativeFunc(r.number_isInteger, nil, "isInteger", nil, 1), true, false, true) + o._putProp("isNaN", r.newNativeFunc(r.number_isNaN, nil, "isNaN", nil, 1), true, false, true) + o._putProp("isSafeInteger", r.newNativeFunc(r.number_isSafeInteger, nil, "isSafeInteger", nil, 1), true, false, true) + o._putProp("MAX_SAFE_INTEGER", valueInt(maxInt-1), false, false, false) + o._putProp("MIN_SAFE_INTEGER", valueInt(-(maxInt - 1)), false, false, false) + o._putProp("MIN_VALUE", valueFloat(math.SmallestNonzeroFloat64), false, false, false) + o._putProp("MAX_VALUE", valueFloat(math.MaxFloat64), false, false, false) + o._putProp("NaN", _NaN, false, false, false) + 
o._putProp("NEGATIVE_INFINITY", _negativeInf, false, false, false) + o._putProp("parseFloat", r.Get("parseFloat"), true, false, true) + o._putProp("parseInt", r.Get("parseInt"), true, false, true) + o._putProp("POSITIVE_INFINITY", _positiveInf, false, false, false) + r.addToGlobal("Number", r.global.Number) + +} diff --git a/vendor/github.com/dop251/goja/builtin_object.go b/vendor/github.com/dop251/goja/builtin_object.go new file mode 100644 index 0000000000..b7070faac3 --- /dev/null +++ b/vendor/github.com/dop251/goja/builtin_object.go @@ -0,0 +1,584 @@ +package goja + +import ( + "fmt" +) + +func (r *Runtime) builtin_Object(args []Value, proto *Object) *Object { + if len(args) > 0 { + arg := args[0] + if arg != _undefined && arg != _null { + return arg.ToObject(r) + } + } + return r.newBaseObject(proto, classObject).val +} + +func (r *Runtime) object_getPrototypeOf(call FunctionCall) Value { + o := call.Argument(0).ToObject(r) + p := o.self.proto() + if p == nil { + return _null + } + return p +} + +func (r *Runtime) valuePropToDescriptorObject(desc Value) Value { + if desc == nil { + return _undefined + } + var writable, configurable, enumerable, accessor bool + var get, set *Object + var value Value + if v, ok := desc.(*valueProperty); ok { + writable = v.writable + configurable = v.configurable + enumerable = v.enumerable + accessor = v.accessor + value = v.value + get = v.getterFunc + set = v.setterFunc + } else { + writable = true + configurable = true + enumerable = true + value = desc + } + + ret := r.NewObject() + obj := ret.self + if !accessor { + obj.setOwnStr("value", value, false) + obj.setOwnStr("writable", r.toBoolean(writable), false) + } else { + if get != nil { + obj.setOwnStr("get", get, false) + } else { + obj.setOwnStr("get", _undefined, false) + } + if set != nil { + obj.setOwnStr("set", set, false) + } else { + obj.setOwnStr("set", _undefined, false) + } + } + obj.setOwnStr("enumerable", r.toBoolean(enumerable), false) + 
obj.setOwnStr("configurable", r.toBoolean(configurable), false) + + return ret +} + +func (r *Runtime) object_getOwnPropertyDescriptor(call FunctionCall) Value { + o := call.Argument(0).ToObject(r) + propName := toPropertyKey(call.Argument(1)) + return r.valuePropToDescriptorObject(o.getOwnProp(propName)) +} + +func (r *Runtime) object_getOwnPropertyDescriptors(call FunctionCall) Value { + o := call.Argument(0).ToObject(r) + ownKeys := o.self.ownPropertyKeys(true, nil) + result := r.newBaseObject(r.global.ObjectPrototype, classObject).val + for _, key := range ownKeys { + descriptor := r.valuePropToDescriptorObject(o.getOwnProp(key)) + if descriptor != _undefined { + createDataPropertyOrThrow(result, key, descriptor) + } + } + return result +} + +func (r *Runtime) object_getOwnPropertyNames(call FunctionCall) Value { + obj := call.Argument(0).ToObject(r) + + return r.newArrayValues(obj.self.ownKeys(true, nil)) +} + +func (r *Runtime) object_getOwnPropertySymbols(call FunctionCall) Value { + obj := call.Argument(0).ToObject(r) + return r.newArrayValues(obj.self.ownSymbols(true, nil)) +} + +func (r *Runtime) toValueProp(v Value) *valueProperty { + if v == nil || v == _undefined { + return nil + } + obj := r.toObject(v) + getter := obj.self.getStr("get", nil) + setter := obj.self.getStr("set", nil) + writable := obj.self.getStr("writable", nil) + value := obj.self.getStr("value", nil) + if (getter != nil || setter != nil) && (value != nil || writable != nil) { + r.typeErrorResult(true, "Invalid property descriptor. 
Cannot both specify accessors and a value or writable attribute") + } + + ret := &valueProperty{} + if writable != nil && writable.ToBoolean() { + ret.writable = true + } + if e := obj.self.getStr("enumerable", nil); e != nil && e.ToBoolean() { + ret.enumerable = true + } + if c := obj.self.getStr("configurable", nil); c != nil && c.ToBoolean() { + ret.configurable = true + } + ret.value = value + + if getter != nil && getter != _undefined { + o := r.toObject(getter) + if _, ok := o.self.assertCallable(); !ok { + r.typeErrorResult(true, "getter must be a function") + } + ret.getterFunc = o + } + + if setter != nil && setter != _undefined { + o := r.toObject(v) + if _, ok := o.self.assertCallable(); !ok { + r.typeErrorResult(true, "setter must be a function") + } + ret.setterFunc = o + } + + if ret.getterFunc != nil || ret.setterFunc != nil { + ret.accessor = true + } + + return ret +} + +func (r *Runtime) toPropertyDescriptor(v Value) (ret PropertyDescriptor) { + if o, ok := v.(*Object); ok { + descr := o.self + + // Save the original descriptor for reference + ret.jsDescriptor = o + + ret.Value = descr.getStr("value", nil) + + if p := descr.getStr("writable", nil); p != nil { + ret.Writable = ToFlag(p.ToBoolean()) + } + if p := descr.getStr("enumerable", nil); p != nil { + ret.Enumerable = ToFlag(p.ToBoolean()) + } + if p := descr.getStr("configurable", nil); p != nil { + ret.Configurable = ToFlag(p.ToBoolean()) + } + + ret.Getter = descr.getStr("get", nil) + ret.Setter = descr.getStr("set", nil) + + if ret.Getter != nil && ret.Getter != _undefined { + if _, ok := r.toObject(ret.Getter).self.assertCallable(); !ok { + r.typeErrorResult(true, "getter must be a function") + } + } + + if ret.Setter != nil && ret.Setter != _undefined { + if _, ok := r.toObject(ret.Setter).self.assertCallable(); !ok { + r.typeErrorResult(true, "setter must be a function") + } + } + + if (ret.Getter != nil || ret.Setter != nil) && (ret.Value != nil || ret.Writable != FLAG_NOT_SET) { + 
r.typeErrorResult(true, "Invalid property descriptor. Cannot both specify accessors and a value or writable attribute") + } + } else { + r.typeErrorResult(true, "Property description must be an object: %s", v.String()) + } + + return +} + +func (r *Runtime) _defineProperties(o *Object, p Value) { + type propItem struct { + name Value + prop PropertyDescriptor + } + props := p.ToObject(r) + names := props.self.ownPropertyKeys(false, nil) + list := make([]propItem, 0, len(names)) + for _, itemName := range names { + list = append(list, propItem{ + name: itemName, + prop: r.toPropertyDescriptor(props.get(itemName, nil)), + }) + } + for _, prop := range list { + o.defineOwnProperty(prop.name, prop.prop, true) + } +} + +func (r *Runtime) object_create(call FunctionCall) Value { + var proto *Object + if arg := call.Argument(0); arg != _null { + if o, ok := arg.(*Object); ok { + proto = o + } else { + r.typeErrorResult(true, "Object prototype may only be an Object or null: %s", arg.String()) + } + } + o := r.newBaseObject(proto, classObject).val + + if props := call.Argument(1); props != _undefined { + r._defineProperties(o, props) + } + + return o +} + +func (r *Runtime) object_defineProperty(call FunctionCall) (ret Value) { + if obj, ok := call.Argument(0).(*Object); ok { + descr := r.toPropertyDescriptor(call.Argument(2)) + obj.defineOwnProperty(toPropertyKey(call.Argument(1)), descr, true) + ret = call.Argument(0) + } else { + r.typeErrorResult(true, "Object.defineProperty called on non-object") + } + return +} + +func (r *Runtime) object_defineProperties(call FunctionCall) Value { + obj := r.toObject(call.Argument(0)) + r._defineProperties(obj, call.Argument(1)) + return obj +} + +func (r *Runtime) object_seal(call FunctionCall) Value { + // ES6 + arg := call.Argument(0) + if obj, ok := arg.(*Object); ok { + descr := PropertyDescriptor{ + Writable: FLAG_TRUE, + Enumerable: FLAG_TRUE, + Configurable: FLAG_FALSE, + } + for _, key := range obj.self.ownPropertyKeys(true, 
nil) { + v := obj.getOwnProp(key) + if prop, ok := v.(*valueProperty); ok { + if !prop.configurable { + continue + } + prop.configurable = false + } else { + descr.Value = v + obj.defineOwnProperty(key, descr, true) + } + } + obj.self.preventExtensions(false) + return obj + } + return arg +} + +func (r *Runtime) object_freeze(call FunctionCall) Value { + arg := call.Argument(0) + if obj, ok := arg.(*Object); ok { + descr := PropertyDescriptor{ + Writable: FLAG_FALSE, + Enumerable: FLAG_TRUE, + Configurable: FLAG_FALSE, + } + for _, key := range obj.self.ownPropertyKeys(true, nil) { + v := obj.getOwnProp(key) + if prop, ok := v.(*valueProperty); ok { + prop.configurable = false + if prop.value != nil { + prop.writable = false + } + } else { + descr.Value = v + obj.defineOwnProperty(key, descr, true) + } + } + obj.self.preventExtensions(false) + return obj + } else { + // ES6 behavior + return arg + } +} + +func (r *Runtime) object_preventExtensions(call FunctionCall) (ret Value) { + arg := call.Argument(0) + if obj, ok := arg.(*Object); ok { + obj.self.preventExtensions(false) + return obj + } + // ES6 + //r.typeErrorResult(true, "Object.preventExtensions called on non-object") + //panic("Unreachable") + return arg +} + +func (r *Runtime) object_isSealed(call FunctionCall) Value { + if obj, ok := call.Argument(0).(*Object); ok { + if obj.self.isExtensible() { + return valueFalse + } + for _, key := range obj.self.ownPropertyKeys(true, nil) { + prop := obj.getOwnProp(key) + if prop, ok := prop.(*valueProperty); ok { + if prop.configurable { + return valueFalse + } + } else { + return valueFalse + } + } + } + return valueTrue +} + +func (r *Runtime) object_isFrozen(call FunctionCall) Value { + if obj, ok := call.Argument(0).(*Object); ok { + if obj.self.isExtensible() { + return valueFalse + } + for _, key := range obj.self.ownPropertyKeys(true, nil) { + prop := obj.getOwnProp(key) + if prop, ok := prop.(*valueProperty); ok { + if prop.configurable || prop.value != 
nil && prop.writable { + return valueFalse + } + } else { + return valueFalse + } + } + } + return valueTrue +} + +func (r *Runtime) object_isExtensible(call FunctionCall) Value { + if obj, ok := call.Argument(0).(*Object); ok { + if obj.self.isExtensible() { + return valueTrue + } + return valueFalse + } else { + // ES6 + //r.typeErrorResult(true, "Object.isExtensible called on non-object") + return valueFalse + } +} + +func (r *Runtime) object_keys(call FunctionCall) Value { + obj := call.Argument(0).ToObject(r) + + return r.newArrayValues(obj.self.ownKeys(false, nil)) +} + +func (r *Runtime) object_entries(call FunctionCall) Value { + obj := call.Argument(0).ToObject(r) + + var values []Value + iter := &enumerableIter{ + wrapped: obj.self.enumerateOwnKeys(), + } + + for item, next := iter.next(); next != nil; item, next = next() { + v := obj.self.getStr(item.name, nil) + values = append(values, r.newArrayValues([]Value{stringValueFromRaw(item.name), v})) + } + + return r.newArrayValues(values) +} + +func (r *Runtime) object_values(call FunctionCall) Value { + obj := call.Argument(0).ToObject(r) + + var values []Value + iter := &enumerableIter{ + wrapped: obj.self.enumerateOwnKeys(), + } + + for item, next := iter.next(); next != nil; item, next = next() { + values = append(values, obj.self.getStr(item.name, nil)) + } + + return r.newArrayValues(values) +} + +func (r *Runtime) objectproto_hasOwnProperty(call FunctionCall) Value { + p := toPropertyKey(call.Argument(0)) + o := call.This.ToObject(r) + if o.hasOwnProperty(p) { + return valueTrue + } else { + return valueFalse + } +} + +func (r *Runtime) objectproto_isPrototypeOf(call FunctionCall) Value { + if v, ok := call.Argument(0).(*Object); ok { + o := call.This.ToObject(r) + for { + v = v.self.proto() + if v == nil { + break + } + if v == o { + return valueTrue + } + } + } + return valueFalse +} + +func (r *Runtime) objectproto_propertyIsEnumerable(call FunctionCall) Value { + p := 
toPropertyKey(call.Argument(0)) + o := call.This.ToObject(r) + pv := o.getOwnProp(p) + if pv == nil { + return valueFalse + } + if prop, ok := pv.(*valueProperty); ok { + if !prop.enumerable { + return valueFalse + } + } + return valueTrue +} + +func (r *Runtime) objectproto_toString(call FunctionCall) Value { + switch o := call.This.(type) { + case valueNull: + return stringObjectNull + case valueUndefined: + return stringObjectUndefined + default: + obj := o.ToObject(r) + var clsName string + if isArray(obj) { + clsName = classArray + } else { + clsName = obj.self.className() + } + if tag := obj.self.getSym(SymToStringTag, nil); tag != nil { + if str, ok := tag.(valueString); ok { + clsName = str.String() + } + } + return newStringValue(fmt.Sprintf("[object %s]", clsName)) + } +} + +func (r *Runtime) objectproto_toLocaleString(call FunctionCall) Value { + toString := toMethod(r.getVStr(call.This, "toString")) + return toString(FunctionCall{This: call.This}) +} + +func (r *Runtime) objectproto_getProto(call FunctionCall) Value { + proto := call.This.ToObject(r).self.proto() + if proto != nil { + return proto + } + return _null +} + +func (r *Runtime) objectproto_setProto(call FunctionCall) Value { + o := call.This + r.checkObjectCoercible(o) + proto := r.toProto(call.Argument(0)) + if o, ok := o.(*Object); ok { + o.self.setProto(proto, true) + } + + return _undefined +} + +func (r *Runtime) objectproto_valueOf(call FunctionCall) Value { + return call.This.ToObject(r) +} + +func (r *Runtime) object_assign(call FunctionCall) Value { + to := call.Argument(0).ToObject(r) + if len(call.Arguments) > 1 { + for _, arg := range call.Arguments[1:] { + if arg != _undefined && arg != _null { + source := arg.ToObject(r) + for _, key := range source.self.ownPropertyKeys(true, nil) { + p := source.getOwnProp(key) + if p == nil { + continue + } + if v, ok := p.(*valueProperty); ok { + if !v.enumerable { + continue + } + p = v.get(source) + } + to.setOwn(key, p, true) + } + } + } 
+ } + + return to +} + +func (r *Runtime) object_is(call FunctionCall) Value { + return r.toBoolean(call.Argument(0).SameAs(call.Argument(1))) +} + +func (r *Runtime) toProto(proto Value) *Object { + if proto != _null { + if obj, ok := proto.(*Object); ok { + return obj + } else { + panic(r.NewTypeError("Object prototype may only be an Object or null: %s", proto)) + } + } + return nil +} + +func (r *Runtime) object_setPrototypeOf(call FunctionCall) Value { + o := call.Argument(0) + r.checkObjectCoercible(o) + proto := r.toProto(call.Argument(1)) + if o, ok := o.(*Object); ok { + o.self.setProto(proto, true) + } + + return o +} + +func (r *Runtime) initObject() { + o := r.global.ObjectPrototype.self + o._putProp("toString", r.newNativeFunc(r.objectproto_toString, nil, "toString", nil, 0), true, false, true) + o._putProp("toLocaleString", r.newNativeFunc(r.objectproto_toLocaleString, nil, "toLocaleString", nil, 0), true, false, true) + o._putProp("valueOf", r.newNativeFunc(r.objectproto_valueOf, nil, "valueOf", nil, 0), true, false, true) + o._putProp("hasOwnProperty", r.newNativeFunc(r.objectproto_hasOwnProperty, nil, "hasOwnProperty", nil, 1), true, false, true) + o._putProp("isPrototypeOf", r.newNativeFunc(r.objectproto_isPrototypeOf, nil, "isPrototypeOf", nil, 1), true, false, true) + o._putProp("propertyIsEnumerable", r.newNativeFunc(r.objectproto_propertyIsEnumerable, nil, "propertyIsEnumerable", nil, 1), true, false, true) + o.defineOwnPropertyStr(__proto__, PropertyDescriptor{ + Getter: r.newNativeFunc(r.objectproto_getProto, nil, "get __proto__", nil, 0), + Setter: r.newNativeFunc(r.objectproto_setProto, nil, "set __proto__", nil, 1), + Configurable: FLAG_TRUE, + }, true) + + r.global.Object = r.newNativeFuncConstruct(r.builtin_Object, classObject, r.global.ObjectPrototype, 1) + o = r.global.Object.self + o._putProp("assign", r.newNativeFunc(r.object_assign, nil, "assign", nil, 2), true, false, true) + o._putProp("defineProperty", 
r.newNativeFunc(r.object_defineProperty, nil, "defineProperty", nil, 3), true, false, true) + o._putProp("defineProperties", r.newNativeFunc(r.object_defineProperties, nil, "defineProperties", nil, 2), true, false, true) + o._putProp("entries", r.newNativeFunc(r.object_entries, nil, "entries", nil, 1), true, false, true) + o._putProp("getOwnPropertyDescriptor", r.newNativeFunc(r.object_getOwnPropertyDescriptor, nil, "getOwnPropertyDescriptor", nil, 2), true, false, true) + o._putProp("getOwnPropertyDescriptors", r.newNativeFunc(r.object_getOwnPropertyDescriptors, nil, "getOwnPropertyDescriptors", nil, 1), true, false, true) + o._putProp("getPrototypeOf", r.newNativeFunc(r.object_getPrototypeOf, nil, "getPrototypeOf", nil, 1), true, false, true) + o._putProp("is", r.newNativeFunc(r.object_is, nil, "is", nil, 2), true, false, true) + o._putProp("getOwnPropertyNames", r.newNativeFunc(r.object_getOwnPropertyNames, nil, "getOwnPropertyNames", nil, 1), true, false, true) + o._putProp("getOwnPropertySymbols", r.newNativeFunc(r.object_getOwnPropertySymbols, nil, "getOwnPropertySymbols", nil, 1), true, false, true) + o._putProp("create", r.newNativeFunc(r.object_create, nil, "create", nil, 2), true, false, true) + o._putProp("seal", r.newNativeFunc(r.object_seal, nil, "seal", nil, 1), true, false, true) + o._putProp("freeze", r.newNativeFunc(r.object_freeze, nil, "freeze", nil, 1), true, false, true) + o._putProp("preventExtensions", r.newNativeFunc(r.object_preventExtensions, nil, "preventExtensions", nil, 1), true, false, true) + o._putProp("isSealed", r.newNativeFunc(r.object_isSealed, nil, "isSealed", nil, 1), true, false, true) + o._putProp("isFrozen", r.newNativeFunc(r.object_isFrozen, nil, "isFrozen", nil, 1), true, false, true) + o._putProp("isExtensible", r.newNativeFunc(r.object_isExtensible, nil, "isExtensible", nil, 1), true, false, true) + o._putProp("keys", r.newNativeFunc(r.object_keys, nil, "keys", nil, 1), true, false, true) + o._putProp("setPrototypeOf", 
r.newNativeFunc(r.object_setPrototypeOf, nil, "setPrototypeOf", nil, 2), true, false, true) + o._putProp("values", r.newNativeFunc(r.object_values, nil, "values", nil, 1), true, false, true) + + r.addToGlobal("Object", r.global.Object) +} diff --git a/vendor/github.com/dop251/goja/builtin_proxy.go b/vendor/github.com/dop251/goja/builtin_proxy.go new file mode 100644 index 0000000000..ee62f75e45 --- /dev/null +++ b/vendor/github.com/dop251/goja/builtin_proxy.go @@ -0,0 +1,391 @@ +package goja + +import ( + "github.com/dop251/goja/unistring" +) + +type nativeProxyHandler struct { + handler *ProxyTrapConfig +} + +func (h *nativeProxyHandler) getPrototypeOf(target *Object) (Value, bool) { + if trap := h.handler.GetPrototypeOf; trap != nil { + return trap(target), true + } + return nil, false +} + +func (h *nativeProxyHandler) setPrototypeOf(target *Object, proto *Object) (bool, bool) { + if trap := h.handler.SetPrototypeOf; trap != nil { + return trap(target, proto), true + } + return false, false +} + +func (h *nativeProxyHandler) isExtensible(target *Object) (bool, bool) { + if trap := h.handler.IsExtensible; trap != nil { + return trap(target), true + } + return false, false +} + +func (h *nativeProxyHandler) preventExtensions(target *Object) (bool, bool) { + if trap := h.handler.PreventExtensions; trap != nil { + return trap(target), true + } + return false, false +} + +func (h *nativeProxyHandler) getOwnPropertyDescriptorStr(target *Object, prop unistring.String) (Value, bool) { + if trap := h.handler.GetOwnPropertyDescriptorIdx; trap != nil { + if idx, ok := strToInt(prop); ok { + desc := trap(target, idx) + return desc.toValue(target.runtime), true + } + } + if trap := h.handler.GetOwnPropertyDescriptor; trap != nil { + desc := trap(target, prop.String()) + return desc.toValue(target.runtime), true + } + return nil, false +} + +func (h *nativeProxyHandler) getOwnPropertyDescriptorIdx(target *Object, prop valueInt) (Value, bool) { + if trap := 
h.handler.GetOwnPropertyDescriptorIdx; trap != nil { + desc := trap(target, toIntStrict(int64(prop))) + return desc.toValue(target.runtime), true + } + if trap := h.handler.GetOwnPropertyDescriptor; trap != nil { + desc := trap(target, prop.String()) + return desc.toValue(target.runtime), true + } + return nil, false +} + +func (h *nativeProxyHandler) getOwnPropertyDescriptorSym(target *Object, prop *Symbol) (Value, bool) { + if trap := h.handler.GetOwnPropertyDescriptorSym; trap != nil { + desc := trap(target, prop) + return desc.toValue(target.runtime), true + } + return nil, false +} + +func (h *nativeProxyHandler) definePropertyStr(target *Object, prop unistring.String, desc PropertyDescriptor) (bool, bool) { + if trap := h.handler.DefinePropertyIdx; trap != nil { + if idx, ok := strToInt(prop); ok { + return trap(target, idx, desc), true + } + } + if trap := h.handler.DefineProperty; trap != nil { + return trap(target, prop.String(), desc), true + } + return false, false +} + +func (h *nativeProxyHandler) definePropertyIdx(target *Object, prop valueInt, desc PropertyDescriptor) (bool, bool) { + if trap := h.handler.DefinePropertyIdx; trap != nil { + return trap(target, toIntStrict(int64(prop)), desc), true + } + if trap := h.handler.DefineProperty; trap != nil { + return trap(target, prop.String(), desc), true + } + return false, false +} + +func (h *nativeProxyHandler) definePropertySym(target *Object, prop *Symbol, desc PropertyDescriptor) (bool, bool) { + if trap := h.handler.DefinePropertySym; trap != nil { + return trap(target, prop, desc), true + } + return false, false +} + +func (h *nativeProxyHandler) hasStr(target *Object, prop unistring.String) (bool, bool) { + if trap := h.handler.HasIdx; trap != nil { + if idx, ok := strToInt(prop); ok { + return trap(target, idx), true + } + } + if trap := h.handler.Has; trap != nil { + return trap(target, prop.String()), true + } + return false, false +} + +func (h *nativeProxyHandler) hasIdx(target *Object, 
prop valueInt) (bool, bool) { + if trap := h.handler.HasIdx; trap != nil { + return trap(target, toIntStrict(int64(prop))), true + } + if trap := h.handler.Has; trap != nil { + return trap(target, prop.String()), true + } + return false, false +} + +func (h *nativeProxyHandler) hasSym(target *Object, prop *Symbol) (bool, bool) { + if trap := h.handler.HasSym; trap != nil { + return trap(target, prop), true + } + return false, false +} + +func (h *nativeProxyHandler) getStr(target *Object, prop unistring.String, receiver Value) (Value, bool) { + if trap := h.handler.GetIdx; trap != nil { + if idx, ok := strToInt(prop); ok { + return trap(target, idx, receiver), true + } + } + if trap := h.handler.Get; trap != nil { + return trap(target, prop.String(), receiver), true + } + return nil, false +} + +func (h *nativeProxyHandler) getIdx(target *Object, prop valueInt, receiver Value) (Value, bool) { + if trap := h.handler.GetIdx; trap != nil { + return trap(target, toIntStrict(int64(prop)), receiver), true + } + if trap := h.handler.Get; trap != nil { + return trap(target, prop.String(), receiver), true + } + return nil, false +} + +func (h *nativeProxyHandler) getSym(target *Object, prop *Symbol, receiver Value) (Value, bool) { + if trap := h.handler.GetSym; trap != nil { + return trap(target, prop, receiver), true + } + return nil, false +} + +func (h *nativeProxyHandler) setStr(target *Object, prop unistring.String, value Value, receiver Value) (bool, bool) { + if trap := h.handler.SetIdx; trap != nil { + if idx, ok := strToInt(prop); ok { + return trap(target, idx, value, receiver), true + } + } + if trap := h.handler.Set; trap != nil { + return trap(target, prop.String(), value, receiver), true + } + return false, false +} + +func (h *nativeProxyHandler) setIdx(target *Object, prop valueInt, value Value, receiver Value) (bool, bool) { + if trap := h.handler.SetIdx; trap != nil { + return trap(target, toIntStrict(int64(prop)), value, receiver), true + } + if trap := 
h.handler.Set; trap != nil { + return trap(target, prop.String(), value, receiver), true + } + return false, false +} + +func (h *nativeProxyHandler) setSym(target *Object, prop *Symbol, value Value, receiver Value) (bool, bool) { + if trap := h.handler.SetSym; trap != nil { + return trap(target, prop, value, receiver), true + } + return false, false +} + +func (h *nativeProxyHandler) deleteStr(target *Object, prop unistring.String) (bool, bool) { + if trap := h.handler.DeletePropertyIdx; trap != nil { + if idx, ok := strToInt(prop); ok { + return trap(target, idx), true + } + } + if trap := h.handler.DeleteProperty; trap != nil { + return trap(target, prop.String()), true + } + return false, false +} + +func (h *nativeProxyHandler) deleteIdx(target *Object, prop valueInt) (bool, bool) { + if trap := h.handler.DeletePropertyIdx; trap != nil { + return trap(target, toIntStrict(int64(prop))), true + } + if trap := h.handler.DeleteProperty; trap != nil { + return trap(target, prop.String()), true + } + return false, false +} + +func (h *nativeProxyHandler) deleteSym(target *Object, prop *Symbol) (bool, bool) { + if trap := h.handler.DeletePropertySym; trap != nil { + return trap(target, prop), true + } + return false, false +} + +func (h *nativeProxyHandler) ownKeys(target *Object) (*Object, bool) { + if trap := h.handler.OwnKeys; trap != nil { + return trap(target), true + } + return nil, false +} + +func (h *nativeProxyHandler) apply(target *Object, this Value, args []Value) (Value, bool) { + if trap := h.handler.Apply; trap != nil { + return trap(target, this, args), true + } + return nil, false +} + +func (h *nativeProxyHandler) construct(target *Object, args []Value, newTarget *Object) (Value, bool) { + if trap := h.handler.Construct; trap != nil { + return trap(target, args, newTarget), true + } + return nil, false +} + +func (h *nativeProxyHandler) toObject(runtime *Runtime) *Object { + return runtime.ToValue(h.handler).ToObject(runtime) +} + +func (r *Runtime) 
newNativeProxyHandler(nativeHandler *ProxyTrapConfig) proxyHandler { + return &nativeProxyHandler{handler: nativeHandler} +} + +// ProxyTrapConfig provides a simplified Go-friendly API for implementing Proxy traps. +// If an *Idx trap is defined it gets called for integer property keys, including negative ones. Note that +// this only includes string property keys that represent a canonical integer +// (i.e. "0", "123", but not "00", "01", " 1" or "-0"). +// For efficiency strings representing integers exceeding 2^53 are not checked to see if they are canonical, +// i.e. the *Idx traps will receive "9007199254740993" as well as "9007199254740994", even though the former is not +// a canonical representation in ECMAScript (Number("9007199254740993") === 9007199254740992). +// See https://262.ecma-international.org/#sec-canonicalnumericindexstring +// If an *Idx trap is not set, the corresponding string one is used. +type ProxyTrapConfig struct { + // A trap for Object.getPrototypeOf, Reflect.getPrototypeOf, __proto__, Object.prototype.isPrototypeOf, instanceof + GetPrototypeOf func(target *Object) (prototype *Object) + + // A trap for Object.setPrototypeOf, Reflect.setPrototypeOf + SetPrototypeOf func(target *Object, prototype *Object) (success bool) + + // A trap for Object.isExtensible, Reflect.isExtensible + IsExtensible func(target *Object) (success bool) + + // A trap for Object.preventExtensions, Reflect.preventExtensions + PreventExtensions func(target *Object) (success bool) + + // A trap for Object.getOwnPropertyDescriptor, Reflect.getOwnPropertyDescriptor (string properties) + GetOwnPropertyDescriptor func(target *Object, prop string) (propertyDescriptor PropertyDescriptor) + + // A trap for Object.getOwnPropertyDescriptor, Reflect.getOwnPropertyDescriptor (integer properties) + GetOwnPropertyDescriptorIdx func(target *Object, prop int) (propertyDescriptor PropertyDescriptor) + + // A trap for Object.getOwnPropertyDescriptor, 
Reflect.getOwnPropertyDescriptor (Symbol properties) + GetOwnPropertyDescriptorSym func(target *Object, prop *Symbol) (propertyDescriptor PropertyDescriptor) + + // A trap for Object.defineProperty, Reflect.defineProperty (string properties) + DefineProperty func(target *Object, key string, propertyDescriptor PropertyDescriptor) (success bool) + + // A trap for Object.defineProperty, Reflect.defineProperty (integer properties) + DefinePropertyIdx func(target *Object, key int, propertyDescriptor PropertyDescriptor) (success bool) + + // A trap for Object.defineProperty, Reflect.defineProperty (Symbol properties) + DefinePropertySym func(target *Object, key *Symbol, propertyDescriptor PropertyDescriptor) (success bool) + + // A trap for the in operator, with operator, Reflect.has (string properties) + Has func(target *Object, property string) (available bool) + + // A trap for the in operator, with operator, Reflect.has (integer properties) + HasIdx func(target *Object, property int) (available bool) + + // A trap for the in operator, with operator, Reflect.has (Symbol properties) + HasSym func(target *Object, property *Symbol) (available bool) + + // A trap for getting property values, Reflect.get (string properties) + Get func(target *Object, property string, receiver Value) (value Value) + + // A trap for getting property values, Reflect.get (integer properties) + GetIdx func(target *Object, property int, receiver Value) (value Value) + + // A trap for getting property values, Reflect.get (Symbol properties) + GetSym func(target *Object, property *Symbol, receiver Value) (value Value) + + // A trap for setting property values, Reflect.set (string properties) + Set func(target *Object, property string, value Value, receiver Value) (success bool) + + // A trap for setting property values, Reflect.set (integer properties) + SetIdx func(target *Object, property int, value Value, receiver Value) (success bool) + + // A trap for setting property values, Reflect.set 
(Symbol properties) + SetSym func(target *Object, property *Symbol, value Value, receiver Value) (success bool) + + // A trap for the delete operator, Reflect.deleteProperty (string properties) + DeleteProperty func(target *Object, property string) (success bool) + + // A trap for the delete operator, Reflect.deleteProperty (integer properties) + DeletePropertyIdx func(target *Object, property int) (success bool) + + // A trap for the delete operator, Reflect.deleteProperty (Symbol properties) + DeletePropertySym func(target *Object, property *Symbol) (success bool) + + // A trap for Object.getOwnPropertyNames, Object.getOwnPropertySymbols, Object.keys, Reflect.ownKeys + OwnKeys func(target *Object) (object *Object) + + // A trap for a function call, Function.prototype.apply, Function.prototype.call, Reflect.apply + Apply func(target *Object, this Value, argumentsList []Value) (value Value) + + // A trap for the new operator, Reflect.construct + Construct func(target *Object, argumentsList []Value, newTarget *Object) (value *Object) +} + +func (r *Runtime) newProxy(args []Value, proto *Object) *Object { + if len(args) >= 2 { + if target, ok := args[0].(*Object); ok { + if proxyHandler, ok := args[1].(*Object); ok { + return r.newProxyObject(target, proxyHandler, proto).val + } + } + } + panic(r.NewTypeError("Cannot create proxy with a non-object as target or handler")) +} + +func (r *Runtime) builtin_newProxy(args []Value, newTarget *Object) *Object { + if newTarget == nil { + panic(r.needNew("Proxy")) + } + return r.newProxy(args, r.getPrototypeFromCtor(newTarget, r.global.Proxy, r.global.ObjectPrototype)) +} + +func (r *Runtime) NewProxy(target *Object, nativeHandler *ProxyTrapConfig) Proxy { + if p, ok := target.self.(*proxyObject); ok { + if p.handler == nil { + panic(r.NewTypeError("Cannot create proxy with a revoked proxy as target")) + } + } + handler := r.newNativeProxyHandler(nativeHandler) + proxy := r._newProxyObject(target, handler, nil) + return 
Proxy{proxy: proxy} +} + +func (r *Runtime) builtin_proxy_revocable(call FunctionCall) Value { + if len(call.Arguments) >= 2 { + if target, ok := call.Argument(0).(*Object); ok { + if proxyHandler, ok := call.Argument(1).(*Object); ok { + proxy := r.newProxyObject(target, proxyHandler, nil) + revoke := r.newNativeFunc(func(FunctionCall) Value { + proxy.revoke() + return _undefined + }, nil, "", nil, 0) + ret := r.NewObject() + ret.self._putProp("proxy", proxy.val, true, true, true) + ret.self._putProp("revoke", revoke, true, true, true) + return ret + } + } + } + panic(r.NewTypeError("Cannot create proxy with a non-object as target or handler")) +} + +func (r *Runtime) createProxy(val *Object) objectImpl { + o := r.newNativeConstructOnly(val, r.builtin_newProxy, nil, "Proxy", 2) + + o._putProp("revocable", r.newNativeFunc(r.builtin_proxy_revocable, nil, "revocable", nil, 2), true, false, true) + return o +} + +func (r *Runtime) initProxy() { + r.global.Proxy = r.newLazyObject(r.createProxy) + r.addToGlobal("Proxy", r.global.Proxy) +} diff --git a/vendor/github.com/dop251/goja/builtin_reflect.go b/vendor/github.com/dop251/goja/builtin_reflect.go new file mode 100644 index 0000000000..d8e7847189 --- /dev/null +++ b/vendor/github.com/dop251/goja/builtin_reflect.go @@ -0,0 +1,132 @@ +package goja + +func (r *Runtime) builtin_reflect_apply(call FunctionCall) Value { + return r.toCallable(call.Argument(0))(FunctionCall{ + This: call.Argument(1), + Arguments: r.createListFromArrayLike(call.Argument(2))}) +} + +func (r *Runtime) toConstructor(v Value) func(args []Value, newTarget *Object) *Object { + if ctor := r.toObject(v).self.assertConstructor(); ctor != nil { + return ctor + } + panic(r.NewTypeError("Value is not a constructor")) +} + +func (r *Runtime) builtin_reflect_construct(call FunctionCall) Value { + target := call.Argument(0) + ctor := r.toConstructor(target) + var newTarget Value + if len(call.Arguments) > 2 { + newTarget = call.Argument(2) + 
r.toConstructor(newTarget) + } else { + newTarget = target + } + return ctor(r.createListFromArrayLike(call.Argument(1)), r.toObject(newTarget)) +} + +func (r *Runtime) builtin_reflect_defineProperty(call FunctionCall) Value { + target := r.toObject(call.Argument(0)) + key := toPropertyKey(call.Argument(1)) + desc := r.toPropertyDescriptor(call.Argument(2)) + + return r.toBoolean(target.defineOwnProperty(key, desc, false)) +} + +func (r *Runtime) builtin_reflect_deleteProperty(call FunctionCall) Value { + target := r.toObject(call.Argument(0)) + key := toPropertyKey(call.Argument(1)) + + return r.toBoolean(target.delete(key, false)) +} + +func (r *Runtime) builtin_reflect_get(call FunctionCall) Value { + target := r.toObject(call.Argument(0)) + key := toPropertyKey(call.Argument(1)) + var receiver Value + if len(call.Arguments) > 2 { + receiver = call.Arguments[2] + } + return target.get(key, receiver) +} + +func (r *Runtime) builtin_reflect_getOwnPropertyDescriptor(call FunctionCall) Value { + target := r.toObject(call.Argument(0)) + key := toPropertyKey(call.Argument(1)) + return r.valuePropToDescriptorObject(target.getOwnProp(key)) +} + +func (r *Runtime) builtin_reflect_getPrototypeOf(call FunctionCall) Value { + target := r.toObject(call.Argument(0)) + if proto := target.self.proto(); proto != nil { + return proto + } + + return _null +} + +func (r *Runtime) builtin_reflect_has(call FunctionCall) Value { + target := r.toObject(call.Argument(0)) + key := toPropertyKey(call.Argument(1)) + return r.toBoolean(target.hasProperty(key)) +} + +func (r *Runtime) builtin_reflect_isExtensible(call FunctionCall) Value { + target := r.toObject(call.Argument(0)) + return r.toBoolean(target.self.isExtensible()) +} + +func (r *Runtime) builtin_reflect_ownKeys(call FunctionCall) Value { + target := r.toObject(call.Argument(0)) + return r.newArrayValues(target.self.ownPropertyKeys(true, nil)) +} + +func (r *Runtime) builtin_reflect_preventExtensions(call FunctionCall) Value { + 
target := r.toObject(call.Argument(0)) + return r.toBoolean(target.self.preventExtensions(false)) +} + +func (r *Runtime) builtin_reflect_set(call FunctionCall) Value { + target := r.toObject(call.Argument(0)) + var receiver Value + if len(call.Arguments) >= 4 { + receiver = call.Argument(3) + } else { + receiver = target + } + return r.toBoolean(target.set(call.Argument(1), call.Argument(2), receiver, false)) +} + +func (r *Runtime) builtin_reflect_setPrototypeOf(call FunctionCall) Value { + target := r.toObject(call.Argument(0)) + var proto *Object + if arg := call.Argument(1); arg != _null { + proto = r.toObject(arg) + } + return r.toBoolean(target.self.setProto(proto, false)) +} + +func (r *Runtime) createReflect(val *Object) objectImpl { + o := newBaseObjectObj(val, r.global.ObjectPrototype, classObject) + + o._putProp("apply", r.newNativeFunc(r.builtin_reflect_apply, nil, "apply", nil, 3), true, false, true) + o._putProp("construct", r.newNativeFunc(r.builtin_reflect_construct, nil, "construct", nil, 2), true, false, true) + o._putProp("defineProperty", r.newNativeFunc(r.builtin_reflect_defineProperty, nil, "defineProperty", nil, 3), true, false, true) + o._putProp("deleteProperty", r.newNativeFunc(r.builtin_reflect_deleteProperty, nil, "deleteProperty", nil, 2), true, false, true) + o._putProp("get", r.newNativeFunc(r.builtin_reflect_get, nil, "get", nil, 2), true, false, true) + o._putProp("getOwnPropertyDescriptor", r.newNativeFunc(r.builtin_reflect_getOwnPropertyDescriptor, nil, "getOwnPropertyDescriptor", nil, 2), true, false, true) + o._putProp("getPrototypeOf", r.newNativeFunc(r.builtin_reflect_getPrototypeOf, nil, "getPrototypeOf", nil, 1), true, false, true) + o._putProp("has", r.newNativeFunc(r.builtin_reflect_has, nil, "has", nil, 2), true, false, true) + o._putProp("isExtensible", r.newNativeFunc(r.builtin_reflect_isExtensible, nil, "isExtensible", nil, 1), true, false, true) + o._putProp("ownKeys", r.newNativeFunc(r.builtin_reflect_ownKeys, nil, 
"ownKeys", nil, 1), true, false, true) + o._putProp("preventExtensions", r.newNativeFunc(r.builtin_reflect_preventExtensions, nil, "preventExtensions", nil, 1), true, false, true) + o._putProp("set", r.newNativeFunc(r.builtin_reflect_set, nil, "set", nil, 3), true, false, true) + o._putProp("setPrototypeOf", r.newNativeFunc(r.builtin_reflect_setPrototypeOf, nil, "setPrototypeOf", nil, 2), true, false, true) + + return o +} + +func (r *Runtime) initReflect() { + r.addToGlobal("Reflect", r.newLazyObject(r.createReflect)) +} diff --git a/vendor/github.com/dop251/goja/builtin_regexp.go b/vendor/github.com/dop251/goja/builtin_regexp.go new file mode 100644 index 0000000000..451b87d24f --- /dev/null +++ b/vendor/github.com/dop251/goja/builtin_regexp.go @@ -0,0 +1,1276 @@ +package goja + +import ( + "fmt" + "github.com/dop251/goja/parser" + "regexp" + "strings" + "unicode/utf16" + "unicode/utf8" +) + +func (r *Runtime) newRegexpObject(proto *Object) *regexpObject { + v := &Object{runtime: r} + + o := ®expObject{} + o.class = classRegExp + o.val = v + o.extensible = true + v.self = o + o.prototype = proto + o.init() + return o +} + +func (r *Runtime) newRegExpp(pattern *regexpPattern, patternStr valueString, proto *Object) *regexpObject { + o := r.newRegexpObject(proto) + + o.pattern = pattern + o.source = patternStr + + return o +} + +func decodeHex(s string) (int, bool) { + var hex int + for i := 0; i < len(s); i++ { + var n byte + chr := s[i] + switch { + case '0' <= chr && chr <= '9': + n = chr - '0' + case 'a' <= chr && chr <= 'f': + n = chr - 'a' + 10 + case 'A' <= chr && chr <= 'F': + n = chr - 'A' + 10 + default: + return 0, false + } + hex = hex*16 + int(n) + } + return hex, true +} + +func writeHex4(b *strings.Builder, i int) { + b.WriteByte(hex[i>>12]) + b.WriteByte(hex[(i>>8)&0xF]) + b.WriteByte(hex[(i>>4)&0xF]) + b.WriteByte(hex[i&0xF]) +} + +// Convert any valid surrogate pairs in the form of \uXXXX\uXXXX to unicode characters +func 
convertRegexpToUnicode(patternStr string) string { + var sb strings.Builder + pos := 0 + for i := 0; i < len(patternStr)-11; { + r, size := utf8.DecodeRuneInString(patternStr[i:]) + if r == '\\' { + i++ + if patternStr[i] == 'u' && patternStr[i+5] == '\\' && patternStr[i+6] == 'u' { + if first, ok := decodeHex(patternStr[i+1 : i+5]); ok { + if isUTF16FirstSurrogate(rune(first)) { + if second, ok := decodeHex(patternStr[i+7 : i+11]); ok { + if isUTF16SecondSurrogate(rune(second)) { + r = utf16.DecodeRune(rune(first), rune(second)) + sb.WriteString(patternStr[pos : i-1]) + sb.WriteRune(r) + i += 11 + pos = i + continue + } + } + } + } + } + i++ + } else { + i += size + } + } + if pos > 0 { + sb.WriteString(patternStr[pos:]) + return sb.String() + } + return patternStr +} + +// Convert any extended unicode characters to UTF-16 in the form of \uXXXX\uXXXX +func convertRegexpToUtf16(patternStr string) string { + var sb strings.Builder + pos := 0 + var prevRune rune + for i := 0; i < len(patternStr); { + r, size := utf8.DecodeRuneInString(patternStr[i:]) + if r > 0xFFFF { + sb.WriteString(patternStr[pos:i]) + if prevRune == '\\' { + sb.WriteRune('\\') + } + first, second := utf16.EncodeRune(r) + sb.WriteString(`\u`) + writeHex4(&sb, int(first)) + sb.WriteString(`\u`) + writeHex4(&sb, int(second)) + pos = i + size + } + i += size + prevRune = r + } + if pos > 0 { + sb.WriteString(patternStr[pos:]) + return sb.String() + } + return patternStr +} + +// convert any broken UTF-16 surrogate pairs to \uXXXX +func escapeInvalidUtf16(s valueString) string { + if ascii, ok := s.(asciiString); ok { + return ascii.String() + } + var sb strings.Builder + rd := &lenientUtf16Decoder{utf16Reader: s.utf16Reader(0)} + pos := 0 + utf8Size := 0 + var utf8Buf [utf8.UTFMax]byte + for { + c, size, err := rd.ReadRune() + if err != nil { + break + } + if utf16.IsSurrogate(c) { + if sb.Len() == 0 { + sb.Grow(utf8Size + 7) + hrd := s.reader(0) + var c rune + for p := 0; p < pos; { + var size int + 
var err error + c, size, err = hrd.ReadRune() + if err != nil { + // will not happen + panic(fmt.Errorf("error while reading string head %q, pos: %d: %w", s.String(), pos, err)) + } + sb.WriteRune(c) + p += size + } + if c == '\\' { + sb.WriteRune(c) + } + } + sb.WriteString(`\u`) + writeHex4(&sb, int(c)) + } else { + if sb.Len() > 0 { + sb.WriteRune(c) + } else { + utf8Size += utf8.EncodeRune(utf8Buf[:], c) + pos += size + } + } + } + if sb.Len() > 0 { + return sb.String() + } + return s.String() +} + +func compileRegexpFromValueString(patternStr valueString, flags string) (*regexpPattern, error) { + return compileRegexp(escapeInvalidUtf16(patternStr), flags) +} + +func compileRegexp(patternStr, flags string) (p *regexpPattern, err error) { + var global, ignoreCase, multiline, sticky, unicode bool + var wrapper *regexpWrapper + var wrapper2 *regexp2Wrapper + + if flags != "" { + invalidFlags := func() { + err = fmt.Errorf("Invalid flags supplied to RegExp constructor '%s'", flags) + } + for _, chr := range flags { + switch chr { + case 'g': + if global { + invalidFlags() + return + } + global = true + case 'm': + if multiline { + invalidFlags() + return + } + multiline = true + case 'i': + if ignoreCase { + invalidFlags() + return + } + ignoreCase = true + case 'y': + if sticky { + invalidFlags() + return + } + sticky = true + case 'u': + if unicode { + invalidFlags() + } + unicode = true + default: + invalidFlags() + return + } + } + } + + if unicode { + patternStr = convertRegexpToUnicode(patternStr) + } else { + patternStr = convertRegexpToUtf16(patternStr) + } + + re2Str, err1 := parser.TransformRegExp(patternStr) + if err1 == nil { + re2flags := "" + if multiline { + re2flags += "m" + } + if ignoreCase { + re2flags += "i" + } + if len(re2flags) > 0 { + re2Str = fmt.Sprintf("(?%s:%s)", re2flags, re2Str) + } + + pattern, err1 := regexp.Compile(re2Str) + if err1 != nil { + err = fmt.Errorf("Invalid regular expression (re2): %s (%v)", re2Str, err1) + return + } + 
wrapper = (*regexpWrapper)(pattern) + } else { + if _, incompat := err1.(parser.RegexpErrorIncompatible); !incompat { + err = err1 + return + } + wrapper2, err = compileRegexp2(patternStr, multiline, ignoreCase) + if err != nil { + err = fmt.Errorf("Invalid regular expression (regexp2): %s (%v)", patternStr, err) + return + } + } + + p = ®expPattern{ + src: patternStr, + regexpWrapper: wrapper, + regexp2Wrapper: wrapper2, + global: global, + ignoreCase: ignoreCase, + multiline: multiline, + sticky: sticky, + unicode: unicode, + } + return +} + +func (r *Runtime) _newRegExp(patternStr valueString, flags string, proto *Object) *regexpObject { + pattern, err := compileRegexpFromValueString(patternStr, flags) + if err != nil { + panic(r.newSyntaxError(err.Error(), -1)) + } + return r.newRegExpp(pattern, patternStr, proto) +} + +func (r *Runtime) builtin_newRegExp(args []Value, proto *Object) *Object { + var patternVal, flagsVal Value + if len(args) > 0 { + patternVal = args[0] + } + if len(args) > 1 { + flagsVal = args[1] + } + return r.newRegExp(patternVal, flagsVal, proto).val +} + +func (r *Runtime) newRegExp(patternVal, flagsVal Value, proto *Object) *regexpObject { + var pattern valueString + var flags string + if isRegexp(patternVal) { // this may have side effects so need to call it anyway + if obj, ok := patternVal.(*Object); ok { + if rx, ok := obj.self.(*regexpObject); ok { + if flagsVal == nil || flagsVal == _undefined { + return rx.clone() + } else { + return r._newRegExp(rx.source, flagsVal.toString().String(), proto) + } + } else { + pattern = nilSafe(obj.self.getStr("source", nil)).toString() + if flagsVal == nil || flagsVal == _undefined { + flags = nilSafe(obj.self.getStr("flags", nil)).toString().String() + } else { + flags = flagsVal.toString().String() + } + goto exit + } + } + } + + if patternVal != nil && patternVal != _undefined { + pattern = patternVal.toString() + } + if flagsVal != nil && flagsVal != _undefined { + flags = 
flagsVal.toString().String() + } + + if pattern == nil { + pattern = stringEmpty + } +exit: + return r._newRegExp(pattern, flags, proto) +} + +func (r *Runtime) builtin_RegExp(call FunctionCall) Value { + pattern := call.Argument(0) + patternIsRegExp := isRegexp(pattern) + flags := call.Argument(1) + if patternIsRegExp && flags == _undefined { + if obj, ok := call.Argument(0).(*Object); ok { + patternConstructor := obj.self.getStr("constructor", nil) + if patternConstructor == r.global.RegExp { + return pattern + } + } + } + return r.newRegExp(pattern, flags, r.global.RegExpPrototype).val +} + +func (r *Runtime) regexpproto_compile(call FunctionCall) Value { + if this, ok := r.toObject(call.This).self.(*regexpObject); ok { + var ( + pattern *regexpPattern + source valueString + flags string + err error + ) + patternVal := call.Argument(0) + flagsVal := call.Argument(1) + if o, ok := patternVal.(*Object); ok { + if p, ok := o.self.(*regexpObject); ok { + if flagsVal != _undefined { + panic(r.NewTypeError("Cannot supply flags when constructing one RegExp from another")) + } + this.pattern = p.pattern + this.source = p.source + goto exit + } + } + if patternVal != _undefined { + source = patternVal.toString() + } else { + source = stringEmpty + } + if flagsVal != _undefined { + flags = flagsVal.toString().String() + } + pattern, err = compileRegexpFromValueString(source, flags) + if err != nil { + panic(r.newSyntaxError(err.Error(), -1)) + } + this.pattern = pattern + this.source = source + exit: + this.setOwnStr("lastIndex", intToValue(0), true) + return call.This + } + + panic(r.NewTypeError("Method RegExp.prototype.compile called on incompatible receiver %s", call.This.toString())) +} + +func (r *Runtime) regexpproto_exec(call FunctionCall) Value { + if this, ok := r.toObject(call.This).self.(*regexpObject); ok { + return this.exec(call.Argument(0).toString()) + } else { + r.typeErrorResult(true, "Method RegExp.prototype.exec called on incompatible receiver %s", 
call.This.toString()) + return nil + } +} + +func (r *Runtime) regexpproto_test(call FunctionCall) Value { + if this, ok := r.toObject(call.This).self.(*regexpObject); ok { + if this.test(call.Argument(0).toString()) { + return valueTrue + } else { + return valueFalse + } + } else { + r.typeErrorResult(true, "Method RegExp.prototype.test called on incompatible receiver %s", call.This.toString()) + return nil + } +} + +func (r *Runtime) regexpproto_toString(call FunctionCall) Value { + obj := r.toObject(call.This) + if this := r.checkStdRegexp(obj); this != nil { + var sb valueStringBuilder + sb.WriteRune('/') + if !this.writeEscapedSource(&sb) { + sb.WriteString(this.source) + } + sb.WriteRune('/') + if this.pattern.global { + sb.WriteRune('g') + } + if this.pattern.ignoreCase { + sb.WriteRune('i') + } + if this.pattern.multiline { + sb.WriteRune('m') + } + if this.pattern.unicode { + sb.WriteRune('u') + } + if this.pattern.sticky { + sb.WriteRune('y') + } + return sb.String() + } + pattern := nilSafe(obj.self.getStr("source", nil)).toString() + flags := nilSafe(obj.self.getStr("flags", nil)).toString() + var sb valueStringBuilder + sb.WriteRune('/') + sb.WriteString(pattern) + sb.WriteRune('/') + sb.WriteString(flags) + return sb.String() +} + +func (r *regexpObject) writeEscapedSource(sb *valueStringBuilder) bool { + if r.source.length() == 0 { + sb.WriteString(asciiString("(?:)")) + return true + } + pos := 0 + lastPos := 0 + rd := &lenientUtf16Decoder{utf16Reader: r.source.utf16Reader(0)} +L: + for { + c, size, err := rd.ReadRune() + if err != nil { + break + } + switch c { + case '\\': + pos++ + _, size, err = rd.ReadRune() + if err != nil { + break L + } + case '/', '\u000a', '\u000d', '\u2028', '\u2029': + sb.WriteSubstring(r.source, lastPos, pos) + sb.WriteRune('\\') + switch c { + case '\u000a': + sb.WriteRune('n') + case '\u000d': + sb.WriteRune('r') + default: + sb.WriteRune('u') + sb.WriteRune(rune(hex[c>>12])) + sb.WriteRune(rune(hex[(c>>8)&0xF])) + 
sb.WriteRune(rune(hex[(c>>4)&0xF])) + sb.WriteRune(rune(hex[c&0xF])) + } + lastPos = pos + size + } + pos += size + } + if lastPos > 0 { + sb.WriteSubstring(r.source, lastPos, r.source.length()) + return true + } + return false +} + +func (r *Runtime) regexpproto_getSource(call FunctionCall) Value { + if this, ok := r.toObject(call.This).self.(*regexpObject); ok { + var sb valueStringBuilder + if this.writeEscapedSource(&sb) { + return sb.String() + } + return this.source + } else { + r.typeErrorResult(true, "Method RegExp.prototype.source getter called on incompatible receiver") + return nil + } +} + +func (r *Runtime) regexpproto_getGlobal(call FunctionCall) Value { + if this, ok := r.toObject(call.This).self.(*regexpObject); ok { + if this.pattern.global { + return valueTrue + } else { + return valueFalse + } + } else { + r.typeErrorResult(true, "Method RegExp.prototype.global getter called on incompatible receiver %s", call.This.toString()) + return nil + } +} + +func (r *Runtime) regexpproto_getMultiline(call FunctionCall) Value { + if this, ok := r.toObject(call.This).self.(*regexpObject); ok { + if this.pattern.multiline { + return valueTrue + } else { + return valueFalse + } + } else { + r.typeErrorResult(true, "Method RegExp.prototype.multiline getter called on incompatible receiver %s", call.This.toString()) + return nil + } +} + +func (r *Runtime) regexpproto_getIgnoreCase(call FunctionCall) Value { + if this, ok := r.toObject(call.This).self.(*regexpObject); ok { + if this.pattern.ignoreCase { + return valueTrue + } else { + return valueFalse + } + } else { + r.typeErrorResult(true, "Method RegExp.prototype.ignoreCase getter called on incompatible receiver %s", call.This.toString()) + return nil + } +} + +func (r *Runtime) regexpproto_getUnicode(call FunctionCall) Value { + if this, ok := r.toObject(call.This).self.(*regexpObject); ok { + if this.pattern.unicode { + return valueTrue + } else { + return valueFalse + } + } else { + r.typeErrorResult(true, 
"Method RegExp.prototype.unicode getter called on incompatible receiver %s", call.This.toString()) + return nil + } +} + +func (r *Runtime) regexpproto_getSticky(call FunctionCall) Value { + if this, ok := r.toObject(call.This).self.(*regexpObject); ok { + if this.pattern.sticky { + return valueTrue + } else { + return valueFalse + } + } else { + r.typeErrorResult(true, "Method RegExp.prototype.sticky getter called on incompatible receiver %s", call.This.toString()) + return nil + } +} + +func (r *Runtime) regexpproto_getFlags(call FunctionCall) Value { + var global, ignoreCase, multiline, sticky, unicode bool + + thisObj := r.toObject(call.This) + size := 0 + if v := thisObj.self.getStr("global", nil); v != nil { + global = v.ToBoolean() + if global { + size++ + } + } + if v := thisObj.self.getStr("ignoreCase", nil); v != nil { + ignoreCase = v.ToBoolean() + if ignoreCase { + size++ + } + } + if v := thisObj.self.getStr("multiline", nil); v != nil { + multiline = v.ToBoolean() + if multiline { + size++ + } + } + if v := thisObj.self.getStr("sticky", nil); v != nil { + sticky = v.ToBoolean() + if sticky { + size++ + } + } + if v := thisObj.self.getStr("unicode", nil); v != nil { + unicode = v.ToBoolean() + if unicode { + size++ + } + } + + var sb strings.Builder + sb.Grow(size) + if global { + sb.WriteByte('g') + } + if ignoreCase { + sb.WriteByte('i') + } + if multiline { + sb.WriteByte('m') + } + if unicode { + sb.WriteByte('u') + } + if sticky { + sb.WriteByte('y') + } + + return asciiString(sb.String()) +} + +func (r *Runtime) regExpExec(execFn func(FunctionCall) Value, rxObj *Object, arg Value) Value { + res := execFn(FunctionCall{ + This: rxObj, + Arguments: []Value{arg}, + }) + + if res != _null { + if _, ok := res.(*Object); !ok { + panic(r.NewTypeError("RegExp exec method returned something other than an Object or null")) + } + } + + return res +} + +func (r *Runtime) getGlobalRegexpMatches(rxObj *Object, s valueString) []Value { + fullUnicode := 
nilSafe(rxObj.self.getStr("unicode", nil)).ToBoolean() + rxObj.self.setOwnStr("lastIndex", intToValue(0), true) + execFn, ok := r.toObject(rxObj.self.getStr("exec", nil)).self.assertCallable() + if !ok { + panic(r.NewTypeError("exec is not a function")) + } + var a []Value + for { + res := r.regExpExec(execFn, rxObj, s) + if res == _null { + break + } + a = append(a, res) + matchStr := nilSafe(r.toObject(res).self.getIdx(valueInt(0), nil)).toString() + if matchStr.length() == 0 { + thisIndex := toLength(rxObj.self.getStr("lastIndex", nil)) + rxObj.self.setOwnStr("lastIndex", valueInt(advanceStringIndex64(s, thisIndex, fullUnicode)), true) + } + } + + return a +} + +func (r *Runtime) regexpproto_stdMatcherGeneric(rxObj *Object, s valueString) Value { + rx := rxObj.self + global := rx.getStr("global", nil) + if global != nil && global.ToBoolean() { + a := r.getGlobalRegexpMatches(rxObj, s) + if len(a) == 0 { + return _null + } + ar := make([]Value, 0, len(a)) + for _, result := range a { + obj := r.toObject(result) + matchStr := nilSafe(obj.self.getIdx(valueInt(0), nil)).ToString() + ar = append(ar, matchStr) + } + return r.newArrayValues(ar) + } + + execFn, ok := r.toObject(rx.getStr("exec", nil)).self.assertCallable() + if !ok { + panic(r.NewTypeError("exec is not a function")) + } + + return r.regExpExec(execFn, rxObj, s) +} + +func (r *Runtime) checkStdRegexp(rxObj *Object) *regexpObject { + if deoptimiseRegexp { + return nil + } + + rx, ok := rxObj.self.(*regexpObject) + if !ok { + return nil + } + + if !rx.standard || rx.prototype == nil || rx.prototype.self != r.global.stdRegexpProto { + return nil + } + + return rx +} + +func (r *Runtime) regexpproto_stdMatcher(call FunctionCall) Value { + thisObj := r.toObject(call.This) + s := call.Argument(0).toString() + rx := r.checkStdRegexp(thisObj) + if rx == nil { + return r.regexpproto_stdMatcherGeneric(thisObj, s) + } + if rx.pattern.global { + res := rx.pattern.findAllSubmatchIndex(s, 0, -1, rx.pattern.sticky) + 
if len(res) == 0 { + rx.setOwnStr("lastIndex", intToValue(0), true) + return _null + } + a := make([]Value, 0, len(res)) + for _, result := range res { + a = append(a, s.substring(result[0], result[1])) + } + rx.setOwnStr("lastIndex", intToValue(int64(res[len(res)-1][1])), true) + return r.newArrayValues(a) + } else { + return rx.exec(s) + } +} + +func (r *Runtime) regexpproto_stdSearchGeneric(rxObj *Object, arg valueString) Value { + rx := rxObj.self + previousLastIndex := nilSafe(rx.getStr("lastIndex", nil)) + zero := intToValue(0) + if !previousLastIndex.SameAs(zero) { + rx.setOwnStr("lastIndex", zero, true) + } + execFn, ok := r.toObject(rx.getStr("exec", nil)).self.assertCallable() + if !ok { + panic(r.NewTypeError("exec is not a function")) + } + + result := r.regExpExec(execFn, rxObj, arg) + currentLastIndex := nilSafe(rx.getStr("lastIndex", nil)) + if !currentLastIndex.SameAs(previousLastIndex) { + rx.setOwnStr("lastIndex", previousLastIndex, true) + } + + if result == _null { + return intToValue(-1) + } + + return r.toObject(result).self.getStr("index", nil) +} + +func (r *Runtime) regexpproto_stdMatcherAll(call FunctionCall) Value { + thisObj := r.toObject(call.This) + s := call.Argument(0).toString() + flags := nilSafe(thisObj.self.getStr("flags", nil)).toString() + c := r.speciesConstructorObj(call.This.(*Object), r.global.RegExp) + matcher := r.toConstructor(c)([]Value{call.This, flags}, nil) + matcher.self.setOwnStr("lastIndex", valueInt(toLength(thisObj.self.getStr("lastIndex", nil))), true) + flagsStr := flags.String() + global := strings.Contains(flagsStr, "g") + fullUnicode := strings.Contains(flagsStr, "u") + return r.createRegExpStringIterator(matcher, s, global, fullUnicode) +} + +func (r *Runtime) createRegExpStringIterator(matcher *Object, s valueString, global, fullUnicode bool) Value { + o := &Object{runtime: r} + + ri := ®ExpStringIterObject{ + matcher: matcher, + s: s, + global: global, + fullUnicode: fullUnicode, + } + ri.class = 
classRegExpStringIterator + ri.val = o + ri.extensible = true + o.self = ri + ri.prototype = r.global.RegExpStringIteratorPrototype + ri.init() + + return o +} + +type regExpStringIterObject struct { + baseObject + matcher *Object + s valueString + global, fullUnicode, done bool +} + +// RegExpExec as defined in 21.2.5.2.1 +func regExpExec(r *Object, s valueString) Value { + exec := r.self.getStr("exec", nil) + if execObject, ok := exec.(*Object); ok { + if execFn, ok := execObject.self.assertCallable(); ok { + return r.runtime.regExpExec(execFn, r, s) + } + } + if rx, ok := r.self.(*regexpObject); ok { + return rx.exec(s) + } + panic(r.runtime.NewTypeError("no RegExpMatcher internal slot")) +} + +func (ri *regExpStringIterObject) next() (v Value) { + if ri.done { + return ri.val.runtime.createIterResultObject(_undefined, true) + } + + match := regExpExec(ri.matcher, ri.s) + if IsNull(match) { + ri.done = true + return ri.val.runtime.createIterResultObject(_undefined, true) + } + if !ri.global { + ri.done = true + return ri.val.runtime.createIterResultObject(match, false) + } + + matchStr := nilSafe(ri.val.runtime.toObject(match).self.getIdx(valueInt(0), nil)).toString() + if matchStr.length() == 0 { + thisIndex := toLength(ri.matcher.self.getStr("lastIndex", nil)) + ri.matcher.self.setOwnStr("lastIndex", valueInt(advanceStringIndex64(ri.s, thisIndex, ri.fullUnicode)), true) + } + return ri.val.runtime.createIterResultObject(match, false) +} + +func (r *Runtime) regexpproto_stdSearch(call FunctionCall) Value { + thisObj := r.toObject(call.This) + s := call.Argument(0).toString() + rx := r.checkStdRegexp(thisObj) + if rx == nil { + return r.regexpproto_stdSearchGeneric(thisObj, s) + } + + previousLastIndex := rx.getStr("lastIndex", nil) + rx.setOwnStr("lastIndex", intToValue(0), true) + + match, result := rx.execRegexp(s) + rx.setOwnStr("lastIndex", previousLastIndex, true) + + if !match { + return intToValue(-1) + } + return intToValue(int64(result[0])) +} + +func 
(r *Runtime) regexpproto_stdSplitterGeneric(splitter *Object, s valueString, limit Value, unicodeMatching bool) Value { + var a []Value + var lim int64 + if limit == nil || limit == _undefined { + lim = maxInt - 1 + } else { + lim = toLength(limit) + } + if lim == 0 { + return r.newArrayValues(a) + } + size := s.length() + p := 0 + execFn := toMethod(splitter.ToObject(r).self.getStr("exec", nil)) // must be non-nil + + if size == 0 { + if r.regExpExec(execFn, splitter, s) == _null { + a = append(a, s) + } + return r.newArrayValues(a) + } + + q := p + for q < size { + splitter.self.setOwnStr("lastIndex", intToValue(int64(q)), true) + z := r.regExpExec(execFn, splitter, s) + if z == _null { + q = advanceStringIndex(s, q, unicodeMatching) + } else { + z := r.toObject(z) + e := toLength(splitter.self.getStr("lastIndex", nil)) + if e == int64(p) { + q = advanceStringIndex(s, q, unicodeMatching) + } else { + a = append(a, s.substring(p, q)) + if int64(len(a)) == lim { + return r.newArrayValues(a) + } + if e > int64(size) { + p = size + } else { + p = int(e) + } + numberOfCaptures := max(toLength(z.self.getStr("length", nil))-1, 0) + for i := int64(1); i <= numberOfCaptures; i++ { + a = append(a, z.self.getIdx(valueInt(i), nil)) + if int64(len(a)) == lim { + return r.newArrayValues(a) + } + } + q = p + } + } + } + a = append(a, s.substring(p, size)) + return r.newArrayValues(a) +} + +func advanceStringIndex(s valueString, pos int, unicode bool) int { + next := pos + 1 + if !unicode { + return next + } + l := s.length() + if next >= l { + return next + } + if !isUTF16FirstSurrogate(s.charAt(pos)) { + return next + } + if !isUTF16SecondSurrogate(s.charAt(next)) { + return next + } + return next + 1 +} + +func advanceStringIndex64(s valueString, pos int64, unicode bool) int64 { + next := pos + 1 + if !unicode { + return next + } + l := int64(s.length()) + if next >= l { + return next + } + if !isUTF16FirstSurrogate(s.charAt(int(pos))) { + return next + } + if 
!isUTF16SecondSurrogate(s.charAt(int(next))) { + return next + } + return next + 1 +} + +func (r *Runtime) regexpproto_stdSplitter(call FunctionCall) Value { + rxObj := r.toObject(call.This) + s := call.Argument(0).toString() + limitValue := call.Argument(1) + var splitter *Object + search := r.checkStdRegexp(rxObj) + c := r.speciesConstructorObj(rxObj, r.global.RegExp) + if search == nil || c != r.global.RegExp { + flags := nilSafe(rxObj.self.getStr("flags", nil)).toString() + flagsStr := flags.String() + + // Add 'y' flag if missing + if !strings.Contains(flagsStr, "y") { + flags = flags.concat(asciiString("y")) + } + splitter = r.toConstructor(c)([]Value{rxObj, flags}, nil) + search = r.checkStdRegexp(splitter) + if search == nil { + return r.regexpproto_stdSplitterGeneric(splitter, s, limitValue, strings.Contains(flagsStr, "u")) + } + } + + pattern := search.pattern // toUint32() may recompile the pattern, but we still need to use the original + limit := -1 + if limitValue != _undefined { + limit = int(toUint32(limitValue)) + } + + if limit == 0 { + return r.newArrayValues(nil) + } + + targetLength := s.length() + var valueArray []Value + lastIndex := 0 + found := 0 + + result := pattern.findAllSubmatchIndex(s, 0, -1, false) + if targetLength == 0 { + if result == nil { + valueArray = append(valueArray, s) + } + goto RETURN + } + + for _, match := range result { + if match[0] == match[1] { + // FIXME Ugh, this is a hack + if match[0] == 0 || match[0] == targetLength { + continue + } + } + + if lastIndex != match[0] { + valueArray = append(valueArray, s.substring(lastIndex, match[0])) + found++ + } else if lastIndex == match[0] { + if lastIndex != -1 { + valueArray = append(valueArray, stringEmpty) + found++ + } + } + + lastIndex = match[1] + if found == limit { + goto RETURN + } + + captureCount := len(match) / 2 + for index := 1; index < captureCount; index++ { + offset := index * 2 + var value Value + if match[offset] != -1 { + value = 
s.substring(match[offset], match[offset+1]) + } else { + value = _undefined + } + valueArray = append(valueArray, value) + found++ + if found == limit { + goto RETURN + } + } + } + + if found != limit { + if lastIndex != targetLength { + valueArray = append(valueArray, s.substring(lastIndex, targetLength)) + } else { + valueArray = append(valueArray, stringEmpty) + } + } + +RETURN: + return r.newArrayValues(valueArray) +} + +func (r *Runtime) regexpproto_stdReplacerGeneric(rxObj *Object, s, replaceStr valueString, rcall func(FunctionCall) Value) Value { + var results []Value + if nilSafe(rxObj.self.getStr("global", nil)).ToBoolean() { + results = r.getGlobalRegexpMatches(rxObj, s) + } else { + execFn := toMethod(rxObj.self.getStr("exec", nil)) // must be non-nil + result := r.regExpExec(execFn, rxObj, s) + if result != _null { + results = append(results, result) + } + } + lengthS := s.length() + nextSourcePosition := 0 + var resultBuf valueStringBuilder + for _, result := range results { + obj := r.toObject(result) + nCaptures := max(toLength(obj.self.getStr("length", nil))-1, 0) + matched := nilSafe(obj.self.getIdx(valueInt(0), nil)).toString() + matchLength := matched.length() + position := toIntStrict(max(min(nilSafe(obj.self.getStr("index", nil)).ToInteger(), int64(lengthS)), 0)) + var captures []Value + if rcall != nil { + captures = make([]Value, 0, nCaptures+3) + } else { + captures = make([]Value, 0, nCaptures+1) + } + captures = append(captures, matched) + for n := int64(1); n <= nCaptures; n++ { + capN := nilSafe(obj.self.getIdx(valueInt(n), nil)) + if capN != _undefined { + capN = capN.ToString() + } + captures = append(captures, capN) + } + var replacement valueString + if rcall != nil { + captures = append(captures, intToValue(int64(position)), s) + replacement = rcall(FunctionCall{ + This: _undefined, + Arguments: captures, + }).toString() + if position >= nextSourcePosition { + resultBuf.WriteString(s.substring(nextSourcePosition, position)) + 
resultBuf.WriteString(replacement) + nextSourcePosition = position + matchLength + } + } else { + if position >= nextSourcePosition { + resultBuf.WriteString(s.substring(nextSourcePosition, position)) + writeSubstitution(s, position, len(captures), func(idx int) valueString { + capture := captures[idx] + if capture != _undefined { + return capture.toString() + } + return stringEmpty + }, replaceStr, &resultBuf) + nextSourcePosition = position + matchLength + } + } + } + if nextSourcePosition < lengthS { + resultBuf.WriteString(s.substring(nextSourcePosition, lengthS)) + } + return resultBuf.String() +} + +func writeSubstitution(s valueString, position int, numCaptures int, getCapture func(int) valueString, replaceStr valueString, buf *valueStringBuilder) { + l := s.length() + rl := replaceStr.length() + matched := getCapture(0) + tailPos := position + matched.length() + + for i := 0; i < rl; i++ { + c := replaceStr.charAt(i) + if c == '$' && i < rl-1 { + ch := replaceStr.charAt(i + 1) + switch ch { + case '$': + buf.WriteRune('$') + case '`': + buf.WriteString(s.substring(0, position)) + case '\'': + if tailPos < l { + buf.WriteString(s.substring(tailPos, l)) + } + case '&': + buf.WriteString(matched) + default: + matchNumber := 0 + j := i + 1 + for j < rl { + ch := replaceStr.charAt(j) + if ch >= '0' && ch <= '9' { + m := matchNumber*10 + int(ch-'0') + if m >= numCaptures { + break + } + matchNumber = m + j++ + } else { + break + } + } + if matchNumber > 0 { + buf.WriteString(getCapture(matchNumber)) + i = j - 1 + continue + } else { + buf.WriteRune('$') + buf.WriteRune(ch) + } + } + i++ + } else { + buf.WriteRune(c) + } + } +} + +func (r *Runtime) regexpproto_stdReplacer(call FunctionCall) Value { + rxObj := r.toObject(call.This) + s := call.Argument(0).toString() + replaceStr, rcall := getReplaceValue(call.Argument(1)) + + rx := r.checkStdRegexp(rxObj) + if rx == nil { + return r.regexpproto_stdReplacerGeneric(rxObj, s, replaceStr, rcall) + } + + var index int64 
+ find := 1 + if rx.pattern.global { + find = -1 + rx.setOwnStr("lastIndex", intToValue(0), true) + } else { + index = rx.getLastIndex() + } + found := rx.pattern.findAllSubmatchIndex(s, toIntStrict(index), find, rx.pattern.sticky) + if len(found) > 0 { + if !rx.updateLastIndex(index, found[0], found[len(found)-1]) { + found = nil + } + } else { + rx.updateLastIndex(index, nil, nil) + } + + return stringReplace(s, found, replaceStr, rcall) +} + +func (r *Runtime) regExpStringIteratorProto_next(call FunctionCall) Value { + thisObj := r.toObject(call.This) + if iter, ok := thisObj.self.(*regExpStringIterObject); ok { + return iter.next() + } + panic(r.NewTypeError("Method RegExp String Iterator.prototype.next called on incompatible receiver %s", thisObj.String())) +} + +func (r *Runtime) createRegExpStringIteratorPrototype(val *Object) objectImpl { + o := newBaseObjectObj(val, r.global.IteratorPrototype, classObject) + + o._putProp("next", r.newNativeFunc(r.regExpStringIteratorProto_next, nil, "next", nil, 0), true, false, true) + o._putSym(SymToStringTag, valueProp(asciiString(classRegExpStringIterator), false, false, true)) + + return o +} + +func (r *Runtime) initRegExp() { + o := r.newGuardedObject(r.global.ObjectPrototype, classObject) + r.global.RegExpPrototype = o.val + r.global.stdRegexpProto = o + r.global.RegExpStringIteratorPrototype = r.newLazyObject(r.createRegExpStringIteratorPrototype) + + o._putProp("compile", r.newNativeFunc(r.regexpproto_compile, nil, "compile", nil, 2), true, false, true) + o._putProp("exec", r.newNativeFunc(r.regexpproto_exec, nil, "exec", nil, 1), true, false, true) + o._putProp("test", r.newNativeFunc(r.regexpproto_test, nil, "test", nil, 1), true, false, true) + o._putProp("toString", r.newNativeFunc(r.regexpproto_toString, nil, "toString", nil, 0), true, false, true) + o.setOwnStr("source", &valueProperty{ + configurable: true, + getterFunc: r.newNativeFunc(r.regexpproto_getSource, nil, "get source", nil, 0), + accessor: true, 
+ }, false) + o.setOwnStr("global", &valueProperty{ + configurable: true, + getterFunc: r.newNativeFunc(r.regexpproto_getGlobal, nil, "get global", nil, 0), + accessor: true, + }, false) + o.setOwnStr("multiline", &valueProperty{ + configurable: true, + getterFunc: r.newNativeFunc(r.regexpproto_getMultiline, nil, "get multiline", nil, 0), + accessor: true, + }, false) + o.setOwnStr("ignoreCase", &valueProperty{ + configurable: true, + getterFunc: r.newNativeFunc(r.regexpproto_getIgnoreCase, nil, "get ignoreCase", nil, 0), + accessor: true, + }, false) + o.setOwnStr("unicode", &valueProperty{ + configurable: true, + getterFunc: r.newNativeFunc(r.regexpproto_getUnicode, nil, "get unicode", nil, 0), + accessor: true, + }, false) + o.setOwnStr("sticky", &valueProperty{ + configurable: true, + getterFunc: r.newNativeFunc(r.regexpproto_getSticky, nil, "get sticky", nil, 0), + accessor: true, + }, false) + o.setOwnStr("flags", &valueProperty{ + configurable: true, + getterFunc: r.newNativeFunc(r.regexpproto_getFlags, nil, "get flags", nil, 0), + accessor: true, + }, false) + + o._putSym(SymMatch, valueProp(r.newNativeFunc(r.regexpproto_stdMatcher, nil, "[Symbol.match]", nil, 1), true, false, true)) + o._putSym(SymMatchAll, valueProp(r.newNativeFunc(r.regexpproto_stdMatcherAll, nil, "[Symbol.matchAll]", nil, 1), true, false, true)) + o._putSym(SymSearch, valueProp(r.newNativeFunc(r.regexpproto_stdSearch, nil, "[Symbol.search]", nil, 1), true, false, true)) + o._putSym(SymSplit, valueProp(r.newNativeFunc(r.regexpproto_stdSplitter, nil, "[Symbol.split]", nil, 2), true, false, true)) + o._putSym(SymReplace, valueProp(r.newNativeFunc(r.regexpproto_stdReplacer, nil, "[Symbol.replace]", nil, 2), true, false, true)) + o.guard("exec", "global", "multiline", "ignoreCase", "unicode", "sticky") + + r.global.RegExp = r.newNativeFunc(r.builtin_RegExp, r.builtin_newRegExp, "RegExp", r.global.RegExpPrototype, 2) + rx := r.global.RegExp.self + rx._putSym(SymSpecies, &valueProperty{ + 
getterFunc: r.newNativeFunc(r.returnThis, nil, "get [Symbol.species]", nil, 0), + accessor: true, + configurable: true, + }) + r.addToGlobal("RegExp", r.global.RegExp) +} diff --git a/vendor/github.com/dop251/goja/builtin_set.go b/vendor/github.com/dop251/goja/builtin_set.go new file mode 100644 index 0000000000..4a26540582 --- /dev/null +++ b/vendor/github.com/dop251/goja/builtin_set.go @@ -0,0 +1,246 @@ +package goja + +type setObject struct { + baseObject + m *orderedMap +} + +type setIterObject struct { + baseObject + iter *orderedMapIter + kind iterationKind +} + +func (o *setIterObject) next() Value { + if o.iter == nil { + return o.val.runtime.createIterResultObject(_undefined, true) + } + + entry := o.iter.next() + if entry == nil { + o.iter = nil + return o.val.runtime.createIterResultObject(_undefined, true) + } + + var result Value + switch o.kind { + case iterationKindValue: + result = entry.key + default: + result = o.val.runtime.newArrayValues([]Value{entry.key, entry.key}) + } + + return o.val.runtime.createIterResultObject(result, false) +} + +func (so *setObject) init() { + so.baseObject.init() + so.m = newOrderedMap(so.val.runtime.getHash()) +} + +func (r *Runtime) setProto_add(call FunctionCall) Value { + thisObj := r.toObject(call.This) + so, ok := thisObj.self.(*setObject) + if !ok { + panic(r.NewTypeError("Method Set.prototype.add called on incompatible receiver %s", thisObj.String())) + } + + so.m.set(call.Argument(0), nil) + return call.This +} + +func (r *Runtime) setProto_clear(call FunctionCall) Value { + thisObj := r.toObject(call.This) + so, ok := thisObj.self.(*setObject) + if !ok { + panic(r.NewTypeError("Method Set.prototype.clear called on incompatible receiver %s", thisObj.String())) + } + + so.m.clear() + return _undefined +} + +func (r *Runtime) setProto_delete(call FunctionCall) Value { + thisObj := r.toObject(call.This) + so, ok := thisObj.self.(*setObject) + if !ok { + panic(r.NewTypeError("Method Set.prototype.delete called 
on incompatible receiver %s", thisObj.String())) + } + + return r.toBoolean(so.m.remove(call.Argument(0))) +} + +func (r *Runtime) setProto_entries(call FunctionCall) Value { + return r.createSetIterator(call.This, iterationKindKeyValue) +} + +func (r *Runtime) setProto_forEach(call FunctionCall) Value { + thisObj := r.toObject(call.This) + so, ok := thisObj.self.(*setObject) + if !ok { + panic(r.NewTypeError("Method Set.prototype.forEach called on incompatible receiver %s", thisObj.String())) + } + callbackFn, ok := r.toObject(call.Argument(0)).self.assertCallable() + if !ok { + panic(r.NewTypeError("object is not a function %s")) + } + t := call.Argument(1) + iter := so.m.newIter() + for { + entry := iter.next() + if entry == nil { + break + } + callbackFn(FunctionCall{This: t, Arguments: []Value{entry.key, entry.key, thisObj}}) + } + + return _undefined +} + +func (r *Runtime) setProto_has(call FunctionCall) Value { + thisObj := r.toObject(call.This) + so, ok := thisObj.self.(*setObject) + if !ok { + panic(r.NewTypeError("Method Set.prototype.has called on incompatible receiver %s", thisObj.String())) + } + + return r.toBoolean(so.m.has(call.Argument(0))) +} + +func (r *Runtime) setProto_getSize(call FunctionCall) Value { + thisObj := r.toObject(call.This) + so, ok := thisObj.self.(*setObject) + if !ok { + panic(r.NewTypeError("Method get Set.prototype.size called on incompatible receiver %s", thisObj.String())) + } + + return intToValue(int64(so.m.size)) +} + +func (r *Runtime) setProto_values(call FunctionCall) Value { + return r.createSetIterator(call.This, iterationKindValue) +} + +func (r *Runtime) builtin_newSet(args []Value, newTarget *Object) *Object { + if newTarget == nil { + panic(r.needNew("Set")) + } + proto := r.getPrototypeFromCtor(newTarget, r.global.Set, r.global.SetPrototype) + o := &Object{runtime: r} + + so := &setObject{} + so.class = classSet + so.val = o + so.extensible = true + o.self = so + so.prototype = proto + so.init() + if len(args) 
> 0 { + if arg := args[0]; arg != nil && arg != _undefined && arg != _null { + adder := so.getStr("add", nil) + iter := r.getIterator(arg, nil) + if adder == r.global.setAdder { + r.iterate(iter, func(item Value) { + so.m.set(item, nil) + }) + } else { + adderFn := toMethod(adder) + if adderFn == nil { + panic(r.NewTypeError("Set.add in missing")) + } + r.iterate(iter, func(item Value) { + adderFn(FunctionCall{This: o, Arguments: []Value{item}}) + }) + } + } + } + return o +} + +func (r *Runtime) createSetIterator(setValue Value, kind iterationKind) Value { + obj := r.toObject(setValue) + setObj, ok := obj.self.(*setObject) + if !ok { + panic(r.NewTypeError("Object is not a Set")) + } + + o := &Object{runtime: r} + + si := &setIterObject{ + iter: setObj.m.newIter(), + kind: kind, + } + si.class = classSetIterator + si.val = o + si.extensible = true + o.self = si + si.prototype = r.global.SetIteratorPrototype + si.init() + + return o +} + +func (r *Runtime) setIterProto_next(call FunctionCall) Value { + thisObj := r.toObject(call.This) + if iter, ok := thisObj.self.(*setIterObject); ok { + return iter.next() + } + panic(r.NewTypeError("Method Set Iterator.prototype.next called on incompatible receiver %s", thisObj.String())) +} + +func (r *Runtime) createSetProto(val *Object) objectImpl { + o := newBaseObjectObj(val, r.global.ObjectPrototype, classObject) + + o._putProp("constructor", r.global.Set, true, false, true) + r.global.setAdder = r.newNativeFunc(r.setProto_add, nil, "add", nil, 1) + o._putProp("add", r.global.setAdder, true, false, true) + + o._putProp("clear", r.newNativeFunc(r.setProto_clear, nil, "clear", nil, 0), true, false, true) + o._putProp("delete", r.newNativeFunc(r.setProto_delete, nil, "delete", nil, 1), true, false, true) + o._putProp("forEach", r.newNativeFunc(r.setProto_forEach, nil, "forEach", nil, 1), true, false, true) + o._putProp("has", r.newNativeFunc(r.setProto_has, nil, "has", nil, 1), true, false, true) + o.setOwnStr("size", 
&valueProperty{ + getterFunc: r.newNativeFunc(r.setProto_getSize, nil, "get size", nil, 0), + accessor: true, + writable: true, + configurable: true, + }, true) + + valuesFunc := r.newNativeFunc(r.setProto_values, nil, "values", nil, 0) + o._putProp("values", valuesFunc, true, false, true) + o._putProp("keys", valuesFunc, true, false, true) + o._putProp("entries", r.newNativeFunc(r.setProto_entries, nil, "entries", nil, 0), true, false, true) + o._putSym(SymIterator, valueProp(valuesFunc, true, false, true)) + o._putSym(SymToStringTag, valueProp(asciiString(classSet), false, false, true)) + + return o +} + +func (r *Runtime) createSet(val *Object) objectImpl { + o := r.newNativeConstructOnly(val, r.builtin_newSet, r.global.SetPrototype, "Set", 0) + o._putSym(SymSpecies, &valueProperty{ + getterFunc: r.newNativeFunc(r.returnThis, nil, "get [Symbol.species]", nil, 0), + accessor: true, + configurable: true, + }) + + return o +} + +func (r *Runtime) createSetIterProto(val *Object) objectImpl { + o := newBaseObjectObj(val, r.global.IteratorPrototype, classObject) + + o._putProp("next", r.newNativeFunc(r.setIterProto_next, nil, "next", nil, 0), true, false, true) + o._putSym(SymToStringTag, valueProp(asciiString(classSetIterator), false, false, true)) + + return o +} + +func (r *Runtime) initSet() { + r.global.SetIteratorPrototype = r.newLazyObject(r.createSetIterProto) + + r.global.SetPrototype = r.newLazyObject(r.createSetProto) + r.global.Set = r.newLazyObject(r.createSet) + + r.addToGlobal("Set", r.global.Set) +} diff --git a/vendor/github.com/dop251/goja/builtin_string.go b/vendor/github.com/dop251/goja/builtin_string.go new file mode 100644 index 0000000000..63554ff361 --- /dev/null +++ b/vendor/github.com/dop251/goja/builtin_string.go @@ -0,0 +1,999 @@ +package goja + +import ( + "github.com/dop251/goja/unistring" + "math" + "strings" + "unicode/utf16" + "unicode/utf8" + + "github.com/dop251/goja/parser" + "golang.org/x/text/collate" + 
"golang.org/x/text/language" + "golang.org/x/text/unicode/norm" +) + +func (r *Runtime) collator() *collate.Collator { + collator := r._collator + if collator == nil { + collator = collate.New(language.Und) + r._collator = collator + } + return collator +} + +func toString(arg Value) valueString { + if s, ok := arg.(valueString); ok { + return s + } + if s, ok := arg.(*Symbol); ok { + return s.descriptiveString() + } + return arg.toString() +} + +func (r *Runtime) builtin_String(call FunctionCall) Value { + if len(call.Arguments) > 0 { + return toString(call.Arguments[0]) + } else { + return stringEmpty + } +} + +func (r *Runtime) _newString(s valueString, proto *Object) *Object { + v := &Object{runtime: r} + + o := &stringObject{} + o.class = classString + o.val = v + o.extensible = true + v.self = o + o.prototype = proto + if s != nil { + o.value = s + } + o.init() + return v +} + +func (r *Runtime) builtin_newString(args []Value, proto *Object) *Object { + var s valueString + if len(args) > 0 { + s = args[0].toString() + } else { + s = stringEmpty + } + return r._newString(s, proto) +} + +func (r *Runtime) stringproto_toStringValueOf(this Value, funcName string) Value { + if str, ok := this.(valueString); ok { + return str + } + if obj, ok := this.(*Object); ok { + if strObj, ok := obj.self.(*stringObject); ok { + return strObj.value + } + } + r.typeErrorResult(true, "String.prototype.%s is called on incompatible receiver", funcName) + return nil +} + +func (r *Runtime) stringproto_toString(call FunctionCall) Value { + return r.stringproto_toStringValueOf(call.This, "toString") +} + +func (r *Runtime) stringproto_valueOf(call FunctionCall) Value { + return r.stringproto_toStringValueOf(call.This, "valueOf") +} + +func (r *Runtime) stringproto_iterator(call FunctionCall) Value { + r.checkObjectCoercible(call.This) + return r.createStringIterator(call.This.toString()) +} + +func (r *Runtime) string_fromcharcode(call FunctionCall) Value { + b := make([]byte, 
len(call.Arguments)) + for i, arg := range call.Arguments { + chr := toUint16(arg) + if chr >= utf8.RuneSelf { + bb := make([]uint16, len(call.Arguments)+1) + bb[0] = unistring.BOM + bb1 := bb[1:] + for j := 0; j < i; j++ { + bb1[j] = uint16(b[j]) + } + bb1[i] = chr + i++ + for j, arg := range call.Arguments[i:] { + bb1[i+j] = toUint16(arg) + } + return unicodeString(bb) + } + b[i] = byte(chr) + } + + return asciiString(b) +} + +func (r *Runtime) string_fromcodepoint(call FunctionCall) Value { + var sb valueStringBuilder + for _, arg := range call.Arguments { + num := arg.ToNumber() + var c rune + if numInt, ok := num.(valueInt); ok { + if numInt < 0 || numInt > utf8.MaxRune { + panic(r.newError(r.global.RangeError, "Invalid code point %d", numInt)) + } + c = rune(numInt) + } else { + panic(r.newError(r.global.RangeError, "Invalid code point %s", num)) + } + sb.WriteRune(c) + } + return sb.String() +} + +func (r *Runtime) string_raw(call FunctionCall) Value { + cooked := call.Argument(0).ToObject(r) + raw := nilSafe(cooked.self.getStr("raw", nil)).ToObject(r) + literalSegments := toLength(raw.self.getStr("length", nil)) + if literalSegments <= 0 { + return stringEmpty + } + var stringElements valueStringBuilder + nextIndex := int64(0) + numberOfSubstitutions := int64(len(call.Arguments) - 1) + for { + nextSeg := nilSafe(raw.self.getIdx(valueInt(nextIndex), nil)).toString() + stringElements.WriteString(nextSeg) + if nextIndex+1 == literalSegments { + return stringElements.String() + } + if nextIndex < numberOfSubstitutions { + stringElements.WriteString(nilSafe(call.Arguments[nextIndex+1]).toString()) + } + nextIndex++ + } +} + +func (r *Runtime) stringproto_charAt(call FunctionCall) Value { + r.checkObjectCoercible(call.This) + s := call.This.toString() + pos := call.Argument(0).ToInteger() + if pos < 0 || pos >= int64(s.length()) { + return stringEmpty + } + return newStringValue(string(s.charAt(toIntStrict(pos)))) +} + +func (r *Runtime) 
stringproto_charCodeAt(call FunctionCall) Value { + r.checkObjectCoercible(call.This) + s := call.This.toString() + pos := call.Argument(0).ToInteger() + if pos < 0 || pos >= int64(s.length()) { + return _NaN + } + return intToValue(int64(s.charAt(toIntStrict(pos)) & 0xFFFF)) +} + +func (r *Runtime) stringproto_codePointAt(call FunctionCall) Value { + r.checkObjectCoercible(call.This) + s := call.This.toString() + p := call.Argument(0).ToInteger() + size := s.length() + if p < 0 || p >= int64(size) { + return _undefined + } + pos := toIntStrict(p) + first := s.charAt(pos) + if isUTF16FirstSurrogate(first) { + pos++ + if pos < size { + second := s.charAt(pos) + if isUTF16SecondSurrogate(second) { + return intToValue(int64(utf16.DecodeRune(first, second))) + } + } + } + return intToValue(int64(first & 0xFFFF)) +} + +func (r *Runtime) stringproto_concat(call FunctionCall) Value { + r.checkObjectCoercible(call.This) + strs := make([]valueString, len(call.Arguments)+1) + strs[0] = call.This.toString() + _, allAscii := strs[0].(asciiString) + totalLen := strs[0].length() + for i, arg := range call.Arguments { + s := arg.toString() + if allAscii { + _, allAscii = s.(asciiString) + } + strs[i+1] = s + totalLen += s.length() + } + + if allAscii { + var buf strings.Builder + buf.Grow(totalLen) + for _, s := range strs { + buf.WriteString(s.String()) + } + return asciiString(buf.String()) + } else { + buf := make([]uint16, totalLen+1) + buf[0] = unistring.BOM + pos := 1 + for _, s := range strs { + switch s := s.(type) { + case asciiString: + for i := 0; i < len(s); i++ { + buf[pos] = uint16(s[i]) + pos++ + } + case unicodeString: + copy(buf[pos:], s[1:]) + pos += s.length() + } + } + return unicodeString(buf) + } +} + +func (r *Runtime) stringproto_endsWith(call FunctionCall) Value { + r.checkObjectCoercible(call.This) + s := call.This.toString() + searchString := call.Argument(0) + if isRegexp(searchString) { + panic(r.NewTypeError("First argument to 
String.prototype.endsWith must not be a regular expression")) + } + searchStr := searchString.toString() + l := int64(s.length()) + var pos int64 + if posArg := call.Argument(1); posArg != _undefined { + pos = posArg.ToInteger() + } else { + pos = l + } + end := toIntStrict(min(max(pos, 0), l)) + searchLength := searchStr.length() + start := end - searchLength + if start < 0 { + return valueFalse + } + for i := 0; i < searchLength; i++ { + if s.charAt(start+i) != searchStr.charAt(i) { + return valueFalse + } + } + return valueTrue +} + +func (r *Runtime) stringproto_includes(call FunctionCall) Value { + r.checkObjectCoercible(call.This) + s := call.This.toString() + searchString := call.Argument(0) + if isRegexp(searchString) { + panic(r.NewTypeError("First argument to String.prototype.includes must not be a regular expression")) + } + searchStr := searchString.toString() + var pos int64 + if posArg := call.Argument(1); posArg != _undefined { + pos = posArg.ToInteger() + } else { + pos = 0 + } + start := toIntStrict(min(max(pos, 0), int64(s.length()))) + if s.index(searchStr, start) != -1 { + return valueTrue + } + return valueFalse +} + +func (r *Runtime) stringproto_indexOf(call FunctionCall) Value { + r.checkObjectCoercible(call.This) + value := call.This.toString() + target := call.Argument(0).toString() + pos := call.Argument(1).ToInteger() + + if pos < 0 { + pos = 0 + } else { + l := int64(value.length()) + if pos > l { + pos = l + } + } + + return intToValue(int64(value.index(target, toIntStrict(pos)))) +} + +func (r *Runtime) stringproto_lastIndexOf(call FunctionCall) Value { + r.checkObjectCoercible(call.This) + value := call.This.toString() + target := call.Argument(0).toString() + numPos := call.Argument(1).ToNumber() + + var pos int64 + if f, ok := numPos.(valueFloat); ok && math.IsNaN(float64(f)) { + pos = int64(value.length()) + } else { + pos = numPos.ToInteger() + if pos < 0 { + pos = 0 + } else { + l := int64(value.length()) + if pos > l { + pos = 
l + } + } + } + + return intToValue(int64(value.lastIndex(target, toIntStrict(pos)))) +} + +func (r *Runtime) stringproto_localeCompare(call FunctionCall) Value { + r.checkObjectCoercible(call.This) + this := norm.NFD.String(call.This.toString().String()) + that := norm.NFD.String(call.Argument(0).toString().String()) + return intToValue(int64(r.collator().CompareString(this, that))) +} + +func (r *Runtime) stringproto_match(call FunctionCall) Value { + r.checkObjectCoercible(call.This) + regexp := call.Argument(0) + if regexp != _undefined && regexp != _null { + if matcher := toMethod(r.getV(regexp, SymMatch)); matcher != nil { + return matcher(FunctionCall{ + This: regexp, + Arguments: []Value{call.This}, + }) + } + } + + var rx *regexpObject + if regexp, ok := regexp.(*Object); ok { + rx, _ = regexp.self.(*regexpObject) + } + + if rx == nil { + rx = r.newRegExp(regexp, nil, r.global.RegExpPrototype) + } + + if matcher, ok := r.toObject(rx.getSym(SymMatch, nil)).self.assertCallable(); ok { + return matcher(FunctionCall{ + This: rx.val, + Arguments: []Value{call.This.toString()}, + }) + } + + panic(r.NewTypeError("RegExp matcher is not a function")) +} + +func (r *Runtime) stringproto_matchAll(call FunctionCall) Value { + r.checkObjectCoercible(call.This) + regexp := call.Argument(0) + if regexp != _undefined && regexp != _null { + if isRegexp(regexp) { + if o, ok := regexp.(*Object); ok { + flags := o.self.getStr("flags", nil) + r.checkObjectCoercible(flags) + if !strings.Contains(flags.toString().String(), "g") { + panic(r.NewTypeError("RegExp doesn't have global flag set")) + } + } + } + if matcher := toMethod(r.getV(regexp, SymMatchAll)); matcher != nil { + return matcher(FunctionCall{ + This: regexp, + Arguments: []Value{call.This}, + }) + } + } + + rx := r.newRegExp(regexp, asciiString("g"), r.global.RegExpPrototype) + + if matcher, ok := r.toObject(rx.getSym(SymMatchAll, nil)).self.assertCallable(); ok { + return matcher(FunctionCall{ + This: rx.val, + 
Arguments: []Value{call.This.toString()}, + }) + } + + panic(r.NewTypeError("RegExp matcher is not a function")) +} + +func (r *Runtime) stringproto_normalize(call FunctionCall) Value { + r.checkObjectCoercible(call.This) + s := call.This.toString() + var form string + if formArg := call.Argument(0); formArg != _undefined { + form = formArg.toString().toString().String() + } else { + form = "NFC" + } + var f norm.Form + switch form { + case "NFC": + f = norm.NFC + case "NFD": + f = norm.NFD + case "NFKC": + f = norm.NFKC + case "NFKD": + f = norm.NFKD + default: + panic(r.newError(r.global.RangeError, "The normalization form should be one of NFC, NFD, NFKC, NFKD")) + } + + if s, ok := s.(unicodeString); ok { + ss := s.String() + return newStringValue(f.String(ss)) + } + + return s +} + +func (r *Runtime) stringproto_padEnd(call FunctionCall) Value { + r.checkObjectCoercible(call.This) + s := call.This.toString() + maxLength := toLength(call.Argument(0)) + stringLength := int64(s.length()) + if maxLength <= stringLength { + return s + } + var filler valueString + var fillerASCII bool + if fillString := call.Argument(1); fillString != _undefined { + filler = fillString.toString() + if filler.length() == 0 { + return s + } + _, fillerASCII = filler.(asciiString) + } else { + filler = asciiString(" ") + fillerASCII = true + } + remaining := toIntStrict(maxLength - stringLength) + _, stringASCII := s.(asciiString) + if fillerASCII && stringASCII { + fl := filler.length() + var sb strings.Builder + sb.Grow(toIntStrict(maxLength)) + sb.WriteString(s.String()) + fs := filler.String() + for remaining >= fl { + sb.WriteString(fs) + remaining -= fl + } + if remaining > 0 { + sb.WriteString(fs[:remaining]) + } + return asciiString(sb.String()) + } + var sb unicodeStringBuilder + sb.Grow(toIntStrict(maxLength)) + sb.WriteString(s) + fl := filler.length() + for remaining >= fl { + sb.WriteString(filler) + remaining -= fl + } + if remaining > 0 { + 
sb.WriteString(filler.substring(0, remaining)) + } + + return sb.String() +} + +func (r *Runtime) stringproto_padStart(call FunctionCall) Value { + r.checkObjectCoercible(call.This) + s := call.This.toString() + maxLength := toLength(call.Argument(0)) + stringLength := int64(s.length()) + if maxLength <= stringLength { + return s + } + var filler valueString + var fillerASCII bool + if fillString := call.Argument(1); fillString != _undefined { + filler = fillString.toString() + if filler.length() == 0 { + return s + } + _, fillerASCII = filler.(asciiString) + } else { + filler = asciiString(" ") + fillerASCII = true + } + remaining := toIntStrict(maxLength - stringLength) + _, stringASCII := s.(asciiString) + if fillerASCII && stringASCII { + fl := filler.length() + var sb strings.Builder + sb.Grow(toIntStrict(maxLength)) + fs := filler.String() + for remaining >= fl { + sb.WriteString(fs) + remaining -= fl + } + if remaining > 0 { + sb.WriteString(fs[:remaining]) + } + sb.WriteString(s.String()) + return asciiString(sb.String()) + } + var sb unicodeStringBuilder + sb.Grow(toIntStrict(maxLength)) + fl := filler.length() + for remaining >= fl { + sb.WriteString(filler) + remaining -= fl + } + if remaining > 0 { + sb.WriteString(filler.substring(0, remaining)) + } + sb.WriteString(s) + + return sb.String() +} + +func (r *Runtime) stringproto_repeat(call FunctionCall) Value { + r.checkObjectCoercible(call.This) + s := call.This.toString() + n := call.Argument(0).ToNumber() + if n == _positiveInf { + panic(r.newError(r.global.RangeError, "Invalid count value")) + } + numInt := n.ToInteger() + if numInt < 0 { + panic(r.newError(r.global.RangeError, "Invalid count value")) + } + if numInt == 0 || s.length() == 0 { + return stringEmpty + } + num := toIntStrict(numInt) + if s, ok := s.(asciiString); ok { + var sb strings.Builder + sb.Grow(len(s) * num) + for i := 0; i < num; i++ { + sb.WriteString(string(s)) + } + return asciiString(sb.String()) + } + + var sb 
unicodeStringBuilder + sb.Grow(s.length() * num) + for i := 0; i < num; i++ { + sb.WriteString(s) + } + return sb.String() +} + +func getReplaceValue(replaceValue Value) (str valueString, rcall func(FunctionCall) Value) { + if replaceValue, ok := replaceValue.(*Object); ok { + if c, ok := replaceValue.self.assertCallable(); ok { + rcall = c + return + } + } + str = replaceValue.toString() + return +} + +func stringReplace(s valueString, found [][]int, newstring valueString, rcall func(FunctionCall) Value) Value { + if len(found) == 0 { + return s + } + + var str string + var isASCII bool + if astr, ok := s.(asciiString); ok { + str = string(astr) + isASCII = true + } + + var buf valueStringBuilder + + lastIndex := 0 + lengthS := s.length() + if rcall != nil { + for _, item := range found { + if item[0] != lastIndex { + buf.WriteSubstring(s, lastIndex, item[0]) + } + matchCount := len(item) / 2 + argumentList := make([]Value, matchCount+2) + for index := 0; index < matchCount; index++ { + offset := 2 * index + if item[offset] != -1 { + if isASCII { + argumentList[index] = asciiString(str[item[offset]:item[offset+1]]) + } else { + argumentList[index] = s.substring(item[offset], item[offset+1]) + } + } else { + argumentList[index] = _undefined + } + } + argumentList[matchCount] = valueInt(item[0]) + argumentList[matchCount+1] = s + replacement := rcall(FunctionCall{ + This: _undefined, + Arguments: argumentList, + }).toString() + buf.WriteString(replacement) + lastIndex = item[1] + } + } else { + for _, item := range found { + if item[0] != lastIndex { + buf.WriteString(s.substring(lastIndex, item[0])) + } + matchCount := len(item) / 2 + writeSubstitution(s, item[0], matchCount, func(idx int) valueString { + if item[idx*2] != -1 { + if isASCII { + return asciiString(str[item[idx*2]:item[idx*2+1]]) + } + return s.substring(item[idx*2], item[idx*2+1]) + } + return stringEmpty + }, newstring, &buf) + lastIndex = item[1] + } + } + + if lastIndex != lengthS { + 
buf.WriteString(s.substring(lastIndex, lengthS)) + } + + return buf.String() +} + +func (r *Runtime) stringproto_replace(call FunctionCall) Value { + r.checkObjectCoercible(call.This) + searchValue := call.Argument(0) + replaceValue := call.Argument(1) + if searchValue != _undefined && searchValue != _null { + if replacer := toMethod(r.getV(searchValue, SymReplace)); replacer != nil { + return replacer(FunctionCall{ + This: searchValue, + Arguments: []Value{call.This, replaceValue}, + }) + } + } + + s := call.This.toString() + var found [][]int + searchStr := searchValue.toString() + pos := s.index(searchStr, 0) + if pos != -1 { + found = append(found, []int{pos, pos + searchStr.length()}) + } + + str, rcall := getReplaceValue(replaceValue) + return stringReplace(s, found, str, rcall) +} + +func (r *Runtime) stringproto_search(call FunctionCall) Value { + r.checkObjectCoercible(call.This) + regexp := call.Argument(0) + if regexp != _undefined && regexp != _null { + if searcher := toMethod(r.getV(regexp, SymSearch)); searcher != nil { + return searcher(FunctionCall{ + This: regexp, + Arguments: []Value{call.This}, + }) + } + } + + var rx *regexpObject + if regexp, ok := regexp.(*Object); ok { + rx, _ = regexp.self.(*regexpObject) + } + + if rx == nil { + rx = r.newRegExp(regexp, nil, r.global.RegExpPrototype) + } + + if searcher, ok := r.toObject(rx.getSym(SymSearch, nil)).self.assertCallable(); ok { + return searcher(FunctionCall{ + This: rx.val, + Arguments: []Value{call.This.toString()}, + }) + } + + panic(r.NewTypeError("RegExp searcher is not a function")) +} + +func (r *Runtime) stringproto_slice(call FunctionCall) Value { + r.checkObjectCoercible(call.This) + s := call.This.toString() + + l := int64(s.length()) + start := call.Argument(0).ToInteger() + var end int64 + if arg1 := call.Argument(1); arg1 != _undefined { + end = arg1.ToInteger() + } else { + end = l + } + + if start < 0 { + start += l + if start < 0 { + start = 0 + } + } else { + if start > l { + 
start = l + } + } + + if end < 0 { + end += l + if end < 0 { + end = 0 + } + } else { + if end > l { + end = l + } + } + + if end > start { + return s.substring(int(start), int(end)) + } + return stringEmpty +} + +func (r *Runtime) stringproto_split(call FunctionCall) Value { + r.checkObjectCoercible(call.This) + separatorValue := call.Argument(0) + limitValue := call.Argument(1) + if separatorValue != _undefined && separatorValue != _null { + if splitter := toMethod(r.getV(separatorValue, SymSplit)); splitter != nil { + return splitter(FunctionCall{ + This: separatorValue, + Arguments: []Value{call.This, limitValue}, + }) + } + } + s := call.This.toString() + + limit := -1 + if limitValue != _undefined { + limit = int(toUint32(limitValue)) + } + + separatorValue = separatorValue.ToString() + + if limit == 0 { + return r.newArrayValues(nil) + } + + if separatorValue == _undefined { + return r.newArrayValues([]Value{s}) + } + + separator := separatorValue.String() + + excess := false + str := s.String() + if limit > len(str) { + limit = len(str) + } + splitLimit := limit + if limit > 0 { + splitLimit = limit + 1 + excess = true + } + + // TODO handle invalid UTF-16 + split := strings.SplitN(str, separator, splitLimit) + + if excess && len(split) > limit { + split = split[:limit] + } + + valueArray := make([]Value, len(split)) + for index, value := range split { + valueArray[index] = newStringValue(value) + } + + return r.newArrayValues(valueArray) +} + +func (r *Runtime) stringproto_startsWith(call FunctionCall) Value { + r.checkObjectCoercible(call.This) + s := call.This.toString() + searchString := call.Argument(0) + if isRegexp(searchString) { + panic(r.NewTypeError("First argument to String.prototype.startsWith must not be a regular expression")) + } + searchStr := searchString.toString() + l := int64(s.length()) + var pos int64 + if posArg := call.Argument(1); posArg != _undefined { + pos = posArg.ToInteger() + } + start := toIntStrict(min(max(pos, 0), l)) + 
searchLength := searchStr.length() + if int64(searchLength+start) > l { + return valueFalse + } + for i := 0; i < searchLength; i++ { + if s.charAt(start+i) != searchStr.charAt(i) { + return valueFalse + } + } + return valueTrue +} + +func (r *Runtime) stringproto_substring(call FunctionCall) Value { + r.checkObjectCoercible(call.This) + s := call.This.toString() + + l := int64(s.length()) + intStart := call.Argument(0).ToInteger() + var intEnd int64 + if end := call.Argument(1); end != _undefined { + intEnd = end.ToInteger() + } else { + intEnd = l + } + if intStart < 0 { + intStart = 0 + } else if intStart > l { + intStart = l + } + + if intEnd < 0 { + intEnd = 0 + } else if intEnd > l { + intEnd = l + } + + if intStart > intEnd { + intStart, intEnd = intEnd, intStart + } + + return s.substring(int(intStart), int(intEnd)) +} + +func (r *Runtime) stringproto_toLowerCase(call FunctionCall) Value { + r.checkObjectCoercible(call.This) + s := call.This.toString() + + return s.toLower() +} + +func (r *Runtime) stringproto_toUpperCase(call FunctionCall) Value { + r.checkObjectCoercible(call.This) + s := call.This.toString() + + return s.toUpper() +} + +func (r *Runtime) stringproto_trim(call FunctionCall) Value { + r.checkObjectCoercible(call.This) + s := call.This.toString() + + // TODO handle invalid UTF-16 + return newStringValue(strings.Trim(s.String(), parser.WhitespaceChars)) +} + +func (r *Runtime) stringproto_trimEnd(call FunctionCall) Value { + r.checkObjectCoercible(call.This) + s := call.This.toString() + + // TODO handle invalid UTF-16 + return newStringValue(strings.TrimRight(s.String(), parser.WhitespaceChars)) +} + +func (r *Runtime) stringproto_trimStart(call FunctionCall) Value { + r.checkObjectCoercible(call.This) + s := call.This.toString() + + // TODO handle invalid UTF-16 + return newStringValue(strings.TrimLeft(s.String(), parser.WhitespaceChars)) +} + +func (r *Runtime) stringproto_substr(call FunctionCall) Value { + 
r.checkObjectCoercible(call.This) + s := call.This.toString() + start := call.Argument(0).ToInteger() + var length int64 + sl := int64(s.length()) + if arg := call.Argument(1); arg != _undefined { + length = arg.ToInteger() + } else { + length = sl + } + + if start < 0 { + start = max(sl+start, 0) + } + + length = min(max(length, 0), sl-start) + if length <= 0 { + return stringEmpty + } + + return s.substring(int(start), int(start+length)) +} + +func (r *Runtime) stringIterProto_next(call FunctionCall) Value { + thisObj := r.toObject(call.This) + if iter, ok := thisObj.self.(*stringIterObject); ok { + return iter.next() + } + panic(r.NewTypeError("Method String Iterator.prototype.next called on incompatible receiver %s", thisObj.String())) +} + +func (r *Runtime) createStringIterProto(val *Object) objectImpl { + o := newBaseObjectObj(val, r.global.IteratorPrototype, classObject) + + o._putProp("next", r.newNativeFunc(r.stringIterProto_next, nil, "next", nil, 0), true, false, true) + o._putSym(SymToStringTag, valueProp(asciiString(classStringIterator), false, false, true)) + + return o +} + +func (r *Runtime) initString() { + r.global.StringIteratorPrototype = r.newLazyObject(r.createStringIterProto) + r.global.StringPrototype = r.builtin_newString([]Value{stringEmpty}, r.global.ObjectPrototype) + + o := r.global.StringPrototype.self + o._putProp("charAt", r.newNativeFunc(r.stringproto_charAt, nil, "charAt", nil, 1), true, false, true) + o._putProp("charCodeAt", r.newNativeFunc(r.stringproto_charCodeAt, nil, "charCodeAt", nil, 1), true, false, true) + o._putProp("codePointAt", r.newNativeFunc(r.stringproto_codePointAt, nil, "codePointAt", nil, 1), true, false, true) + o._putProp("concat", r.newNativeFunc(r.stringproto_concat, nil, "concat", nil, 1), true, false, true) + o._putProp("endsWith", r.newNativeFunc(r.stringproto_endsWith, nil, "endsWith", nil, 1), true, false, true) + o._putProp("includes", r.newNativeFunc(r.stringproto_includes, nil, "includes", nil, 1), 
true, false, true) + o._putProp("indexOf", r.newNativeFunc(r.stringproto_indexOf, nil, "indexOf", nil, 1), true, false, true) + o._putProp("lastIndexOf", r.newNativeFunc(r.stringproto_lastIndexOf, nil, "lastIndexOf", nil, 1), true, false, true) + o._putProp("localeCompare", r.newNativeFunc(r.stringproto_localeCompare, nil, "localeCompare", nil, 1), true, false, true) + o._putProp("match", r.newNativeFunc(r.stringproto_match, nil, "match", nil, 1), true, false, true) + o._putProp("matchAll", r.newNativeFunc(r.stringproto_matchAll, nil, "matchAll", nil, 1), true, false, true) + o._putProp("normalize", r.newNativeFunc(r.stringproto_normalize, nil, "normalize", nil, 0), true, false, true) + o._putProp("padEnd", r.newNativeFunc(r.stringproto_padEnd, nil, "padEnd", nil, 1), true, false, true) + o._putProp("padStart", r.newNativeFunc(r.stringproto_padStart, nil, "padStart", nil, 1), true, false, true) + o._putProp("repeat", r.newNativeFunc(r.stringproto_repeat, nil, "repeat", nil, 1), true, false, true) + o._putProp("replace", r.newNativeFunc(r.stringproto_replace, nil, "replace", nil, 2), true, false, true) + o._putProp("search", r.newNativeFunc(r.stringproto_search, nil, "search", nil, 1), true, false, true) + o._putProp("slice", r.newNativeFunc(r.stringproto_slice, nil, "slice", nil, 2), true, false, true) + o._putProp("split", r.newNativeFunc(r.stringproto_split, nil, "split", nil, 2), true, false, true) + o._putProp("startsWith", r.newNativeFunc(r.stringproto_startsWith, nil, "startsWith", nil, 1), true, false, true) + o._putProp("substring", r.newNativeFunc(r.stringproto_substring, nil, "substring", nil, 2), true, false, true) + o._putProp("toLocaleLowerCase", r.newNativeFunc(r.stringproto_toLowerCase, nil, "toLocaleLowerCase", nil, 0), true, false, true) + o._putProp("toLocaleUpperCase", r.newNativeFunc(r.stringproto_toUpperCase, nil, "toLocaleUpperCase", nil, 0), true, false, true) + o._putProp("toLowerCase", r.newNativeFunc(r.stringproto_toLowerCase, nil, 
"toLowerCase", nil, 0), true, false, true) + o._putProp("toString", r.newNativeFunc(r.stringproto_toString, nil, "toString", nil, 0), true, false, true) + o._putProp("toUpperCase", r.newNativeFunc(r.stringproto_toUpperCase, nil, "toUpperCase", nil, 0), true, false, true) + o._putProp("trim", r.newNativeFunc(r.stringproto_trim, nil, "trim", nil, 0), true, false, true) + trimEnd := r.newNativeFunc(r.stringproto_trimEnd, nil, "trimEnd", nil, 0) + trimStart := r.newNativeFunc(r.stringproto_trimStart, nil, "trimStart", nil, 0) + o._putProp("trimEnd", trimEnd, true, false, true) + o._putProp("trimStart", trimStart, true, false, true) + o._putProp("trimRight", trimEnd, true, false, true) + o._putProp("trimLeft", trimStart, true, false, true) + o._putProp("valueOf", r.newNativeFunc(r.stringproto_valueOf, nil, "valueOf", nil, 0), true, false, true) + + o._putSym(SymIterator, valueProp(r.newNativeFunc(r.stringproto_iterator, nil, "[Symbol.iterator]", nil, 0), true, false, true)) + + // Annex B + o._putProp("substr", r.newNativeFunc(r.stringproto_substr, nil, "substr", nil, 2), true, false, true) + + r.global.String = r.newNativeFunc(r.builtin_String, r.builtin_newString, "String", r.global.StringPrototype, 1) + o = r.global.String.self + o._putProp("fromCharCode", r.newNativeFunc(r.string_fromcharcode, nil, "fromCharCode", nil, 1), true, false, true) + o._putProp("fromCodePoint", r.newNativeFunc(r.string_fromcodepoint, nil, "fromCodePoint", nil, 1), true, false, true) + o._putProp("raw", r.newNativeFunc(r.string_raw, nil, "raw", nil, 1), true, false, true) + + r.addToGlobal("String", r.global.String) + + r.stringSingleton = r.builtin_new(r.global.String, nil).self.(*stringObject) +} diff --git a/vendor/github.com/dop251/goja/builtin_symbol.go b/vendor/github.com/dop251/goja/builtin_symbol.go new file mode 100644 index 0000000000..9aa5e771e0 --- /dev/null +++ b/vendor/github.com/dop251/goja/builtin_symbol.go @@ -0,0 +1,144 @@ +package goja + +import 
"github.com/dop251/goja/unistring" + +var ( + SymHasInstance = newSymbol(asciiString("Symbol.hasInstance")) + SymIsConcatSpreadable = newSymbol(asciiString("Symbol.isConcatSpreadable")) + SymIterator = newSymbol(asciiString("Symbol.iterator")) + SymMatch = newSymbol(asciiString("Symbol.match")) + SymMatchAll = newSymbol(asciiString("Symbol.matchAll")) + SymReplace = newSymbol(asciiString("Symbol.replace")) + SymSearch = newSymbol(asciiString("Symbol.search")) + SymSpecies = newSymbol(asciiString("Symbol.species")) + SymSplit = newSymbol(asciiString("Symbol.split")) + SymToPrimitive = newSymbol(asciiString("Symbol.toPrimitive")) + SymToStringTag = newSymbol(asciiString("Symbol.toStringTag")) + SymUnscopables = newSymbol(asciiString("Symbol.unscopables")) +) + +func (r *Runtime) builtin_symbol(call FunctionCall) Value { + var desc valueString + if arg := call.Argument(0); !IsUndefined(arg) { + desc = arg.toString() + } else { + desc = stringEmpty + } + return newSymbol(desc) +} + +func (r *Runtime) symbolproto_tostring(call FunctionCall) Value { + sym, ok := call.This.(*Symbol) + if !ok { + if obj, ok := call.This.(*Object); ok { + if v, ok := obj.self.(*primitiveValueObject); ok { + if sym1, ok := v.pValue.(*Symbol); ok { + sym = sym1 + } + } + } + } + if sym == nil { + panic(r.NewTypeError("Method Symbol.prototype.toString is called on incompatible receiver")) + } + return sym.descriptiveString() +} + +func (r *Runtime) symbolproto_valueOf(call FunctionCall) Value { + _, ok := call.This.(*Symbol) + if ok { + return call.This + } + + if obj, ok := call.This.(*Object); ok { + if v, ok := obj.self.(*primitiveValueObject); ok { + if sym, ok := v.pValue.(*Symbol); ok { + return sym + } + } + } + + panic(r.NewTypeError("Symbol.prototype.valueOf requires that 'this' be a Symbol")) +} + +func (r *Runtime) symbol_for(call FunctionCall) Value { + key := call.Argument(0).toString() + keyStr := key.string() + if v := r.symbolRegistry[keyStr]; v != nil { + return v + } + if 
r.symbolRegistry == nil { + r.symbolRegistry = make(map[unistring.String]*Symbol) + } + v := newSymbol(key) + r.symbolRegistry[keyStr] = v + return v +} + +func (r *Runtime) symbol_keyfor(call FunctionCall) Value { + arg := call.Argument(0) + sym, ok := arg.(*Symbol) + if !ok { + panic(r.NewTypeError("%s is not a symbol", arg.String())) + } + for key, s := range r.symbolRegistry { + if s == sym { + return stringValueFromRaw(key) + } + } + return _undefined +} + +func (r *Runtime) createSymbolProto(val *Object) objectImpl { + o := &baseObject{ + class: classObject, + val: val, + extensible: true, + prototype: r.global.ObjectPrototype, + } + o.init() + + o._putProp("constructor", r.global.Symbol, true, false, true) + o._putProp("toString", r.newNativeFunc(r.symbolproto_tostring, nil, "toString", nil, 0), true, false, true) + o._putProp("valueOf", r.newNativeFunc(r.symbolproto_valueOf, nil, "valueOf", nil, 0), true, false, true) + o._putSym(SymToPrimitive, valueProp(r.newNativeFunc(r.symbolproto_valueOf, nil, "[Symbol.toPrimitive]", nil, 1), false, false, true)) + o._putSym(SymToStringTag, valueProp(newStringValue("Symbol"), false, false, true)) + + return o +} + +func (r *Runtime) createSymbol(val *Object) objectImpl { + o := r.newNativeFuncObj(val, r.builtin_symbol, nil, "Symbol", r.global.SymbolPrototype, 0) + + o._putProp("for", r.newNativeFunc(r.symbol_for, nil, "for", nil, 1), true, false, true) + o._putProp("keyFor", r.newNativeFunc(r.symbol_keyfor, nil, "keyFor", nil, 1), true, false, true) + + for _, s := range []*Symbol{ + SymHasInstance, + SymIsConcatSpreadable, + SymIterator, + SymMatch, + SymMatchAll, + SymReplace, + SymSearch, + SymSpecies, + SymSplit, + SymToPrimitive, + SymToStringTag, + SymUnscopables, + } { + n := s.desc.(asciiString) + n = n[len("Symbol."):] + o._putProp(unistring.String(n), s, false, false, false) + } + + return o +} + +func (r *Runtime) initSymbol() { + r.global.SymbolPrototype = r.newLazyObject(r.createSymbolProto) + + 
r.global.Symbol = r.newLazyObject(r.createSymbol) + r.addToGlobal("Symbol", r.global.Symbol) + +} diff --git a/vendor/github.com/dop251/goja/builtin_typedarrays.go b/vendor/github.com/dop251/goja/builtin_typedarrays.go new file mode 100644 index 0000000000..0df95418e4 --- /dev/null +++ b/vendor/github.com/dop251/goja/builtin_typedarrays.go @@ -0,0 +1,1445 @@ +package goja + +import ( + "fmt" + "math" + "sort" + "unsafe" + + "github.com/dop251/goja/unistring" +) + +type typedArraySortCtx struct { + ta *typedArrayObject + compare func(FunctionCall) Value + needValidate bool +} + +func (ctx *typedArraySortCtx) Len() int { + return ctx.ta.length +} + +func (ctx *typedArraySortCtx) Less(i, j int) bool { + if ctx.needValidate { + ctx.ta.viewedArrayBuf.ensureNotDetached(true) + ctx.needValidate = false + } + offset := ctx.ta.offset + if ctx.compare != nil { + x := ctx.ta.typedArray.get(offset + i) + y := ctx.ta.typedArray.get(offset + j) + res := ctx.compare(FunctionCall{ + This: _undefined, + Arguments: []Value{x, y}, + }).ToNumber() + ctx.needValidate = true + if i, ok := res.(valueInt); ok { + return i < 0 + } + f := res.ToFloat() + if f < 0 { + return true + } + if f > 0 { + return false + } + if math.Signbit(f) { + return true + } + return false + } + + return ctx.ta.typedArray.less(offset+i, offset+j) +} + +func (ctx *typedArraySortCtx) Swap(i, j int) { + if ctx.needValidate { + ctx.ta.viewedArrayBuf.ensureNotDetached(true) + ctx.needValidate = false + } + offset := ctx.ta.offset + ctx.ta.typedArray.swap(offset+i, offset+j) +} + +func allocByteSlice(size int) (b []byte) { + defer func() { + if x := recover(); x != nil { + panic(rangeError(fmt.Sprintf("Buffer size is too large: %d", size))) + } + }() + if size < 0 { + panic(rangeError(fmt.Sprintf("Invalid buffer size: %d", size))) + } + b = make([]byte, size) + return +} + +func (r *Runtime) builtin_newArrayBuffer(args []Value, newTarget *Object) *Object { + if newTarget == nil { + panic(r.needNew("ArrayBuffer")) + } 
+ b := r._newArrayBuffer(r.getPrototypeFromCtor(newTarget, r.global.ArrayBuffer, r.global.ArrayBufferPrototype), nil) + if len(args) > 0 { + b.data = allocByteSlice(r.toIndex(args[0])) + } + return b.val +} + +func (r *Runtime) arrayBufferProto_getByteLength(call FunctionCall) Value { + o := r.toObject(call.This) + if b, ok := o.self.(*arrayBufferObject); ok { + if b.ensureNotDetached(false) { + return intToValue(int64(len(b.data))) + } + return intToValue(0) + } + panic(r.NewTypeError("Object is not ArrayBuffer: %s", o)) +} + +func (r *Runtime) arrayBufferProto_slice(call FunctionCall) Value { + o := r.toObject(call.This) + if b, ok := o.self.(*arrayBufferObject); ok { + l := int64(len(b.data)) + start := relToIdx(call.Argument(0).ToInteger(), l) + var stop int64 + if arg := call.Argument(1); arg != _undefined { + stop = arg.ToInteger() + } else { + stop = l + } + stop = relToIdx(stop, l) + newLen := max(stop-start, 0) + ret := r.speciesConstructor(o, r.global.ArrayBuffer)([]Value{intToValue(newLen)}, nil) + if ab, ok := ret.self.(*arrayBufferObject); ok { + if newLen > 0 { + b.ensureNotDetached(true) + if ret == o { + panic(r.NewTypeError("Species constructor returned the same ArrayBuffer")) + } + if int64(len(ab.data)) < newLen { + panic(r.NewTypeError("Species constructor returned an ArrayBuffer that is too small: %d", len(ab.data))) + } + ab.ensureNotDetached(true) + copy(ab.data, b.data[start:stop]) + } + return ret + } + panic(r.NewTypeError("Species constructor did not return an ArrayBuffer: %s", ret.String())) + } + panic(r.NewTypeError("Object is not ArrayBuffer: %s", o)) +} + +func (r *Runtime) arrayBuffer_isView(call FunctionCall) Value { + if o, ok := call.Argument(0).(*Object); ok { + if _, ok := o.self.(*dataViewObject); ok { + return valueTrue + } + if _, ok := o.self.(*typedArrayObject); ok { + return valueTrue + } + } + return valueFalse +} + +func (r *Runtime) newDataView(args []Value, newTarget *Object) *Object { + if newTarget == nil { + 
panic(r.needNew("DataView")) + } + proto := r.getPrototypeFromCtor(newTarget, r.global.DataView, r.global.DataViewPrototype) + var bufArg Value + if len(args) > 0 { + bufArg = args[0] + } + var buffer *arrayBufferObject + if o, ok := bufArg.(*Object); ok { + if b, ok := o.self.(*arrayBufferObject); ok { + buffer = b + } + } + if buffer == nil { + panic(r.NewTypeError("First argument to DataView constructor must be an ArrayBuffer")) + } + var byteOffset, byteLen int + if len(args) > 1 { + offsetArg := nilSafe(args[1]) + byteOffset = r.toIndex(offsetArg) + buffer.ensureNotDetached(true) + if byteOffset > len(buffer.data) { + panic(r.newError(r.global.RangeError, "Start offset %s is outside the bounds of the buffer", offsetArg.String())) + } + } + if len(args) > 2 && args[2] != nil && args[2] != _undefined { + byteLen = r.toIndex(args[2]) + if byteOffset+byteLen > len(buffer.data) { + panic(r.newError(r.global.RangeError, "Invalid DataView length %d", byteLen)) + } + } else { + byteLen = len(buffer.data) - byteOffset + } + o := &Object{runtime: r} + b := &dataViewObject{ + baseObject: baseObject{ + class: classObject, + val: o, + prototype: proto, + extensible: true, + }, + viewedArrayBuf: buffer, + byteOffset: byteOffset, + byteLen: byteLen, + } + o.self = b + b.init() + return o +} + +func (r *Runtime) dataViewProto_getBuffer(call FunctionCall) Value { + if dv, ok := r.toObject(call.This).self.(*dataViewObject); ok { + return dv.viewedArrayBuf.val + } + panic(r.NewTypeError("Method get DataView.prototype.buffer called on incompatible receiver %s", call.This.String())) +} + +func (r *Runtime) dataViewProto_getByteLen(call FunctionCall) Value { + if dv, ok := r.toObject(call.This).self.(*dataViewObject); ok { + dv.viewedArrayBuf.ensureNotDetached(true) + return intToValue(int64(dv.byteLen)) + } + panic(r.NewTypeError("Method get DataView.prototype.byteLength called on incompatible receiver %s", call.This.String())) +} + +func (r *Runtime) 
dataViewProto_getByteOffset(call FunctionCall) Value { + if dv, ok := r.toObject(call.This).self.(*dataViewObject); ok { + dv.viewedArrayBuf.ensureNotDetached(true) + return intToValue(int64(dv.byteOffset)) + } + panic(r.NewTypeError("Method get DataView.prototype.byteOffset called on incompatible receiver %s", call.This.String())) +} + +func (r *Runtime) dataViewProto_getFloat32(call FunctionCall) Value { + if dv, ok := r.toObject(call.This).self.(*dataViewObject); ok { + return floatToValue(float64(dv.viewedArrayBuf.getFloat32(dv.getIdxAndByteOrder(call.Argument(0), call.Argument(1), 4)))) + } + panic(r.NewTypeError("Method DataView.prototype.getFloat32 called on incompatible receiver %s", call.This.String())) +} + +func (r *Runtime) dataViewProto_getFloat64(call FunctionCall) Value { + if dv, ok := r.toObject(call.This).self.(*dataViewObject); ok { + return floatToValue(dv.viewedArrayBuf.getFloat64(dv.getIdxAndByteOrder(call.Argument(0), call.Argument(1), 8))) + } + panic(r.NewTypeError("Method DataView.prototype.getFloat64 called on incompatible receiver %s", call.This.String())) +} + +func (r *Runtime) dataViewProto_getInt8(call FunctionCall) Value { + if dv, ok := r.toObject(call.This).self.(*dataViewObject); ok { + idx, _ := dv.getIdxAndByteOrder(call.Argument(0), call.Argument(1), 1) + return intToValue(int64(dv.viewedArrayBuf.getInt8(idx))) + } + panic(r.NewTypeError("Method DataView.prototype.getInt8 called on incompatible receiver %s", call.This.String())) +} + +func (r *Runtime) dataViewProto_getInt16(call FunctionCall) Value { + if dv, ok := r.toObject(call.This).self.(*dataViewObject); ok { + return intToValue(int64(dv.viewedArrayBuf.getInt16(dv.getIdxAndByteOrder(call.Argument(0), call.Argument(1), 2)))) + } + panic(r.NewTypeError("Method DataView.prototype.getInt16 called on incompatible receiver %s", call.This.String())) +} + +func (r *Runtime) dataViewProto_getInt32(call FunctionCall) Value { + if dv, ok := 
r.toObject(call.This).self.(*dataViewObject); ok { + return intToValue(int64(dv.viewedArrayBuf.getInt32(dv.getIdxAndByteOrder(call.Argument(0), call.Argument(1), 4)))) + } + panic(r.NewTypeError("Method DataView.prototype.getInt32 called on incompatible receiver %s", call.This.String())) +} + +func (r *Runtime) dataViewProto_getUint8(call FunctionCall) Value { + if dv, ok := r.toObject(call.This).self.(*dataViewObject); ok { + idx, _ := dv.getIdxAndByteOrder(call.Argument(0), call.Argument(1), 1) + return intToValue(int64(dv.viewedArrayBuf.getUint8(idx))) + } + panic(r.NewTypeError("Method DataView.prototype.getUint8 called on incompatible receiver %s", call.This.String())) +} + +func (r *Runtime) dataViewProto_getUint16(call FunctionCall) Value { + if dv, ok := r.toObject(call.This).self.(*dataViewObject); ok { + return intToValue(int64(dv.viewedArrayBuf.getUint16(dv.getIdxAndByteOrder(call.Argument(0), call.Argument(1), 2)))) + } + panic(r.NewTypeError("Method DataView.prototype.getUint16 called on incompatible receiver %s", call.This.String())) +} + +func (r *Runtime) dataViewProto_getUint32(call FunctionCall) Value { + if dv, ok := r.toObject(call.This).self.(*dataViewObject); ok { + return intToValue(int64(dv.viewedArrayBuf.getUint32(dv.getIdxAndByteOrder(call.Argument(0), call.Argument(1), 4)))) + } + panic(r.NewTypeError("Method DataView.prototype.getUint32 called on incompatible receiver %s", call.This.String())) +} + +func (r *Runtime) dataViewProto_setFloat32(call FunctionCall) Value { + if dv, ok := r.toObject(call.This).self.(*dataViewObject); ok { + val := toFloat32(call.Argument(1)) + idx, bo := dv.getIdxAndByteOrder(call.Argument(0), call.Argument(2), 4) + dv.viewedArrayBuf.setFloat32(idx, val, bo) + return _undefined + } + panic(r.NewTypeError("Method DataView.prototype.setFloat32 called on incompatible receiver %s", call.This.String())) +} + +func (r *Runtime) dataViewProto_setFloat64(call FunctionCall) Value { + if dv, ok := 
r.toObject(call.This).self.(*dataViewObject); ok { + val := call.Argument(1).ToFloat() + idx, bo := dv.getIdxAndByteOrder(call.Argument(0), call.Argument(2), 8) + dv.viewedArrayBuf.setFloat64(idx, val, bo) + return _undefined + } + panic(r.NewTypeError("Method DataView.prototype.setFloat64 called on incompatible receiver %s", call.This.String())) +} + +func (r *Runtime) dataViewProto_setInt8(call FunctionCall) Value { + if dv, ok := r.toObject(call.This).self.(*dataViewObject); ok { + val := toInt8(call.Argument(1)) + idx, _ := dv.getIdxAndByteOrder(call.Argument(0), call.Argument(2), 1) + dv.viewedArrayBuf.setInt8(idx, val) + return _undefined + } + panic(r.NewTypeError("Method DataView.prototype.setInt8 called on incompatible receiver %s", call.This.String())) +} + +func (r *Runtime) dataViewProto_setInt16(call FunctionCall) Value { + if dv, ok := r.toObject(call.This).self.(*dataViewObject); ok { + val := toInt16(call.Argument(1)) + idx, bo := dv.getIdxAndByteOrder(call.Argument(0), call.Argument(2), 2) + dv.viewedArrayBuf.setInt16(idx, val, bo) + return _undefined + } + panic(r.NewTypeError("Method DataView.prototype.setInt16 called on incompatible receiver %s", call.This.String())) +} + +func (r *Runtime) dataViewProto_setInt32(call FunctionCall) Value { + if dv, ok := r.toObject(call.This).self.(*dataViewObject); ok { + val := toInt32(call.Argument(1)) + idx, bo := dv.getIdxAndByteOrder(call.Argument(0), call.Argument(2), 4) + dv.viewedArrayBuf.setInt32(idx, val, bo) + return _undefined + } + panic(r.NewTypeError("Method DataView.prototype.setInt32 called on incompatible receiver %s", call.This.String())) +} + +func (r *Runtime) dataViewProto_setUint8(call FunctionCall) Value { + if dv, ok := r.toObject(call.This).self.(*dataViewObject); ok { + val := toUint8(call.Argument(1)) + idx, _ := dv.getIdxAndByteOrder(call.Argument(0), call.Argument(2), 1) + dv.viewedArrayBuf.setUint8(idx, val) + return _undefined + } + panic(r.NewTypeError("Method 
DataView.prototype.setUint8 called on incompatible receiver %s", call.This.String())) +} + +func (r *Runtime) dataViewProto_setUint16(call FunctionCall) Value { + if dv, ok := r.toObject(call.This).self.(*dataViewObject); ok { + val := toUint16(call.Argument(1)) + idx, bo := dv.getIdxAndByteOrder(call.Argument(0), call.Argument(2), 2) + dv.viewedArrayBuf.setUint16(idx, val, bo) + return _undefined + } + panic(r.NewTypeError("Method DataView.prototype.setUint16 called on incompatible receiver %s", call.This.String())) +} + +func (r *Runtime) dataViewProto_setUint32(call FunctionCall) Value { + if dv, ok := r.toObject(call.This).self.(*dataViewObject); ok { + val := toUint32(call.Argument(1)) + idx, bo := dv.getIdxAndByteOrder(call.Argument(0), call.Argument(2), 4) + dv.viewedArrayBuf.setUint32(idx, val, bo) + return _undefined + } + panic(r.NewTypeError("Method DataView.prototype.setUint32 called on incompatible receiver %s", call.This.String())) +} + +func (r *Runtime) typedArrayProto_getBuffer(call FunctionCall) Value { + if ta, ok := r.toObject(call.This).self.(*typedArrayObject); ok { + return ta.viewedArrayBuf.val + } + panic(r.NewTypeError("Method get TypedArray.prototype.buffer called on incompatible receiver %s", call.This.String())) +} + +func (r *Runtime) typedArrayProto_getByteLen(call FunctionCall) Value { + if ta, ok := r.toObject(call.This).self.(*typedArrayObject); ok { + if ta.viewedArrayBuf.data == nil { + return _positiveZero + } + return intToValue(int64(ta.length) * int64(ta.elemSize)) + } + panic(r.NewTypeError("Method get TypedArray.prototype.byteLength called on incompatible receiver %s", call.This.String())) +} + +func (r *Runtime) typedArrayProto_getLength(call FunctionCall) Value { + if ta, ok := r.toObject(call.This).self.(*typedArrayObject); ok { + if ta.viewedArrayBuf.data == nil { + return _positiveZero + } + return intToValue(int64(ta.length)) + } + panic(r.NewTypeError("Method get TypedArray.prototype.length called on incompatible 
receiver %s", call.This.String())) +} + +func (r *Runtime) typedArrayProto_getByteOffset(call FunctionCall) Value { + if ta, ok := r.toObject(call.This).self.(*typedArrayObject); ok { + if ta.viewedArrayBuf.data == nil { + return _positiveZero + } + return intToValue(int64(ta.offset) * int64(ta.elemSize)) + } + panic(r.NewTypeError("Method get TypedArray.prototype.byteOffset called on incompatible receiver %s", call.This.String())) +} + +func (r *Runtime) typedArrayProto_copyWithin(call FunctionCall) Value { + if ta, ok := r.toObject(call.This).self.(*typedArrayObject); ok { + ta.viewedArrayBuf.ensureNotDetached(true) + l := int64(ta.length) + var relEnd int64 + to := toIntStrict(relToIdx(call.Argument(0).ToInteger(), l)) + from := toIntStrict(relToIdx(call.Argument(1).ToInteger(), l)) + if end := call.Argument(2); end != _undefined { + relEnd = end.ToInteger() + } else { + relEnd = l + } + final := toIntStrict(relToIdx(relEnd, l)) + data := ta.viewedArrayBuf.data + offset := ta.offset + elemSize := ta.elemSize + if final > from { + ta.viewedArrayBuf.ensureNotDetached(true) + copy(data[(offset+to)*elemSize:], data[(offset+from)*elemSize:(offset+final)*elemSize]) + } + return call.This + } + panic(r.NewTypeError("Method TypedArray.prototype.copyWithin called on incompatible receiver %s", call.This.String())) +} + +func (r *Runtime) typedArrayProto_entries(call FunctionCall) Value { + if ta, ok := r.toObject(call.This).self.(*typedArrayObject); ok { + ta.viewedArrayBuf.ensureNotDetached(true) + return r.createArrayIterator(ta.val, iterationKindKeyValue) + } + panic(r.NewTypeError("Method TypedArray.prototype.entries called on incompatible receiver %s", call.This.String())) +} + +func (r *Runtime) typedArrayProto_every(call FunctionCall) Value { + if ta, ok := r.toObject(call.This).self.(*typedArrayObject); ok { + ta.viewedArrayBuf.ensureNotDetached(true) + callbackFn := r.toCallable(call.Argument(0)) + fc := FunctionCall{ + This: call.Argument(1), + Arguments: 
[]Value{nil, nil, call.This}, + } + for k := 0; k < ta.length; k++ { + ta.viewedArrayBuf.ensureNotDetached(true) + fc.Arguments[0] = ta.typedArray.get(ta.offset + k) + fc.Arguments[1] = intToValue(int64(k)) + if !callbackFn(fc).ToBoolean() { + return valueFalse + } + } + return valueTrue + + } + panic(r.NewTypeError("Method TypedArray.prototype.every called on incompatible receiver %s", call.This.String())) +} + +func (r *Runtime) typedArrayProto_fill(call FunctionCall) Value { + if ta, ok := r.toObject(call.This).self.(*typedArrayObject); ok { + ta.viewedArrayBuf.ensureNotDetached(true) + l := int64(ta.length) + k := toIntStrict(relToIdx(call.Argument(1).ToInteger(), l)) + var relEnd int64 + if endArg := call.Argument(2); endArg != _undefined { + relEnd = endArg.ToInteger() + } else { + relEnd = l + } + final := toIntStrict(relToIdx(relEnd, l)) + value := ta.typedArray.toRaw(call.Argument(0)) + ta.viewedArrayBuf.ensureNotDetached(true) + for ; k < final; k++ { + ta.typedArray.setRaw(ta.offset+k, value) + } + return call.This + } + panic(r.NewTypeError("Method TypedArray.prototype.fill called on incompatible receiver %s", call.This.String())) +} + +func (r *Runtime) typedArrayProto_filter(call FunctionCall) Value { + o := r.toObject(call.This) + if ta, ok := o.self.(*typedArrayObject); ok { + ta.viewedArrayBuf.ensureNotDetached(true) + callbackFn := r.toCallable(call.Argument(0)) + fc := FunctionCall{ + This: call.Argument(1), + Arguments: []Value{nil, nil, call.This}, + } + buf := make([]byte, 0, ta.length*ta.elemSize) + captured := 0 + for k := 0; k < ta.length; k++ { + ta.viewedArrayBuf.ensureNotDetached(true) + fc.Arguments[0] = ta.typedArray.get(k) + fc.Arguments[1] = intToValue(int64(k)) + if callbackFn(fc).ToBoolean() { + i := (ta.offset + k) * ta.elemSize + buf = append(buf, ta.viewedArrayBuf.data[i:i+ta.elemSize]...) 
+ captured++ + } + } + c := r.speciesConstructorObj(o, ta.defaultCtor) + ab := r._newArrayBuffer(r.global.ArrayBufferPrototype, nil) + ab.data = buf + kept := r.toConstructor(ta.defaultCtor)([]Value{ab.val}, ta.defaultCtor) + if c == ta.defaultCtor { + return kept + } else { + ret := r.typedArrayCreate(c, []Value{intToValue(int64(captured))}) + keptTa := kept.self.(*typedArrayObject) + for i := 0; i < captured; i++ { + ret.typedArray.set(i, keptTa.typedArray.get(i)) + } + return ret.val + } + } + panic(r.NewTypeError("Method TypedArray.prototype.filter called on incompatible receiver %s", call.This.String())) +} + +func (r *Runtime) typedArrayProto_find(call FunctionCall) Value { + if ta, ok := r.toObject(call.This).self.(*typedArrayObject); ok { + ta.viewedArrayBuf.ensureNotDetached(true) + predicate := r.toCallable(call.Argument(0)) + fc := FunctionCall{ + This: call.Argument(1), + Arguments: []Value{nil, nil, call.This}, + } + for k := 0; k < ta.length; k++ { + ta.viewedArrayBuf.ensureNotDetached(true) + val := ta.typedArray.get(ta.offset + k) + fc.Arguments[0] = val + fc.Arguments[1] = intToValue(int64(k)) + if predicate(fc).ToBoolean() { + return val + } + } + return _undefined + } + panic(r.NewTypeError("Method TypedArray.prototype.find called on incompatible receiver %s", call.This.String())) +} + +func (r *Runtime) typedArrayProto_findIndex(call FunctionCall) Value { + if ta, ok := r.toObject(call.This).self.(*typedArrayObject); ok { + ta.viewedArrayBuf.ensureNotDetached(true) + predicate := r.toCallable(call.Argument(0)) + fc := FunctionCall{ + This: call.Argument(1), + Arguments: []Value{nil, nil, call.This}, + } + for k := 0; k < ta.length; k++ { + ta.viewedArrayBuf.ensureNotDetached(true) + fc.Arguments[0] = ta.typedArray.get(ta.offset + k) + fc.Arguments[1] = intToValue(int64(k)) + if predicate(fc).ToBoolean() { + return fc.Arguments[1] + } + } + return intToValue(-1) + } + panic(r.NewTypeError("Method TypedArray.prototype.findIndex called on 
incompatible receiver %s", call.This.String())) +} + +func (r *Runtime) typedArrayProto_forEach(call FunctionCall) Value { + if ta, ok := r.toObject(call.This).self.(*typedArrayObject); ok { + ta.viewedArrayBuf.ensureNotDetached(true) + callbackFn := r.toCallable(call.Argument(0)) + fc := FunctionCall{ + This: call.Argument(1), + Arguments: []Value{nil, nil, call.This}, + } + for k := 0; k < ta.length; k++ { + ta.viewedArrayBuf.ensureNotDetached(true) + if val := ta.typedArray.get(k); val != nil { + fc.Arguments[0] = val + fc.Arguments[1] = intToValue(int64(k)) + callbackFn(fc) + } + } + return _undefined + } + panic(r.NewTypeError("Method TypedArray.prototype.forEach called on incompatible receiver %s", call.This.String())) +} + +func (r *Runtime) typedArrayProto_includes(call FunctionCall) Value { + if ta, ok := r.toObject(call.This).self.(*typedArrayObject); ok { + ta.viewedArrayBuf.ensureNotDetached(true) + length := int64(ta.length) + if length == 0 { + return valueFalse + } + + n := call.Argument(1).ToInteger() + if n >= length { + return valueFalse + } + + if n < 0 { + n = max(length+n, 0) + } + + ta.viewedArrayBuf.ensureNotDetached(true) + searchElement := call.Argument(0) + if searchElement == _negativeZero { + searchElement = _positiveZero + } + if ta.typedArray.typeMatch(searchElement) { + se := ta.typedArray.toRaw(searchElement) + for k := toIntStrict(n); k < ta.length; k++ { + if ta.typedArray.getRaw(ta.offset+k) == se { + return valueTrue + } + } + } + return valueFalse + } + panic(r.NewTypeError("Method TypedArray.prototype.includes called on incompatible receiver %s", call.This.String())) +} + +func (r *Runtime) typedArrayProto_indexOf(call FunctionCall) Value { + if ta, ok := r.toObject(call.This).self.(*typedArrayObject); ok { + ta.viewedArrayBuf.ensureNotDetached(true) + length := int64(ta.length) + if length == 0 { + return intToValue(-1) + } + + n := call.Argument(1).ToInteger() + if n >= length { + return intToValue(-1) + } + + if n < 0 { + n 
= max(length+n, 0) + } + + ta.viewedArrayBuf.ensureNotDetached(true) + searchElement := call.Argument(0) + if searchElement == _negativeZero { + searchElement = _positiveZero + } + if !IsNaN(searchElement) && ta.typedArray.typeMatch(searchElement) { + se := ta.typedArray.toRaw(searchElement) + for k := toIntStrict(n); k < ta.length; k++ { + if ta.typedArray.getRaw(ta.offset+k) == se { + return intToValue(int64(k)) + } + } + } + return intToValue(-1) + } + panic(r.NewTypeError("Method TypedArray.prototype.indexOf called on incompatible receiver %s", call.This.String())) +} + +func (r *Runtime) typedArrayProto_join(call FunctionCall) Value { + if ta, ok := r.toObject(call.This).self.(*typedArrayObject); ok { + ta.viewedArrayBuf.ensureNotDetached(true) + s := call.Argument(0) + var sep valueString + if s != _undefined { + sep = s.toString() + } else { + sep = asciiString(",") + } + l := ta.length + if l == 0 { + return stringEmpty + } + + var buf valueStringBuilder + + ta.viewedArrayBuf.ensureNotDetached(true) + element0 := ta.typedArray.get(0) + if element0 != nil && element0 != _undefined && element0 != _null { + buf.WriteString(element0.toString()) + } + + for i := 1; i < l; i++ { + ta.viewedArrayBuf.ensureNotDetached(true) + buf.WriteString(sep) + element := ta.typedArray.get(i) + if element != nil && element != _undefined && element != _null { + buf.WriteString(element.toString()) + } + } + + return buf.String() + } + panic(r.NewTypeError("Method TypedArray.prototype.join called on incompatible receiver")) +} + +func (r *Runtime) typedArrayProto_keys(call FunctionCall) Value { + if ta, ok := r.toObject(call.This).self.(*typedArrayObject); ok { + ta.viewedArrayBuf.ensureNotDetached(true) + return r.createArrayIterator(ta.val, iterationKindKey) + } + panic(r.NewTypeError("Method TypedArray.prototype.keys called on incompatible receiver %s", call.This.String())) +} + +func (r *Runtime) typedArrayProto_lastIndexOf(call FunctionCall) Value { + if ta, ok := 
r.toObject(call.This).self.(*typedArrayObject); ok { + ta.viewedArrayBuf.ensureNotDetached(true) + length := int64(ta.length) + if length == 0 { + return intToValue(-1) + } + + var fromIndex int64 + + if len(call.Arguments) < 2 { + fromIndex = length - 1 + } else { + fromIndex = call.Argument(1).ToInteger() + if fromIndex >= 0 { + fromIndex = min(fromIndex, length-1) + } else { + fromIndex += length + if fromIndex < 0 { + fromIndex = -1 // prevent underflow in toIntStrict() on 32-bit platforms + } + } + } + + ta.viewedArrayBuf.ensureNotDetached(true) + searchElement := call.Argument(0) + if searchElement == _negativeZero { + searchElement = _positiveZero + } + if !IsNaN(searchElement) && ta.typedArray.typeMatch(searchElement) { + se := ta.typedArray.toRaw(searchElement) + for k := toIntStrict(fromIndex); k >= 0; k-- { + if ta.typedArray.getRaw(ta.offset+k) == se { + return intToValue(int64(k)) + } + } + } + + return intToValue(-1) + } + panic(r.NewTypeError("Method TypedArray.prototype.lastIndexOf called on incompatible receiver %s", call.This.String())) +} + +func (r *Runtime) typedArrayProto_map(call FunctionCall) Value { + if ta, ok := r.toObject(call.This).self.(*typedArrayObject); ok { + ta.viewedArrayBuf.ensureNotDetached(true) + callbackFn := r.toCallable(call.Argument(0)) + fc := FunctionCall{ + This: call.Argument(1), + Arguments: []Value{nil, nil, call.This}, + } + dst := r.typedArraySpeciesCreate(ta, []Value{intToValue(int64(ta.length))}) + for i := 0; i < ta.length; i++ { + ta.viewedArrayBuf.ensureNotDetached(true) + fc.Arguments[0] = ta.typedArray.get(ta.offset + i) + fc.Arguments[1] = intToValue(int64(i)) + dst.typedArray.set(i, callbackFn(fc)) + } + return dst.val + } + panic(r.NewTypeError("Method TypedArray.prototype.map called on incompatible receiver %s", call.This.String())) +} + +func (r *Runtime) typedArrayProto_reduce(call FunctionCall) Value { + if ta, ok := r.toObject(call.This).self.(*typedArrayObject); ok { + 
ta.viewedArrayBuf.ensureNotDetached(true) + callbackFn := r.toCallable(call.Argument(0)) + fc := FunctionCall{ + This: _undefined, + Arguments: []Value{nil, nil, nil, call.This}, + } + k := 0 + if len(call.Arguments) >= 2 { + fc.Arguments[0] = call.Argument(1) + } else { + if ta.length > 0 { + fc.Arguments[0] = ta.typedArray.get(ta.offset + 0) + k = 1 + } + } + if fc.Arguments[0] == nil { + panic(r.NewTypeError("Reduce of empty array with no initial value")) + } + for ; k < ta.length; k++ { + ta.viewedArrayBuf.ensureNotDetached(true) + idx := valueInt(k) + fc.Arguments[1] = ta.typedArray.get(ta.offset + k) + fc.Arguments[2] = idx + fc.Arguments[0] = callbackFn(fc) + } + return fc.Arguments[0] + } + panic(r.NewTypeError("Method TypedArray.prototype.reduce called on incompatible receiver %s", call.This.String())) +} + +func (r *Runtime) typedArrayProto_reduceRight(call FunctionCall) Value { + if ta, ok := r.toObject(call.This).self.(*typedArrayObject); ok { + ta.viewedArrayBuf.ensureNotDetached(true) + callbackFn := r.toCallable(call.Argument(0)) + fc := FunctionCall{ + This: _undefined, + Arguments: []Value{nil, nil, nil, call.This}, + } + k := ta.length - 1 + if len(call.Arguments) >= 2 { + fc.Arguments[0] = call.Argument(1) + } else { + if k >= 0 { + fc.Arguments[0] = ta.typedArray.get(ta.offset + k) + k-- + } + } + if fc.Arguments[0] == nil { + panic(r.NewTypeError("Reduce of empty array with no initial value")) + } + for ; k >= 0; k-- { + ta.viewedArrayBuf.ensureNotDetached(true) + idx := valueInt(k) + fc.Arguments[1] = ta.typedArray.get(ta.offset + k) + fc.Arguments[2] = idx + fc.Arguments[0] = callbackFn(fc) + } + return fc.Arguments[0] + } + panic(r.NewTypeError("Method TypedArray.prototype.reduceRight called on incompatible receiver %s", call.This.String())) +} + +func (r *Runtime) typedArrayProto_reverse(call FunctionCall) Value { + if ta, ok := r.toObject(call.This).self.(*typedArrayObject); ok { + ta.viewedArrayBuf.ensureNotDetached(true) + l := ta.length 
+ middle := l / 2 + for lower := 0; lower != middle; lower++ { + upper := l - lower - 1 + ta.typedArray.swap(ta.offset+lower, ta.offset+upper) + } + + return call.This + } + panic(r.NewTypeError("Method TypedArray.prototype.reverse called on incompatible receiver %s", call.This.String())) +} + +func (r *Runtime) typedArrayProto_set(call FunctionCall) Value { + if ta, ok := r.toObject(call.This).self.(*typedArrayObject); ok { + srcObj := call.Argument(0).ToObject(r) + targetOffset := toIntStrict(call.Argument(1).ToInteger()) + if targetOffset < 0 { + panic(r.newError(r.global.RangeError, "offset should be >= 0")) + } + ta.viewedArrayBuf.ensureNotDetached(true) + targetLen := ta.length + if src, ok := srcObj.self.(*typedArrayObject); ok { + src.viewedArrayBuf.ensureNotDetached(true) + srcLen := src.length + if x := srcLen + targetOffset; x < 0 || x > targetLen { + panic(r.newError(r.global.RangeError, "Source is too large")) + } + if src.defaultCtor == ta.defaultCtor { + copy(ta.viewedArrayBuf.data[(ta.offset+targetOffset)*ta.elemSize:], + src.viewedArrayBuf.data[src.offset*src.elemSize:(src.offset+srcLen)*src.elemSize]) + } else { + curSrc := uintptr(unsafe.Pointer(&src.viewedArrayBuf.data[src.offset*src.elemSize])) + endSrc := curSrc + uintptr(srcLen*src.elemSize) + curDst := uintptr(unsafe.Pointer(&ta.viewedArrayBuf.data[(ta.offset+targetOffset)*ta.elemSize])) + dstOffset := ta.offset + targetOffset + srcOffset := src.offset + if ta.elemSize == src.elemSize { + if curDst <= curSrc || curDst >= endSrc { + for i := 0; i < srcLen; i++ { + ta.typedArray.set(dstOffset+i, src.typedArray.get(srcOffset+i)) + } + } else { + for i := srcLen - 1; i >= 0; i-- { + ta.typedArray.set(dstOffset+i, src.typedArray.get(srcOffset+i)) + } + } + } else { + x := int(curDst-curSrc) / (src.elemSize - ta.elemSize) + if x < 0 { + x = 0 + } else if x > srcLen { + x = srcLen + } + if ta.elemSize < src.elemSize { + for i := x; i < srcLen; i++ { + ta.typedArray.set(dstOffset+i, 
src.typedArray.get(srcOffset+i)) + } + for i := x - 1; i >= 0; i-- { + ta.typedArray.set(dstOffset+i, src.typedArray.get(srcOffset+i)) + } + } else { + for i := 0; i < x; i++ { + ta.typedArray.set(dstOffset+i, src.typedArray.get(srcOffset+i)) + } + for i := srcLen - 1; i >= x; i-- { + ta.typedArray.set(dstOffset+i, src.typedArray.get(srcOffset+i)) + } + } + } + } + } else { + targetLen := ta.length + srcLen := toIntStrict(toLength(srcObj.self.getStr("length", nil))) + if x := srcLen + targetOffset; x < 0 || x > targetLen { + panic(r.newError(r.global.RangeError, "Source is too large")) + } + for i := 0; i < srcLen; i++ { + val := nilSafe(srcObj.self.getIdx(valueInt(i), nil)) + ta.viewedArrayBuf.ensureNotDetached(true) + ta.typedArray.set(targetOffset+i, val) + } + } + return _undefined + } + panic(r.NewTypeError("Method TypedArray.prototype.set called on incompatible receiver %s", call.This.String())) +} + +func (r *Runtime) typedArrayProto_slice(call FunctionCall) Value { + if ta, ok := r.toObject(call.This).self.(*typedArrayObject); ok { + ta.viewedArrayBuf.ensureNotDetached(true) + length := int64(ta.length) + start := toIntStrict(relToIdx(call.Argument(0).ToInteger(), length)) + var e int64 + if endArg := call.Argument(1); endArg != _undefined { + e = endArg.ToInteger() + } else { + e = length + } + end := toIntStrict(relToIdx(e, length)) + + count := end - start + if count < 0 { + count = 0 + } + dst := r.typedArraySpeciesCreate(ta, []Value{intToValue(int64(count))}) + if dst.defaultCtor == ta.defaultCtor { + if count > 0 { + ta.viewedArrayBuf.ensureNotDetached(true) + offset := ta.offset + elemSize := ta.elemSize + copy(dst.viewedArrayBuf.data, ta.viewedArrayBuf.data[(offset+start)*elemSize:(offset+start+count)*elemSize]) + } + } else { + for i := 0; i < count; i++ { + ta.viewedArrayBuf.ensureNotDetached(true) + dst.typedArray.set(i, ta.typedArray.get(ta.offset+start+i)) + } + } + return dst.val + } + panic(r.NewTypeError("Method TypedArray.prototype.slice 
called on incompatible receiver %s", call.This.String())) +} + +func (r *Runtime) typedArrayProto_some(call FunctionCall) Value { + if ta, ok := r.toObject(call.This).self.(*typedArrayObject); ok { + ta.viewedArrayBuf.ensureNotDetached(true) + callbackFn := r.toCallable(call.Argument(0)) + fc := FunctionCall{ + This: call.Argument(1), + Arguments: []Value{nil, nil, call.This}, + } + for k := 0; k < ta.length; k++ { + ta.viewedArrayBuf.ensureNotDetached(true) + fc.Arguments[0] = ta.typedArray.get(ta.offset + k) + fc.Arguments[1] = intToValue(int64(k)) + if callbackFn(fc).ToBoolean() { + return valueTrue + } + } + return valueFalse + } + panic(r.NewTypeError("Method TypedArray.prototype.some called on incompatible receiver %s", call.This.String())) +} + +func (r *Runtime) typedArrayProto_sort(call FunctionCall) Value { + if ta, ok := r.toObject(call.This).self.(*typedArrayObject); ok { + ta.viewedArrayBuf.ensureNotDetached(true) + var compareFn func(FunctionCall) Value + + if arg := call.Argument(0); arg != _undefined { + compareFn = r.toCallable(arg) + } + + ctx := typedArraySortCtx{ + ta: ta, + compare: compareFn, + } + + sort.Stable(&ctx) + return call.This + } + panic(r.NewTypeError("Method TypedArray.prototype.sort called on incompatible receiver %s", call.This.String())) +} + +func (r *Runtime) typedArrayProto_subarray(call FunctionCall) Value { + if ta, ok := r.toObject(call.This).self.(*typedArrayObject); ok { + l := int64(ta.length) + beginIdx := relToIdx(call.Argument(0).ToInteger(), l) + var relEnd int64 + if endArg := call.Argument(1); endArg != _undefined { + relEnd = endArg.ToInteger() + } else { + relEnd = l + } + endIdx := relToIdx(relEnd, l) + newLen := max(endIdx-beginIdx, 0) + return r.typedArraySpeciesCreate(ta, []Value{ta.viewedArrayBuf.val, + intToValue((int64(ta.offset) + beginIdx) * int64(ta.elemSize)), + intToValue(newLen), + }).val + } + panic(r.NewTypeError("Method TypedArray.prototype.subarray called on incompatible receiver %s", 
call.This.String())) +} + +func (r *Runtime) typedArrayProto_toLocaleString(call FunctionCall) Value { + if ta, ok := r.toObject(call.This).self.(*typedArrayObject); ok { + length := ta.length + var buf valueStringBuilder + for i := 0; i < length; i++ { + ta.viewedArrayBuf.ensureNotDetached(true) + if i > 0 { + buf.WriteRune(',') + } + item := ta.typedArray.get(i) + r.writeItemLocaleString(item, &buf) + } + return buf.String() + } + panic(r.NewTypeError("Method TypedArray.prototype.toLocaleString called on incompatible receiver %s", call.This.String())) +} + +func (r *Runtime) typedArrayProto_values(call FunctionCall) Value { + if ta, ok := r.toObject(call.This).self.(*typedArrayObject); ok { + ta.viewedArrayBuf.ensureNotDetached(true) + return r.createArrayIterator(ta.val, iterationKindValue) + } + panic(r.NewTypeError("Method TypedArray.prototype.values called on incompatible receiver %s", call.This.String())) +} + +func (r *Runtime) typedArrayProto_toStringTag(call FunctionCall) Value { + if obj, ok := call.This.(*Object); ok { + if ta, ok := obj.self.(*typedArrayObject); ok { + return ta.defaultCtor.self.getStr("name", nil) + } + } + + return _undefined +} + +func (r *Runtime) newTypedArray([]Value, *Object) *Object { + panic(r.NewTypeError("Abstract class TypedArray not directly constructable")) +} + +func (r *Runtime) typedArray_from(call FunctionCall) Value { + mapFn := call.Argument(1) + if mapFn == _undefined { + mapFn = nil + } + return r.typedArrayFrom(r.toObject(call.This), call.Argument(0).ToObject(r), mapFn, call.Argument(2)) +} + +func (r *Runtime) typedArray_of(call FunctionCall) Value { + ta := r.typedArrayCreate(r.toObject(call.This), []Value{intToValue(int64(len(call.Arguments)))}) + for i, val := range call.Arguments { + ta.typedArray.set(i, val) + } + return ta.val +} + +func (r *Runtime) allocateTypedArray(newTarget *Object, length int, taCtor typedArrayObjectCtor) *Object { + buf := r._newArrayBuffer(r.global.ArrayBufferPrototype, nil) + ta 
:= taCtor(buf, 0, length, r.getPrototypeFromCtor(newTarget, nil, r.global.TypedArrayPrototype)) + if length > 0 { + buf.data = allocByteSlice(length * ta.elemSize) + } + return ta.val +} + +func (r *Runtime) typedArraySpeciesCreate(ta *typedArrayObject, args []Value) *typedArrayObject { + return r.typedArrayCreate(r.speciesConstructorObj(ta.val, ta.defaultCtor), args) +} + +func (r *Runtime) typedArrayCreate(ctor *Object, args []Value) *typedArrayObject { + o := r.toConstructor(ctor)(args, ctor) + if ta, ok := o.self.(*typedArrayObject); ok { + ta.viewedArrayBuf.ensureNotDetached(true) + if len(args) == 1 { + if l, ok := args[0].(valueInt); ok { + if ta.length < int(l) { + panic(r.NewTypeError("Derived TypedArray constructor created an array which was too small")) + } + } + } + return ta + } + panic(r.NewTypeError("Invalid TypedArray: %s", o)) +} + +func (r *Runtime) typedArrayFrom(ctor, items *Object, mapFn, thisValue Value) *Object { + var mapFc func(call FunctionCall) Value + if mapFn != nil { + mapFc = r.toCallable(mapFn) + if thisValue == nil { + thisValue = _undefined + } + } + usingIter := toMethod(items.self.getSym(SymIterator, nil)) + if usingIter != nil { + iter := r.getIterator(items, usingIter) + var values []Value + r.iterate(iter, func(item Value) { + values = append(values, item) + }) + ta := r.typedArrayCreate(ctor, []Value{intToValue(int64(len(values)))}) + if mapFc == nil { + for idx, val := range values { + ta.typedArray.set(idx, val) + } + } else { + fc := FunctionCall{ + This: thisValue, + Arguments: []Value{nil, nil}, + } + for idx, val := range values { + fc.Arguments[0], fc.Arguments[1] = val, intToValue(int64(idx)) + val = mapFc(fc) + ta.typedArray.set(idx, val) + } + } + return ta.val + } + length := toIntStrict(toLength(items.self.getStr("length", nil))) + ta := r.typedArrayCreate(ctor, []Value{intToValue(int64(length))}) + if mapFc == nil { + for i := 0; i < length; i++ { + ta.typedArray.set(i, nilSafe(items.self.getIdx(valueInt(i), 
nil))) + } + } else { + fc := FunctionCall{ + This: thisValue, + Arguments: []Value{nil, nil}, + } + for i := 0; i < length; i++ { + idx := valueInt(i) + fc.Arguments[0], fc.Arguments[1] = items.self.getIdx(idx, nil), idx + ta.typedArray.set(i, mapFc(fc)) + } + } + return ta.val +} + +func (r *Runtime) _newTypedArrayFromArrayBuffer(ab *arrayBufferObject, args []Value, newTarget *Object, taCtor typedArrayObjectCtor) *Object { + ta := taCtor(ab, 0, 0, r.getPrototypeFromCtor(newTarget, nil, r.global.TypedArrayPrototype)) + var byteOffset int + if len(args) > 1 && args[1] != nil && args[1] != _undefined { + byteOffset = r.toIndex(args[1]) + if byteOffset%ta.elemSize != 0 { + panic(r.newError(r.global.RangeError, "Start offset of %s should be a multiple of %d", newTarget.self.getStr("name", nil), ta.elemSize)) + } + } + ab.ensureNotDetached(true) + var length int + if len(args) > 2 && args[2] != nil && args[2] != _undefined { + length = r.toIndex(args[2]) + if byteOffset+length*ta.elemSize > len(ab.data) { + panic(r.newError(r.global.RangeError, "Invalid typed array length: %d", length)) + } + } else { + if len(ab.data)%ta.elemSize != 0 { + panic(r.newError(r.global.RangeError, "Byte length of %s should be a multiple of %d", newTarget.self.getStr("name", nil), ta.elemSize)) + } + length = (len(ab.data) - byteOffset) / ta.elemSize + } + ta.offset = byteOffset / ta.elemSize + ta.length = length + return ta.val +} + +func (r *Runtime) _newTypedArrayFromTypedArray(src *typedArrayObject, newTarget *Object) *Object { + dst := r.typedArrayCreate(newTarget, []Value{_positiveZero}) + src.viewedArrayBuf.ensureNotDetached(true) + l := src.length + dst.viewedArrayBuf.prototype = r.getPrototypeFromCtor(r.toObject(src.viewedArrayBuf.getStr("constructor", nil)), r.global.ArrayBuffer, r.global.ArrayBufferPrototype) + dst.viewedArrayBuf.data = allocByteSlice(toIntStrict(int64(l) * int64(dst.elemSize))) + if src.defaultCtor == dst.defaultCtor { + copy(dst.viewedArrayBuf.data, 
src.viewedArrayBuf.data[src.offset*src.elemSize:]) + dst.length = src.length + return dst.val + } + dst.length = l + for i := 0; i < l; i++ { + dst.typedArray.set(i, src.typedArray.get(src.offset+i)) + } + return dst.val +} + +func (r *Runtime) _newTypedArray(args []Value, newTarget *Object, taCtor typedArrayObjectCtor) *Object { + if newTarget == nil { + panic(r.needNew("TypedArray")) + } + if len(args) > 0 { + if obj, ok := args[0].(*Object); ok { + switch o := obj.self.(type) { + case *arrayBufferObject: + return r._newTypedArrayFromArrayBuffer(o, args, newTarget, taCtor) + case *typedArrayObject: + return r._newTypedArrayFromTypedArray(o, newTarget) + default: + return r.typedArrayFrom(newTarget, obj, nil, nil) + } + } + } + var l int + if len(args) > 0 { + if arg0 := args[0]; arg0 != nil { + l = r.toIndex(arg0) + } + } + return r.allocateTypedArray(newTarget, l, taCtor) +} + +func (r *Runtime) newUint8Array(args []Value, newTarget *Object) *Object { + return r._newTypedArray(args, newTarget, r.newUint8ArrayObject) +} + +func (r *Runtime) newUint8ClampedArray(args []Value, newTarget *Object) *Object { + return r._newTypedArray(args, newTarget, r.newUint8ClampedArrayObject) +} + +func (r *Runtime) newInt8Array(args []Value, newTarget *Object) *Object { + return r._newTypedArray(args, newTarget, r.newInt8ArrayObject) +} + +func (r *Runtime) newUint16Array(args []Value, newTarget *Object) *Object { + return r._newTypedArray(args, newTarget, r.newUint16ArrayObject) +} + +func (r *Runtime) newInt16Array(args []Value, newTarget *Object) *Object { + return r._newTypedArray(args, newTarget, r.newInt16ArrayObject) +} + +func (r *Runtime) newUint32Array(args []Value, newTarget *Object) *Object { + return r._newTypedArray(args, newTarget, r.newUint32ArrayObject) +} + +func (r *Runtime) newInt32Array(args []Value, newTarget *Object) *Object { + return r._newTypedArray(args, newTarget, r.newInt32ArrayObject) +} + +func (r *Runtime) newFloat32Array(args []Value, newTarget 
*Object) *Object { + return r._newTypedArray(args, newTarget, r.newFloat32ArrayObject) +} + +func (r *Runtime) newFloat64Array(args []Value, newTarget *Object) *Object { + return r._newTypedArray(args, newTarget, r.newFloat64ArrayObject) +} + +func (r *Runtime) createArrayBufferProto(val *Object) objectImpl { + b := newBaseObjectObj(val, r.global.ObjectPrototype, classObject) + byteLengthProp := &valueProperty{ + accessor: true, + configurable: true, + getterFunc: r.newNativeFunc(r.arrayBufferProto_getByteLength, nil, "get byteLength", nil, 0), + } + b._put("byteLength", byteLengthProp) + b._putProp("constructor", r.global.ArrayBuffer, true, false, true) + b._putProp("slice", r.newNativeFunc(r.arrayBufferProto_slice, nil, "slice", nil, 2), true, false, true) + b._putSym(SymToStringTag, valueProp(asciiString("ArrayBuffer"), false, false, true)) + return b +} + +func (r *Runtime) createArrayBuffer(val *Object) objectImpl { + o := r.newNativeConstructOnly(val, r.builtin_newArrayBuffer, r.global.ArrayBufferPrototype, "ArrayBuffer", 1) + o._putProp("isView", r.newNativeFunc(r.arrayBuffer_isView, nil, "isView", nil, 1), true, false, true) + o._putSym(SymSpecies, &valueProperty{ + getterFunc: r.newNativeFunc(r.returnThis, nil, "get [Symbol.species]", nil, 0), + accessor: true, + configurable: true, + }) + return o +} + +func (r *Runtime) createDataViewProto(val *Object) objectImpl { + b := newBaseObjectObj(val, r.global.ObjectPrototype, classObject) + b._put("buffer", &valueProperty{ + accessor: true, + configurable: true, + getterFunc: r.newNativeFunc(r.dataViewProto_getBuffer, nil, "get buffer", nil, 0), + }) + b._put("byteLength", &valueProperty{ + accessor: true, + configurable: true, + getterFunc: r.newNativeFunc(r.dataViewProto_getByteLen, nil, "get byteLength", nil, 0), + }) + b._put("byteOffset", &valueProperty{ + accessor: true, + configurable: true, + getterFunc: r.newNativeFunc(r.dataViewProto_getByteOffset, nil, "get byteOffset", nil, 0), + }) + 
b._putProp("constructor", r.global.DataView, true, false, true) + b._putProp("getFloat32", r.newNativeFunc(r.dataViewProto_getFloat32, nil, "getFloat32", nil, 1), true, false, true) + b._putProp("getFloat64", r.newNativeFunc(r.dataViewProto_getFloat64, nil, "getFloat64", nil, 1), true, false, true) + b._putProp("getInt8", r.newNativeFunc(r.dataViewProto_getInt8, nil, "getInt8", nil, 1), true, false, true) + b._putProp("getInt16", r.newNativeFunc(r.dataViewProto_getInt16, nil, "getInt16", nil, 1), true, false, true) + b._putProp("getInt32", r.newNativeFunc(r.dataViewProto_getInt32, nil, "getInt32", nil, 1), true, false, true) + b._putProp("getUint8", r.newNativeFunc(r.dataViewProto_getUint8, nil, "getUint8", nil, 1), true, false, true) + b._putProp("getUint16", r.newNativeFunc(r.dataViewProto_getUint16, nil, "getUint16", nil, 1), true, false, true) + b._putProp("getUint32", r.newNativeFunc(r.dataViewProto_getUint32, nil, "getUint32", nil, 1), true, false, true) + b._putProp("setFloat32", r.newNativeFunc(r.dataViewProto_setFloat32, nil, "setFloat32", nil, 2), true, false, true) + b._putProp("setFloat64", r.newNativeFunc(r.dataViewProto_setFloat64, nil, "setFloat64", nil, 2), true, false, true) + b._putProp("setInt8", r.newNativeFunc(r.dataViewProto_setInt8, nil, "setInt8", nil, 2), true, false, true) + b._putProp("setInt16", r.newNativeFunc(r.dataViewProto_setInt16, nil, "setInt16", nil, 2), true, false, true) + b._putProp("setInt32", r.newNativeFunc(r.dataViewProto_setInt32, nil, "setInt32", nil, 2), true, false, true) + b._putProp("setUint8", r.newNativeFunc(r.dataViewProto_setUint8, nil, "setUint8", nil, 2), true, false, true) + b._putProp("setUint16", r.newNativeFunc(r.dataViewProto_setUint16, nil, "setUint16", nil, 2), true, false, true) + b._putProp("setUint32", r.newNativeFunc(r.dataViewProto_setUint32, nil, "setUint32", nil, 2), true, false, true) + b._putSym(SymToStringTag, valueProp(asciiString("DataView"), false, false, true)) + + return b +} + +func (r 
*Runtime) createDataView(val *Object) objectImpl { + o := r.newNativeConstructOnly(val, r.newDataView, r.global.DataViewPrototype, "DataView", 3) + return o +} + +func (r *Runtime) createTypedArrayProto(val *Object) objectImpl { + b := newBaseObjectObj(val, r.global.ObjectPrototype, classObject) + b._put("buffer", &valueProperty{ + accessor: true, + configurable: true, + getterFunc: r.newNativeFunc(r.typedArrayProto_getBuffer, nil, "get buffer", nil, 0), + }) + b._put("byteLength", &valueProperty{ + accessor: true, + configurable: true, + getterFunc: r.newNativeFunc(r.typedArrayProto_getByteLen, nil, "get byteLength", nil, 0), + }) + b._put("byteOffset", &valueProperty{ + accessor: true, + configurable: true, + getterFunc: r.newNativeFunc(r.typedArrayProto_getByteOffset, nil, "get byteOffset", nil, 0), + }) + b._putProp("constructor", r.global.TypedArray, true, false, true) + b._putProp("copyWithin", r.newNativeFunc(r.typedArrayProto_copyWithin, nil, "copyWithin", nil, 2), true, false, true) + b._putProp("entries", r.newNativeFunc(r.typedArrayProto_entries, nil, "entries", nil, 0), true, false, true) + b._putProp("every", r.newNativeFunc(r.typedArrayProto_every, nil, "every", nil, 1), true, false, true) + b._putProp("fill", r.newNativeFunc(r.typedArrayProto_fill, nil, "fill", nil, 1), true, false, true) + b._putProp("filter", r.newNativeFunc(r.typedArrayProto_filter, nil, "filter", nil, 1), true, false, true) + b._putProp("find", r.newNativeFunc(r.typedArrayProto_find, nil, "find", nil, 1), true, false, true) + b._putProp("findIndex", r.newNativeFunc(r.typedArrayProto_findIndex, nil, "findIndex", nil, 1), true, false, true) + b._putProp("forEach", r.newNativeFunc(r.typedArrayProto_forEach, nil, "forEach", nil, 1), true, false, true) + b._putProp("includes", r.newNativeFunc(r.typedArrayProto_includes, nil, "includes", nil, 1), true, false, true) + b._putProp("indexOf", r.newNativeFunc(r.typedArrayProto_indexOf, nil, "indexOf", nil, 1), true, false, true) + 
b._putProp("join", r.newNativeFunc(r.typedArrayProto_join, nil, "join", nil, 1), true, false, true) + b._putProp("keys", r.newNativeFunc(r.typedArrayProto_keys, nil, "keys", nil, 0), true, false, true) + b._putProp("lastIndexOf", r.newNativeFunc(r.typedArrayProto_lastIndexOf, nil, "lastIndexOf", nil, 1), true, false, true) + b._put("length", &valueProperty{ + accessor: true, + configurable: true, + getterFunc: r.newNativeFunc(r.typedArrayProto_getLength, nil, "get length", nil, 0), + }) + b._putProp("map", r.newNativeFunc(r.typedArrayProto_map, nil, "map", nil, 1), true, false, true) + b._putProp("reduce", r.newNativeFunc(r.typedArrayProto_reduce, nil, "reduce", nil, 1), true, false, true) + b._putProp("reduceRight", r.newNativeFunc(r.typedArrayProto_reduceRight, nil, "reduceRight", nil, 1), true, false, true) + b._putProp("reverse", r.newNativeFunc(r.typedArrayProto_reverse, nil, "reverse", nil, 0), true, false, true) + b._putProp("set", r.newNativeFunc(r.typedArrayProto_set, nil, "set", nil, 1), true, false, true) + b._putProp("slice", r.newNativeFunc(r.typedArrayProto_slice, nil, "slice", nil, 2), true, false, true) + b._putProp("some", r.newNativeFunc(r.typedArrayProto_some, nil, "some", nil, 1), true, false, true) + b._putProp("sort", r.newNativeFunc(r.typedArrayProto_sort, nil, "sort", nil, 1), true, false, true) + b._putProp("subarray", r.newNativeFunc(r.typedArrayProto_subarray, nil, "subarray", nil, 2), true, false, true) + b._putProp("toLocaleString", r.newNativeFunc(r.typedArrayProto_toLocaleString, nil, "toLocaleString", nil, 0), true, false, true) + b._putProp("toString", r.global.arrayToString, true, false, true) + valuesFunc := r.newNativeFunc(r.typedArrayProto_values, nil, "values", nil, 0) + b._putProp("values", valuesFunc, true, false, true) + b._putSym(SymIterator, valueProp(valuesFunc, true, false, true)) + b._putSym(SymToStringTag, &valueProperty{ + getterFunc: r.newNativeFunc(r.typedArrayProto_toStringTag, nil, "get [Symbol.toStringTag]", nil, 
0), + accessor: true, + configurable: true, + }) + + return b +} + +func (r *Runtime) createTypedArray(val *Object) objectImpl { + o := r.newNativeConstructOnly(val, r.newTypedArray, r.global.TypedArrayPrototype, "TypedArray", 0) + o._putProp("from", r.newNativeFunc(r.typedArray_from, nil, "from", nil, 1), true, false, true) + o._putProp("of", r.newNativeFunc(r.typedArray_of, nil, "of", nil, 0), true, false, true) + o._putSym(SymSpecies, &valueProperty{ + getterFunc: r.newNativeFunc(r.returnThis, nil, "get [Symbol.species]", nil, 0), + accessor: true, + configurable: true, + }) + + return o +} + +func (r *Runtime) addPrototype(ctor *Object, proto *Object) *baseObject { + p := r.newBaseObject(proto, classObject) + p._putProp("constructor", ctor, true, false, true) + ctor.self._putProp("prototype", p.val, false, false, false) + return p +} + +func (r *Runtime) typedArrayCreator(ctor func(args []Value, newTarget *Object) *Object, name unistring.String, bytesPerElement int) func(val *Object) objectImpl { + return func(val *Object) objectImpl { + o := r.newNativeConstructOnly(val, ctor, nil, name, 3) + o.prototype = r.global.TypedArray + proto := r.addPrototype(o.val, r.global.TypedArrayPrototype) + bpe := intToValue(int64(bytesPerElement)) + o._putProp("BYTES_PER_ELEMENT", bpe, false, false, false) + proto._putProp("BYTES_PER_ELEMENT", bpe, false, false, false) + return o + } +} + +func (r *Runtime) initTypedArrays() { + + r.global.ArrayBufferPrototype = r.newLazyObject(r.createArrayBufferProto) + r.global.ArrayBuffer = r.newLazyObject(r.createArrayBuffer) + r.addToGlobal("ArrayBuffer", r.global.ArrayBuffer) + + r.global.DataViewPrototype = r.newLazyObject(r.createDataViewProto) + r.global.DataView = r.newLazyObject(r.createDataView) + r.addToGlobal("DataView", r.global.DataView) + + r.global.TypedArrayPrototype = r.newLazyObject(r.createTypedArrayProto) + r.global.TypedArray = r.newLazyObject(r.createTypedArray) + + r.global.Uint8Array = 
r.newLazyObject(r.typedArrayCreator(r.newUint8Array, "Uint8Array", 1)) + r.addToGlobal("Uint8Array", r.global.Uint8Array) + + r.global.Uint8ClampedArray = r.newLazyObject(r.typedArrayCreator(r.newUint8ClampedArray, "Uint8ClampedArray", 1)) + r.addToGlobal("Uint8ClampedArray", r.global.Uint8ClampedArray) + + r.global.Int8Array = r.newLazyObject(r.typedArrayCreator(r.newInt8Array, "Int8Array", 1)) + r.addToGlobal("Int8Array", r.global.Int8Array) + + r.global.Uint16Array = r.newLazyObject(r.typedArrayCreator(r.newUint16Array, "Uint16Array", 2)) + r.addToGlobal("Uint16Array", r.global.Uint16Array) + + r.global.Int16Array = r.newLazyObject(r.typedArrayCreator(r.newInt16Array, "Int16Array", 2)) + r.addToGlobal("Int16Array", r.global.Int16Array) + + r.global.Uint32Array = r.newLazyObject(r.typedArrayCreator(r.newUint32Array, "Uint32Array", 4)) + r.addToGlobal("Uint32Array", r.global.Uint32Array) + + r.global.Int32Array = r.newLazyObject(r.typedArrayCreator(r.newInt32Array, "Int32Array", 4)) + r.addToGlobal("Int32Array", r.global.Int32Array) + + r.global.Float32Array = r.newLazyObject(r.typedArrayCreator(r.newFloat32Array, "Float32Array", 4)) + r.addToGlobal("Float32Array", r.global.Float32Array) + + r.global.Float64Array = r.newLazyObject(r.typedArrayCreator(r.newFloat64Array, "Float64Array", 8)) + r.addToGlobal("Float64Array", r.global.Float64Array) +} diff --git a/vendor/github.com/dop251/goja/builtin_weakmap.go b/vendor/github.com/dop251/goja/builtin_weakmap.go new file mode 100644 index 0000000000..a32c5c88ba --- /dev/null +++ b/vendor/github.com/dop251/goja/builtin_weakmap.go @@ -0,0 +1,174 @@ +package goja + +type weakMap uint64 + +type weakMapObject struct { + baseObject + m weakMap +} + +func (wmo *weakMapObject) init() { + wmo.baseObject.init() + wmo.m = weakMap(wmo.val.runtime.genId()) +} + +func (wm weakMap) set(key *Object, value Value) { + key.getWeakRefs()[wm] = value +} + +func (wm weakMap) get(key *Object) Value { + return key.weakRefs[wm] +} + +func (wm 
weakMap) remove(key *Object) bool { + if _, exists := key.weakRefs[wm]; exists { + delete(key.weakRefs, wm) + return true + } + return false +} + +func (wm weakMap) has(key *Object) bool { + _, exists := key.weakRefs[wm] + return exists +} + +func (r *Runtime) weakMapProto_delete(call FunctionCall) Value { + thisObj := r.toObject(call.This) + wmo, ok := thisObj.self.(*weakMapObject) + if !ok { + panic(r.NewTypeError("Method WeakMap.prototype.delete called on incompatible receiver %s", thisObj.String())) + } + key, ok := call.Argument(0).(*Object) + if ok && wmo.m.remove(key) { + return valueTrue + } + return valueFalse +} + +func (r *Runtime) weakMapProto_get(call FunctionCall) Value { + thisObj := r.toObject(call.This) + wmo, ok := thisObj.self.(*weakMapObject) + if !ok { + panic(r.NewTypeError("Method WeakMap.prototype.get called on incompatible receiver %s", thisObj.String())) + } + var res Value + if key, ok := call.Argument(0).(*Object); ok { + res = wmo.m.get(key) + } + if res == nil { + return _undefined + } + return res +} + +func (r *Runtime) weakMapProto_has(call FunctionCall) Value { + thisObj := r.toObject(call.This) + wmo, ok := thisObj.self.(*weakMapObject) + if !ok { + panic(r.NewTypeError("Method WeakMap.prototype.has called on incompatible receiver %s", thisObj.String())) + } + key, ok := call.Argument(0).(*Object) + if ok && wmo.m.has(key) { + return valueTrue + } + return valueFalse +} + +func (r *Runtime) weakMapProto_set(call FunctionCall) Value { + thisObj := r.toObject(call.This) + wmo, ok := thisObj.self.(*weakMapObject) + if !ok { + panic(r.NewTypeError("Method WeakMap.prototype.set called on incompatible receiver %s", thisObj.String())) + } + key := r.toObject(call.Argument(0)) + wmo.m.set(key, call.Argument(1)) + return call.This +} + +func (r *Runtime) needNew(name string) *Object { + return r.NewTypeError("Constructor %s requires 'new'", name) +} + +func (r *Runtime) getPrototypeFromCtor(newTarget, defCtor, defProto *Object) *Object { + 
if newTarget == defCtor { + return defProto + } + proto := newTarget.self.getStr("prototype", nil) + if obj, ok := proto.(*Object); ok { + return obj + } + return defProto +} + +func (r *Runtime) builtin_newWeakMap(args []Value, newTarget *Object) *Object { + if newTarget == nil { + panic(r.needNew("WeakMap")) + } + proto := r.getPrototypeFromCtor(newTarget, r.global.WeakMap, r.global.WeakMapPrototype) + o := &Object{runtime: r} + + wmo := &weakMapObject{} + wmo.class = classWeakMap + wmo.val = o + wmo.extensible = true + o.self = wmo + wmo.prototype = proto + wmo.init() + if len(args) > 0 { + if arg := args[0]; arg != nil && arg != _undefined && arg != _null { + adder := wmo.getStr("set", nil) + iter := r.getIterator(arg, nil) + i0 := valueInt(0) + i1 := valueInt(1) + if adder == r.global.weakMapAdder { + r.iterate(iter, func(item Value) { + itemObj := r.toObject(item) + k := itemObj.self.getIdx(i0, nil) + v := nilSafe(itemObj.self.getIdx(i1, nil)) + wmo.m.set(r.toObject(k), v) + }) + } else { + adderFn := toMethod(adder) + if adderFn == nil { + panic(r.NewTypeError("WeakMap.set in missing")) + } + r.iterate(iter, func(item Value) { + itemObj := r.toObject(item) + k := itemObj.self.getIdx(i0, nil) + v := itemObj.self.getIdx(i1, nil) + adderFn(FunctionCall{This: o, Arguments: []Value{k, v}}) + }) + } + } + } + return o +} + +func (r *Runtime) createWeakMapProto(val *Object) objectImpl { + o := newBaseObjectObj(val, r.global.ObjectPrototype, classObject) + + o._putProp("constructor", r.global.WeakMap, true, false, true) + r.global.weakMapAdder = r.newNativeFunc(r.weakMapProto_set, nil, "set", nil, 2) + o._putProp("set", r.global.weakMapAdder, true, false, true) + o._putProp("delete", r.newNativeFunc(r.weakMapProto_delete, nil, "delete", nil, 1), true, false, true) + o._putProp("has", r.newNativeFunc(r.weakMapProto_has, nil, "has", nil, 1), true, false, true) + o._putProp("get", r.newNativeFunc(r.weakMapProto_get, nil, "get", nil, 1), true, false, true) + + 
o._putSym(SymToStringTag, valueProp(asciiString(classWeakMap), false, false, true)) + + return o +} + +func (r *Runtime) createWeakMap(val *Object) objectImpl { + o := r.newNativeConstructOnly(val, r.builtin_newWeakMap, r.global.WeakMapPrototype, "WeakMap", 0) + + return o +} + +func (r *Runtime) initWeakMap() { + r.global.WeakMapPrototype = r.newLazyObject(r.createWeakMapProto) + r.global.WeakMap = r.newLazyObject(r.createWeakMap) + + r.addToGlobal("WeakMap", r.global.WeakMap) +} diff --git a/vendor/github.com/dop251/goja/builtin_weakset.go b/vendor/github.com/dop251/goja/builtin_weakset.go new file mode 100644 index 0000000000..25c99e7718 --- /dev/null +++ b/vendor/github.com/dop251/goja/builtin_weakset.go @@ -0,0 +1,116 @@ +package goja + +type weakSetObject struct { + baseObject + s weakMap +} + +func (ws *weakSetObject) init() { + ws.baseObject.init() + ws.s = weakMap(ws.val.runtime.genId()) +} + +func (r *Runtime) weakSetProto_add(call FunctionCall) Value { + thisObj := r.toObject(call.This) + wso, ok := thisObj.self.(*weakSetObject) + if !ok { + panic(r.NewTypeError("Method WeakSet.prototype.add called on incompatible receiver %s", thisObj.String())) + } + wso.s.set(r.toObject(call.Argument(0)), nil) + return call.This +} + +func (r *Runtime) weakSetProto_delete(call FunctionCall) Value { + thisObj := r.toObject(call.This) + wso, ok := thisObj.self.(*weakSetObject) + if !ok { + panic(r.NewTypeError("Method WeakSet.prototype.delete called on incompatible receiver %s", thisObj.String())) + } + obj, ok := call.Argument(0).(*Object) + if ok && wso.s.remove(obj) { + return valueTrue + } + return valueFalse +} + +func (r *Runtime) weakSetProto_has(call FunctionCall) Value { + thisObj := r.toObject(call.This) + wso, ok := thisObj.self.(*weakSetObject) + if !ok { + panic(r.NewTypeError("Method WeakSet.prototype.has called on incompatible receiver %s", thisObj.String())) + } + obj, ok := call.Argument(0).(*Object) + if ok && wso.s.has(obj) { + return valueTrue + } + 
return valueFalse +} + +func (r *Runtime) populateWeakSetGeneric(s *Object, adderValue Value, iterable Value) { + adder := toMethod(adderValue) + if adder == nil { + panic(r.NewTypeError("WeakSet.add is not set")) + } + iter := r.getIterator(iterable, nil) + r.iterate(iter, func(val Value) { + adder(FunctionCall{This: s, Arguments: []Value{val}}) + }) +} + +func (r *Runtime) builtin_newWeakSet(args []Value, newTarget *Object) *Object { + if newTarget == nil { + panic(r.needNew("WeakSet")) + } + proto := r.getPrototypeFromCtor(newTarget, r.global.WeakSet, r.global.WeakSetPrototype) + o := &Object{runtime: r} + + wso := &weakSetObject{} + wso.class = classWeakSet + wso.val = o + wso.extensible = true + o.self = wso + wso.prototype = proto + wso.init() + if len(args) > 0 { + if arg := args[0]; arg != nil && arg != _undefined && arg != _null { + adder := wso.getStr("add", nil) + if adder == r.global.weakSetAdder { + if arr := r.checkStdArrayIter(arg); arr != nil { + for _, v := range arr.values { + wso.s.set(r.toObject(v), nil) + } + return o + } + } + r.populateWeakSetGeneric(o, adder, arg) + } + } + return o +} + +func (r *Runtime) createWeakSetProto(val *Object) objectImpl { + o := newBaseObjectObj(val, r.global.ObjectPrototype, classObject) + + o._putProp("constructor", r.global.WeakSet, true, false, true) + r.global.weakSetAdder = r.newNativeFunc(r.weakSetProto_add, nil, "add", nil, 1) + o._putProp("add", r.global.weakSetAdder, true, false, true) + o._putProp("delete", r.newNativeFunc(r.weakSetProto_delete, nil, "delete", nil, 1), true, false, true) + o._putProp("has", r.newNativeFunc(r.weakSetProto_has, nil, "has", nil, 1), true, false, true) + + o._putSym(SymToStringTag, valueProp(asciiString(classWeakSet), false, false, true)) + + return o +} + +func (r *Runtime) createWeakSet(val *Object) objectImpl { + o := r.newNativeConstructOnly(val, r.builtin_newWeakSet, r.global.WeakSetPrototype, "WeakSet", 0) + + return o +} + +func (r *Runtime) initWeakSet() { + 
r.global.WeakSetPrototype = r.newLazyObject(r.createWeakSetProto) + r.global.WeakSet = r.newLazyObject(r.createWeakSet) + + r.addToGlobal("WeakSet", r.global.WeakSet) +} diff --git a/vendor/github.com/dop251/goja/compiler.go b/vendor/github.com/dop251/goja/compiler.go new file mode 100644 index 0000000000..aa1b7f7baa --- /dev/null +++ b/vendor/github.com/dop251/goja/compiler.go @@ -0,0 +1,1066 @@ +package goja + +import ( + "fmt" + "github.com/dop251/goja/token" + "sort" + + "github.com/dop251/goja/ast" + "github.com/dop251/goja/file" + "github.com/dop251/goja/unistring" +) + +type blockType int + +const ( + blockLoop blockType = iota + blockLoopEnum + blockTry + blockLabel + blockSwitch + blockWith + blockScope + blockIterScope +) + +const ( + maskConst = 1 << 31 + maskVar = 1 << 30 + maskDeletable = 1 << 29 + maskStrict = maskDeletable + + maskTyp = maskConst | maskVar | maskDeletable +) + +type varType byte + +const ( + varTypeVar varType = iota + varTypeLet + varTypeStrictConst + varTypeConst +) + +type CompilerError struct { + Message string + File *file.File + Offset int +} + +type CompilerSyntaxError struct { + CompilerError +} + +type CompilerReferenceError struct { + CompilerError +} + +type srcMapItem struct { + pc int + srcPos int +} + +type Program struct { + code []instruction + values []Value + + funcName unistring.String + src *file.File + srcMap []srcMapItem +} + +type compiler struct { + p *Program + scope *scope + block *block + + enumGetExpr compiledEnumGetExpr + + evalVM *vm +} + +type binding struct { + scope *scope + name unistring.String + accessPoints map[*scope]*[]int + isConst bool + isStrict bool + isArg bool + isVar bool + inStash bool +} + +func (b *binding) getAccessPointsForScope(s *scope) *[]int { + m := b.accessPoints[s] + if m == nil { + a := make([]int, 0, 1) + m = &a + if b.accessPoints == nil { + b.accessPoints = make(map[*scope]*[]int) + } + b.accessPoints[s] = m + } + return m +} + +func (b *binding) markAccessPointAt(pos int) 
{ + scope := b.scope.c.scope + m := b.getAccessPointsForScope(scope) + *m = append(*m, pos-scope.base) +} + +func (b *binding) markAccessPointAtScope(scope *scope, pos int) { + m := b.getAccessPointsForScope(scope) + *m = append(*m, pos-scope.base) +} + +func (b *binding) markAccessPoint() { + scope := b.scope.c.scope + m := b.getAccessPointsForScope(scope) + *m = append(*m, len(scope.prg.code)-scope.base) +} + +func (b *binding) emitGet() { + b.markAccessPoint() + if b.isVar && !b.isArg { + b.scope.c.emit(loadStash(0)) + } else { + b.scope.c.emit(loadStashLex(0)) + } +} + +func (b *binding) emitGetAt(pos int) { + b.markAccessPointAt(pos) + if b.isVar && !b.isArg { + b.scope.c.p.code[pos] = loadStash(0) + } else { + b.scope.c.p.code[pos] = loadStashLex(0) + } +} + +func (b *binding) emitGetP() { + if b.isVar && !b.isArg { + // no-op + } else { + // make sure TDZ is checked + b.markAccessPoint() + b.scope.c.emit(loadStashLex(0), pop) + } +} + +func (b *binding) emitSet() { + if b.isConst { + if b.isStrict || b.scope.c.scope.strict { + b.scope.c.emit(throwAssignToConst) + } + return + } + b.markAccessPoint() + if b.isVar && !b.isArg { + b.scope.c.emit(storeStash(0)) + } else { + b.scope.c.emit(storeStashLex(0)) + } +} + +func (b *binding) emitSetP() { + if b.isConst { + if b.isStrict || b.scope.c.scope.strict { + b.scope.c.emit(throwAssignToConst) + } + return + } + b.markAccessPoint() + if b.isVar && !b.isArg { + b.scope.c.emit(storeStashP(0)) + } else { + b.scope.c.emit(storeStashLexP(0)) + } +} + +func (b *binding) emitInit() { + b.markAccessPoint() + b.scope.c.emit(initStash(0)) +} + +func (b *binding) emitGetVar(callee bool) { + b.markAccessPoint() + if b.isVar && !b.isArg { + b.scope.c.emit(&loadMixed{name: b.name, callee: callee}) + } else { + b.scope.c.emit(&loadMixedLex{name: b.name, callee: callee}) + } +} + +func (b *binding) emitResolveVar(strict bool) { + b.markAccessPoint() + if b.isVar && !b.isArg { + b.scope.c.emit(&resolveMixed{name: b.name, strict: 
strict, typ: varTypeVar}) + } else { + var typ varType + if b.isConst { + if b.isStrict { + typ = varTypeStrictConst + } else { + typ = varTypeConst + } + } else { + typ = varTypeLet + } + b.scope.c.emit(&resolveMixed{name: b.name, strict: strict, typ: typ}) + } +} + +func (b *binding) moveToStash() { + if b.isArg && !b.scope.argsInStash { + b.scope.moveArgsToStash() + } else { + b.inStash = true + b.scope.needStash = true + } +} + +func (b *binding) useCount() (count int) { + for _, a := range b.accessPoints { + count += len(*a) + } + return +} + +type scope struct { + c *compiler + prg *Program + outer *scope + nested []*scope + boundNames map[unistring.String]*binding + bindings []*binding + base int + numArgs int + + // in strict mode + strict bool + // eval top-level scope + eval bool + // at least one inner scope has direct eval() which can lookup names dynamically (by name) + dynLookup bool + // at least one binding has been marked for placement in stash + needStash bool + + // is a function or a top-level lexical environment + function bool + // is a variable environment, i.e. 
the target for dynamically created var bindings + variable bool + // a function scope that has at least one direct eval() and non-strict, so the variables can be added dynamically + dynamic bool + // arguments have been marked for placement in stash (functions only) + argsInStash bool + // need 'arguments' object (functions only) + argsNeeded bool + // 'this' is used and non-strict, so need to box it (functions only) + thisNeeded bool +} + +type block struct { + typ blockType + label unistring.String + cont int + breaks []int + conts []int + outer *block + breaking *block // set when the 'finally' block is an empty break statement sequence + needResult bool +} + +func (c *compiler) leaveScopeBlock(enter *enterBlock) { + c.updateEnterBlock(enter) + leave := &leaveBlock{ + stackSize: enter.stackSize, + popStash: enter.stashSize > 0, + } + c.emit(leave) + for _, pc := range c.block.breaks { + c.p.code[pc] = leave + } + c.block.breaks = nil + c.leaveBlock() +} + +func (c *compiler) leaveBlock() { + lbl := len(c.p.code) + for _, item := range c.block.breaks { + c.p.code[item] = jump(lbl - item) + } + if t := c.block.typ; t == blockLoop || t == blockLoopEnum { + for _, item := range c.block.conts { + c.p.code[item] = jump(c.block.cont - item) + } + } + c.block = c.block.outer +} + +func (e *CompilerSyntaxError) Error() string { + if e.File != nil { + return fmt.Sprintf("SyntaxError: %s at %s", e.Message, e.File.Position(e.Offset)) + } + return fmt.Sprintf("SyntaxError: %s", e.Message) +} + +func (e *CompilerReferenceError) Error() string { + return fmt.Sprintf("ReferenceError: %s", e.Message) +} + +func (c *compiler) newScope() { + strict := false + if c.scope != nil { + strict = c.scope.strict + } + c.scope = &scope{ + c: c, + prg: c.p, + outer: c.scope, + strict: strict, + } +} + +func (c *compiler) newBlockScope() { + c.newScope() + if outer := c.scope.outer; outer != nil { + outer.nested = append(outer.nested, c.scope) + } + c.scope.base = len(c.p.code) +} + +func (c 
*compiler) popScope() { + c.scope = c.scope.outer +} + +func newCompiler() *compiler { + c := &compiler{ + p: &Program{}, + } + + c.enumGetExpr.init(c, file.Idx(0)) + + return c +} + +func (p *Program) defineLiteralValue(val Value) uint32 { + for idx, v := range p.values { + if v.SameAs(val) { + return uint32(idx) + } + } + idx := uint32(len(p.values)) + p.values = append(p.values, val) + return idx +} + +func (p *Program) dumpCode(logger func(format string, args ...interface{})) { + p._dumpCode("", logger) +} + +func (p *Program) _dumpCode(indent string, logger func(format string, args ...interface{})) { + logger("values: %+v", p.values) + for pc, ins := range p.code { + logger("%s %d: %T(%v)", indent, pc, ins, ins) + if f, ok := ins.(*newFunc); ok { + f.prg._dumpCode(indent+">", logger) + } + } +} + +func (p *Program) sourceOffset(pc int) int { + i := sort.Search(len(p.srcMap), func(idx int) bool { + return p.srcMap[idx].pc > pc + }) - 1 + if i >= 0 { + return p.srcMap[i].srcPos + } + + return 0 +} + +func (s *scope) lookupName(name unistring.String) (binding *binding, noDynamics bool) { + noDynamics = true + toStash := false + for curScope := s; curScope != nil; curScope = curScope.outer { + if curScope.dynamic { + noDynamics = false + } else { + if b, exists := curScope.boundNames[name]; exists { + if toStash && !b.inStash { + b.moveToStash() + } + binding = b + return + } + } + if name == "arguments" && curScope.function { + curScope.argsNeeded = true + binding, _ = curScope.bindName(name) + return + } + if curScope.function { + toStash = true + } + } + return +} + +func (s *scope) ensureBoundNamesCreated() { + if s.boundNames == nil { + s.boundNames = make(map[unistring.String]*binding) + } +} + +func (s *scope) addBinding(offset int) *binding { + if len(s.bindings) >= (1<<24)-1 { + s.c.throwSyntaxError(offset, "Too many variables") + } + b := &binding{ + scope: s, + } + s.bindings = append(s.bindings, b) + return b +} + +func (s *scope) bindNameLexical(name 
unistring.String, unique bool, offset int) (*binding, bool) { + if b := s.boundNames[name]; b != nil { + if unique { + s.c.throwSyntaxError(offset, "Identifier '%s' has already been declared", name) + } + return b, false + } + b := s.addBinding(offset) + b.name = name + s.ensureBoundNamesCreated() + s.boundNames[name] = b + return b, true +} + +func (s *scope) bindName(name unistring.String) (*binding, bool) { + if !s.function && !s.variable && s.outer != nil { + return s.outer.bindName(name) + } + b, created := s.bindNameLexical(name, false, 0) + if created { + b.isVar = true + } + return b, created +} + +func (s *scope) bindNameShadow(name unistring.String) (*binding, bool) { + if !s.function && s.outer != nil { + return s.outer.bindNameShadow(name) + } + + _, exists := s.boundNames[name] + b := &binding{ + scope: s, + name: name, + } + s.bindings = append(s.bindings, b) + s.ensureBoundNamesCreated() + s.boundNames[name] = b + return b, !exists +} + +func (s *scope) nearestFunction() *scope { + for sc := s; sc != nil; sc = sc.outer { + if sc.function { + return sc + } + } + return nil +} + +func (s *scope) finaliseVarAlloc(stackOffset int) (stashSize, stackSize int) { + argsInStash := false + if f := s.nearestFunction(); f != nil { + argsInStash = f.argsInStash + } + stackIdx, stashIdx := 0, 0 + allInStash := s.isDynamic() + for i, b := range s.bindings { + if allInStash || b.inStash { + for scope, aps := range b.accessPoints { + var level uint32 + for sc := scope; sc != nil && sc != s; sc = sc.outer { + if sc.needStash || sc.isDynamic() { + level++ + } + } + if level > 255 { + s.c.throwSyntaxError(0, "Maximum nesting level (256) exceeded") + } + idx := (level << 24) | uint32(stashIdx) + base := scope.base + code := scope.prg.code + for _, pc := range *aps { + ap := &code[base+pc] + switch i := (*ap).(type) { + case loadStash: + *ap = loadStash(idx) + case storeStash: + *ap = storeStash(idx) + case storeStashP: + *ap = storeStashP(idx) + case loadStashLex: + *ap 
= loadStashLex(idx) + case storeStashLex: + *ap = storeStashLex(idx) + case storeStashLexP: + *ap = storeStashLexP(idx) + case initStash: + *ap = initStash(idx) + case *loadMixed: + i.idx = idx + case *loadMixedLex: + i.idx = idx + case *resolveMixed: + i.idx = idx + } + } + } + stashIdx++ + } else { + var idx int + if i < s.numArgs { + idx = -(i + 1) + } else { + stackIdx++ + idx = stackIdx + stackOffset + } + for scope, aps := range b.accessPoints { + var level int + for sc := scope; sc != nil && sc != s; sc = sc.outer { + if sc.needStash || sc.isDynamic() { + level++ + } + } + if level > 255 { + s.c.throwSyntaxError(0, "Maximum nesting level (256) exceeded") + } + code := scope.prg.code + base := scope.base + if argsInStash { + for _, pc := range *aps { + ap := &code[base+pc] + switch i := (*ap).(type) { + case loadStash: + *ap = loadStack1(idx) + case storeStash: + *ap = storeStack1(idx) + case storeStashP: + *ap = storeStack1P(idx) + case loadStashLex: + *ap = loadStack1Lex(idx) + case storeStashLex: + *ap = storeStack1Lex(idx) + case storeStashLexP: + *ap = storeStack1LexP(idx) + case initStash: + *ap = initStack1(idx) + case *loadMixed: + *ap = &loadMixedStack1{name: i.name, idx: idx, level: uint8(level), callee: i.callee} + case *loadMixedLex: + *ap = &loadMixedStack1Lex{name: i.name, idx: idx, level: uint8(level), callee: i.callee} + case *resolveMixed: + *ap = &resolveMixedStack1{typ: i.typ, name: i.name, idx: idx, level: uint8(level), strict: i.strict} + } + } + } else { + for _, pc := range *aps { + ap := &code[base+pc] + switch i := (*ap).(type) { + case loadStash: + *ap = loadStack(idx) + case storeStash: + *ap = storeStack(idx) + case storeStashP: + *ap = storeStackP(idx) + case loadStashLex: + *ap = loadStackLex(idx) + case storeStashLex: + *ap = storeStackLex(idx) + case storeStashLexP: + *ap = storeStackLexP(idx) + case initStash: + *ap = initStack(idx) + case *loadMixed: + *ap = &loadMixedStack{name: i.name, idx: idx, level: uint8(level), callee: 
i.callee} + case *loadMixedLex: + *ap = &loadMixedStackLex{name: i.name, idx: idx, level: uint8(level), callee: i.callee} + case *resolveMixed: + *ap = &resolveMixedStack{typ: i.typ, name: i.name, idx: idx, level: uint8(level), strict: i.strict} + } + } + } + } + } + } + for _, nested := range s.nested { + nested.finaliseVarAlloc(stackIdx + stackOffset) + } + return stashIdx, stackIdx +} + +func (s *scope) moveArgsToStash() { + for _, b := range s.bindings { + if !b.isArg { + break + } + b.inStash = true + } + s.argsInStash = true + s.needStash = true +} + +func (s *scope) adjustBase(delta int) { + s.base += delta + for _, nested := range s.nested { + nested.adjustBase(delta) + } +} + +func (s *scope) makeNamesMap() map[unistring.String]uint32 { + l := len(s.bindings) + if l == 0 { + return nil + } + names := make(map[unistring.String]uint32, l) + for i, b := range s.bindings { + idx := uint32(i) + if b.isConst { + idx |= maskConst + if b.isStrict { + idx |= maskStrict + } + } + if b.isVar { + idx |= maskVar + } + names[b.name] = idx + } + return names +} + +func (s *scope) isDynamic() bool { + return s.dynLookup || s.dynamic +} + +func (s *scope) deleteBinding(b *binding) { + idx := 0 + for i, bb := range s.bindings { + if bb == b { + idx = i + goto found + } + } + return +found: + delete(s.boundNames, b.name) + copy(s.bindings[idx:], s.bindings[idx+1:]) + l := len(s.bindings) - 1 + s.bindings[l] = nil + s.bindings = s.bindings[:l] +} + +func (c *compiler) compile(in *ast.Program, strict, eval, inGlobal bool) { + c.p.src = in.File + c.newScope() + scope := c.scope + scope.dynamic = true + scope.eval = eval + if !strict && len(in.Body) > 0 { + strict = c.isStrict(in.Body) != nil + } + scope.strict = strict + ownVarScope := eval && strict + ownLexScope := !inGlobal || eval + if ownVarScope { + c.newBlockScope() + scope = c.scope + scope.function = true + } + funcs := c.extractFunctions(in.Body) + c.createFunctionBindings(funcs) + numFuncs := len(scope.bindings) + if 
inGlobal && !ownVarScope { + if numFuncs == len(funcs) { + c.compileFunctionsGlobalAllUnique(funcs) + } else { + c.compileFunctionsGlobal(funcs) + } + } + c.compileDeclList(in.DeclarationList, false) + numVars := len(scope.bindings) - numFuncs + vars := make([]unistring.String, len(scope.bindings)) + for i, b := range scope.bindings { + vars[i] = b.name + } + if len(vars) > 0 && !ownVarScope && ownLexScope { + if inGlobal { + c.emit(&bindGlobal{ + vars: vars[numFuncs:], + funcs: vars[:numFuncs], + deletable: eval, + }) + } else { + c.emit(&bindVars{names: vars, deletable: eval}) + } + } + var enter *enterBlock + if c.compileLexicalDeclarations(in.Body, ownVarScope || !ownLexScope) { + if ownLexScope { + c.block = &block{ + outer: c.block, + typ: blockScope, + needResult: true, + } + enter = &enterBlock{} + c.emit(enter) + } + } + if len(scope.bindings) > 0 && !ownLexScope { + var lets, consts []unistring.String + for _, b := range c.scope.bindings[numFuncs+numVars:] { + if b.isConst { + consts = append(consts, b.name) + } else { + lets = append(lets, b.name) + } + } + c.emit(&bindGlobal{ + vars: vars[numFuncs:], + funcs: vars[:numFuncs], + lets: lets, + consts: consts, + }) + } + if !inGlobal || ownVarScope { + c.compileFunctions(funcs) + } + c.compileStatements(in.Body, true) + if enter != nil { + c.leaveScopeBlock(enter) + c.popScope() + } + + c.p.code = append(c.p.code, halt) + + scope.finaliseVarAlloc(0) +} + +func (c *compiler) compileDeclList(v []*ast.VariableDeclaration, inFunc bool) { + for _, value := range v { + c.createVarBindings(value, inFunc) + } +} + +func (c *compiler) extractLabelled(st ast.Statement) ast.Statement { + if st, ok := st.(*ast.LabelledStatement); ok { + return c.extractLabelled(st.Statement) + } + return st +} + +func (c *compiler) extractFunctions(list []ast.Statement) (funcs []*ast.FunctionDeclaration) { + for _, st := range list { + var decl *ast.FunctionDeclaration + switch st := c.extractLabelled(st).(type) { + case 
*ast.FunctionDeclaration: + decl = st + case *ast.LabelledStatement: + if st1, ok := st.Statement.(*ast.FunctionDeclaration); ok { + decl = st1 + } else { + continue + } + default: + continue + } + funcs = append(funcs, decl) + } + return +} + +func (c *compiler) createFunctionBindings(funcs []*ast.FunctionDeclaration) { + s := c.scope + if s.outer != nil { + unique := !s.function && !s.variable && s.strict + for _, decl := range funcs { + s.bindNameLexical(decl.Function.Name.Name, unique, int(decl.Function.Name.Idx1())-1) + } + } else { + for _, decl := range funcs { + s.bindName(decl.Function.Name.Name) + } + } +} + +func (c *compiler) compileFunctions(list []*ast.FunctionDeclaration) { + for _, decl := range list { + c.compileFunction(decl) + } +} + +func (c *compiler) compileFunctionsGlobalAllUnique(list []*ast.FunctionDeclaration) { + for _, decl := range list { + c.compileFunctionLiteral(decl.Function, false).emitGetter(true) + } +} + +func (c *compiler) compileFunctionsGlobal(list []*ast.FunctionDeclaration) { + m := make(map[unistring.String]int, len(list)) + for i := len(list) - 1; i >= 0; i-- { + name := list[i].Function.Name.Name + if _, exists := m[name]; !exists { + m[name] = i + } + } + for i, decl := range list { + if m[decl.Function.Name.Name] == i { + c.compileFunctionLiteral(decl.Function, false).emitGetter(true) + } else { + leave := c.enterDummyMode() + c.compileFunctionLiteral(decl.Function, false).emitGetter(false) + leave() + } + } +} + +func (c *compiler) createVarIdBinding(name unistring.String, offset int, inFunc bool) { + if c.scope.strict { + c.checkIdentifierLName(name, offset) + c.checkIdentifierName(name, offset) + } + if !inFunc || name != "arguments" { + c.scope.bindName(name) + } +} + +func (c *compiler) createBindings(target ast.Expression, createIdBinding func(name unistring.String, offset int)) { + switch target := target.(type) { + case *ast.Identifier: + createIdBinding(target.Name, int(target.Idx)-1) + case 
*ast.ObjectPattern: + for _, prop := range target.Properties { + switch prop := prop.(type) { + case *ast.PropertyShort: + createIdBinding(prop.Name.Name, int(prop.Name.Idx)-1) + case *ast.PropertyKeyed: + c.createBindings(prop.Value, createIdBinding) + default: + c.throwSyntaxError(int(target.Idx0()-1), "unsupported property type in ObjectPattern: %T", prop) + } + } + if target.Rest != nil { + c.createBindings(target.Rest, createIdBinding) + } + case *ast.ArrayPattern: + for _, elt := range target.Elements { + if elt != nil { + c.createBindings(elt, createIdBinding) + } + } + if target.Rest != nil { + c.createBindings(target.Rest, createIdBinding) + } + case *ast.AssignExpression: + c.createBindings(target.Left, createIdBinding) + default: + c.throwSyntaxError(int(target.Idx0()-1), "unsupported binding target: %T", target) + } +} + +func (c *compiler) createVarBinding(target ast.Expression, inFunc bool) { + c.createBindings(target, func(name unistring.String, offset int) { + c.createVarIdBinding(name, offset, inFunc) + }) +} + +func (c *compiler) createVarBindings(v *ast.VariableDeclaration, inFunc bool) { + for _, item := range v.List { + c.createVarBinding(item.Target, inFunc) + } +} + +func (c *compiler) createLexicalIdBinding(name unistring.String, isConst bool, offset int) *binding { + if name == "let" { + c.throwSyntaxError(offset, "let is disallowed as a lexically bound name") + } + if c.scope.strict { + c.checkIdentifierLName(name, offset) + c.checkIdentifierName(name, offset) + } + b, _ := c.scope.bindNameLexical(name, true, offset) + if isConst { + b.isConst, b.isStrict = true, true + } + return b +} + +func (c *compiler) createLexicalIdBindingFuncBody(name unistring.String, isConst bool, offset int, calleeBinding *binding) *binding { + if name == "let" { + c.throwSyntaxError(offset, "let is disallowed as a lexically bound name") + } + if c.scope.strict { + c.checkIdentifierLName(name, offset) + c.checkIdentifierName(name, offset) + } + paramScope := 
c.scope.outer + parentBinding := paramScope.boundNames[name] + if parentBinding != nil { + if parentBinding != calleeBinding && (name != "arguments" || !paramScope.argsNeeded) { + c.throwSyntaxError(offset, "Identifier '%s' has already been declared", name) + } + } + b, _ := c.scope.bindNameLexical(name, true, offset) + if isConst { + b.isConst, b.isStrict = true, true + } + return b +} + +func (c *compiler) createLexicalBinding(target ast.Expression, isConst bool) { + c.createBindings(target, func(name unistring.String, offset int) { + c.createLexicalIdBinding(name, isConst, offset) + }) +} + +func (c *compiler) createLexicalBindings(lex *ast.LexicalDeclaration) { + for _, d := range lex.List { + c.createLexicalBinding(d.Target, lex.Token == token.CONST) + } +} + +func (c *compiler) compileLexicalDeclarations(list []ast.Statement, scopeDeclared bool) bool { + for _, st := range list { + if lex, ok := st.(*ast.LexicalDeclaration); ok { + if !scopeDeclared { + c.newBlockScope() + scopeDeclared = true + } + c.createLexicalBindings(lex) + } + } + return scopeDeclared +} + +func (c *compiler) compileLexicalDeclarationsFuncBody(list []ast.Statement, calleeBinding *binding) { + for _, st := range list { + if lex, ok := st.(*ast.LexicalDeclaration); ok { + isConst := lex.Token == token.CONST + for _, d := range lex.List { + c.createBindings(d.Target, func(name unistring.String, offset int) { + c.createLexicalIdBindingFuncBody(name, isConst, offset, calleeBinding) + }) + } + } + } +} + +func (c *compiler) compileFunction(v *ast.FunctionDeclaration) { + name := v.Function.Name.Name + b := c.scope.boundNames[name] + if b == nil || b.isVar { + e := &compiledIdentifierExpr{ + name: v.Function.Name.Name, + } + e.init(c, v.Function.Idx0()) + e.emitSetter(c.compileFunctionLiteral(v.Function, false), false) + } else { + c.compileFunctionLiteral(v.Function, false).emitGetter(true) + b.emitInit() + } +} + +func (c *compiler) compileStandaloneFunctionDecl(v *ast.FunctionDeclaration) 
{ + if c.scope.strict { + c.throwSyntaxError(int(v.Idx0())-1, "In strict mode code, functions can only be declared at top level or inside a block.") + } + c.throwSyntaxError(int(v.Idx0())-1, "In non-strict mode code, functions can only be declared at top level, inside a block, or as the body of an if statement.") +} + +func (c *compiler) emit(instructions ...instruction) { + c.p.code = append(c.p.code, instructions...) +} + +func (c *compiler) throwSyntaxError(offset int, format string, args ...interface{}) { + panic(&CompilerSyntaxError{ + CompilerError: CompilerError{ + File: c.p.src, + Offset: offset, + Message: fmt.Sprintf(format, args...), + }, + }) +} + +func (c *compiler) isStrict(list []ast.Statement) *ast.StringLiteral { + for _, st := range list { + if st, ok := st.(*ast.ExpressionStatement); ok { + if e, ok := st.Expression.(*ast.StringLiteral); ok { + if e.Literal == `"use strict"` || e.Literal == `'use strict'` { + return e + } + } else { + break + } + } else { + break + } + } + return nil +} + +func (c *compiler) isStrictStatement(s ast.Statement) *ast.StringLiteral { + if s, ok := s.(*ast.BlockStatement); ok { + return c.isStrict(s.List) + } + return nil +} + +func (c *compiler) checkIdentifierName(name unistring.String, offset int) { + switch name { + case "implements", "interface", "let", "package", "private", "protected", "public", "static", "yield": + c.throwSyntaxError(offset, "Unexpected strict mode reserved word") + } +} + +func (c *compiler) checkIdentifierLName(name unistring.String, offset int) { + switch name { + case "eval", "arguments": + c.throwSyntaxError(offset, "Assignment to eval or arguments is not allowed in strict mode") + } +} + +// Enter a 'dummy' compilation mode. Any code produced after this method is called will be discarded after +// leaveFunc is called with no additional side effects. This is useful for compiling code inside a +// constant falsy condition 'if' branch or a loop (i.e 'if (false) { ... 
} or while (false) { ... }). +// Such code should not be included in the final compilation result as it's never called, but it must +// still produce compilation errors if there are any. +// TODO: make sure variable lookups do not de-optimise parent scopes +func (c *compiler) enterDummyMode() (leaveFunc func()) { + savedBlock, savedProgram := c.block, c.p + if savedBlock != nil { + c.block = &block{ + typ: savedBlock.typ, + label: savedBlock.label, + outer: savedBlock.outer, + breaking: savedBlock.breaking, + } + } + c.p = &Program{} + c.newScope() + return func() { + c.block, c.p = savedBlock, savedProgram + c.popScope() + } +} + +func (c *compiler) compileStatementDummy(statement ast.Statement) { + leave := c.enterDummyMode() + c.compileStatement(statement, false) + leave() +} diff --git a/vendor/github.com/dop251/goja/compiler_expr.go b/vendor/github.com/dop251/goja/compiler_expr.go new file mode 100644 index 0000000000..248622c893 --- /dev/null +++ b/vendor/github.com/dop251/goja/compiler_expr.go @@ -0,0 +1,2263 @@ +package goja + +import ( + "fmt" + "regexp" + + "github.com/dop251/goja/ast" + "github.com/dop251/goja/file" + "github.com/dop251/goja/token" + "github.com/dop251/goja/unistring" +) + +var ( + octalRegexp = regexp.MustCompile(`^0[0-7]`) +) + +type compiledExpr interface { + emitGetter(putOnStack bool) + emitSetter(valueExpr compiledExpr, putOnStack bool) + emitRef() + emitUnary(prepare, body func(), postfix, putOnStack bool) + deleteExpr() compiledExpr + constant() bool + addSrcMap() +} + +type compiledExprOrRef interface { + compiledExpr + emitGetterOrRef() +} + +type compiledCallExpr struct { + baseCompiledExpr + args []compiledExpr + callee compiledExpr + + isVariadic bool +} + +type compiledNewExpr struct { + compiledCallExpr +} + +type compiledObjectLiteral struct { + baseCompiledExpr + expr *ast.ObjectLiteral +} + +type compiledArrayLiteral struct { + baseCompiledExpr + expr *ast.ArrayLiteral +} + +type compiledRegexpLiteral struct { + 
baseCompiledExpr + expr *ast.RegExpLiteral +} + +type compiledLiteral struct { + baseCompiledExpr + val Value +} + +type compiledAssignExpr struct { + baseCompiledExpr + left, right compiledExpr + operator token.Token +} + +type compiledObjectAssignmentPattern struct { + baseCompiledExpr + expr *ast.ObjectPattern +} + +type compiledArrayAssignmentPattern struct { + baseCompiledExpr + expr *ast.ArrayPattern +} + +type deleteGlobalExpr struct { + baseCompiledExpr + name unistring.String +} + +type deleteVarExpr struct { + baseCompiledExpr + name unistring.String +} + +type deletePropExpr struct { + baseCompiledExpr + left compiledExpr + name unistring.String +} + +type deleteElemExpr struct { + baseCompiledExpr + left, member compiledExpr +} + +type constantExpr struct { + baseCompiledExpr + val Value +} + +type baseCompiledExpr struct { + c *compiler + offset int +} + +type compiledIdentifierExpr struct { + baseCompiledExpr + name unistring.String +} + +type compiledFunctionLiteral struct { + baseCompiledExpr + expr *ast.FunctionLiteral + lhsName unistring.String + strict *ast.StringLiteral + isExpr bool +} + +type compiledBracketExpr struct { + baseCompiledExpr + left, member compiledExpr +} + +type compiledThisExpr struct { + baseCompiledExpr +} + +type compiledNewTarget struct { + baseCompiledExpr +} + +type compiledSequenceExpr struct { + baseCompiledExpr + sequence []compiledExpr +} + +type compiledUnaryExpr struct { + baseCompiledExpr + operand compiledExpr + operator token.Token + postfix bool +} + +type compiledConditionalExpr struct { + baseCompiledExpr + test, consequent, alternate compiledExpr +} + +type compiledLogicalOr struct { + baseCompiledExpr + left, right compiledExpr +} + +type compiledLogicalAnd struct { + baseCompiledExpr + left, right compiledExpr +} + +type compiledBinaryExpr struct { + baseCompiledExpr + left, right compiledExpr + operator token.Token +} + +type compiledEnumGetExpr struct { + baseCompiledExpr +} + +type defaultDeleteExpr 
struct { + baseCompiledExpr + expr compiledExpr +} + +type compiledSpreadCallArgument struct { + baseCompiledExpr + expr compiledExpr +} + +func (e *defaultDeleteExpr) emitGetter(putOnStack bool) { + e.expr.emitGetter(false) + if putOnStack { + e.c.emit(loadVal(e.c.p.defineLiteralValue(valueTrue))) + } +} + +func (c *compiler) compileExpression(v ast.Expression) compiledExpr { + // log.Printf("compileExpression: %T", v) + switch v := v.(type) { + case nil: + return nil + case *ast.AssignExpression: + return c.compileAssignExpression(v) + case *ast.NumberLiteral: + return c.compileNumberLiteral(v) + case *ast.StringLiteral: + return c.compileStringLiteral(v) + case *ast.BooleanLiteral: + return c.compileBooleanLiteral(v) + case *ast.NullLiteral: + r := &compiledLiteral{ + val: _null, + } + r.init(c, v.Idx0()) + return r + case *ast.Identifier: + return c.compileIdentifierExpression(v) + case *ast.CallExpression: + return c.compileCallExpression(v) + case *ast.ObjectLiteral: + return c.compileObjectLiteral(v) + case *ast.ArrayLiteral: + return c.compileArrayLiteral(v) + case *ast.RegExpLiteral: + return c.compileRegexpLiteral(v) + case *ast.BinaryExpression: + return c.compileBinaryExpression(v) + case *ast.UnaryExpression: + return c.compileUnaryExpression(v) + case *ast.ConditionalExpression: + return c.compileConditionalExpression(v) + case *ast.FunctionLiteral: + return c.compileFunctionLiteral(v, true) + case *ast.DotExpression: + r := &compiledDotExpr{ + left: c.compileExpression(v.Left), + name: v.Identifier.Name, + } + r.init(c, v.Idx0()) + return r + case *ast.BracketExpression: + r := &compiledBracketExpr{ + left: c.compileExpression(v.Left), + member: c.compileExpression(v.Member), + } + r.init(c, v.Idx0()) + return r + case *ast.ThisExpression: + r := &compiledThisExpr{} + r.init(c, v.Idx0()) + return r + case *ast.SequenceExpression: + return c.compileSequenceExpression(v) + case *ast.NewExpression: + return c.compileNewExpression(v) + case 
*ast.MetaProperty: + return c.compileMetaProperty(v) + case *ast.ObjectPattern: + return c.compileObjectAssignmentPattern(v) + case *ast.ArrayPattern: + return c.compileArrayAssignmentPattern(v) + default: + panic(fmt.Errorf("Unknown expression type: %T", v)) + } +} + +func (e *baseCompiledExpr) constant() bool { + return false +} + +func (e *baseCompiledExpr) init(c *compiler, idx file.Idx) { + e.c = c + e.offset = int(idx) - 1 +} + +func (e *baseCompiledExpr) emitSetter(compiledExpr, bool) { + e.c.throwSyntaxError(e.offset, "Not a valid left-value expression") +} + +func (e *baseCompiledExpr) emitRef() { + e.c.throwSyntaxError(e.offset, "Cannot emit reference for this type of expression") +} + +func (e *baseCompiledExpr) deleteExpr() compiledExpr { + r := &constantExpr{ + val: valueTrue, + } + r.init(e.c, file.Idx(e.offset+1)) + return r +} + +func (e *baseCompiledExpr) emitUnary(func(), func(), bool, bool) { + e.c.throwSyntaxError(e.offset, "Not a valid left-value expression") +} + +func (e *baseCompiledExpr) addSrcMap() { + if e.offset > 0 { + e.c.p.srcMap = append(e.c.p.srcMap, srcMapItem{pc: len(e.c.p.code), srcPos: e.offset}) + } +} + +func (e *constantExpr) emitGetter(putOnStack bool) { + if putOnStack { + e.addSrcMap() + e.c.emit(loadVal(e.c.p.defineLiteralValue(e.val))) + } +} + +func (e *compiledIdentifierExpr) emitGetter(putOnStack bool) { + e.addSrcMap() + if b, noDynamics := e.c.scope.lookupName(e.name); noDynamics { + if b != nil { + if putOnStack { + b.emitGet() + } else { + b.emitGetP() + } + } else { + panic("No dynamics and not found") + } + } else { + if b != nil { + b.emitGetVar(false) + } else { + e.c.emit(loadDynamic(e.name)) + } + if !putOnStack { + e.c.emit(pop) + } + } +} + +func (e *compiledIdentifierExpr) emitGetterOrRef() { + e.addSrcMap() + if b, noDynamics := e.c.scope.lookupName(e.name); noDynamics { + if b != nil { + b.emitGet() + } else { + panic("No dynamics and not found") + } + } else { + if b != nil { + b.emitGetVar(false) + } 
else { + e.c.emit(loadDynamicRef(e.name)) + } + } +} + +func (e *compiledIdentifierExpr) emitGetterAndCallee() { + e.addSrcMap() + if b, noDynamics := e.c.scope.lookupName(e.name); noDynamics { + if b != nil { + e.c.emit(loadUndef) + b.emitGet() + } else { + panic("No dynamics and not found") + } + } else { + if b != nil { + b.emitGetVar(true) + } else { + e.c.emit(loadDynamicCallee(e.name)) + } + } +} + +func (c *compiler) emitVarSetter1(name unistring.String, offset int, putOnStack bool, emitRight func(isRef bool)) { + if c.scope.strict { + c.checkIdentifierLName(name, offset) + } + + if b, noDynamics := c.scope.lookupName(name); noDynamics { + emitRight(false) + if b != nil { + if putOnStack { + b.emitSet() + } else { + b.emitSetP() + } + } else { + if c.scope.strict { + c.emit(setGlobalStrict(name)) + } else { + c.emit(setGlobal(name)) + } + if !putOnStack { + c.emit(pop) + } + } + } else { + if b != nil { + b.emitResolveVar(c.scope.strict) + } else { + if c.scope.strict { + c.emit(resolveVar1Strict(name)) + } else { + c.emit(resolveVar1(name)) + } + } + emitRight(true) + if putOnStack { + c.emit(putValue) + } else { + c.emit(putValueP) + } + } +} + +func (c *compiler) emitVarSetter(name unistring.String, offset int, valueExpr compiledExpr, putOnStack bool) { + c.emitVarSetter1(name, offset, putOnStack, func(bool) { + c.emitExpr(valueExpr, true) + }) +} + +func (c *compiler) emitVarRef(name unistring.String, offset int) { + if c.scope.strict { + c.checkIdentifierLName(name, offset) + } + + b, _ := c.scope.lookupName(name) + if b != nil { + b.emitResolveVar(c.scope.strict) + } else { + if c.scope.strict { + c.emit(resolveVar1Strict(name)) + } else { + c.emit(resolveVar1(name)) + } + } +} + +func (e *compiledIdentifierExpr) emitRef() { + e.c.emitVarRef(e.name, e.offset) +} + +func (e *compiledIdentifierExpr) emitSetter(valueExpr compiledExpr, putOnStack bool) { + e.c.emitVarSetter(e.name, e.offset, valueExpr, putOnStack) +} + +func (e *compiledIdentifierExpr) 
emitUnary(prepare, body func(), postfix, putOnStack bool) { + if putOnStack { + e.c.emitVarSetter1(e.name, e.offset, true, func(isRef bool) { + e.c.emit(loadUndef) + if isRef { + e.c.emit(getValue) + } else { + e.emitGetter(true) + } + if prepare != nil { + prepare() + } + if !postfix { + body() + } + e.c.emit(rdupN(1)) + if postfix { + body() + } + }) + e.c.emit(pop) + } else { + e.c.emitVarSetter1(e.name, e.offset, false, func(isRef bool) { + if isRef { + e.c.emit(getValue) + } else { + e.emitGetter(true) + } + body() + }) + } +} + +func (e *compiledIdentifierExpr) deleteExpr() compiledExpr { + if e.c.scope.strict { + e.c.throwSyntaxError(e.offset, "Delete of an unqualified identifier in strict mode") + panic("Unreachable") + } + if b, noDynamics := e.c.scope.lookupName(e.name); noDynamics { + if b == nil { + r := &deleteGlobalExpr{ + name: e.name, + } + r.init(e.c, file.Idx(0)) + return r + } + } else { + if b == nil { + r := &deleteVarExpr{ + name: e.name, + } + r.init(e.c, file.Idx(e.offset+1)) + return r + } + } + r := &compiledLiteral{ + val: valueFalse, + } + r.init(e.c, file.Idx(e.offset+1)) + return r +} + +type compiledDotExpr struct { + baseCompiledExpr + left compiledExpr + name unistring.String +} + +func (e *compiledDotExpr) emitGetter(putOnStack bool) { + e.left.emitGetter(true) + e.addSrcMap() + e.c.emit(getProp(e.name)) + if !putOnStack { + e.c.emit(pop) + } +} + +func (e *compiledDotExpr) emitRef() { + e.left.emitGetter(true) + if e.c.scope.strict { + e.c.emit(getPropRefStrict(e.name)) + } else { + e.c.emit(getPropRef(e.name)) + } +} + +func (e *compiledDotExpr) emitSetter(valueExpr compiledExpr, putOnStack bool) { + e.left.emitGetter(true) + valueExpr.emitGetter(true) + if e.c.scope.strict { + if putOnStack { + e.c.emit(setPropStrict(e.name)) + } else { + e.c.emit(setPropStrictP(e.name)) + } + } else { + if putOnStack { + e.c.emit(setProp(e.name)) + } else { + e.c.emit(setPropP(e.name)) + } + } +} + +func (e *compiledDotExpr) emitUnary(prepare, 
body func(), postfix, putOnStack bool) { + if !putOnStack { + e.left.emitGetter(true) + e.c.emit(dup) + e.c.emit(getProp(e.name)) + body() + if e.c.scope.strict { + e.c.emit(setPropStrict(e.name), pop) + } else { + e.c.emit(setProp(e.name), pop) + } + } else { + if !postfix { + e.left.emitGetter(true) + e.c.emit(dup) + e.c.emit(getProp(e.name)) + if prepare != nil { + prepare() + } + body() + if e.c.scope.strict { + e.c.emit(setPropStrict(e.name)) + } else { + e.c.emit(setProp(e.name)) + } + } else { + e.c.emit(loadUndef) + e.left.emitGetter(true) + e.c.emit(dup) + e.c.emit(getProp(e.name)) + if prepare != nil { + prepare() + } + e.c.emit(rdupN(2)) + body() + if e.c.scope.strict { + e.c.emit(setPropStrict(e.name)) + } else { + e.c.emit(setProp(e.name)) + } + e.c.emit(pop) + } + } +} + +func (e *compiledDotExpr) deleteExpr() compiledExpr { + r := &deletePropExpr{ + left: e.left, + name: e.name, + } + r.init(e.c, file.Idx(0)) + return r +} + +func (e *compiledBracketExpr) emitGetter(putOnStack bool) { + e.left.emitGetter(true) + e.member.emitGetter(true) + e.addSrcMap() + e.c.emit(getElem) + if !putOnStack { + e.c.emit(pop) + } +} + +func (e *compiledBracketExpr) emitRef() { + e.left.emitGetter(true) + e.member.emitGetter(true) + if e.c.scope.strict { + e.c.emit(getElemRefStrict) + } else { + e.c.emit(getElemRef) + } +} + +func (e *compiledBracketExpr) emitSetter(valueExpr compiledExpr, putOnStack bool) { + e.left.emitGetter(true) + e.member.emitGetter(true) + valueExpr.emitGetter(true) + if e.c.scope.strict { + if putOnStack { + e.c.emit(setElemStrict) + } else { + e.c.emit(setElemStrictP) + } + } else { + if putOnStack { + e.c.emit(setElem) + } else { + e.c.emit(setElemP) + } + } +} + +func (e *compiledBracketExpr) emitUnary(prepare, body func(), postfix, putOnStack bool) { + if !putOnStack { + e.left.emitGetter(true) + e.member.emitGetter(true) + e.c.emit(dupN(1), dupN(1)) + e.c.emit(getElem) + body() + if e.c.scope.strict { + e.c.emit(setElemStrict, pop) + } else 
{ + e.c.emit(setElem, pop) + } + } else { + if !postfix { + e.left.emitGetter(true) + e.member.emitGetter(true) + e.c.emit(dupN(1), dupN(1)) + e.c.emit(getElem) + if prepare != nil { + prepare() + } + body() + if e.c.scope.strict { + e.c.emit(setElemStrict) + } else { + e.c.emit(setElem) + } + } else { + e.c.emit(loadUndef) + e.left.emitGetter(true) + e.member.emitGetter(true) + e.c.emit(dupN(1), dupN(1)) + e.c.emit(getElem) + if prepare != nil { + prepare() + } + e.c.emit(rdupN(3)) + body() + if e.c.scope.strict { + e.c.emit(setElemStrict, pop) + } else { + e.c.emit(setElem, pop) + } + } + } +} + +func (e *compiledBracketExpr) deleteExpr() compiledExpr { + r := &deleteElemExpr{ + left: e.left, + member: e.member, + } + r.init(e.c, file.Idx(0)) + return r +} + +func (e *deleteElemExpr) emitGetter(putOnStack bool) { + e.left.emitGetter(true) + e.member.emitGetter(true) + e.addSrcMap() + if e.c.scope.strict { + e.c.emit(deleteElemStrict) + } else { + e.c.emit(deleteElem) + } + if !putOnStack { + e.c.emit(pop) + } +} + +func (e *deletePropExpr) emitGetter(putOnStack bool) { + e.left.emitGetter(true) + e.addSrcMap() + if e.c.scope.strict { + e.c.emit(deletePropStrict(e.name)) + } else { + e.c.emit(deleteProp(e.name)) + } + if !putOnStack { + e.c.emit(pop) + } +} + +func (e *deleteVarExpr) emitGetter(putOnStack bool) { + /*if e.c.scope.strict { + e.c.throwSyntaxError(e.offset, "Delete of an unqualified identifier in strict mode") + return + }*/ + e.c.emit(deleteVar(e.name)) + if !putOnStack { + e.c.emit(pop) + } +} + +func (e *deleteGlobalExpr) emitGetter(putOnStack bool) { + /*if e.c.scope.strict { + e.c.throwSyntaxError(e.offset, "Delete of an unqualified identifier in strict mode") + return + }*/ + + e.c.emit(deleteGlobal(e.name)) + if !putOnStack { + e.c.emit(pop) + } +} + +func (e *compiledAssignExpr) emitGetter(putOnStack bool) { + e.addSrcMap() + switch e.operator { + case token.ASSIGN: + if fn, ok := e.right.(*compiledFunctionLiteral); ok { + if fn.expr.Name == 
nil { + if id, ok := e.left.(*compiledIdentifierExpr); ok { + fn.lhsName = id.name + } + } + } + e.left.emitSetter(e.right, putOnStack) + case token.PLUS: + e.left.emitUnary(nil, func() { + e.right.emitGetter(true) + e.c.emit(add) + }, false, putOnStack) + case token.MINUS: + e.left.emitUnary(nil, func() { + e.right.emitGetter(true) + e.c.emit(sub) + }, false, putOnStack) + case token.MULTIPLY: + e.left.emitUnary(nil, func() { + e.right.emitGetter(true) + e.c.emit(mul) + }, false, putOnStack) + case token.SLASH: + e.left.emitUnary(nil, func() { + e.right.emitGetter(true) + e.c.emit(div) + }, false, putOnStack) + case token.REMAINDER: + e.left.emitUnary(nil, func() { + e.right.emitGetter(true) + e.c.emit(mod) + }, false, putOnStack) + case token.OR: + e.left.emitUnary(nil, func() { + e.right.emitGetter(true) + e.c.emit(or) + }, false, putOnStack) + case token.AND: + e.left.emitUnary(nil, func() { + e.right.emitGetter(true) + e.c.emit(and) + }, false, putOnStack) + case token.EXCLUSIVE_OR: + e.left.emitUnary(nil, func() { + e.right.emitGetter(true) + e.c.emit(xor) + }, false, putOnStack) + case token.SHIFT_LEFT: + e.left.emitUnary(nil, func() { + e.right.emitGetter(true) + e.c.emit(sal) + }, false, putOnStack) + case token.SHIFT_RIGHT: + e.left.emitUnary(nil, func() { + e.right.emitGetter(true) + e.c.emit(sar) + }, false, putOnStack) + case token.UNSIGNED_SHIFT_RIGHT: + e.left.emitUnary(nil, func() { + e.right.emitGetter(true) + e.c.emit(shr) + }, false, putOnStack) + default: + panic(fmt.Errorf("Unknown assign operator: %s", e.operator.String())) + } +} + +func (e *compiledLiteral) emitGetter(putOnStack bool) { + if putOnStack { + e.addSrcMap() + e.c.emit(loadVal(e.c.p.defineLiteralValue(e.val))) + } +} + +func (e *compiledLiteral) constant() bool { + return true +} + +func (c *compiler) compileParameterBindingIdentifier(name unistring.String, offset int) (*binding, bool) { + if c.scope.strict { + c.checkIdentifierName(name, offset) + c.checkIdentifierLName(name, 
offset) + } + b, unique := c.scope.bindNameShadow(name) + if !unique && c.scope.strict { + c.throwSyntaxError(offset, "Strict mode function may not have duplicate parameter names (%s)", name) + return nil, false + } + return b, unique +} + +func (c *compiler) compileParameterPatternIdBinding(name unistring.String, offset int) { + if _, unique := c.compileParameterBindingIdentifier(name, offset); !unique { + c.throwSyntaxError(offset, "Duplicate parameter name not allowed in this context") + } +} + +func (c *compiler) compileParameterPatternBinding(item ast.Expression) { + c.createBindings(item, c.compileParameterPatternIdBinding) +} + +func (e *compiledFunctionLiteral) emitGetter(putOnStack bool) { + savedPrg := e.c.p + e.c.p = &Program{ + src: e.c.p.src, + } + e.c.newScope() + e.c.scope.function = true + + var name unistring.String + if e.expr.Name != nil { + name = e.expr.Name.Name + } else { + name = e.lhsName + } + + if name != "" { + e.c.p.funcName = name + } + savedBlock := e.c.block + defer func() { + e.c.block = savedBlock + }() + + e.c.block = &block{ + typ: blockScope, + } + + if !e.c.scope.strict { + e.c.scope.strict = e.strict != nil + } + + hasPatterns := false + hasInits := false + firstDupIdx := -1 + length := 0 + + if e.expr.ParameterList.Rest != nil { + hasPatterns = true // strictly speaking not, but we need to activate all the checks + } + + // First, make sure that the first bindings correspond to the formal parameters + for _, item := range e.expr.ParameterList.List { + switch tgt := item.Target.(type) { + case *ast.Identifier: + offset := int(tgt.Idx) - 1 + b, unique := e.c.compileParameterBindingIdentifier(tgt.Name, offset) + if !unique { + firstDupIdx = offset + } + b.isArg = true + case ast.Pattern: + b := e.c.scope.addBinding(int(item.Idx0()) - 1) + b.isArg = true + hasPatterns = true + default: + e.c.throwSyntaxError(int(item.Idx0())-1, "Unsupported BindingElement type: %T", item) + return + } + if item.Initializer != nil { + hasInits = 
true + } + if hasPatterns || hasInits { + if firstDupIdx >= 0 { + e.c.throwSyntaxError(firstDupIdx, "Duplicate parameter name not allowed in this context") + return + } + if e.strict != nil { + e.c.throwSyntaxError(int(e.strict.Idx)-1, "Illegal 'use strict' directive in function with non-simple parameter list") + return + } + } + if !hasInits { + length++ + } + } + + // create pattern bindings + if hasPatterns { + for _, item := range e.expr.ParameterList.List { + switch tgt := item.Target.(type) { + case *ast.Identifier: + // we already created those in the previous loop, skipping + default: + e.c.compileParameterPatternBinding(tgt) + } + } + if rest := e.expr.ParameterList.Rest; rest != nil { + e.c.compileParameterPatternBinding(rest) + } + } + + paramsCount := len(e.expr.ParameterList.List) + + e.c.scope.numArgs = paramsCount + body := e.expr.Body.List + funcs := e.c.extractFunctions(body) + s := e.c.scope + var calleeBinding *binding + preambleLen := 4 // enter, boxThis, createArgs, set + e.c.p.code = make([]instruction, preambleLen, 8) + + emitArgsRestMark := -1 + firstForwardRef := -1 + enterFunc2Mark := -1 + + if hasPatterns || hasInits { + if e.isExpr && e.expr.Name != nil { + if b, created := s.bindNameLexical(e.expr.Name.Name, false, 0); created { + b.isConst = true + calleeBinding = b + } + } + if calleeBinding != nil { + e.c.emit(loadCallee) + calleeBinding.emitInit() + } + for i, item := range e.expr.ParameterList.List { + if pattern, ok := item.Target.(ast.Pattern); ok { + i := i + e.c.compilePatternInitExpr(func() { + if firstForwardRef == -1 { + s.bindings[i].emitGet() + } else { + e.c.emit(loadStackLex(-i - 1)) + } + }, item.Initializer, item.Target.Idx0()).emitGetter(true) + e.c.emitPattern(pattern, func(target, init compiledExpr) { + e.c.emitPatternLexicalAssign(target, init, false) + }, false) + } else if item.Initializer != nil { + markGet := len(e.c.p.code) + e.c.emit(nil) + mark := len(e.c.p.code) + e.c.emit(nil) + 
e.c.compileExpression(item.Initializer).emitGetter(true) + if firstForwardRef == -1 && (s.isDynamic() || s.bindings[i].useCount() > 0) { + firstForwardRef = i + } + if firstForwardRef == -1 { + s.bindings[i].emitGetAt(markGet) + } else { + e.c.p.code[markGet] = loadStackLex(-i - 1) + } + s.bindings[i].emitInit() + e.c.p.code[mark] = jdefP(len(e.c.p.code) - mark) + } else { + if firstForwardRef == -1 && s.bindings[i].useCount() > 0 { + firstForwardRef = i + } + if firstForwardRef != -1 { + e.c.emit(loadStackLex(-i - 1)) + s.bindings[i].emitInit() + } + } + } + if rest := e.expr.ParameterList.Rest; rest != nil { + e.c.emitAssign(rest, e.c.compileEmitterExpr( + func() { + emitArgsRestMark = len(e.c.p.code) + e.c.emit(createArgsRestStack(paramsCount)) + }, rest.Idx0()), + func(target, init compiledExpr) { + e.c.emitPatternLexicalAssign(target, init, false) + }) + } + if firstForwardRef != -1 { + for _, b := range s.bindings { + b.inStash = true + } + s.argsInStash = true + s.needStash = true + } + + e.c.newBlockScope() + varScope := e.c.scope + varScope.variable = true + enterFunc2Mark = len(e.c.p.code) + e.c.emit(nil) + e.c.compileDeclList(e.expr.DeclarationList, true) + e.c.createFunctionBindings(funcs) + e.c.compileLexicalDeclarationsFuncBody(body, calleeBinding) + for _, b := range varScope.bindings { + if b.isVar { + if parentBinding := s.boundNames[b.name]; parentBinding != nil && parentBinding != calleeBinding { + parentBinding.emitGet() + b.emitSetP() + } + } + } + } else { + // To avoid triggering variable conflict when binding from non-strict direct eval(). + // Parameters are supposed to be in a parent scope, hence no conflict. 
+ for _, b := range s.bindings[:paramsCount] { + b.isVar = true + } + e.c.compileDeclList(e.expr.DeclarationList, true) + e.c.createFunctionBindings(funcs) + e.c.compileLexicalDeclarations(body, true) + if e.isExpr && e.expr.Name != nil { + if b, created := s.bindNameLexical(e.expr.Name.Name, false, 0); created { + b.isConst = true + calleeBinding = b + } + } + if calleeBinding != nil { + e.c.emit(loadCallee) + calleeBinding.emitInit() + } + } + + e.c.compileFunctions(funcs) + e.c.compileStatements(body, false) + + var last ast.Statement + if l := len(body); l > 0 { + last = body[l-1] + } + if _, ok := last.(*ast.ReturnStatement); !ok { + e.c.emit(loadUndef, ret) + } + + delta := 0 + code := e.c.p.code + + if calleeBinding != nil && !s.isDynamic() && calleeBinding.useCount() == 1 { + s.deleteBinding(calleeBinding) + preambleLen += 2 + } + + if !s.argsInStash && (s.argsNeeded || s.isDynamic()) { + s.moveArgsToStash() + } + + if s.argsNeeded { + b, created := s.bindNameLexical("arguments", false, 0) + if !created && !b.isVar { + s.argsNeeded = false + } else { + if s.strict { + b.isConst = true + } else { + b.isVar = true + } + pos := preambleLen - 2 + delta += 2 + if s.strict || hasPatterns || hasInits { + code[pos] = createArgsUnmapped(paramsCount) + } else { + code[pos] = createArgsMapped(paramsCount) + } + pos++ + b.markAccessPointAtScope(s, pos) + code[pos] = storeStashP(0) + } + } + + stashSize, stackSize := s.finaliseVarAlloc(0) + + if !s.strict && s.thisNeeded { + delta++ + code[preambleLen-delta] = boxThis + } + delta++ + delta = preambleLen - delta + var enter instruction + if stashSize > 0 || s.argsInStash { + if firstForwardRef == -1 { + enter1 := enterFunc{ + numArgs: uint32(paramsCount), + argsToStash: s.argsInStash, + stashSize: uint32(stashSize), + stackSize: uint32(stackSize), + extensible: s.dynamic, + } + if s.isDynamic() { + enter1.names = s.makeNamesMap() + } + enter = &enter1 + if enterFunc2Mark != -1 { + ef2 := &enterFuncBody{ + extensible: 
e.c.scope.dynamic, + } + e.c.updateEnterBlock(&ef2.enterBlock) + e.c.p.code[enterFunc2Mark] = ef2 + } + } else { + enter1 := enterFunc1{ + stashSize: uint32(stashSize), + numArgs: uint32(paramsCount), + argsToCopy: uint32(firstForwardRef), + extensible: s.dynamic, + } + if s.isDynamic() { + enter1.names = s.makeNamesMap() + } + enter = &enter1 + if enterFunc2Mark != -1 { + ef2 := &enterFuncBody{ + adjustStack: true, + extensible: e.c.scope.dynamic, + } + e.c.updateEnterBlock(&ef2.enterBlock) + e.c.p.code[enterFunc2Mark] = ef2 + } + } + if emitArgsRestMark != -1 { + e.c.p.code[emitArgsRestMark] = createArgsRestStash + } + } else { + enter = &enterFuncStashless{ + stackSize: uint32(stackSize), + args: uint32(paramsCount), + } + if enterFunc2Mark != -1 { + ef2 := &enterFuncBody{ + extensible: e.c.scope.dynamic, + } + e.c.updateEnterBlock(&ef2.enterBlock) + e.c.p.code[enterFunc2Mark] = ef2 + } + } + code[delta] = enter + if delta != 0 { + e.c.p.code = code[delta:] + for i := range e.c.p.srcMap { + e.c.p.srcMap[i].pc -= delta + } + s.adjustBase(-delta) + } + + strict := s.strict + p := e.c.p + // e.c.p.dumpCode() + if enterFunc2Mark != -1 { + e.c.popScope() + } + e.c.popScope() + e.c.p = savedPrg + e.c.emit(&newFunc{prg: p, length: uint32(length), name: name, srcStart: uint32(e.expr.Idx0() - 1), srcEnd: uint32(e.expr.Idx1() - 1), strict: strict}) + if !putOnStack { + e.c.emit(pop) + } +} + +func (c *compiler) compileFunctionLiteral(v *ast.FunctionLiteral, isExpr bool) *compiledFunctionLiteral { + strictBody := c.isStrictStatement(v.Body) + if v.Name != nil && (c.scope.strict || strictBody != nil) { + c.checkIdentifierLName(v.Name.Name, int(v.Name.Idx)-1) + } + r := &compiledFunctionLiteral{ + expr: v, + isExpr: isExpr, + strict: strictBody, + } + r.init(c, v.Idx0()) + return r +} + +func (e *compiledThisExpr) emitGetter(putOnStack bool) { + if putOnStack { + e.addSrcMap() + scope := e.c.scope + for ; scope != nil && !scope.function && !scope.eval; scope = scope.outer { 
+ } + + if scope != nil { + scope.thisNeeded = true + e.c.emit(loadStack(0)) + } else { + e.c.emit(loadGlobalObject) + } + } +} + +func (e *compiledNewExpr) emitGetter(putOnStack bool) { + if e.isVariadic { + e.c.emit(startVariadic) + } + e.callee.emitGetter(true) + for _, expr := range e.args { + expr.emitGetter(true) + } + e.addSrcMap() + if e.isVariadic { + e.c.emit(newVariadic, endVariadic) + } else { + e.c.emit(_new(len(e.args))) + } + if !putOnStack { + e.c.emit(pop) + } +} + +func (c *compiler) compileCallArgs(list []ast.Expression) (args []compiledExpr, isVariadic bool) { + args = make([]compiledExpr, len(list)) + for i, argExpr := range list { + if spread, ok := argExpr.(*ast.SpreadElement); ok { + args[i] = c.compileSpreadCallArgument(spread) + isVariadic = true + } else { + args[i] = c.compileExpression(argExpr) + } + } + return +} + +func (c *compiler) compileNewExpression(v *ast.NewExpression) compiledExpr { + args, isVariadic := c.compileCallArgs(v.ArgumentList) + r := &compiledNewExpr{ + compiledCallExpr: compiledCallExpr{ + callee: c.compileExpression(v.Callee), + args: args, + isVariadic: isVariadic, + }, + } + r.init(c, v.Idx0()) + return r +} + +func (e *compiledNewTarget) emitGetter(putOnStack bool) { + if putOnStack { + e.addSrcMap() + e.c.emit(loadNewTarget) + } +} + +func (c *compiler) compileMetaProperty(v *ast.MetaProperty) compiledExpr { + if v.Meta.Name == "new" || v.Property.Name != "target" { + r := &compiledNewTarget{} + r.init(c, v.Idx0()) + return r + } + c.throwSyntaxError(int(v.Idx)-1, "Unsupported meta property: %s.%s", v.Meta.Name, v.Property.Name) + return nil +} + +func (e *compiledSequenceExpr) emitGetter(putOnStack bool) { + if len(e.sequence) > 0 { + for i := 0; i < len(e.sequence)-1; i++ { + e.sequence[i].emitGetter(false) + } + e.sequence[len(e.sequence)-1].emitGetter(putOnStack) + } +} + +func (c *compiler) compileSequenceExpression(v *ast.SequenceExpression) compiledExpr { + s := make([]compiledExpr, len(v.Sequence)) + 
for i, expr := range v.Sequence { + s[i] = c.compileExpression(expr) + } + r := &compiledSequenceExpr{ + sequence: s, + } + var idx file.Idx + if len(v.Sequence) > 0 { + idx = v.Idx0() + } + r.init(c, idx) + return r +} + +func (c *compiler) emitThrow(v Value) { + if o, ok := v.(*Object); ok { + t := nilSafe(o.self.getStr("name", nil)).toString().String() + switch t { + case "TypeError": + c.emit(loadDynamic(t)) + msg := o.self.getStr("message", nil) + if msg != nil { + c.emit(loadVal(c.p.defineLiteralValue(msg))) + c.emit(_new(1)) + } else { + c.emit(_new(0)) + } + c.emit(throw) + return + } + } + panic(fmt.Errorf("unknown exception type thrown while evaliating constant expression: %s", v.String())) +} + +func (c *compiler) emitConst(expr compiledExpr, putOnStack bool) { + v, ex := c.evalConst(expr) + if ex == nil { + if putOnStack { + c.emit(loadVal(c.p.defineLiteralValue(v))) + } + } else { + c.emitThrow(ex.val) + } +} + +func (c *compiler) emitExpr(expr compiledExpr, putOnStack bool) { + if expr.constant() { + c.emitConst(expr, putOnStack) + } else { + expr.emitGetter(putOnStack) + } +} + +func (c *compiler) evalConst(expr compiledExpr) (Value, *Exception) { + if expr, ok := expr.(*compiledLiteral); ok { + return expr.val, nil + } + if c.evalVM == nil { + c.evalVM = New().vm + } + var savedPrg *Program + createdPrg := false + if c.evalVM.prg == nil { + c.evalVM.prg = &Program{} + savedPrg = c.p + c.p = c.evalVM.prg + createdPrg = true + } + savedPc := len(c.p.code) + expr.emitGetter(true) + c.emit(halt) + c.evalVM.pc = savedPc + ex := c.evalVM.runTry() + if createdPrg { + c.evalVM.prg = nil + c.evalVM.pc = 0 + c.p = savedPrg + } else { + c.evalVM.prg.code = c.evalVM.prg.code[:savedPc] + c.p.code = c.evalVM.prg.code + } + if ex == nil { + return c.evalVM.pop(), nil + } + return nil, ex +} + +func (e *compiledUnaryExpr) constant() bool { + return e.operand.constant() +} + +func (e *compiledUnaryExpr) emitGetter(putOnStack bool) { + var prepare, body func() + + 
toNumber := func() { + e.c.emit(toNumber) + } + + switch e.operator { + case token.NOT: + e.operand.emitGetter(true) + e.c.emit(not) + goto end + case token.BITWISE_NOT: + e.operand.emitGetter(true) + e.c.emit(bnot) + goto end + case token.TYPEOF: + if o, ok := e.operand.(compiledExprOrRef); ok { + o.emitGetterOrRef() + } else { + e.operand.emitGetter(true) + } + e.c.emit(typeof) + goto end + case token.DELETE: + e.operand.deleteExpr().emitGetter(putOnStack) + return + case token.MINUS: + e.c.emitExpr(e.operand, true) + e.c.emit(neg) + goto end + case token.PLUS: + e.c.emitExpr(e.operand, true) + e.c.emit(plus) + goto end + case token.INCREMENT: + prepare = toNumber + body = func() { + e.c.emit(inc) + } + case token.DECREMENT: + prepare = toNumber + body = func() { + e.c.emit(dec) + } + case token.VOID: + e.c.emitExpr(e.operand, false) + if putOnStack { + e.c.emit(loadUndef) + } + return + default: + panic(fmt.Errorf("Unknown unary operator: %s", e.operator.String())) + } + + e.operand.emitUnary(prepare, body, e.postfix, putOnStack) + return + +end: + if !putOnStack { + e.c.emit(pop) + } +} + +func (c *compiler) compileUnaryExpression(v *ast.UnaryExpression) compiledExpr { + r := &compiledUnaryExpr{ + operand: c.compileExpression(v.Operand), + operator: v.Operator, + postfix: v.Postfix, + } + r.init(c, v.Idx0()) + return r +} + +func (e *compiledConditionalExpr) emitGetter(putOnStack bool) { + e.test.emitGetter(true) + j := len(e.c.p.code) + e.c.emit(nil) + e.consequent.emitGetter(putOnStack) + j1 := len(e.c.p.code) + e.c.emit(nil) + e.c.p.code[j] = jne(len(e.c.p.code) - j) + e.alternate.emitGetter(putOnStack) + e.c.p.code[j1] = jump(len(e.c.p.code) - j1) +} + +func (c *compiler) compileConditionalExpression(v *ast.ConditionalExpression) compiledExpr { + r := &compiledConditionalExpr{ + test: c.compileExpression(v.Test), + consequent: c.compileExpression(v.Consequent), + alternate: c.compileExpression(v.Alternate), + } + r.init(c, v.Idx0()) + return r +} + +func (e 
*compiledLogicalOr) constant() bool { + if e.left.constant() { + if v, ex := e.c.evalConst(e.left); ex == nil { + if v.ToBoolean() { + return true + } + return e.right.constant() + } else { + return true + } + } + + return false +} + +func (e *compiledLogicalOr) emitGetter(putOnStack bool) { + if e.left.constant() { + if v, ex := e.c.evalConst(e.left); ex == nil { + if !v.ToBoolean() { + e.c.emitExpr(e.right, putOnStack) + } else { + if putOnStack { + e.c.emit(loadVal(e.c.p.defineLiteralValue(v))) + } + } + } else { + e.c.emitThrow(ex.val) + } + return + } + e.c.emitExpr(e.left, true) + j := len(e.c.p.code) + e.addSrcMap() + e.c.emit(nil) + e.c.emit(pop) + e.c.emitExpr(e.right, true) + e.c.p.code[j] = jeq1(len(e.c.p.code) - j) + if !putOnStack { + e.c.emit(pop) + } +} + +func (e *compiledLogicalAnd) constant() bool { + if e.left.constant() { + if v, ex := e.c.evalConst(e.left); ex == nil { + if !v.ToBoolean() { + return true + } else { + return e.right.constant() + } + } else { + return true + } + } + + return false +} + +func (e *compiledLogicalAnd) emitGetter(putOnStack bool) { + var j int + if e.left.constant() { + if v, ex := e.c.evalConst(e.left); ex == nil { + if !v.ToBoolean() { + e.c.emit(loadVal(e.c.p.defineLiteralValue(v))) + } else { + e.c.emitExpr(e.right, putOnStack) + } + } else { + e.c.emitThrow(ex.val) + } + return + } + e.left.emitGetter(true) + j = len(e.c.p.code) + e.addSrcMap() + e.c.emit(nil) + e.c.emit(pop) + e.c.emitExpr(e.right, true) + e.c.p.code[j] = jneq1(len(e.c.p.code) - j) + if !putOnStack { + e.c.emit(pop) + } +} + +func (e *compiledBinaryExpr) constant() bool { + return e.left.constant() && e.right.constant() +} + +func (e *compiledBinaryExpr) emitGetter(putOnStack bool) { + e.c.emitExpr(e.left, true) + e.c.emitExpr(e.right, true) + e.addSrcMap() + + switch e.operator { + case token.LESS: + e.c.emit(op_lt) + case token.GREATER: + e.c.emit(op_gt) + case token.LESS_OR_EQUAL: + e.c.emit(op_lte) + case token.GREATER_OR_EQUAL: + 
e.c.emit(op_gte) + case token.EQUAL: + e.c.emit(op_eq) + case token.NOT_EQUAL: + e.c.emit(op_neq) + case token.STRICT_EQUAL: + e.c.emit(op_strict_eq) + case token.STRICT_NOT_EQUAL: + e.c.emit(op_strict_neq) + case token.PLUS: + e.c.emit(add) + case token.MINUS: + e.c.emit(sub) + case token.MULTIPLY: + e.c.emit(mul) + case token.SLASH: + e.c.emit(div) + case token.REMAINDER: + e.c.emit(mod) + case token.AND: + e.c.emit(and) + case token.OR: + e.c.emit(or) + case token.EXCLUSIVE_OR: + e.c.emit(xor) + case token.INSTANCEOF: + e.c.emit(op_instanceof) + case token.IN: + e.c.emit(op_in) + case token.SHIFT_LEFT: + e.c.emit(sal) + case token.SHIFT_RIGHT: + e.c.emit(sar) + case token.UNSIGNED_SHIFT_RIGHT: + e.c.emit(shr) + default: + panic(fmt.Errorf("Unknown operator: %s", e.operator.String())) + } + + if !putOnStack { + e.c.emit(pop) + } +} + +func (c *compiler) compileBinaryExpression(v *ast.BinaryExpression) compiledExpr { + + switch v.Operator { + case token.LOGICAL_OR: + return c.compileLogicalOr(v.Left, v.Right, v.Idx0()) + case token.LOGICAL_AND: + return c.compileLogicalAnd(v.Left, v.Right, v.Idx0()) + } + + r := &compiledBinaryExpr{ + left: c.compileExpression(v.Left), + right: c.compileExpression(v.Right), + operator: v.Operator, + } + r.init(c, v.Idx0()) + return r +} + +func (c *compiler) compileLogicalOr(left, right ast.Expression, idx file.Idx) compiledExpr { + r := &compiledLogicalOr{ + left: c.compileExpression(left), + right: c.compileExpression(right), + } + r.init(c, idx) + return r +} + +func (c *compiler) compileLogicalAnd(left, right ast.Expression, idx file.Idx) compiledExpr { + r := &compiledLogicalAnd{ + left: c.compileExpression(left), + right: c.compileExpression(right), + } + r.init(c, idx) + return r +} + +func (e *compiledObjectLiteral) emitGetter(putOnStack bool) { + e.addSrcMap() + e.c.emit(newObject) + for _, prop := range e.expr.Value { + switch prop := prop.(type) { + case *ast.PropertyKeyed: + keyExpr := e.c.compileExpression(prop.Key) + 
computed := false + var key unistring.String + switch keyExpr := keyExpr.(type) { + case *compiledLiteral: + key = keyExpr.val.string() + default: + keyExpr.emitGetter(true) + computed = true + //e.c.throwSyntaxError(e.offset, "non-literal properties in object literal are not supported yet") + } + valueExpr := e.c.compileExpression(prop.Value) + var anonFn *compiledFunctionLiteral + if fn, ok := valueExpr.(*compiledFunctionLiteral); ok { + if fn.expr.Name == nil { + anonFn = fn + fn.lhsName = key + } + } + if computed { + e.c.emit(_toPropertyKey{}) + valueExpr.emitGetter(true) + switch prop.Kind { + case ast.PropertyKindValue, ast.PropertyKindMethod: + if anonFn != nil { + e.c.emit(setElem1Named) + } else { + e.c.emit(setElem1) + } + case ast.PropertyKindGet: + e.c.emit(setPropGetter1) + case ast.PropertyKindSet: + e.c.emit(setPropSetter1) + default: + panic(fmt.Errorf("unknown property kind: %s", prop.Kind)) + } + } else { + if anonFn != nil { + anonFn.lhsName = key + } + valueExpr.emitGetter(true) + switch prop.Kind { + case ast.PropertyKindValue: + if key == __proto__ { + e.c.emit(setProto) + } else { + e.c.emit(setProp1(key)) + } + case ast.PropertyKindMethod: + e.c.emit(setProp1(key)) + case ast.PropertyKindGet: + e.c.emit(setPropGetter(key)) + case ast.PropertyKindSet: + e.c.emit(setPropSetter(key)) + default: + panic(fmt.Errorf("unknown property kind: %s", prop.Kind)) + } + } + case *ast.PropertyShort: + key := prop.Name.Name + if prop.Initializer != nil { + e.c.throwSyntaxError(int(prop.Initializer.Idx0())-1, "Invalid shorthand property initializer") + } + if e.c.scope.strict && key == "let" { + e.c.throwSyntaxError(e.offset, "'let' cannot be used as a shorthand property in strict mode") + } + e.c.compileIdentifierExpression(&prop.Name).emitGetter(true) + e.c.emit(setProp1(key)) + case *ast.SpreadElement: + e.c.compileExpression(prop.Expression).emitGetter(true) + e.c.emit(copySpread) + default: + panic(fmt.Errorf("unknown Property type: %T", prop)) + } + } 
+ if !putOnStack { + e.c.emit(pop) + } +} + +func (c *compiler) compileObjectLiteral(v *ast.ObjectLiteral) compiledExpr { + r := &compiledObjectLiteral{ + expr: v, + } + r.init(c, v.Idx0()) + return r +} + +func (e *compiledArrayLiteral) emitGetter(putOnStack bool) { + e.addSrcMap() + hasSpread := false + mark := len(e.c.p.code) + e.c.emit(nil) + for _, v := range e.expr.Value { + if spread, ok := v.(*ast.SpreadElement); ok { + hasSpread = true + e.c.compileExpression(spread.Expression).emitGetter(true) + e.c.emit(pushArraySpread) + } else { + if v != nil { + e.c.compileExpression(v).emitGetter(true) + } else { + e.c.emit(loadNil) + } + e.c.emit(pushArrayItem) + } + } + var objCount uint32 + if !hasSpread { + objCount = uint32(len(e.expr.Value)) + } + e.c.p.code[mark] = newArray(objCount) + if !putOnStack { + e.c.emit(pop) + } +} + +func (c *compiler) compileArrayLiteral(v *ast.ArrayLiteral) compiledExpr { + r := &compiledArrayLiteral{ + expr: v, + } + r.init(c, v.Idx0()) + return r +} + +func (e *compiledRegexpLiteral) emitGetter(putOnStack bool) { + if putOnStack { + pattern, err := compileRegexp(e.expr.Pattern, e.expr.Flags) + if err != nil { + e.c.throwSyntaxError(e.offset, err.Error()) + } + + e.c.emit(&newRegexp{pattern: pattern, src: newStringValue(e.expr.Pattern)}) + } +} + +func (c *compiler) compileRegexpLiteral(v *ast.RegExpLiteral) compiledExpr { + r := &compiledRegexpLiteral{ + expr: v, + } + r.init(c, v.Idx0()) + return r +} + +func (e *compiledCallExpr) emitGetter(putOnStack bool) { + var calleeName unistring.String + if e.isVariadic { + e.c.emit(startVariadic) + } + switch callee := e.callee.(type) { + case *compiledDotExpr: + callee.left.emitGetter(true) + e.c.emit(dup) + e.c.emit(getPropCallee(callee.name)) + case *compiledBracketExpr: + callee.left.emitGetter(true) + e.c.emit(dup) + callee.member.emitGetter(true) + e.c.emit(getElemCallee) + case *compiledIdentifierExpr: + calleeName = callee.name + callee.emitGetterAndCallee() + default: + 
e.c.emit(loadUndef) + callee.emitGetter(true) + } + + for _, expr := range e.args { + expr.emitGetter(true) + } + + e.addSrcMap() + if calleeName == "eval" { + foundFunc, foundVar := false, false + for sc := e.c.scope; sc != nil; sc = sc.outer { + if !foundFunc && sc.function { + foundFunc = true + sc.thisNeeded, sc.argsNeeded = true, true + } + if !foundVar && (sc.variable || sc.function) { + foundVar = true + if !sc.strict { + sc.dynamic = true + } + } + sc.dynLookup = true + } + + if e.c.scope.strict { + if e.isVariadic { + e.c.emit(callEvalVariadicStrict) + } else { + e.c.emit(callEvalStrict(len(e.args))) + } + } else { + if e.isVariadic { + e.c.emit(callEvalVariadic) + } else { + e.c.emit(callEval(len(e.args))) + } + } + } else { + if e.isVariadic { + e.c.emit(callVariadic) + } else { + e.c.emit(call(len(e.args))) + } + } + if e.isVariadic { + e.c.emit(endVariadic) + } + if !putOnStack { + e.c.emit(pop) + } +} + +func (e *compiledCallExpr) deleteExpr() compiledExpr { + r := &defaultDeleteExpr{ + expr: e, + } + r.init(e.c, file.Idx(e.offset+1)) + return r +} + +func (c *compiler) compileSpreadCallArgument(spread *ast.SpreadElement) compiledExpr { + r := &compiledSpreadCallArgument{ + expr: c.compileExpression(spread.Expression), + } + r.init(c, spread.Idx0()) + return r +} + +func (c *compiler) compileCallExpression(v *ast.CallExpression) compiledExpr { + + args := make([]compiledExpr, len(v.ArgumentList)) + isVariadic := false + for i, argExpr := range v.ArgumentList { + if spread, ok := argExpr.(*ast.SpreadElement); ok { + args[i] = c.compileSpreadCallArgument(spread) + isVariadic = true + } else { + args[i] = c.compileExpression(argExpr) + } + } + + r := &compiledCallExpr{ + args: args, + callee: c.compileExpression(v.Callee), + isVariadic: isVariadic, + } + r.init(c, v.LeftParenthesis) + return r +} + +func (c *compiler) compileIdentifierExpression(v *ast.Identifier) compiledExpr { + if c.scope.strict { + c.checkIdentifierName(v.Name, int(v.Idx)-1) + } + + 
r := &compiledIdentifierExpr{ + name: v.Name, + } + r.offset = int(v.Idx) - 1 + r.init(c, v.Idx0()) + return r +} + +func (c *compiler) compileNumberLiteral(v *ast.NumberLiteral) compiledExpr { + if c.scope.strict && octalRegexp.MatchString(v.Literal) { + c.throwSyntaxError(int(v.Idx)-1, "Octal literals are not allowed in strict mode") + panic("Unreachable") + } + var val Value + switch num := v.Value.(type) { + case int64: + val = intToValue(num) + case float64: + val = floatToValue(num) + default: + panic(fmt.Errorf("Unsupported number literal type: %T", v.Value)) + } + r := &compiledLiteral{ + val: val, + } + r.init(c, v.Idx0()) + return r +} + +func (c *compiler) compileStringLiteral(v *ast.StringLiteral) compiledExpr { + r := &compiledLiteral{ + val: stringValueFromRaw(v.Value), + } + r.init(c, v.Idx0()) + return r +} + +func (c *compiler) compileBooleanLiteral(v *ast.BooleanLiteral) compiledExpr { + var val Value + if v.Value { + val = valueTrue + } else { + val = valueFalse + } + + r := &compiledLiteral{ + val: val, + } + r.init(c, v.Idx0()) + return r +} + +func (c *compiler) compileAssignExpression(v *ast.AssignExpression) compiledExpr { + // log.Printf("compileAssignExpression(): %+v", v) + + r := &compiledAssignExpr{ + left: c.compileExpression(v.Left), + right: c.compileExpression(v.Right), + operator: v.Operator, + } + r.init(c, v.Idx0()) + return r +} + +func (e *compiledEnumGetExpr) emitGetter(putOnStack bool) { + e.c.emit(enumGet) + if !putOnStack { + e.c.emit(pop) + } +} + +func (c *compiler) compileObjectAssignmentPattern(v *ast.ObjectPattern) compiledExpr { + r := &compiledObjectAssignmentPattern{ + expr: v, + } + r.init(c, v.Idx0()) + return r +} + +func (e *compiledObjectAssignmentPattern) emitGetter(putOnStack bool) { + if putOnStack { + e.c.emit(loadUndef) + } +} + +func (c *compiler) compileArrayAssignmentPattern(v *ast.ArrayPattern) compiledExpr { + r := &compiledArrayAssignmentPattern{ + expr: v, + } + r.init(c, v.Idx0()) + return r +} + 
+func (e *compiledArrayAssignmentPattern) emitGetter(putOnStack bool) { + if putOnStack { + e.c.emit(loadUndef) + } +} + +func (c *compiler) emitNamed(expr compiledExpr, name unistring.String) { + if en, ok := expr.(interface { + emitNamed(name unistring.String) + }); ok { + en.emitNamed(name) + } else { + expr.emitGetter(true) + } +} + +func (e *compiledFunctionLiteral) emitNamed(name unistring.String) { + e.lhsName = name + e.emitGetter(true) +} + +func (c *compiler) emitPattern(pattern ast.Pattern, emitter func(target, init compiledExpr), putOnStack bool) { + switch pattern := pattern.(type) { + case *ast.ObjectPattern: + c.emitObjectPattern(pattern, emitter, putOnStack) + case *ast.ArrayPattern: + c.emitArrayPattern(pattern, emitter, putOnStack) + default: + panic(fmt.Errorf("unsupported Pattern: %T", pattern)) + } +} + +func (c *compiler) emitAssign(target ast.Expression, init compiledExpr, emitAssignSimple func(target, init compiledExpr)) { + pattern, isPattern := target.(ast.Pattern) + if isPattern { + init.emitGetter(true) + c.emitPattern(pattern, emitAssignSimple, false) + } else { + emitAssignSimple(c.compileExpression(target), init) + } +} + +func (c *compiler) emitObjectPattern(pattern *ast.ObjectPattern, emitAssign func(target, init compiledExpr), putOnStack bool) { + if pattern.Rest != nil { + c.emit(createDestructSrc) + } else { + c.emit(checkObjectCoercible) + } + for _, prop := range pattern.Properties { + switch prop := prop.(type) { + case *ast.PropertyShort: + c.emit(dup) + emitAssign(c.compileIdentifierExpression(&prop.Name), c.compilePatternInitExpr(func() { + c.emit(getProp(prop.Name.Name)) + }, prop.Initializer, prop.Idx0())) + case *ast.PropertyKeyed: + c.emit(dup) + c.compileExpression(prop.Key).emitGetter(true) + c.emit(_toPropertyKey{}) + var target ast.Expression + var initializer ast.Expression + if e, ok := prop.Value.(*ast.AssignExpression); ok { + target = e.Left + initializer = e.Right + } else { + target = prop.Value + } + 
c.emitAssign(target, c.compilePatternInitExpr(func() { + c.emit(getKey) + }, initializer, prop.Idx0()), emitAssign) + default: + c.throwSyntaxError(int(prop.Idx0()-1), "Unsupported AssignmentProperty type: %T", prop) + } + } + if pattern.Rest != nil { + emitAssign(c.compileExpression(pattern.Rest), c.compileEmitterExpr(func() { + c.emit(copyRest) + }, pattern.Rest.Idx0())) + c.emit(pop) + } + if !putOnStack { + c.emit(pop) + } +} + +func (c *compiler) emitArrayPattern(pattern *ast.ArrayPattern, emitAssign func(target, init compiledExpr), putOnStack bool) { + var marks []int + c.emit(iterate) + for _, elt := range pattern.Elements { + switch elt := elt.(type) { + case nil: + marks = append(marks, len(c.p.code)) + c.emit(nil) + case *ast.AssignExpression: + c.emitAssign(elt.Left, c.compilePatternInitExpr(func() { + marks = append(marks, len(c.p.code)) + c.emit(nil, enumGet) + }, elt.Right, elt.Idx0()), emitAssign) + default: + c.emitAssign(elt, c.compileEmitterExpr(func() { + marks = append(marks, len(c.p.code)) + c.emit(nil, enumGet) + }, elt.Idx0()), emitAssign) + } + } + if pattern.Rest != nil { + c.emitAssign(pattern.Rest, c.compileEmitterExpr(func() { + c.emit(newArrayFromIter) + }, pattern.Rest.Idx0()), emitAssign) + } else { + c.emit(enumPopClose) + } + mark1 := len(c.p.code) + c.emit(nil) + + for i, elt := range pattern.Elements { + switch elt := elt.(type) { + case nil: + c.p.code[marks[i]] = iterNext(len(c.p.code) - marks[i]) + case *ast.Identifier: + emitAssign(c.compileIdentifierExpression(elt), c.compileEmitterExpr(func() { + c.p.code[marks[i]] = iterNext(len(c.p.code) - marks[i]) + c.emit(loadUndef) + }, elt.Idx0())) + case *ast.AssignExpression: + c.emitAssign(elt.Left, c.compileNamedEmitterExpr(func(name unistring.String) { + c.p.code[marks[i]] = iterNext(len(c.p.code) - marks[i]) + c.emitNamed(c.compileExpression(elt.Right), name) + }, elt.Idx0()), emitAssign) + default: + c.emitAssign(elt, c.compileEmitterExpr( + func() { + c.p.code[marks[i]] = 
iterNext(len(c.p.code) - marks[i]) + c.emit(loadUndef) + }, elt.Idx0()), emitAssign) + } + } + c.emit(enumPop) + if pattern.Rest != nil { + c.emitAssign(pattern.Rest, c.compileExpression( + &ast.ArrayLiteral{ + LeftBracket: pattern.Rest.Idx0(), + RightBracket: pattern.Rest.Idx0(), + }), emitAssign) + } + c.p.code[mark1] = jump(len(c.p.code) - mark1) + + if !putOnStack { + c.emit(pop) + } +} + +func (e *compiledObjectAssignmentPattern) emitSetter(valueExpr compiledExpr, putOnStack bool) { + valueExpr.emitGetter(true) + e.c.emitObjectPattern(e.expr, e.c.emitPatternAssign, putOnStack) +} + +func (e *compiledArrayAssignmentPattern) emitSetter(valueExpr compiledExpr, putOnStack bool) { + valueExpr.emitGetter(true) + e.c.emitArrayPattern(e.expr, e.c.emitPatternAssign, putOnStack) +} + +type compiledPatternInitExpr struct { + baseCompiledExpr + emitSrc func() + def compiledExpr +} + +func (e *compiledPatternInitExpr) emitGetter(putOnStack bool) { + if !putOnStack { + return + } + e.emitSrc() + if e.def != nil { + mark := len(e.c.p.code) + e.c.emit(nil) + e.def.emitGetter(true) + e.c.p.code[mark] = jdef(len(e.c.p.code) - mark) + } +} + +func (e *compiledPatternInitExpr) emitNamed(name unistring.String) { + e.emitSrc() + if e.def != nil { + mark := len(e.c.p.code) + e.c.emit(nil) + e.c.emitNamed(e.def, name) + e.c.p.code[mark] = jdef(len(e.c.p.code) - mark) + } +} + +func (c *compiler) compilePatternInitExpr(emitSrc func(), def ast.Expression, idx file.Idx) compiledExpr { + r := &compiledPatternInitExpr{ + emitSrc: emitSrc, + def: c.compileExpression(def), + } + r.init(c, idx) + return r +} + +type compiledEmitterExpr struct { + baseCompiledExpr + emitter func() + namedEmitter func(name unistring.String) +} + +func (e *compiledEmitterExpr) emitGetter(putOnStack bool) { + if e.emitter != nil { + e.emitter() + } else { + e.namedEmitter("") + } + if !putOnStack { + e.c.emit(pop) + } +} + +func (e *compiledEmitterExpr) emitNamed(name unistring.String) { + if e.namedEmitter != 
nil { + e.namedEmitter(name) + } else { + e.emitter() + } +} + +func (c *compiler) compileEmitterExpr(emitter func(), idx file.Idx) *compiledEmitterExpr { + r := &compiledEmitterExpr{ + emitter: emitter, + } + r.init(c, idx) + return r +} + +func (c *compiler) compileNamedEmitterExpr(namedEmitter func(unistring.String), idx file.Idx) *compiledEmitterExpr { + r := &compiledEmitterExpr{ + namedEmitter: namedEmitter, + } + r.init(c, idx) + return r +} + +func (e *compiledSpreadCallArgument) emitGetter(putOnStack bool) { + e.expr.emitGetter(putOnStack) + if putOnStack { + e.c.emit(pushSpread) + } +} diff --git a/vendor/github.com/dop251/goja/compiler_stmt.go b/vendor/github.com/dop251/goja/compiler_stmt.go new file mode 100644 index 0000000000..dfc4bc9ad2 --- /dev/null +++ b/vendor/github.com/dop251/goja/compiler_stmt.go @@ -0,0 +1,1099 @@ +package goja + +import ( + "fmt" + "github.com/dop251/goja/ast" + "github.com/dop251/goja/file" + "github.com/dop251/goja/token" + "github.com/dop251/goja/unistring" +) + +func (c *compiler) compileStatement(v ast.Statement, needResult bool) { + // log.Printf("compileStatement(): %T", v) + + switch v := v.(type) { + case *ast.BlockStatement: + c.compileBlockStatement(v, needResult) + case *ast.ExpressionStatement: + c.compileExpressionStatement(v, needResult) + case *ast.VariableStatement: + c.compileVariableStatement(v) + case *ast.LexicalDeclaration: + c.compileLexicalDeclaration(v) + case *ast.ReturnStatement: + c.compileReturnStatement(v) + case *ast.IfStatement: + c.compileIfStatement(v, needResult) + case *ast.DoWhileStatement: + c.compileDoWhileStatement(v, needResult) + case *ast.ForStatement: + c.compileForStatement(v, needResult) + case *ast.ForInStatement: + c.compileForInStatement(v, needResult) + case *ast.ForOfStatement: + c.compileForOfStatement(v, needResult) + case *ast.WhileStatement: + c.compileWhileStatement(v, needResult) + case *ast.BranchStatement: + c.compileBranchStatement(v) + case *ast.TryStatement: + 
c.compileTryStatement(v, needResult) + case *ast.ThrowStatement: + c.compileThrowStatement(v) + case *ast.SwitchStatement: + c.compileSwitchStatement(v, needResult) + case *ast.LabelledStatement: + c.compileLabeledStatement(v, needResult) + case *ast.EmptyStatement: + c.compileEmptyStatement(needResult) + case *ast.FunctionDeclaration: + c.compileStandaloneFunctionDecl(v) + // note functions inside blocks are hoisted to the top of the block and are compiled using compileFunctions() + case *ast.WithStatement: + c.compileWithStatement(v, needResult) + case *ast.DebuggerStatement: + default: + panic(fmt.Errorf("Unknown statement type: %T", v)) + } +} + +func (c *compiler) compileLabeledStatement(v *ast.LabelledStatement, needResult bool) { + label := v.Label.Name + if c.scope.strict { + c.checkIdentifierName(label, int(v.Label.Idx)-1) + } + for b := c.block; b != nil; b = b.outer { + if b.label == label { + c.throwSyntaxError(int(v.Label.Idx-1), "Label '%s' has already been declared", label) + } + } + switch s := v.Statement.(type) { + case *ast.ForInStatement: + c.compileLabeledForInStatement(s, needResult, label) + case *ast.ForOfStatement: + c.compileLabeledForOfStatement(s, needResult, label) + case *ast.ForStatement: + c.compileLabeledForStatement(s, needResult, label) + case *ast.WhileStatement: + c.compileLabeledWhileStatement(s, needResult, label) + case *ast.DoWhileStatement: + c.compileLabeledDoWhileStatement(s, needResult, label) + default: + c.compileGenericLabeledStatement(s, needResult, label) + } +} + +func (c *compiler) updateEnterBlock(enter *enterBlock) { + scope := c.scope + stashSize, stackSize := 0, 0 + if scope.dynLookup { + stashSize = len(scope.bindings) + enter.names = scope.makeNamesMap() + } else { + for _, b := range scope.bindings { + if b.inStash { + stashSize++ + } else { + stackSize++ + } + } + } + enter.stashSize, enter.stackSize = uint32(stashSize), uint32(stackSize) +} + +func (c *compiler) compileTryStatement(v *ast.TryStatement, 
needResult bool) { + c.block = &block{ + typ: blockTry, + outer: c.block, + } + var lp int + var bodyNeedResult bool + var finallyBreaking *block + if v.Finally != nil { + lp, finallyBreaking = c.scanStatements(v.Finally.List) + } + if finallyBreaking != nil { + c.block.breaking = finallyBreaking + if lp == -1 { + bodyNeedResult = finallyBreaking.needResult + } + } else { + bodyNeedResult = needResult + } + lbl := len(c.p.code) + c.emit(nil) + if needResult { + c.emit(clearResult) + } + c.compileBlockStatement(v.Body, bodyNeedResult) + c.emit(halt) + lbl2 := len(c.p.code) + c.emit(nil) + var catchOffset int + if v.Catch != nil { + catchOffset = len(c.p.code) - lbl + if v.Catch.Parameter != nil { + c.block = &block{ + typ: blockScope, + outer: c.block, + } + c.newBlockScope() + list := v.Catch.Body.List + funcs := c.extractFunctions(list) + if _, ok := v.Catch.Parameter.(ast.Pattern); ok { + // add anonymous binding for the catch parameter, note it must be first + c.scope.addBinding(int(v.Catch.Idx0()) - 1) + } + c.createBindings(v.Catch.Parameter, func(name unistring.String, offset int) { + if c.scope.strict { + switch name { + case "arguments", "eval": + c.throwSyntaxError(offset, "Catch variable may not be eval or arguments in strict mode") + } + } + c.scope.bindNameLexical(name, true, offset) + }) + enter := &enterBlock{} + c.emit(enter) + if pattern, ok := v.Catch.Parameter.(ast.Pattern); ok { + c.scope.bindings[0].emitGet() + c.emitPattern(pattern, func(target, init compiledExpr) { + c.emitPatternLexicalAssign(target, init, false) + }, false) + } + for _, decl := range funcs { + c.scope.bindNameLexical(decl.Function.Name.Name, true, int(decl.Function.Name.Idx1())-1) + } + c.compileLexicalDeclarations(list, true) + c.compileFunctions(funcs) + c.compileStatements(list, bodyNeedResult) + c.leaveScopeBlock(enter) + if c.scope.dynLookup || c.scope.bindings[0].inStash { + c.p.code[lbl+catchOffset] = &enterCatchBlock{ + names: enter.names, + stashSize: 
enter.stashSize, + stackSize: enter.stackSize, + } + } else { + enter.stackSize-- + } + c.popScope() + } else { + c.emit(pop) + c.compileBlockStatement(v.Catch.Body, bodyNeedResult) + } + c.emit(halt) + } + var finallyOffset int + if v.Finally != nil { + lbl1 := len(c.p.code) + c.emit(nil) + finallyOffset = len(c.p.code) - lbl + c.compileBlockStatement(v.Finally, false) + c.emit(halt, retFinally) + + c.p.code[lbl1] = jump(len(c.p.code) - lbl1) + } + c.p.code[lbl] = try{catchOffset: int32(catchOffset), finallyOffset: int32(finallyOffset)} + c.p.code[lbl2] = jump(len(c.p.code) - lbl2) + c.leaveBlock() +} + +func (c *compiler) compileThrowStatement(v *ast.ThrowStatement) { + //c.p.srcMap = append(c.p.srcMap, srcMapItem{pc: len(c.p.code), srcPos: int(v.Throw) - 1}) + c.compileExpression(v.Argument).emitGetter(true) + c.emit(throw) +} + +func (c *compiler) compileDoWhileStatement(v *ast.DoWhileStatement, needResult bool) { + c.compileLabeledDoWhileStatement(v, needResult, "") +} + +func (c *compiler) compileLabeledDoWhileStatement(v *ast.DoWhileStatement, needResult bool, label unistring.String) { + c.block = &block{ + typ: blockLoop, + outer: c.block, + label: label, + needResult: needResult, + } + + start := len(c.p.code) + c.compileStatement(v.Body, needResult) + c.block.cont = len(c.p.code) + c.emitExpr(c.compileExpression(v.Test), true) + c.emit(jeq(start - len(c.p.code))) + c.leaveBlock() +} + +func (c *compiler) compileForStatement(v *ast.ForStatement, needResult bool) { + c.compileLabeledForStatement(v, needResult, "") +} + +func (c *compiler) compileForHeadLexDecl(decl *ast.LexicalDeclaration, needResult bool) *enterBlock { + c.block = &block{ + typ: blockIterScope, + outer: c.block, + needResult: needResult, + } + + c.newBlockScope() + enterIterBlock := &enterBlock{} + c.emit(enterIterBlock) + c.createLexicalBindings(decl) + c.compileLexicalDeclaration(decl) + return enterIterBlock +} + +func (c *compiler) compileLabeledForStatement(v *ast.ForStatement, 
needResult bool, label unistring.String) { + loopBlock := &block{ + typ: blockLoop, + outer: c.block, + label: label, + needResult: needResult, + } + c.block = loopBlock + + var enterIterBlock *enterBlock + switch init := v.Initializer.(type) { + case nil: + // no-op + case *ast.ForLoopInitializerLexicalDecl: + enterIterBlock = c.compileForHeadLexDecl(&init.LexicalDeclaration, needResult) + case *ast.ForLoopInitializerVarDeclList: + for _, expr := range init.List { + c.compileVarBinding(expr) + } + case *ast.ForLoopInitializerExpression: + c.compileExpression(init.Expression).emitGetter(false) + default: + panic(fmt.Sprintf("Unsupported for loop initializer: %T", init)) + } + + if needResult { + c.emit(clearResult) // initial result + } + + if enterIterBlock != nil { + c.emit(jump(1)) + } + + start := len(c.p.code) + var j int + testConst := false + if v.Test != nil { + expr := c.compileExpression(v.Test) + if expr.constant() { + r, ex := c.evalConst(expr) + if ex == nil { + if r.ToBoolean() { + testConst = true + } else { + leave := c.enterDummyMode() + c.compileStatement(v.Body, false) + if v.Update != nil { + c.compileExpression(v.Update).emitGetter(false) + } + leave() + goto end + } + } else { + expr.addSrcMap() + c.emitThrow(ex.val) + goto end + } + } else { + expr.emitGetter(true) + j = len(c.p.code) + c.emit(nil) + } + } + if needResult { + c.emit(clearResult) + } + c.compileStatement(v.Body, needResult) + loopBlock.cont = len(c.p.code) + if enterIterBlock != nil { + c.emit(jump(1)) + } + if v.Update != nil { + c.compileExpression(v.Update).emitGetter(false) + } + if enterIterBlock != nil { + if c.scope.needStash || c.scope.isDynamic() { + c.p.code[start-1] = copyStash{} + c.p.code[loopBlock.cont] = copyStash{} + } else { + if l := len(c.p.code); l > loopBlock.cont { + loopBlock.cont++ + } else { + c.p.code = c.p.code[:l-1] + } + } + } + c.emit(jump(start - len(c.p.code))) + if v.Test != nil { + if !testConst { + c.p.code[j] = jne(len(c.p.code) - j) + } + } 
+end: + if enterIterBlock != nil { + c.leaveScopeBlock(enterIterBlock) + c.popScope() + } + c.leaveBlock() +} + +func (c *compiler) compileForInStatement(v *ast.ForInStatement, needResult bool) { + c.compileLabeledForInStatement(v, needResult, "") +} + +func (c *compiler) compileForInto(into ast.ForInto, needResult bool) (enter *enterBlock) { + switch into := into.(type) { + case *ast.ForIntoExpression: + c.compileExpression(into.Expression).emitSetter(&c.enumGetExpr, false) + case *ast.ForIntoVar: + if c.scope.strict && into.Binding.Initializer != nil { + c.throwSyntaxError(int(into.Binding.Initializer.Idx0())-1, "for-in loop variable declaration may not have an initializer.") + } + switch target := into.Binding.Target.(type) { + case *ast.Identifier: + c.compileIdentifierExpression(target).emitSetter(&c.enumGetExpr, false) + case ast.Pattern: + c.emit(enumGet) + c.emitPattern(target, c.emitPatternVarAssign, false) + default: + c.throwSyntaxError(int(target.Idx0()-1), "unsupported for-in var target: %T", target) + } + case *ast.ForDeclaration: + + c.block = &block{ + typ: blockIterScope, + outer: c.block, + needResult: needResult, + } + + c.newBlockScope() + enter = &enterBlock{} + c.emit(enter) + switch target := into.Target.(type) { + case *ast.Identifier: + b := c.createLexicalIdBinding(target.Name, into.IsConst, int(into.Idx)-1) + c.emit(enumGet) + b.emitInit() + case ast.Pattern: + c.createLexicalBinding(target, into.IsConst) + c.emit(enumGet) + c.emitPattern(target, func(target, init compiledExpr) { + c.emitPatternLexicalAssign(target, init, into.IsConst) + }, false) + default: + c.throwSyntaxError(int(into.Idx)-1, "Unsupported ForBinding: %T", into.Target) + } + default: + panic(fmt.Sprintf("Unsupported for-into: %T", into)) + } + + return +} + +func (c *compiler) compileLabeledForInOfStatement(into ast.ForInto, source ast.Expression, body ast.Statement, iter, needResult bool, label unistring.String) { + c.block = &block{ + typ: blockLoopEnum, + outer: 
c.block, + label: label, + needResult: needResult, + } + enterPos := -1 + if forDecl, ok := into.(*ast.ForDeclaration); ok { + c.block = &block{ + typ: blockScope, + outer: c.block, + needResult: false, + } + c.newBlockScope() + enterPos = len(c.p.code) + c.emit(jump(1)) + c.createLexicalBinding(forDecl.Target, forDecl.IsConst) + } + c.compileExpression(source).emitGetter(true) + if enterPos != -1 { + s := c.scope + used := len(c.block.breaks) > 0 || s.isDynamic() + if !used { + for _, b := range s.bindings { + if b.useCount() > 0 { + used = true + break + } + } + } + if used { + // We need the stack untouched because it contains the source. + // This is not the most optimal way, but it's an edge case, hopefully quite rare. + for _, b := range s.bindings { + b.moveToStash() + } + enter := &enterBlock{} + c.p.code[enterPos] = enter + c.leaveScopeBlock(enter) + } else { + c.block = c.block.outer + } + c.popScope() + } + if iter { + c.emit(iterateP) + } else { + c.emit(enumerate) + } + if needResult { + c.emit(clearResult) + } + start := len(c.p.code) + c.block.cont = start + c.emit(nil) + enterIterBlock := c.compileForInto(into, needResult) + if needResult { + c.emit(clearResult) + } + c.compileStatement(body, needResult) + if enterIterBlock != nil { + c.leaveScopeBlock(enterIterBlock) + c.popScope() + } + c.emit(jump(start - len(c.p.code))) + if iter { + c.p.code[start] = iterNext(len(c.p.code) - start) + } else { + c.p.code[start] = enumNext(len(c.p.code) - start) + } + c.emit(enumPop, jump(2)) + c.leaveBlock() + c.emit(enumPopClose) +} + +func (c *compiler) compileLabeledForInStatement(v *ast.ForInStatement, needResult bool, label unistring.String) { + c.compileLabeledForInOfStatement(v.Into, v.Source, v.Body, false, needResult, label) +} + +func (c *compiler) compileForOfStatement(v *ast.ForOfStatement, needResult bool) { + c.compileLabeledForOfStatement(v, needResult, "") +} + +func (c *compiler) compileLabeledForOfStatement(v *ast.ForOfStatement, needResult 
bool, label unistring.String) { + c.compileLabeledForInOfStatement(v.Into, v.Source, v.Body, true, needResult, label) +} + +func (c *compiler) compileWhileStatement(v *ast.WhileStatement, needResult bool) { + c.compileLabeledWhileStatement(v, needResult, "") +} + +func (c *compiler) compileLabeledWhileStatement(v *ast.WhileStatement, needResult bool, label unistring.String) { + c.block = &block{ + typ: blockLoop, + outer: c.block, + label: label, + needResult: needResult, + } + + if needResult { + c.emit(clearResult) + } + start := len(c.p.code) + c.block.cont = start + expr := c.compileExpression(v.Test) + testTrue := false + var j int + if expr.constant() { + if t, ex := c.evalConst(expr); ex == nil { + if t.ToBoolean() { + testTrue = true + } else { + c.compileStatementDummy(v.Body) + goto end + } + } else { + c.emitThrow(ex.val) + goto end + } + } else { + expr.emitGetter(true) + j = len(c.p.code) + c.emit(nil) + } + if needResult { + c.emit(clearResult) + } + c.compileStatement(v.Body, needResult) + c.emit(jump(start - len(c.p.code))) + if !testTrue { + c.p.code[j] = jne(len(c.p.code) - j) + } +end: + c.leaveBlock() +} + +func (c *compiler) compileEmptyStatement(needResult bool) { + if needResult { + c.emit(clearResult) + } +} + +func (c *compiler) compileBranchStatement(v *ast.BranchStatement) { + switch v.Token { + case token.BREAK: + c.compileBreak(v.Label, v.Idx) + case token.CONTINUE: + c.compileContinue(v.Label, v.Idx) + default: + panic(fmt.Errorf("Unknown branch statement token: %s", v.Token.String())) + } +} + +func (c *compiler) findBranchBlock(st *ast.BranchStatement) *block { + switch st.Token { + case token.BREAK: + return c.findBreakBlock(st.Label, true) + case token.CONTINUE: + return c.findBreakBlock(st.Label, false) + } + return nil +} + +func (c *compiler) findBreakBlock(label *ast.Identifier, isBreak bool) (res *block) { + if label != nil { + var found *block + for b := c.block; b != nil; b = b.outer { + if res == nil { + if bb := 
b.breaking; bb != nil { + res = bb + if isBreak { + return + } + } + } + if b.label == label.Name { + found = b + break + } + } + if !isBreak && found != nil && found.typ != blockLoop && found.typ != blockLoopEnum { + c.throwSyntaxError(int(label.Idx)-1, "Illegal continue statement: '%s' does not denote an iteration statement", label.Name) + } + if res == nil { + res = found + } + } else { + // find the nearest loop or switch (if break) + L: + for b := c.block; b != nil; b = b.outer { + if bb := b.breaking; bb != nil { + return bb + } + switch b.typ { + case blockLoop, blockLoopEnum: + res = b + break L + case blockSwitch: + if isBreak { + res = b + break L + } + } + } + } + + return +} + +func (c *compiler) emitBlockExitCode(label *ast.Identifier, idx file.Idx, isBreak bool) *block { + block := c.findBreakBlock(label, isBreak) + if block == nil { + c.throwSyntaxError(int(idx)-1, "Could not find block") + panic("unreachable") + } +L: + for b := c.block; b != block; b = b.outer { + switch b.typ { + case blockIterScope: + if !isBreak && b.outer == block { + break L + } + fallthrough + case blockScope: + b.breaks = append(b.breaks, len(c.p.code)) + c.emit(nil) + case blockTry: + c.emit(halt) + case blockWith: + c.emit(leaveWith) + case blockLoopEnum: + c.emit(enumPopClose) + } + } + return block +} + +func (c *compiler) compileBreak(label *ast.Identifier, idx file.Idx) { + block := c.emitBlockExitCode(label, idx, true) + block.breaks = append(block.breaks, len(c.p.code)) + c.emit(nil) +} + +func (c *compiler) compileContinue(label *ast.Identifier, idx file.Idx) { + block := c.emitBlockExitCode(label, idx, false) + block.conts = append(block.conts, len(c.p.code)) + c.emit(nil) +} + +func (c *compiler) compileIfBody(s ast.Statement, needResult bool) { + if !c.scope.strict { + if s, ok := s.(*ast.FunctionDeclaration); ok { + c.compileFunction(s) + if needResult { + c.emit(clearResult) + } + return + } + } + c.compileStatement(s, needResult) +} + +func (c *compiler) 
compileIfBodyDummy(s ast.Statement) { + leave := c.enterDummyMode() + defer leave() + c.compileIfBody(s, false) +} + +func (c *compiler) compileIfStatement(v *ast.IfStatement, needResult bool) { + test := c.compileExpression(v.Test) + if needResult { + c.emit(clearResult) + } + if test.constant() { + r, ex := c.evalConst(test) + if ex != nil { + test.addSrcMap() + c.emitThrow(ex.val) + return + } + if r.ToBoolean() { + c.compileIfBody(v.Consequent, needResult) + if v.Alternate != nil { + c.compileIfBodyDummy(v.Alternate) + } + } else { + c.compileIfBodyDummy(v.Consequent) + if v.Alternate != nil { + c.compileIfBody(v.Alternate, needResult) + } else { + if needResult { + c.emit(clearResult) + } + } + } + return + } + test.emitGetter(true) + jmp := len(c.p.code) + c.emit(nil) + c.compileIfBody(v.Consequent, needResult) + if v.Alternate != nil { + jmp1 := len(c.p.code) + c.emit(nil) + c.p.code[jmp] = jne(len(c.p.code) - jmp) + c.compileIfBody(v.Alternate, needResult) + c.p.code[jmp1] = jump(len(c.p.code) - jmp1) + } else { + if needResult { + c.emit(jump(2)) + c.p.code[jmp] = jne(len(c.p.code) - jmp) + c.emit(clearResult) + } else { + c.p.code[jmp] = jne(len(c.p.code) - jmp) + } + } +} + +func (c *compiler) compileReturnStatement(v *ast.ReturnStatement) { + if v.Argument != nil { + c.compileExpression(v.Argument).emitGetter(true) + } else { + c.emit(loadUndef) + } + for b := c.block; b != nil; b = b.outer { + switch b.typ { + case blockTry: + c.emit(halt) + case blockLoopEnum: + c.emit(enumPopClose) + } + } + c.emit(ret) +} + +func (c *compiler) checkVarConflict(name unistring.String, offset int) { + for sc := c.scope; sc != nil; sc = sc.outer { + if b, exists := sc.boundNames[name]; exists && !b.isVar && !(b.isArg && sc != c.scope) { + c.throwSyntaxError(offset, "Identifier '%s' has already been declared", name) + } + if sc.function { + break + } + } +} + +func (c *compiler) emitVarAssign(name unistring.String, offset int, init compiledExpr) { + 
c.checkVarConflict(name, offset) + if init != nil { + c.emitVarRef(name, offset) + c.emitNamed(init, name) + c.emit(putValueP) + } +} + +func (c *compiler) compileVarBinding(expr *ast.Binding) { + switch target := expr.Target.(type) { + case *ast.Identifier: + c.emitVarAssign(target.Name, int(target.Idx)-1, c.compileExpression(expr.Initializer)) + case ast.Pattern: + c.compileExpression(expr.Initializer).emitGetter(true) + c.emitPattern(target, c.emitPatternVarAssign, false) + default: + c.throwSyntaxError(int(target.Idx0()-1), "unsupported variable binding target: %T", target) + } +} + +func (c *compiler) emitLexicalAssign(name unistring.String, offset int, init compiledExpr, isConst bool) { + b := c.scope.boundNames[name] + if b == nil { + panic("Lexical declaration for an unbound name") + } + if init != nil { + c.emitNamed(init, name) + } else { + if isConst { + c.throwSyntaxError(offset, "Missing initializer in const declaration") + } + c.emit(loadUndef) + } + if c.scope.outer != nil { + b.emitInit() + } else { + c.emit(initGlobal(name)) + } +} + +func (c *compiler) emitPatternVarAssign(target, init compiledExpr) { + id := target.(*compiledIdentifierExpr) + c.emitVarAssign(id.name, id.offset, init) +} + +func (c *compiler) emitPatternLexicalAssign(target, init compiledExpr, isConst bool) { + id := target.(*compiledIdentifierExpr) + c.emitLexicalAssign(id.name, id.offset, init, isConst) +} + +func (c *compiler) emitPatternAssign(target, init compiledExpr) { + target.emitRef() + if id, ok := target.(*compiledIdentifierExpr); ok { + c.emitNamed(init, id.name) + } else { + init.emitGetter(true) + } + c.emit(putValueP) +} + +func (c *compiler) compileLexicalBinding(expr *ast.Binding, isConst bool) { + switch target := expr.Target.(type) { + case *ast.Identifier: + c.emitLexicalAssign(target.Name, int(target.Idx)-1, c.compileExpression(expr.Initializer), isConst) + case ast.Pattern: + c.compileExpression(expr.Initializer).emitGetter(true) + c.emitPattern(target, 
func(target, init compiledExpr) { + c.emitPatternLexicalAssign(target, init, isConst) + }, false) + default: + c.throwSyntaxError(int(target.Idx0()-1), "unsupported lexical binding target: %T", target) + } +} + +func (c *compiler) compileVariableStatement(v *ast.VariableStatement) { + for _, expr := range v.List { + c.compileVarBinding(expr) + } +} + +func (c *compiler) compileLexicalDeclaration(v *ast.LexicalDeclaration) { + isConst := v.Token == token.CONST + for _, e := range v.List { + c.compileLexicalBinding(e, isConst) + } +} + +func (c *compiler) isEmptyResult(st ast.Statement) bool { + switch st := st.(type) { + case *ast.EmptyStatement, *ast.VariableStatement, *ast.LexicalDeclaration, *ast.FunctionDeclaration, + *ast.BranchStatement, *ast.DebuggerStatement: + return true + case *ast.LabelledStatement: + return c.isEmptyResult(st.Statement) + case *ast.BlockStatement: + for _, s := range st.List { + if _, ok := s.(*ast.BranchStatement); ok { + return true + } + if !c.isEmptyResult(s) { + return false + } + } + return true + } + return false +} + +func (c *compiler) scanStatements(list []ast.Statement) (lastProducingIdx int, breakingBlock *block) { + lastProducingIdx = -1 + for i, st := range list { + if bs, ok := st.(*ast.BranchStatement); ok { + if blk := c.findBranchBlock(bs); blk != nil { + breakingBlock = blk + } + break + } + if !c.isEmptyResult(st) { + lastProducingIdx = i + } + } + return +} + +func (c *compiler) compileStatementsNeedResult(list []ast.Statement, lastProducingIdx int) { + if lastProducingIdx >= 0 { + for _, st := range list[:lastProducingIdx] { + if _, ok := st.(*ast.FunctionDeclaration); ok { + continue + } + c.compileStatement(st, false) + } + c.compileStatement(list[lastProducingIdx], true) + } + var leave func() + defer func() { + if leave != nil { + leave() + } + }() + for _, st := range list[lastProducingIdx+1:] { + if _, ok := st.(*ast.FunctionDeclaration); ok { + continue + } + c.compileStatement(st, false) + if leave == nil { 
+ if _, ok := st.(*ast.BranchStatement); ok { + leave = c.enterDummyMode() + } + } + } +} + +func (c *compiler) compileStatements(list []ast.Statement, needResult bool) { + lastProducingIdx, blk := c.scanStatements(list) + if blk != nil { + needResult = blk.needResult + } + if needResult { + c.compileStatementsNeedResult(list, lastProducingIdx) + return + } + for _, st := range list { + if _, ok := st.(*ast.FunctionDeclaration); ok { + continue + } + c.compileStatement(st, false) + } +} + +func (c *compiler) compileGenericLabeledStatement(v ast.Statement, needResult bool, label unistring.String) { + c.block = &block{ + typ: blockLabel, + outer: c.block, + label: label, + needResult: needResult, + } + c.compileStatement(v, needResult) + c.leaveBlock() +} + +func (c *compiler) compileBlockStatement(v *ast.BlockStatement, needResult bool) { + var scopeDeclared bool + funcs := c.extractFunctions(v.List) + if len(funcs) > 0 { + c.newBlockScope() + scopeDeclared = true + } + c.createFunctionBindings(funcs) + scopeDeclared = c.compileLexicalDeclarations(v.List, scopeDeclared) + + var enter *enterBlock + if scopeDeclared { + c.block = &block{ + outer: c.block, + typ: blockScope, + needResult: needResult, + } + enter = &enterBlock{} + c.emit(enter) + } + c.compileFunctions(funcs) + c.compileStatements(v.List, needResult) + if scopeDeclared { + c.leaveScopeBlock(enter) + c.popScope() + } +} + +func (c *compiler) compileExpressionStatement(v *ast.ExpressionStatement, needResult bool) { + expr := c.compileExpression(v.Expression) + if expr.constant() { + c.emitConst(expr, needResult) + } else { + expr.emitGetter(needResult) + } + if needResult { + c.emit(saveResult) + } +} + +func (c *compiler) compileWithStatement(v *ast.WithStatement, needResult bool) { + if c.scope.strict { + c.throwSyntaxError(int(v.With)-1, "Strict mode code may not include a with statement") + return + } + c.compileExpression(v.Object).emitGetter(true) + c.emit(enterWith) + c.block = &block{ + outer: 
c.block, + typ: blockWith, + needResult: needResult, + } + c.newBlockScope() + c.scope.dynamic = true + c.compileStatement(v.Body, needResult) + c.emit(leaveWith) + c.leaveBlock() + c.popScope() +} + +func (c *compiler) compileSwitchStatement(v *ast.SwitchStatement, needResult bool) { + c.block = &block{ + typ: blockSwitch, + outer: c.block, + needResult: needResult, + } + + c.compileExpression(v.Discriminant).emitGetter(true) + + var funcs []*ast.FunctionDeclaration + for _, s := range v.Body { + f := c.extractFunctions(s.Consequent) + funcs = append(funcs, f...) + } + var scopeDeclared bool + if len(funcs) > 0 { + c.newBlockScope() + scopeDeclared = true + c.createFunctionBindings(funcs) + } + + for _, s := range v.Body { + scopeDeclared = c.compileLexicalDeclarations(s.Consequent, scopeDeclared) + } + + var enter *enterBlock + var db *binding + if scopeDeclared { + c.block = &block{ + typ: blockScope, + outer: c.block, + needResult: needResult, + } + enter = &enterBlock{} + c.emit(enter) + // create anonymous variable for the discriminant + bindings := c.scope.bindings + var bb []*binding + if cap(bindings) == len(bindings) { + bb = make([]*binding, len(bindings)+1) + } else { + bb = bindings[:len(bindings)+1] + } + copy(bb[1:], bindings) + db = &binding{ + scope: c.scope, + isConst: true, + isStrict: true, + } + bb[0] = db + c.scope.bindings = bb + } + + c.compileFunctions(funcs) + + if needResult { + c.emit(clearResult) + } + + jumps := make([]int, len(v.Body)) + + for i, s := range v.Body { + if s.Test != nil { + if db != nil { + db.emitGet() + } else { + c.emit(dup) + } + c.compileExpression(s.Test).emitGetter(true) + c.emit(op_strict_eq) + if db != nil { + c.emit(jne(2)) + } else { + c.emit(jne(3), pop) + } + jumps[i] = len(c.p.code) + c.emit(nil) + } + } + + if db == nil { + c.emit(pop) + } + jumpNoMatch := -1 + if v.Default != -1 { + if v.Default != 0 { + jumps[v.Default] = len(c.p.code) + c.emit(nil) + } + } else { + jumpNoMatch = len(c.p.code) + 
c.emit(nil) + } + + for i, s := range v.Body { + if s.Test != nil || i != 0 { + c.p.code[jumps[i]] = jump(len(c.p.code) - jumps[i]) + } + c.compileStatements(s.Consequent, needResult) + } + + if jumpNoMatch != -1 { + c.p.code[jumpNoMatch] = jump(len(c.p.code) - jumpNoMatch) + } + if enter != nil { + c.leaveScopeBlock(enter) + enter.stackSize-- + c.popScope() + } + c.leaveBlock() +} diff --git a/vendor/github.com/dop251/goja/date.go b/vendor/github.com/dop251/goja/date.go new file mode 100644 index 0000000000..66ac80b513 --- /dev/null +++ b/vendor/github.com/dop251/goja/date.go @@ -0,0 +1,140 @@ +package goja + +import ( + "math" + "time" +) + +const ( + dateTimeLayout = "Mon Jan 02 2006 15:04:05 GMT-0700 (MST)" + utcDateTimeLayout = "Mon, 02 Jan 2006 15:04:05 GMT" + isoDateTimeLayout = "2006-01-02T15:04:05.000Z" + dateLayout = "Mon Jan 02 2006" + timeLayout = "15:04:05 GMT-0700 (MST)" + datetimeLayout_en_GB = "01/02/2006, 15:04:05" + dateLayout_en_GB = "01/02/2006" + timeLayout_en_GB = "15:04:05" + + maxTime = 8.64e15 + timeUnset = math.MinInt64 +) + +type dateObject struct { + baseObject + msec int64 +} + +var ( + dateLayoutList = []string{ + "2006-01-02T15:04:05Z0700", + "2006-01-02T15:04:05", + "2006-01-02", + "2006-01-02 15:04:05", + time.RFC1123, + time.RFC1123Z, + dateTimeLayout, + time.UnixDate, + time.ANSIC, + time.RubyDate, + "Mon, 02 Jan 2006 15:04:05 GMT-0700 (MST)", + "Mon, 02 Jan 2006 15:04:05 -0700 (MST)", + + "2006", + "2006-01", + + "2006T15:04", + "2006-01T15:04", + "2006-01-02T15:04", + + "2006T15:04:05", + "2006-01T15:04:05", + + "2006T15:04Z0700", + "2006-01T15:04Z0700", + "2006-01-02T15:04Z0700", + + "2006T15:04:05Z0700", + "2006-01T15:04:05Z0700", + } +) + +func dateParse(date string) (time.Time, bool) { + var t time.Time + var err error + for _, layout := range dateLayoutList { + t, err = parseDate(layout, date, time.UTC) + if err == nil { + break + } + } + unix := timeToMsec(t) + return t, err == nil && unix >= -maxTime && unix <= maxTime +} 
+ +func (r *Runtime) newDateObject(t time.Time, isSet bool, proto *Object) *Object { + v := &Object{runtime: r} + d := &dateObject{} + v.self = d + d.val = v + d.class = classDate + d.prototype = proto + d.extensible = true + d.init() + if isSet { + d.msec = timeToMsec(t) + } else { + d.msec = timeUnset + } + return v +} + +func dateFormat(t time.Time) string { + return t.Local().Format(dateTimeLayout) +} + +func timeFromMsec(msec int64) time.Time { + sec := msec / 1000 + nsec := (msec % 1000) * 1e6 + return time.Unix(sec, nsec) +} + +func timeToMsec(t time.Time) int64 { + return t.Unix()*1000 + int64(t.Nanosecond())/1e6 +} + +func (d *dateObject) toPrimitive() Value { + return d.toPrimitiveString() +} + +func (d *dateObject) export(*objectExportCtx) interface{} { + if d.isSet() { + return d.time() + } + return nil +} + +func (d *dateObject) setTimeMs(ms int64) Value { + if ms >= 0 && ms <= maxTime || ms < 0 && ms >= -maxTime { + d.msec = ms + return intToValue(ms) + } + + d.unset() + return _NaN +} + +func (d *dateObject) isSet() bool { + return d.msec != timeUnset +} + +func (d *dateObject) unset() { + d.msec = timeUnset +} + +func (d *dateObject) time() time.Time { + return timeFromMsec(d.msec) +} + +func (d *dateObject) timeUTC() time.Time { + return timeFromMsec(d.msec).In(time.UTC) +} diff --git a/vendor/github.com/dop251/goja/date_parser.go b/vendor/github.com/dop251/goja/date_parser.go new file mode 100644 index 0000000000..0841cf40e1 --- /dev/null +++ b/vendor/github.com/dop251/goja/date_parser.go @@ -0,0 +1,860 @@ +package goja + +// This is a slightly modified version of the standard Go parser to make it more compatible with ECMAScript 5.1 +// Changes: +// - 6-digit extended years are supported in place of long year (2006) in the form of +123456 +// - Timezone formats tolerate colons, e.g. -0700 will parse -07:00 +// - Short week day will also parse long week day +// - Timezone in brackets, "(MST)", will match any string in brackets (e.g. 
"(GMT Standard Time)") +// - If offset is not set and timezone name is unknown, an error is returned +// - If offset and timezone name are both set the offset takes precedence and the resulting Location will be FixedZone("", offset) + +// Original copyright message: + +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +import ( + "errors" + "time" +) + +const ( + _ = iota + stdLongMonth = iota + stdNeedDate // "January" + stdMonth // "Jan" + stdNumMonth // "1" + stdZeroMonth // "01" + stdLongWeekDay // "Monday" + stdWeekDay // "Mon" + stdDay // "2" + stdUnderDay // "_2" + stdZeroDay // "02" + stdHour = iota + stdNeedClock // "15" + stdHour12 // "3" + stdZeroHour12 // "03" + stdMinute // "4" + stdZeroMinute // "04" + stdSecond // "5" + stdZeroSecond // "05" + stdLongYear = iota + stdNeedDate // "2006" + stdYear // "06" + stdPM = iota + stdNeedClock // "PM" + stdpm // "pm" + stdTZ = iota // "MST" + stdBracketTZ // "(MST)" + stdISO8601TZ // "Z0700" // prints Z for UTC + stdISO8601SecondsTZ // "Z070000" + stdISO8601ShortTZ // "Z07" + stdISO8601ColonTZ // "Z07:00" // prints Z for UTC + stdISO8601ColonSecondsTZ // "Z07:00:00" + stdNumTZ // "-0700" // always numeric + stdNumSecondsTz // "-070000" + stdNumShortTZ // "-07" // always numeric + stdNumColonTZ // "-07:00" // always numeric + stdNumColonSecondsTZ // "-07:00:00" + stdFracSecond0 // ".0", ".00", ... 
, trailing zeros included + stdFracSecond9 // ".9", ".99", ..., trailing zeros omitted + + stdNeedDate = 1 << 8 // need month, day, year + stdNeedClock = 2 << 8 // need hour, minute, second + stdArgShift = 16 // extra argument in high bits, above low stdArgShift + stdMask = 1<= 69 { // Unix time starts Dec 31 1969 in some time zones + year += 1900 + } else { + year += 2000 + } + case stdLongYear: + if len(value) >= 7 && (value[0] == '-' || value[0] == '+') { // extended year + neg := value[0] == '-' + p, value = value[1:7], value[7:] + year, err = atoi(p) + if neg { + year = -year + } + } else { + if len(value) < 4 || !isDigit(value, 0) { + err = errBad + break + } + p, value = value[0:4], value[4:] + year, err = atoi(p) + } + + case stdMonth: + month, value, err = lookup(shortMonthNames, value) + month++ + case stdLongMonth: + month, value, err = lookup(longMonthNames, value) + month++ + case stdNumMonth, stdZeroMonth: + month, value, err = getnum(value, std == stdZeroMonth) + if month <= 0 || 12 < month { + rangeErrString = "month" + } + case stdWeekDay: + // Ignore weekday except for error checking. + _, value, err = lookup(longDayNames, value) + if err != nil { + _, value, err = lookup(shortDayNames, value) + } + case stdLongWeekDay: + _, value, err = lookup(longDayNames, value) + case stdDay, stdUnderDay, stdZeroDay: + if std == stdUnderDay && len(value) > 0 && value[0] == ' ' { + value = value[1:] + } + day, value, err = getnum(value, std == stdZeroDay) + if day < 0 { + // Note that we allow any one- or two-digit day here. 
+ rangeErrString = "day" + } + case stdHour: + hour, value, err = getnum(value, false) + if hour < 0 || 24 <= hour { + rangeErrString = "hour" + } + case stdHour12, stdZeroHour12: + hour, value, err = getnum(value, std == stdZeroHour12) + if hour < 0 || 12 < hour { + rangeErrString = "hour" + } + case stdMinute, stdZeroMinute: + min, value, err = getnum(value, std == stdZeroMinute) + if min < 0 || 60 <= min { + rangeErrString = "minute" + } + case stdSecond, stdZeroSecond: + sec, value, err = getnum(value, std == stdZeroSecond) + if sec < 0 || 60 <= sec { + rangeErrString = "second" + break + } + // Special case: do we have a fractional second but no + // fractional second in the format? + if len(value) >= 2 && value[0] == '.' && isDigit(value, 1) { + _, std, _ = nextStdChunk(layout) + std &= stdMask + if std == stdFracSecond0 || std == stdFracSecond9 { + // Fractional second in the layout; proceed normally + break + } + // No fractional second in the layout but we have one in the input. 
+ n := 2 + for ; n < len(value) && isDigit(value, n); n++ { + } + nsec, rangeErrString, err = parseNanoseconds(value, n) + value = value[n:] + } + case stdPM: + if len(value) < 2 { + err = errBad + break + } + p, value = value[0:2], value[2:] + switch p { + case "PM": + pmSet = true + case "AM": + amSet = true + default: + err = errBad + } + case stdpm: + if len(value) < 2 { + err = errBad + break + } + p, value = value[0:2], value[2:] + switch p { + case "pm": + pmSet = true + case "am": + amSet = true + default: + err = errBad + } + case stdISO8601TZ, stdISO8601ColonTZ, stdISO8601SecondsTZ, stdISO8601ShortTZ, stdISO8601ColonSecondsTZ, stdNumTZ, stdNumShortTZ, stdNumColonTZ, stdNumSecondsTz, stdNumColonSecondsTZ: + if (std == stdISO8601TZ || std == stdISO8601ShortTZ || std == stdISO8601ColonTZ || + std == stdISO8601SecondsTZ || std == stdISO8601ColonSecondsTZ) && len(value) >= 1 && value[0] == 'Z' { + + value = value[1:] + z = time.UTC + break + } + var sign, hour, min, seconds string + if std == stdISO8601ColonTZ || std == stdNumColonTZ || std == stdNumTZ || std == stdISO8601TZ { + if len(value) < 4 { + err = errBad + break + } + if value[3] != ':' { + if std == stdNumColonTZ || std == stdISO8601ColonTZ || len(value) < 5 { + err = errBad + break + } + sign, hour, min, seconds, value = value[0:1], value[1:3], value[3:5], "00", value[5:] + } else { + if len(value) < 6 { + err = errBad + break + } + sign, hour, min, seconds, value = value[0:1], value[1:3], value[4:6], "00", value[6:] + } + } else if std == stdNumShortTZ || std == stdISO8601ShortTZ { + if len(value) < 3 { + err = errBad + break + } + sign, hour, min, seconds, value = value[0:1], value[1:3], "00", "00", value[3:] + } else if std == stdISO8601ColonSecondsTZ || std == stdNumColonSecondsTZ || std == stdISO8601SecondsTZ || std == stdNumSecondsTz { + if len(value) < 7 { + err = errBad + break + } + if value[3] != ':' || value[6] != ':' { + if std == stdISO8601ColonSecondsTZ || std == stdNumColonSecondsTZ 
|| len(value) < 7 { + err = errBad + break + } + sign, hour, min, seconds, value = value[0:1], value[1:3], value[3:5], value[5:7], value[7:] + } else { + if len(value) < 9 { + err = errBad + break + } + sign, hour, min, seconds, value = value[0:1], value[1:3], value[4:6], value[7:9], value[9:] + } + } + var hr, mm, ss int + hr, err = atoi(hour) + if err == nil { + mm, err = atoi(min) + } + if err == nil { + ss, err = atoi(seconds) + } + zoneOffset = (hr*60+mm)*60 + ss // offset is in seconds + switch sign[0] { + case '+': + case '-': + zoneOffset = -zoneOffset + default: + err = errBad + } + case stdTZ: + // Does it look like a time zone? + if len(value) >= 3 && value[0:3] == "UTC" { + z = time.UTC + value = value[3:] + break + } + n, ok := parseTimeZone(value) + if !ok { + err = errBad + break + } + zoneName, value = value[:n], value[n:] + case stdBracketTZ: + if len(value) < 3 || value[0] != '(' { + err = errBad + break + } + i := 1 + for ; ; i++ { + if i >= len(value) { + err = errBad + break + } + if value[i] == ')' { + zoneName, value = value[1:i], value[i+1:] + break + } + } + + case stdFracSecond0: + // stdFracSecond0 requires the exact number of digits as specified in + // the layout. + ndigit := 1 + (std >> stdArgShift) + if len(value) < ndigit { + err = errBad + break + } + nsec, rangeErrString, err = parseNanoseconds(value, ndigit) + value = value[ndigit:] + + case stdFracSecond9: + if len(value) < 2 || value[0] != '.' || value[1] < '0' || '9' < value[1] { + // Fractional second omitted. + break + } + // Take any number of digits, even more than asked for, + // because it is what the stdSecond case would do. 
+ i := 0 + for i < 9 && i+1 < len(value) && '0' <= value[i+1] && value[i+1] <= '9' { + i++ + } + nsec, rangeErrString, err = parseNanoseconds(value, 1+i) + value = value[1+i:] + } + if rangeErrString != "" { + return time.Time{}, &time.ParseError{Layout: alayout, Value: avalue, LayoutElem: stdstr, ValueElem: value, Message: ": " + rangeErrString + " out of range"} + } + if err != nil { + return time.Time{}, &time.ParseError{Layout: alayout, Value: avalue, LayoutElem: stdstr, ValueElem: value} + } + } + if pmSet && hour < 12 { + hour += 12 + } else if amSet && hour == 12 { + hour = 0 + } + + // Validate the day of the month. + if day < 1 || day > daysIn(time.Month(month), year) { + return time.Time{}, &time.ParseError{Layout: alayout, Value: avalue, ValueElem: value, Message: ": day out of range"} + } + + if z == nil { + if zoneOffset == -1 { + if zoneName != "" { + if z1, err := time.LoadLocation(zoneName); err == nil { + z = z1 + } else { + return time.Time{}, &time.ParseError{Layout: alayout, Value: avalue, ValueElem: value, Message: ": unknown timezone"} + } + } else { + z = defaultLocation + } + } else if zoneOffset == 0 { + z = time.UTC + } else { + z = time.FixedZone("", zoneOffset) + } + } + + return time.Date(year, time.Month(month), day, hour, min, sec, nsec, z), nil +} + +var errLeadingInt = errors.New("time: bad [0-9]*") // never printed + +func signedLeadingInt(s string) (x int64, rem string, err error) { + neg := false + if s != "" && (s[0] == '-' || s[0] == '+') { + neg = s[0] == '-' + s = s[1:] + } + x, rem, err = leadingInt(s) + if err != nil { + return + } + + if neg { + x = -x + } + return +} + +// leadingInt consumes the leading [0-9]* from s. 
+func leadingInt(s string) (x int64, rem string, err error) { + i := 0 + for ; i < len(s); i++ { + c := s[i] + if c < '0' || c > '9' { + break + } + if x > (1<<63-1)/10 { + // overflow + return 0, "", errLeadingInt + } + x = x*10 + int64(c) - '0' + if x < 0 { + // overflow + return 0, "", errLeadingInt + } + } + return x, s[i:], nil +} + +// nextStdChunk finds the first occurrence of a std string in +// layout and returns the text before, the std string, and the text after. +func nextStdChunk(layout string) (prefix string, std int, suffix string) { + for i := 0; i < len(layout); i++ { + switch c := int(layout[i]); c { + case 'J': // January, Jan + if len(layout) >= i+3 && layout[i:i+3] == "Jan" { + if len(layout) >= i+7 && layout[i:i+7] == "January" { + return layout[0:i], stdLongMonth, layout[i+7:] + } + if !startsWithLowerCase(layout[i+3:]) { + return layout[0:i], stdMonth, layout[i+3:] + } + } + + case 'M': // Monday, Mon, MST + if len(layout) >= i+3 { + if layout[i:i+3] == "Mon" { + if len(layout) >= i+6 && layout[i:i+6] == "Monday" { + return layout[0:i], stdLongWeekDay, layout[i+6:] + } + if !startsWithLowerCase(layout[i+3:]) { + return layout[0:i], stdWeekDay, layout[i+3:] + } + } + if layout[i:i+3] == "MST" { + return layout[0:i], stdTZ, layout[i+3:] + } + } + + case '0': // 01, 02, 03, 04, 05, 06 + if len(layout) >= i+2 && '1' <= layout[i+1] && layout[i+1] <= '6' { + return layout[0:i], std0x[layout[i+1]-'1'], layout[i+2:] + } + + case '1': // 15, 1 + if len(layout) >= i+2 && layout[i+1] == '5' { + return layout[0:i], stdHour, layout[i+2:] + } + return layout[0:i], stdNumMonth, layout[i+1:] + + case '2': // 2006, 2 + if len(layout) >= i+4 && layout[i:i+4] == "2006" { + return layout[0:i], stdLongYear, layout[i+4:] + } + return layout[0:i], stdDay, layout[i+1:] + + case '_': // _2, _2006 + if len(layout) >= i+2 && layout[i+1] == '2' { + //_2006 is really a literal _, followed by stdLongYear + if len(layout) >= i+5 && layout[i+1:i+5] == "2006" { + return 
layout[0 : i+1], stdLongYear, layout[i+5:] + } + return layout[0:i], stdUnderDay, layout[i+2:] + } + + case '3': + return layout[0:i], stdHour12, layout[i+1:] + + case '4': + return layout[0:i], stdMinute, layout[i+1:] + + case '5': + return layout[0:i], stdSecond, layout[i+1:] + + case 'P': // PM + if len(layout) >= i+2 && layout[i+1] == 'M' { + return layout[0:i], stdPM, layout[i+2:] + } + + case 'p': // pm + if len(layout) >= i+2 && layout[i+1] == 'm' { + return layout[0:i], stdpm, layout[i+2:] + } + + case '-': // -070000, -07:00:00, -0700, -07:00, -07 + if len(layout) >= i+7 && layout[i:i+7] == "-070000" { + return layout[0:i], stdNumSecondsTz, layout[i+7:] + } + if len(layout) >= i+9 && layout[i:i+9] == "-07:00:00" { + return layout[0:i], stdNumColonSecondsTZ, layout[i+9:] + } + if len(layout) >= i+5 && layout[i:i+5] == "-0700" { + return layout[0:i], stdNumTZ, layout[i+5:] + } + if len(layout) >= i+6 && layout[i:i+6] == "-07:00" { + return layout[0:i], stdNumColonTZ, layout[i+6:] + } + if len(layout) >= i+3 && layout[i:i+3] == "-07" { + return layout[0:i], stdNumShortTZ, layout[i+3:] + } + + case 'Z': // Z070000, Z07:00:00, Z0700, Z07:00, + if len(layout) >= i+7 && layout[i:i+7] == "Z070000" { + return layout[0:i], stdISO8601SecondsTZ, layout[i+7:] + } + if len(layout) >= i+9 && layout[i:i+9] == "Z07:00:00" { + return layout[0:i], stdISO8601ColonSecondsTZ, layout[i+9:] + } + if len(layout) >= i+5 && layout[i:i+5] == "Z0700" { + return layout[0:i], stdISO8601TZ, layout[i+5:] + } + if len(layout) >= i+6 && layout[i:i+6] == "Z07:00" { + return layout[0:i], stdISO8601ColonTZ, layout[i+6:] + } + if len(layout) >= i+3 && layout[i:i+3] == "Z07" { + return layout[0:i], stdISO8601ShortTZ, layout[i+3:] + } + + case '.': // .000 or .999 - repeated digits for fractional seconds. 
+ if i+1 < len(layout) && (layout[i+1] == '0' || layout[i+1] == '9') { + ch := layout[i+1] + j := i + 1 + for j < len(layout) && layout[j] == ch { + j++ + } + // String of digits must end here - only fractional second is all digits. + if !isDigit(layout, j) { + std := stdFracSecond0 + if layout[i+1] == '9' { + std = stdFracSecond9 + } + std |= (j - (i + 1)) << stdArgShift + return layout[0:i], std, layout[j:] + } + } + case '(': + if len(layout) >= i+5 && layout[i:i+5] == "(MST)" { + return layout[0:i], stdBracketTZ, layout[i+5:] + } + } + } + return layout, 0, "" +} + +var longDayNames = []string{ + "Sunday", + "Monday", + "Tuesday", + "Wednesday", + "Thursday", + "Friday", + "Saturday", +} + +var shortDayNames = []string{ + "Sun", + "Mon", + "Tue", + "Wed", + "Thu", + "Fri", + "Sat", +} + +var shortMonthNames = []string{ + "Jan", + "Feb", + "Mar", + "Apr", + "May", + "Jun", + "Jul", + "Aug", + "Sep", + "Oct", + "Nov", + "Dec", +} + +var longMonthNames = []string{ + "January", + "February", + "March", + "April", + "May", + "June", + "July", + "August", + "September", + "October", + "November", + "December", +} + +// isDigit reports whether s[i] is in range and is a decimal digit. +func isDigit(s string, i int) bool { + if len(s) <= i { + return false + } + c := s[i] + return '0' <= c && c <= '9' +} + +// getnum parses s[0:1] or s[0:2] (fixed forces the latter) +// as a decimal integer and returns the integer and the +// remainder of the string. +func getnum(s string, fixed bool) (int, string, error) { + if !isDigit(s, 0) { + return 0, s, errBad + } + if !isDigit(s, 1) { + if fixed { + return 0, s, errBad + } + return int(s[0] - '0'), s[1:], nil + } + return int(s[0]-'0')*10 + int(s[1]-'0'), s[2:], nil +} + +func cutspace(s string) string { + for len(s) > 0 && s[0] == ' ' { + s = s[1:] + } + return s +} + +// skip removes the given prefix from value, +// treating runs of space characters as equivalent. 
+func skip(value, prefix string) (string, error) { + for len(prefix) > 0 { + if prefix[0] == ' ' { + if len(value) > 0 && value[0] != ' ' { + return value, errBad + } + prefix = cutspace(prefix) + value = cutspace(value) + continue + } + if len(value) == 0 || value[0] != prefix[0] { + return value, errBad + } + prefix = prefix[1:] + value = value[1:] + } + return value, nil +} + +// Never printed, just needs to be non-nil for return by atoi. +var atoiError = errors.New("time: invalid number") + +// Duplicates functionality in strconv, but avoids dependency. +func atoi(s string) (x int, err error) { + q, rem, err := signedLeadingInt(s) + x = int(q) + if err != nil || rem != "" { + return 0, atoiError + } + return x, nil +} + +// match reports whether s1 and s2 match ignoring case. +// It is assumed s1 and s2 are the same length. +func match(s1, s2 string) bool { + for i := 0; i < len(s1); i++ { + c1 := s1[i] + c2 := s2[i] + if c1 != c2 { + // Switch to lower-case; 'a'-'A' is known to be a single bit. + c1 |= 'a' - 'A' + c2 |= 'a' - 'A' + if c1 != c2 || c1 < 'a' || c1 > 'z' { + return false + } + } + } + return true +} + +func lookup(tab []string, val string) (int, string, error) { + for i, v := range tab { + if len(val) >= len(v) && match(val[0:len(v)], v) { + return i, val[len(v):], nil + } + } + return -1, val, errBad +} + +// daysBefore[m] counts the number of days in a non-leap year +// before month m begins. There is an entry for m=12, counting +// the number of days before January of next year (365). 
+var daysBefore = [...]int32{ + 0, + 31, + 31 + 28, + 31 + 28 + 31, + 31 + 28 + 31 + 30, + 31 + 28 + 31 + 30 + 31, + 31 + 28 + 31 + 30 + 31 + 30, + 31 + 28 + 31 + 30 + 31 + 30 + 31, + 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31, + 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30, + 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31, + 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30, + 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30 + 31, +} + +func isLeap(year int) bool { + return year%4 == 0 && (year%100 != 0 || year%400 == 0) +} + +func daysIn(m time.Month, year int) int { + if m == time.February && isLeap(year) { + return 29 + } + return int(daysBefore[m] - daysBefore[m-1]) +} + +// parseTimeZone parses a time zone string and returns its length. Time zones +// are human-generated and unpredictable. We can't do precise error checking. +// On the other hand, for a correct parse there must be a time zone at the +// beginning of the string, so it's almost always true that there's one +// there. We look at the beginning of the string for a run of upper-case letters. +// If there are more than 5, it's an error. +// If there are 4 or 5 and the last is a T, it's a time zone. +// If there are 3, it's a time zone. +// Otherwise, other than special cases, it's not a time zone. +// GMT is special because it can have an hour offset. +func parseTimeZone(value string) (length int, ok bool) { + if len(value) < 3 { + return 0, false + } + // Special case 1: ChST and MeST are the only zones with a lower-case letter. + if len(value) >= 4 && (value[:4] == "ChST" || value[:4] == "MeST") { + return 4, true + } + // Special case 2: GMT may have an hour offset; treat it specially. + if value[:3] == "GMT" { + length = parseGMT(value) + return length, true + } + // Special Case 3: Some time zones are not named, but have +/-00 format + if value[0] == '+' || value[0] == '-' { + length = parseSignedOffset(value) + return length, true + } + // How many upper-case letters are there? 
Need at least three, at most five. + var nUpper int + for nUpper = 0; nUpper < 6; nUpper++ { + if nUpper >= len(value) { + break + } + if c := value[nUpper]; c < 'A' || 'Z' < c { + break + } + } + switch nUpper { + case 0, 1, 2, 6: + return 0, false + case 5: // Must end in T to match. + if value[4] == 'T' { + return 5, true + } + case 4: + // Must end in T, except one special case. + if value[3] == 'T' || value[:4] == "WITA" { + return 4, true + } + case 3: + return 3, true + } + return 0, false +} + +// parseGMT parses a GMT time zone. The input string is known to start "GMT". +// The function checks whether that is followed by a sign and a number in the +// range -14 through 12 excluding zero. +func parseGMT(value string) int { + value = value[3:] + if len(value) == 0 { + return 3 + } + + return 3 + parseSignedOffset(value) +} + +// parseSignedOffset parses a signed timezone offset (e.g. "+03" or "-04"). +// The function checks for a signed number in the range -14 through +12 excluding zero. +// Returns length of the found offset string or 0 otherwise +func parseSignedOffset(value string) int { + sign := value[0] + if sign != '-' && sign != '+' { + return 0 + } + x, rem, err := leadingInt(value[1:]) + if err != nil { + return 0 + } + if sign == '-' { + x = -x + } + if x == 0 || x < -14 || 12 < x { + return 0 + } + return len(value) - len(rem) +} + +func parseNanoseconds(value string, nbytes int) (ns int, rangeErrString string, err error) { + if value[0] != '.' { + err = errBad + return + } + if ns, err = atoi(value[1:nbytes]); err != nil { + return + } + if ns < 0 || 1e9 <= ns { + rangeErrString = "fractional second" + return + } + // We need nanoseconds, which means scaling by the number + // of missing digits in the format, maximum length 10. If it's + // longer than 10, we won't scale. + scaleDigits := 10 - nbytes + for i := 0; i < scaleDigits; i++ { + ns *= 10 + } + return +} + +// std0x records the std values for "01", "02", ..., "06". 
+var std0x = [...]int{stdZeroMonth, stdZeroDay, stdZeroHour12, stdZeroMinute, stdZeroSecond, stdYear} + +// startsWithLowerCase reports whether the string has a lower-case letter at the beginning. +// Its purpose is to prevent matching strings like "Month" when looking for "Mon". +func startsWithLowerCase(str string) bool { + if len(str) == 0 { + return false + } + c := str[0] + return 'a' <= c && c <= 'z' +} diff --git a/vendor/github.com/dop251/goja/destruct.go b/vendor/github.com/dop251/goja/destruct.go new file mode 100644 index 0000000000..54b99b7502 --- /dev/null +++ b/vendor/github.com/dop251/goja/destruct.go @@ -0,0 +1,277 @@ +package goja + +import ( + "github.com/dop251/goja/unistring" + "reflect" +) + +type destructKeyedSource struct { + r *Runtime + wrapped Value + usedKeys map[Value]struct{} +} + +func newDestructKeyedSource(r *Runtime, wrapped Value) *destructKeyedSource { + return &destructKeyedSource{ + r: r, + wrapped: wrapped, + } +} + +func (r *Runtime) newDestructKeyedSource(wrapped Value) *Object { + return &Object{ + runtime: r, + self: newDestructKeyedSource(r, wrapped), + } +} + +func (d *destructKeyedSource) w() objectImpl { + return d.wrapped.ToObject(d.r).self +} + +func (d *destructKeyedSource) recordKey(key Value) { + if d.usedKeys == nil { + d.usedKeys = make(map[Value]struct{}) + } + d.usedKeys[key] = struct{}{} +} + +func (d *destructKeyedSource) sortLen() int64 { + return d.w().sortLen() +} + +func (d *destructKeyedSource) sortGet(i int64) Value { + return d.w().sortGet(i) +} + +func (d *destructKeyedSource) swap(i int64, i2 int64) { + d.w().swap(i, i2) +} + +func (d *destructKeyedSource) className() string { + return d.w().className() +} + +func (d *destructKeyedSource) getStr(p unistring.String, receiver Value) Value { + d.recordKey(stringValueFromRaw(p)) + return d.w().getStr(p, receiver) +} + +func (d *destructKeyedSource) getIdx(p valueInt, receiver Value) Value { + d.recordKey(p.toString()) + return d.w().getIdx(p, receiver) 
+} + +func (d *destructKeyedSource) getSym(p *Symbol, receiver Value) Value { + d.recordKey(p) + return d.w().getSym(p, receiver) +} + +func (d *destructKeyedSource) getOwnPropStr(u unistring.String) Value { + d.recordKey(stringValueFromRaw(u)) + return d.w().getOwnPropStr(u) +} + +func (d *destructKeyedSource) getOwnPropIdx(v valueInt) Value { + d.recordKey(v.toString()) + return d.w().getOwnPropIdx(v) +} + +func (d *destructKeyedSource) getOwnPropSym(symbol *Symbol) Value { + d.recordKey(symbol) + return d.w().getOwnPropSym(symbol) +} + +func (d *destructKeyedSource) setOwnStr(p unistring.String, v Value, throw bool) bool { + return d.w().setOwnStr(p, v, throw) +} + +func (d *destructKeyedSource) setOwnIdx(p valueInt, v Value, throw bool) bool { + return d.w().setOwnIdx(p, v, throw) +} + +func (d *destructKeyedSource) setOwnSym(p *Symbol, v Value, throw bool) bool { + return d.w().setOwnSym(p, v, throw) +} + +func (d *destructKeyedSource) setForeignStr(p unistring.String, v, receiver Value, throw bool) (res bool, handled bool) { + return d.w().setForeignStr(p, v, receiver, throw) +} + +func (d *destructKeyedSource) setForeignIdx(p valueInt, v, receiver Value, throw bool) (res bool, handled bool) { + return d.w().setForeignIdx(p, v, receiver, throw) +} + +func (d *destructKeyedSource) setForeignSym(p *Symbol, v, receiver Value, throw bool) (res bool, handled bool) { + return d.w().setForeignSym(p, v, receiver, throw) +} + +func (d *destructKeyedSource) hasPropertyStr(u unistring.String) bool { + return d.w().hasPropertyStr(u) +} + +func (d *destructKeyedSource) hasPropertyIdx(idx valueInt) bool { + return d.w().hasPropertyIdx(idx) +} + +func (d *destructKeyedSource) hasPropertySym(s *Symbol) bool { + return d.w().hasPropertySym(s) +} + +func (d *destructKeyedSource) hasOwnPropertyStr(u unistring.String) bool { + return d.w().hasOwnPropertyStr(u) +} + +func (d *destructKeyedSource) hasOwnPropertyIdx(v valueInt) bool { + return d.w().hasOwnPropertyIdx(v) +} + +func 
(d *destructKeyedSource) hasOwnPropertySym(s *Symbol) bool { + return d.w().hasOwnPropertySym(s) +} + +func (d *destructKeyedSource) defineOwnPropertyStr(name unistring.String, desc PropertyDescriptor, throw bool) bool { + return d.w().defineOwnPropertyStr(name, desc, throw) +} + +func (d *destructKeyedSource) defineOwnPropertyIdx(name valueInt, desc PropertyDescriptor, throw bool) bool { + return d.w().defineOwnPropertyIdx(name, desc, throw) +} + +func (d *destructKeyedSource) defineOwnPropertySym(name *Symbol, desc PropertyDescriptor, throw bool) bool { + return d.w().defineOwnPropertySym(name, desc, throw) +} + +func (d *destructKeyedSource) deleteStr(name unistring.String, throw bool) bool { + return d.w().deleteStr(name, throw) +} + +func (d *destructKeyedSource) deleteIdx(idx valueInt, throw bool) bool { + return d.w().deleteIdx(idx, throw) +} + +func (d *destructKeyedSource) deleteSym(s *Symbol, throw bool) bool { + return d.w().deleteSym(s, throw) +} + +func (d *destructKeyedSource) toPrimitiveNumber() Value { + return d.w().toPrimitiveNumber() +} + +func (d *destructKeyedSource) toPrimitiveString() Value { + return d.w().toPrimitiveString() +} + +func (d *destructKeyedSource) toPrimitive() Value { + return d.w().toPrimitive() +} + +func (d *destructKeyedSource) assertCallable() (call func(FunctionCall) Value, ok bool) { + return d.w().assertCallable() +} + +func (d *destructKeyedSource) assertConstructor() func(args []Value, newTarget *Object) *Object { + return d.w().assertConstructor() +} + +func (d *destructKeyedSource) proto() *Object { + return d.w().proto() +} + +func (d *destructKeyedSource) setProto(proto *Object, throw bool) bool { + return d.w().setProto(proto, throw) +} + +func (d *destructKeyedSource) hasInstance(v Value) bool { + return d.w().hasInstance(v) +} + +func (d *destructKeyedSource) isExtensible() bool { + return d.w().isExtensible() +} + +func (d *destructKeyedSource) preventExtensions(throw bool) bool { + return 
d.w().preventExtensions(throw) +} + +type destructKeyedSourceIter struct { + d *destructKeyedSource + wrapped iterNextFunc +} + +func (i *destructKeyedSourceIter) next() (propIterItem, iterNextFunc) { + for { + item, next := i.wrapped() + if next == nil { + return item, nil + } + i.wrapped = next + if _, exists := i.d.usedKeys[stringValueFromRaw(item.name)]; !exists { + return item, i.next + } + } +} + +func (d *destructKeyedSource) enumerateOwnKeys() iterNextFunc { + return (&destructKeyedSourceIter{ + d: d, + wrapped: d.w().enumerateOwnKeys(), + }).next +} + +func (d *destructKeyedSource) export(ctx *objectExportCtx) interface{} { + return d.w().export(ctx) +} + +func (d *destructKeyedSource) exportType() reflect.Type { + return d.w().exportType() +} + +func (d *destructKeyedSource) equal(impl objectImpl) bool { + return d.w().equal(impl) +} + +func (d *destructKeyedSource) ownKeys(all bool, accum []Value) []Value { + var next iterNextFunc + if all { + next = d.enumerateOwnKeys() + } else { + next = (&enumerableIter{ + wrapped: d.enumerateOwnKeys(), + }).next + } + for item, next := next(); next != nil; item, next = next() { + accum = append(accum, stringValueFromRaw(item.name)) + } + return accum +} + +func (d *destructKeyedSource) filterUsedKeys(keys []Value) []Value { + k := 0 + for i, key := range keys { + if _, exists := d.usedKeys[key]; exists { + continue + } + if k != i { + keys[k] = key + } + k++ + } + return keys[:k] +} + +func (d *destructKeyedSource) ownSymbols(all bool, accum []Value) []Value { + return d.filterUsedKeys(d.w().ownSymbols(all, accum)) +} + +func (d *destructKeyedSource) ownPropertyKeys(all bool, accum []Value) []Value { + return d.filterUsedKeys(d.w().ownPropertyKeys(all, accum)) +} + +func (d *destructKeyedSource) _putProp(name unistring.String, value Value, writable, enumerable, configurable bool) Value { + return d.w()._putProp(name, value, writable, enumerable, configurable) +} + +func (d *destructKeyedSource) _putSym(s *Symbol, 
prop Value) { + d.w()._putSym(s, prop) +} diff --git a/vendor/github.com/dop251/goja/file/README.markdown b/vendor/github.com/dop251/goja/file/README.markdown new file mode 100644 index 0000000000..e9228c2f5e --- /dev/null +++ b/vendor/github.com/dop251/goja/file/README.markdown @@ -0,0 +1,110 @@ +# file +-- + import "github.com/dop251/goja/file" + +Package file encapsulates the file abstractions used by the ast & parser. + +## Usage + +#### type File + +```go +type File struct { +} +``` + + +#### func NewFile + +```go +func NewFile(filename, src string, base int) *File +``` + +#### func (*File) Base + +```go +func (fl *File) Base() int +``` + +#### func (*File) Name + +```go +func (fl *File) Name() string +``` + +#### func (*File) Source + +```go +func (fl *File) Source() string +``` + +#### type FileSet + +```go +type FileSet struct { +} +``` + +A FileSet represents a set of source files. + +#### func (*FileSet) AddFile + +```go +func (self *FileSet) AddFile(filename, src string) int +``` +AddFile adds a new file with the given filename and src. + +This an internal method, but exported for cross-package use. + +#### func (*FileSet) File + +```go +func (self *FileSet) File(idx Idx) *File +``` + +#### func (*FileSet) Position + +```go +func (self *FileSet) Position(idx Idx) *Position +``` +Position converts an Idx in the FileSet into a Position. + +#### type Idx + +```go +type Idx int +``` + +Idx is a compact encoding of a source position within a file set. It can be +converted into a Position for a more convenient, but much larger, +representation. + +#### type Position + +```go +type Position struct { + Filename string // The filename where the error occurred, if any + Offset int // The src offset + Line int // The line number, starting at 1 + Column int // The column number, starting at 1 (The character count) + +} +``` + +Position describes an arbitrary source position including the filename, line, +and column location. 
+ +#### func (*Position) String + +```go +func (self *Position) String() string +``` +String returns a string in one of several forms: + + file:line:column A valid position with filename + line:column A valid position without filename + file An invalid position with filename + - An invalid position without filename + +-- +**godocdown** http://github.com/robertkrimen/godocdown diff --git a/vendor/github.com/dop251/goja/file/file.go b/vendor/github.com/dop251/goja/file/file.go new file mode 100644 index 0000000000..78ae1ad901 --- /dev/null +++ b/vendor/github.com/dop251/goja/file/file.go @@ -0,0 +1,215 @@ +// Package file encapsulates the file abstractions used by the ast & parser. +// +package file + +import ( + "fmt" + "path" + "sort" + "sync" + + "github.com/go-sourcemap/sourcemap" +) + +// Idx is a compact encoding of a source position within a file set. +// It can be converted into a Position for a more convenient, but much +// larger, representation. +type Idx int + +// Position describes an arbitrary source position +// including the filename, line, and column location. +type Position struct { + Filename string // The filename where the error occurred, if any + Line int // The line number, starting at 1 + Column int // The column number, starting at 1 (The character count) + +} + +// A Position is valid if the line number is > 0. + +func (self *Position) isValid() bool { + return self.Line > 0 +} + +// String returns a string in one of several forms: +// +// file:line:column A valid position with filename +// line:column A valid position without filename +// file An invalid position with filename +// - An invalid position without filename +// +func (self Position) String() string { + str := self.Filename + if self.isValid() { + if str != "" { + str += ":" + } + str += fmt.Sprintf("%d:%d", self.Line, self.Column) + } + if str == "" { + str = "-" + } + return str +} + +// FileSet + +// A FileSet represents a set of source files. 
+type FileSet struct { + files []*File + last *File +} + +// AddFile adds a new file with the given filename and src. +// +// This an internal method, but exported for cross-package use. +func (self *FileSet) AddFile(filename, src string) int { + base := self.nextBase() + file := &File{ + name: filename, + src: src, + base: base, + } + self.files = append(self.files, file) + self.last = file + return base +} + +func (self *FileSet) nextBase() int { + if self.last == nil { + return 1 + } + return self.last.base + len(self.last.src) + 1 +} + +func (self *FileSet) File(idx Idx) *File { + for _, file := range self.files { + if idx <= Idx(file.base+len(file.src)) { + return file + } + } + return nil +} + +// Position converts an Idx in the FileSet into a Position. +func (self *FileSet) Position(idx Idx) Position { + for _, file := range self.files { + if idx <= Idx(file.base+len(file.src)) { + return file.Position(int(idx) - file.base) + } + } + return Position{} +} + +type File struct { + mu sync.Mutex + name string + src string + base int // This will always be 1 or greater + sourceMap *sourcemap.Consumer + lineOffsets []int + lastScannedOffset int +} + +func NewFile(filename, src string, base int) *File { + return &File{ + name: filename, + src: src, + base: base, + } +} + +func (fl *File) Name() string { + return fl.name +} + +func (fl *File) Source() string { + return fl.src +} + +func (fl *File) Base() int { + return fl.base +} + +func (fl *File) SetSourceMap(m *sourcemap.Consumer) { + fl.sourceMap = m +} + +func (fl *File) Position(offset int) Position { + var line int + var lineOffsets []int + fl.mu.Lock() + if offset > fl.lastScannedOffset { + line = fl.scanTo(offset) + lineOffsets = fl.lineOffsets + fl.mu.Unlock() + } else { + lineOffsets = fl.lineOffsets + fl.mu.Unlock() + line = sort.Search(len(lineOffsets), func(x int) bool { return lineOffsets[x] > offset }) - 1 + } + + var lineStart int + if line >= 0 { + lineStart = lineOffsets[line] + } + + row := line 
+ 2 + col := offset - lineStart + 1 + + if fl.sourceMap != nil { + if source, _, row, col, ok := fl.sourceMap.Source(row, col); ok { + if !path.IsAbs(source) { + source = path.Join(path.Dir(fl.name), source) + } + return Position{ + Filename: source, + Line: row, + Column: col, + } + } + } + + return Position{ + Filename: fl.name, + Line: row, + Column: col, + } +} + +func findNextLineStart(s string) int { + for pos, ch := range s { + switch ch { + case '\r': + if pos < len(s)-1 && s[pos+1] == '\n' { + return pos + 2 + } + return pos + 1 + case '\n': + return pos + 1 + case '\u2028', '\u2029': + return pos + 3 + } + } + return -1 +} + +func (fl *File) scanTo(offset int) int { + o := fl.lastScannedOffset + for o < offset { + p := findNextLineStart(fl.src[o:]) + if p == -1 { + fl.lastScannedOffset = len(fl.src) + return len(fl.lineOffsets) - 1 + } + o = o + p + fl.lineOffsets = append(fl.lineOffsets, o) + } + fl.lastScannedOffset = o + + if o == offset { + return len(fl.lineOffsets) - 1 + } + + return len(fl.lineOffsets) - 2 +} diff --git a/vendor/github.com/dop251/goja/ftoa/LICENSE_LUCENE b/vendor/github.com/dop251/goja/ftoa/LICENSE_LUCENE new file mode 100644 index 0000000000..c8da489c56 --- /dev/null +++ b/vendor/github.com/dop251/goja/ftoa/LICENSE_LUCENE @@ -0,0 +1,21 @@ +Copyright (C) 1998, 1999 by Lucent Technologies +All Rights Reserved + +Permission to use, copy, modify, and distribute this software and +its documentation for any purpose and without fee is hereby +granted, provided that the above copyright notice appear in all +copies and that both that the copyright notice and this +permission notice and warranty disclaimer appear in supporting +documentation, and that the name of Lucent or any of its entities +not be used in advertising or publicity pertaining to +distribution of the software without specific, written prior +permission. 
+ +LUCENT DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, +INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. +IN NO EVENT SHALL LUCENT OR ANY OF ITS ENTITIES BE LIABLE FOR ANY +SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER +IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF +THIS SOFTWARE. diff --git a/vendor/github.com/dop251/goja/ftoa/common.go b/vendor/github.com/dop251/goja/ftoa/common.go new file mode 100644 index 0000000000..207fb5fac0 --- /dev/null +++ b/vendor/github.com/dop251/goja/ftoa/common.go @@ -0,0 +1,151 @@ +/* +Package ftoa provides ECMAScript-compliant floating point number conversion to string. + +It contains code ported from Rhino (https://github.com/mozilla/rhino/blob/master/src/org/mozilla/javascript/DToA.java) +as well as from the original code by David M. Gay. + +See LICENSE_LUCENE for the original copyright message and disclaimer. 
+ +*/ +package ftoa + +import ( + "math" +) + +const ( + frac_mask = 0xfffff + exp_shift = 20 + exp_msk1 = 0x100000 + + exp_shiftL = 52 + exp_mask_shifted = 0x7ff + frac_maskL = 0xfffffffffffff + exp_msk1L = 0x10000000000000 + exp_shift1 = 20 + exp_mask = 0x7ff00000 + bias = 1023 + p = 53 + bndry_mask = 0xfffff + log2P = 1 +) + +func lo0bits(x uint32) (k int) { + + if (x & 7) != 0 { + if (x & 1) != 0 { + return 0 + } + if (x & 2) != 0 { + return 1 + } + return 2 + } + if (x & 0xffff) == 0 { + k = 16 + x >>= 16 + } + if (x & 0xff) == 0 { + k += 8 + x >>= 8 + } + if (x & 0xf) == 0 { + k += 4 + x >>= 4 + } + if (x & 0x3) == 0 { + k += 2 + x >>= 2 + } + if (x & 1) == 0 { + k++ + x >>= 1 + if (x & 1) == 0 { + return 32 + } + } + return +} + +func hi0bits(x uint32) (k int) { + + if (x & 0xffff0000) == 0 { + k = 16 + x <<= 16 + } + if (x & 0xff000000) == 0 { + k += 8 + x <<= 8 + } + if (x & 0xf0000000) == 0 { + k += 4 + x <<= 4 + } + if (x & 0xc0000000) == 0 { + k += 2 + x <<= 2 + } + if (x & 0x80000000) == 0 { + k++ + if (x & 0x40000000) == 0 { + return 32 + } + } + return +} + +func stuffBits(bits []byte, offset int, val uint32) { + bits[offset] = byte(val >> 24) + bits[offset+1] = byte(val >> 16) + bits[offset+2] = byte(val >> 8) + bits[offset+3] = byte(val) +} + +func d2b(d float64, b []byte) (e, bits int, dblBits []byte) { + dBits := math.Float64bits(d) + d0 := uint32(dBits >> 32) + d1 := uint32(dBits) + + z := d0 & frac_mask + d0 &= 0x7fffffff /* clear sign bit, which we ignore */ + + var de, k, i int + if de = int(d0 >> exp_shift); de != 0 { + z |= exp_msk1 + } + + y := d1 + if y != 0 { + dblBits = b[:8] + k = lo0bits(y) + y >>= k + if k != 0 { + stuffBits(dblBits, 4, y|z<<(32-k)) + z >>= k + } else { + stuffBits(dblBits, 4, y) + } + stuffBits(dblBits, 0, z) + if z != 0 { + i = 2 + } else { + i = 1 + } + } else { + dblBits = b[:4] + k = lo0bits(z) + z >>= k + stuffBits(dblBits, 0, z) + k += 32 + i = 1 + } + + if de != 0 { + e = de - bias - (p - 1) + k + bits = p - 
k + } else { + e = de - bias - (p - 1) + 1 + k + bits = 32*i - hi0bits(z) + } + return +} diff --git a/vendor/github.com/dop251/goja/ftoa/ftoa.go b/vendor/github.com/dop251/goja/ftoa/ftoa.go new file mode 100644 index 0000000000..59b516d745 --- /dev/null +++ b/vendor/github.com/dop251/goja/ftoa/ftoa.go @@ -0,0 +1,698 @@ +package ftoa + +import ( + "math" + "math/big" +) + +const ( + exp_11 = 0x3ff00000 + frac_mask1 = 0xfffff + bletch = 0x10 + quick_max = 14 + int_max = 14 +) + +var ( + tens = [...]float64{ + 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, + 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, + 1e20, 1e21, 1e22, + } + + bigtens = [...]float64{1e16, 1e32, 1e64, 1e128, 1e256} + + big5 = big.NewInt(5) + big10 = big.NewInt(10) + + p05 = []*big.Int{big5, big.NewInt(25), big.NewInt(125)} + pow5Cache [7]*big.Int + + dtoaModes = []int{ + ModeStandard: 0, + ModeStandardExponential: 0, + ModeFixed: 3, + ModeExponential: 2, + ModePrecision: 2, + } +) + +/* +d must be > 0 and must not be Inf + +mode: + 0 ==> shortest string that yields d when read in + and rounded to nearest. + 1 ==> like 0, but with Steele & White stopping rule; + e.g. with IEEE P754 arithmetic , mode 0 gives + 1e23 whereas mode 1 gives 9.999999999999999e22. + 2 ==> max(1,ndigits) significant digits. This gives a + return value similar to that of ecvt, except + that trailing zeros are suppressed. + 3 ==> through ndigits past the decimal point. This + gives a return value similar to that from fcvt, + except that trailing zeros are suppressed, and + ndigits can be negative. + 4,5 ==> similar to 2 and 3, respectively, but (in + round-nearest mode) with the tests of mode 0 to + possibly return a shorter string that rounds to d. + With IEEE arithmetic and compilation with + -DHonor_FLT_ROUNDS, modes 4 and 5 behave the same + as modes 2 and 3 when FLT_ROUNDS != 1. + 6-9 ==> Debugging modes similar to mode - 4: don't try + fast floating-point estimate (if applicable). 
+ + Values of mode other than 0-9 are treated as mode 0. +*/ +func ftoa(d float64, mode int, biasUp bool, ndigits int, buf []byte) ([]byte, int) { + startPos := len(buf) + dblBits := make([]byte, 0, 8) + be, bbits, dblBits := d2b(d, dblBits) + + dBits := math.Float64bits(d) + word0 := uint32(dBits >> 32) + word1 := uint32(dBits) + + i := int((word0 >> exp_shift1) & (exp_mask >> exp_shift1)) + var d2 float64 + var denorm bool + if i != 0 { + d2 = setWord0(d, (word0&frac_mask1)|exp_11) + i -= bias + denorm = false + } else { + /* d is denormalized */ + i = bbits + be + (bias + (p - 1) - 1) + var x uint64 + if i > 32 { + x = uint64(word0)<<(64-i) | uint64(word1)>>(i-32) + } else { + x = uint64(word1) << (32 - i) + } + d2 = setWord0(float64(x), uint32((x>>32)-31*exp_mask)) + i -= (bias + (p - 1) - 1) + 1 + denorm = true + } + /* At this point d = f*2^i, where 1 <= f < 2. d2 is an approximation of f. */ + ds := (d2-1.5)*0.289529654602168 + 0.1760912590558 + float64(i)*0.301029995663981 + k := int(ds) + if ds < 0.0 && ds != float64(k) { + k-- /* want k = floor(ds) */ + } + k_check := true + if k >= 0 && k < len(tens) { + if d < tens[k] { + k-- + } + k_check = false + } + /* At this point floor(log10(d)) <= k <= floor(log10(d))+1. + If k_check is zero, we're guaranteed that k = floor(log10(d)). */ + j := bbits - i - 1 + var b2, s2, b5, s5 int + /* At this point d = b/2^j, where b is an odd integer. */ + if j >= 0 { + b2 = 0 + s2 = j + } else { + b2 = -j + s2 = 0 + } + if k >= 0 { + b5 = 0 + s5 = k + s2 += k + } else { + b2 -= k + b5 = -k + s5 = 0 + } + /* At this point d/10^k = (b * 2^b2 * 5^b5) / (2^s2 * 5^s5), where b is an odd integer, + b2 >= 0, b5 >= 0, s2 >= 0, and s5 >= 0. 
*/ + if mode < 0 || mode > 9 { + mode = 0 + } + try_quick := true + if mode > 5 { + mode -= 4 + try_quick = false + } + leftright := true + var ilim, ilim1 int + switch mode { + case 0, 1: + ilim, ilim1 = -1, -1 + ndigits = 0 + case 2: + leftright = false + fallthrough + case 4: + if ndigits <= 0 { + ndigits = 1 + } + ilim, ilim1 = ndigits, ndigits + case 3: + leftright = false + fallthrough + case 5: + i = ndigits + k + 1 + ilim = i + ilim1 = i - 1 + } + /* ilim is the maximum number of significant digits we want, based on k and ndigits. */ + /* ilim1 is the maximum number of significant digits we want, based on k and ndigits, + when it turns out that k was computed too high by one. */ + fast_failed := false + if ilim >= 0 && ilim <= quick_max && try_quick { + + /* Try to get by with floating-point arithmetic. */ + + i = 0 + d2 = d + k0 := k + ilim0 := ilim + ieps := 2 /* conservative */ + /* Divide d by 10^k, keeping track of the roundoff error and avoiding overflows. */ + if k > 0 { + ds = tens[k&0xf] + j = k >> 4 + if (j & bletch) != 0 { + /* prevent overflows */ + j &= bletch - 1 + d /= bigtens[len(bigtens)-1] + ieps++ + } + for ; j != 0; i++ { + if (j & 1) != 0 { + ieps++ + ds *= bigtens[i] + } + j >>= 1 + } + d /= ds + } else if j1 := -k; j1 != 0 { + d *= tens[j1&0xf] + for j = j1 >> 4; j != 0; i++ { + if (j & 1) != 0 { + ieps++ + d *= bigtens[i] + } + j >>= 1 + } + } + /* Check that k was computed correctly. */ + if k_check && d < 1.0 && ilim > 0 { + if ilim1 <= 0 { + fast_failed = true + } else { + ilim = ilim1 + k-- + d *= 10. + ieps++ + } + } + /* eps bounds the cumulative error. 
*/ + eps := float64(ieps)*d + 7.0 + eps = setWord0(eps, _word0(eps)-(p-1)*exp_msk1) + if ilim == 0 { + d -= 5.0 + if d > eps { + buf = append(buf, '1') + k++ + return buf, k + 1 + } + if d < -eps { + buf = append(buf, '0') + return buf, 1 + } + fast_failed = true + } + if !fast_failed { + fast_failed = true + if leftright { + /* Use Steele & White method of only + * generating digits needed. + */ + eps = 0.5/tens[ilim-1] - eps + for i = 0; ; { + l := int64(d) + d -= float64(l) + buf = append(buf, byte('0'+l)) + if d < eps { + return buf, k + 1 + } + if 1.0-d < eps { + buf, k = bumpUp(buf, k) + return buf, k + 1 + } + i++ + if i >= ilim { + break + } + eps *= 10.0 + d *= 10.0 + } + } else { + /* Generate ilim digits, then fix them up. */ + eps *= tens[ilim-1] + for i = 1; ; i++ { + l := int64(d) + d -= float64(l) + buf = append(buf, byte('0'+l)) + if i == ilim { + if d > 0.5+eps { + buf, k = bumpUp(buf, k) + return buf, k + 1 + } else if d < 0.5-eps { + buf = stripTrailingZeroes(buf, startPos) + return buf, k + 1 + } + break + } + d *= 10.0 + } + } + } + if fast_failed { + buf = buf[:startPos] + d = d2 + k = k0 + ilim = ilim0 + } + } + + /* Do we have a "small" integer? */ + if be >= 0 && k <= int_max { + /* Yes. */ + ds = tens[k] + if ndigits < 0 && ilim <= 0 { + if ilim < 0 || d < 5*ds || (!biasUp && d == 5*ds) { + buf = buf[:startPos] + buf = append(buf, '0') + return buf, 1 + } + buf = append(buf, '1') + k++ + return buf, k + 1 + } + for i = 1; ; i++ { + l := int64(d / ds) + d -= float64(l) * ds + buf = append(buf, byte('0'+l)) + if i == ilim { + d += d + if (d > ds) || (d == ds && (((l & 1) != 0) || biasUp)) { + buf, k = bumpUp(buf, k) + } + break + } + d *= 10.0 + if d == 0 { + break + } + } + return buf, k + 1 + } + + m2 := b2 + m5 := b5 + var mhi, mlo *big.Int + if leftright { + if mode < 2 { + if denorm { + i = be + (bias + (p - 1) - 1 + 1) + } else { + i = 1 + p - bbits + } + /* i is 1 plus the number of trailing zero bits in d's significand. 
Thus, + (2^m2 * 5^m5) / (2^(s2+i) * 5^s5) = (1/2 lsb of d)/10^k. */ + } else { + j = ilim - 1 + if m5 >= j { + m5 -= j + } else { + j -= m5 + s5 += j + b5 += j + m5 = 0 + } + i = ilim + if i < 0 { + m2 -= i + i = 0 + } + /* (2^m2 * 5^m5) / (2^(s2+i) * 5^s5) = (1/2 * 10^(1-ilim))/10^k. */ + } + b2 += i + s2 += i + mhi = big.NewInt(1) + /* (mhi * 2^m2 * 5^m5) / (2^s2 * 5^s5) = one-half of last printed (when mode >= 2) or + input (when mode < 2) significant digit, divided by 10^k. */ + } + + /* We still have d/10^k = (b * 2^b2 * 5^b5) / (2^s2 * 5^s5). Reduce common factors in + b2, m2, and s2 without changing the equalities. */ + if m2 > 0 && s2 > 0 { + if m2 < s2 { + i = m2 + } else { + i = s2 + } + b2 -= i + m2 -= i + s2 -= i + } + + b := new(big.Int).SetBytes(dblBits) + /* Fold b5 into b and m5 into mhi. */ + if b5 > 0 { + if leftright { + if m5 > 0 { + pow5mult(mhi, m5) + b.Mul(mhi, b) + } + j = b5 - m5 + if j != 0 { + pow5mult(b, j) + } + } else { + pow5mult(b, b5) + } + } + /* Now we have d/10^k = (b * 2^b2) / (2^s2 * 5^s5) and + (mhi * 2^m2) / (2^s2 * 5^s5) = one-half of last printed or input significant digit, divided by 10^k. */ + + S := big.NewInt(1) + if s5 > 0 { + pow5mult(S, s5) + } + /* Now we have d/10^k = (b * 2^b2) / (S * 2^s2) and + (mhi * 2^m2) / (S * 2^s2) = one-half of last printed or input significant digit, divided by 10^k. */ + + /* Check for special case that d is a normalized power of 2. */ + spec_case := false + if mode < 2 { + if (_word1(d) == 0) && ((_word0(d) & bndry_mask) == 0) && + ((_word0(d) & (exp_mask & (exp_mask << 1))) != 0) { + /* The special case. Here we want to be within a quarter of the last input + significant digit instead of one half of it when the decimal output string's value is less than d. */ + b2 += log2P + s2 += log2P + spec_case = true + } + } + + /* Arrange for convenient computation of quotients: + * shift left if necessary so divisor has 4 leading 0 bits. 
+ * + * Perhaps we should just compute leading 28 bits of S once + * and for all and pass them and a shift to quorem, so it + * can do shifts and ors to compute the numerator for q. + */ + var zz int + if s5 != 0 { + S_bytes := S.Bytes() + var S_hiWord uint32 + for idx := 0; idx < 4; idx++ { + S_hiWord = S_hiWord << 8 + if idx < len(S_bytes) { + S_hiWord |= uint32(S_bytes[idx]) + } + } + zz = 32 - hi0bits(S_hiWord) + } else { + zz = 1 + } + i = (zz + s2) & 0x1f + if i != 0 { + i = 32 - i + } + /* i is the number of leading zero bits in the most significant word of S*2^s2. */ + if i > 4 { + i -= 4 + b2 += i + m2 += i + s2 += i + } else if i < 4 { + i += 28 + b2 += i + m2 += i + s2 += i + } + /* Now S*2^s2 has exactly four leading zero bits in its most significant word. */ + if b2 > 0 { + b = b.Lsh(b, uint(b2)) + } + if s2 > 0 { + S.Lsh(S, uint(s2)) + } + /* Now we have d/10^k = b/S and + (mhi * 2^m2) / S = maximum acceptable error, divided by 10^k. */ + if k_check { + if b.Cmp(S) < 0 { + k-- + b.Mul(b, big10) /* we botched the k estimate */ + if leftright { + mhi.Mul(mhi, big10) + } + ilim = ilim1 + } + } + /* At this point 1 <= d/10^k = b/S < 10. */ + + if ilim <= 0 && mode > 2 { + /* We're doing fixed-mode output and d is less than the minimum nonzero output in this mode. + Output either zero or the minimum nonzero output depending on which is closer to d. */ + if ilim >= 0 { + i = b.Cmp(S.Mul(S, big5)) + } + if ilim < 0 || i < 0 || i == 0 && !biasUp { + /* Always emit at least one digit. If the number appears to be zero + using the current mode, then emit one '0' digit and set decpt to 1. */ + buf = buf[:startPos] + buf = append(buf, '0') + return buf, 1 + } + buf = append(buf, '1') + k++ + return buf, k + 1 + } + + var dig byte + if leftright { + if m2 > 0 { + mhi.Lsh(mhi, uint(m2)) + } + + /* Compute mlo -- check for special case + * that d is a normalized power of 2. 
+ */ + + mlo = mhi + if spec_case { + mhi = mlo + mhi = new(big.Int).Lsh(mhi, log2P) + } + /* mlo/S = maximum acceptable error, divided by 10^k, if the output is less than d. */ + /* mhi/S = maximum acceptable error, divided by 10^k, if the output is greater than d. */ + var z, delta big.Int + for i = 1; ; i++ { + z.DivMod(b, S, b) + dig = byte(z.Int64() + '0') + /* Do we yet have the shortest decimal string + * that will round to d? + */ + j = b.Cmp(mlo) + /* j is b/S compared with mlo/S. */ + delta.Sub(S, mhi) + var j1 int + if delta.Sign() <= 0 { + j1 = 1 + } else { + j1 = b.Cmp(&delta) + } + /* j1 is b/S compared with 1 - mhi/S. */ + if (j1 == 0) && (mode == 0) && ((_word1(d) & 1) == 0) { + if dig == '9' { + var flag bool + buf = append(buf, '9') + if buf, flag = roundOff(buf, startPos); flag { + k++ + buf = append(buf, '1') + } + return buf, k + 1 + } + if j > 0 { + dig++ + } + buf = append(buf, dig) + return buf, k + 1 + } + if (j < 0) || ((j == 0) && (mode == 0) && ((_word1(d) & 1) == 0)) { + if j1 > 0 { + /* Either dig or dig+1 would work here as the least significant decimal digit. + Use whichever would produce a decimal value closer to d. 
*/ + b.Lsh(b, 1) + j1 = b.Cmp(S) + if (j1 > 0) || (j1 == 0 && (((dig & 1) == 1) || biasUp)) { + dig++ + if dig == '9' { + buf = append(buf, '9') + buf, flag := roundOff(buf, startPos) + if flag { + k++ + buf = append(buf, '1') + } + return buf, k + 1 + } + } + } + buf = append(buf, dig) + return buf, k + 1 + } + if j1 > 0 { + if dig == '9' { /* possible if i == 1 */ + buf = append(buf, '9') + buf, flag := roundOff(buf, startPos) + if flag { + k++ + buf = append(buf, '1') + } + return buf, k + 1 + } + buf = append(buf, dig+1) + return buf, k + 1 + } + buf = append(buf, dig) + if i == ilim { + break + } + b.Mul(b, big10) + if mlo == mhi { + mhi.Mul(mhi, big10) + } else { + mlo.Mul(mlo, big10) + mhi.Mul(mhi, big10) + } + } + } else { + var z big.Int + for i = 1; ; i++ { + z.DivMod(b, S, b) + dig = byte(z.Int64() + '0') + buf = append(buf, dig) + if i >= ilim { + break + } + + b.Mul(b, big10) + } + } + /* Round off last digit */ + + b.Lsh(b, 1) + j = b.Cmp(S) + if (j > 0) || (j == 0 && (((dig & 1) == 1) || biasUp)) { + var flag bool + buf, flag = roundOff(buf, startPos) + if flag { + k++ + buf = append(buf, '1') + return buf, k + 1 + } + } else { + buf = stripTrailingZeroes(buf, startPos) + } + + return buf, k + 1 +} + +func bumpUp(buf []byte, k int) ([]byte, int) { + var lastCh byte + stop := 0 + if len(buf) > 0 && buf[0] == '-' { + stop = 1 + } + for { + lastCh = buf[len(buf)-1] + buf = buf[:len(buf)-1] + if lastCh != '9' { + break + } + if len(buf) == stop { + k++ + lastCh = '0' + break + } + } + buf = append(buf, lastCh+1) + return buf, k +} + +func setWord0(d float64, w uint32) float64 { + dBits := math.Float64bits(d) + return math.Float64frombits(uint64(w)<<32 | dBits&0xffffffff) +} + +func _word0(d float64) uint32 { + dBits := math.Float64bits(d) + return uint32(dBits >> 32) +} + +func _word1(d float64) uint32 { + dBits := math.Float64bits(d) + return uint32(dBits) +} + +func stripTrailingZeroes(buf []byte, startPos int) []byte { + bl := len(buf) - 1 + for bl >= 
startPos && buf[bl] == '0' { + bl-- + } + return buf[:bl+1] +} + +/* Set b = b * 5^k. k must be nonnegative. */ +func pow5mult(b *big.Int, k int) *big.Int { + if k < (1 << (len(pow5Cache) + 2)) { + i := k & 3 + if i != 0 { + b.Mul(b, p05[i-1]) + } + k >>= 2 + i = 0 + for { + if k&1 != 0 { + b.Mul(b, pow5Cache[i]) + } + k >>= 1 + if k == 0 { + break + } + i++ + } + return b + } + return b.Mul(b, new(big.Int).Exp(big5, big.NewInt(int64(k)), nil)) +} + +func roundOff(buf []byte, startPos int) ([]byte, bool) { + i := len(buf) + for i != startPos { + i-- + if buf[i] != '9' { + buf[i]++ + return buf[:i+1], false + } + } + return buf[:startPos], true +} + +func init() { + p := big.NewInt(625) + pow5Cache[0] = p + for i := 1; i < len(pow5Cache); i++ { + p = new(big.Int).Mul(p, p) + pow5Cache[i] = p + } +} diff --git a/vendor/github.com/dop251/goja/ftoa/ftobasestr.go b/vendor/github.com/dop251/goja/ftoa/ftobasestr.go new file mode 100644 index 0000000000..9dc9b2d0df --- /dev/null +++ b/vendor/github.com/dop251/goja/ftoa/ftobasestr.go @@ -0,0 +1,153 @@ +package ftoa + +import ( + "fmt" + "math" + "math/big" + "strconv" + "strings" +) + +const ( + digits = "0123456789abcdefghijklmnopqrstuvwxyz" +) + +func FToBaseStr(num float64, radix int) string { + var negative bool + if num < 0 { + num = -num + negative = true + } + + dfloor := math.Floor(num) + ldfloor := int64(dfloor) + var intDigits string + if dfloor == float64(ldfloor) { + if negative { + ldfloor = -ldfloor + } + intDigits = strconv.FormatInt(ldfloor, radix) + } else { + floorBits := math.Float64bits(num) + exp := int(floorBits>>exp_shiftL) & exp_mask_shifted + var mantissa int64 + if exp == 0 { + mantissa = int64((floorBits & frac_maskL) << 1) + } else { + mantissa = int64((floorBits & frac_maskL) | exp_msk1L) + } + + if negative { + mantissa = -mantissa + } + exp -= 1075 + x := big.NewInt(mantissa) + if exp > 0 { + x.Lsh(x, uint(exp)) + } else if exp < 0 { + x.Rsh(x, uint(-exp)) + } + intDigits = x.Text(radix) + } + 
+ if num == dfloor { + // No fraction part + return intDigits + } else { + /* We have a fraction. */ + var buffer strings.Builder + buffer.WriteString(intDigits) + buffer.WriteByte('.') + df := num - dfloor + + dBits := math.Float64bits(num) + word0 := uint32(dBits >> 32) + word1 := uint32(dBits) + + dblBits := make([]byte, 0, 8) + e, _, dblBits := d2b(df, dblBits) + // JS_ASSERT(e < 0); + /* At this point df = b * 2^e. e must be less than zero because 0 < df < 1. */ + + s2 := -int((word0 >> exp_shift1) & (exp_mask >> exp_shift1)) + if s2 == 0 { + s2 = -1 + } + s2 += bias + p + /* 1/2^s2 = (nextDouble(d) - d)/2 */ + // JS_ASSERT(-s2 < e); + if -s2 >= e { + panic(fmt.Errorf("-s2 >= e: %d, %d", -s2, e)) + } + mlo := big.NewInt(1) + mhi := mlo + if (word1 == 0) && ((word0 & bndry_mask) == 0) && ((word0 & (exp_mask & (exp_mask << 1))) != 0) { + /* The special case. Here we want to be within a quarter of the last input + significant digit instead of one half of it when the output string's value is less than d. */ + s2 += log2P + mhi = big.NewInt(1 << log2P) + } + + b := new(big.Int).SetBytes(dblBits) + b.Lsh(b, uint(e+s2)) + s := big.NewInt(1) + s.Lsh(s, uint(s2)) + /* At this point we have the following: + * s = 2^s2; + * 1 > df = b/2^s2 > 0; + * (d - prevDouble(d))/2 = mlo/2^s2; + * (nextDouble(d) - d)/2 = mhi/2^s2. */ + bigBase := big.NewInt(int64(radix)) + + done := false + m := &big.Int{} + delta := &big.Int{} + for !done { + b.Mul(b, bigBase) + b.DivMod(b, s, m) + digit := byte(b.Int64()) + b, m = m, b + mlo.Mul(mlo, bigBase) + if mlo != mhi { + mhi.Mul(mhi, bigBase) + } + + /* Do we yet have the shortest string that will round to d? */ + j := b.Cmp(mlo) + /* j is b/2^s2 compared with mlo/2^s2. */ + + delta.Sub(s, mhi) + var j1 int + if delta.Sign() <= 0 { + j1 = 1 + } else { + j1 = b.Cmp(delta) + } + /* j1 is b/2^s2 compared with 1 - mhi/2^s2. 
*/ + if j1 == 0 && (word1&1) == 0 { + if j > 0 { + digit++ + } + done = true + } else if j < 0 || (j == 0 && ((word1 & 1) == 0)) { + if j1 > 0 { + /* Either dig or dig+1 would work here as the least significant digit. + Use whichever would produce an output value closer to d. */ + b.Lsh(b, 1) + j1 = b.Cmp(s) + if j1 > 0 { /* The even test (|| (j1 == 0 && (digit & 1))) is not here because it messes up odd base output such as 3.5 in base 3. */ + digit++ + } + } + done = true + } else if j1 > 0 { + digit++ + done = true + } + // JS_ASSERT(digit < (uint32)base); + buffer.WriteByte(digits[digit]) + } + + return buffer.String() + } +} diff --git a/vendor/github.com/dop251/goja/ftoa/ftostr.go b/vendor/github.com/dop251/goja/ftoa/ftostr.go new file mode 100644 index 0000000000..a9d2d24094 --- /dev/null +++ b/vendor/github.com/dop251/goja/ftoa/ftostr.go @@ -0,0 +1,147 @@ +package ftoa + +import ( + "math" + "strconv" + + "github.com/dop251/goja/ftoa/internal/fast" +) + +type FToStrMode int + +const ( + // Either fixed or exponential format; round-trip + ModeStandard FToStrMode = iota + // Always exponential format; round-trip + ModeStandardExponential + // Round to digits after the decimal point; exponential if number is large + ModeFixed + // Always exponential format; significant digits + ModeExponential + // Either fixed or exponential format; significant digits + ModePrecision +) + +func insert(b []byte, p int, c byte) []byte { + b = append(b, 0) + copy(b[p+1:], b[p:]) + b[p] = c + return b +} + +func expand(b []byte, delta int) []byte { + newLen := len(b) + delta + if newLen <= cap(b) { + return b[:newLen] + } + b1 := make([]byte, newLen) + copy(b1, b) + return b1 +} + +func FToStr(d float64, mode FToStrMode, precision int, buffer []byte) []byte { + if math.IsNaN(d) { + buffer = append(buffer, "NaN"...) + return buffer + } + if math.IsInf(d, 0) { + if math.Signbit(d) { + buffer = append(buffer, '-') + } + buffer = append(buffer, "Infinity"...) 
+ return buffer + } + + if mode == ModeFixed && (d >= 1e21 || d <= -1e21) { + mode = ModeStandard + } + + var decPt int + var ok bool + startPos := len(buffer) + + if d != 0 { // also matches -0 + if d < 0 { + buffer = append(buffer, '-') + d = -d + startPos++ + } + switch mode { + case ModeStandard, ModeStandardExponential: + buffer, decPt, ok = fast.Dtoa(d, fast.ModeShortest, 0, buffer) + case ModeExponential, ModePrecision: + buffer, decPt, ok = fast.Dtoa(d, fast.ModePrecision, precision, buffer) + } + } else { + buffer = append(buffer, '0') + decPt, ok = 1, true + } + if !ok { + buffer, decPt = ftoa(d, dtoaModes[mode], mode >= ModeFixed, precision, buffer) + } + exponentialNotation := false + minNDigits := 0 /* Minimum number of significand digits required by mode and precision */ + nDigits := len(buffer) - startPos + + switch mode { + case ModeStandard: + if decPt < -5 || decPt > 21 { + exponentialNotation = true + } else { + minNDigits = decPt + } + case ModeFixed: + if precision >= 0 { + minNDigits = decPt + precision + } else { + minNDigits = decPt + } + case ModeExponential: + // JS_ASSERT(precision > 0); + minNDigits = precision + fallthrough + case ModeStandardExponential: + exponentialNotation = true + case ModePrecision: + // JS_ASSERT(precision > 0); + minNDigits = precision + if decPt < -5 || decPt > precision { + exponentialNotation = true + } + } + + for nDigits < minNDigits { + buffer = append(buffer, '0') + nDigits++ + } + + if exponentialNotation { + /* Insert a decimal point if more than one significand digit */ + if nDigits != 1 { + buffer = insert(buffer, startPos+1, '.') + } + buffer = append(buffer, 'e') + if decPt-1 >= 0 { + buffer = append(buffer, '+') + } + buffer = strconv.AppendInt(buffer, int64(decPt-1), 10) + } else if decPt != nDigits { + /* Some kind of a fraction in fixed notation */ + // JS_ASSERT(decPt <= nDigits); + if decPt > 0 { + /* dd...dd . dd...dd */ + buffer = insert(buffer, startPos+decPt, '.') + } else { + /* 0 . 
00...00dd...dd */ + buffer = expand(buffer, 2-decPt) + copy(buffer[startPos+2-decPt:], buffer[startPos:]) + buffer[startPos] = '0' + buffer[startPos+1] = '.' + for i := startPos + 2; i < startPos+2-decPt; i++ { + buffer[i] = '0' + } + } + } + + return buffer +} diff --git a/vendor/github.com/dop251/goja/ftoa/internal/fast/LICENSE_V8 b/vendor/github.com/dop251/goja/ftoa/internal/fast/LICENSE_V8 new file mode 100644 index 0000000000..bbad26627e --- /dev/null +++ b/vendor/github.com/dop251/goja/ftoa/internal/fast/LICENSE_V8 @@ -0,0 +1,26 @@ +Copyright 2014, the V8 project authors. All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of Google Inc. nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/dop251/goja/ftoa/internal/fast/cachedpower.go b/vendor/github.com/dop251/goja/ftoa/internal/fast/cachedpower.go new file mode 100644 index 0000000000..4f7e49fc08 --- /dev/null +++ b/vendor/github.com/dop251/goja/ftoa/internal/fast/cachedpower.go @@ -0,0 +1,120 @@ +package fast + +import "math" + +const ( + kCachedPowersOffset = 348 // -1 * the first decimal_exponent. + kD_1_LOG2_10 = 0.30102999566398114 // 1 / lg(10) + kDecimalExponentDistance = 8 +) + +type cachedPower struct { + significand uint64 + binary_exponent int16 + decimal_exponent int16 +} + +var ( + cachedPowers = [...]cachedPower{ + {0xFA8FD5A0081C0288, -1220, -348}, + {0xBAAEE17FA23EBF76, -1193, -340}, + {0x8B16FB203055AC76, -1166, -332}, + {0xCF42894A5DCE35EA, -1140, -324}, + {0x9A6BB0AA55653B2D, -1113, -316}, + {0xE61ACF033D1A45DF, -1087, -308}, + {0xAB70FE17C79AC6CA, -1060, -300}, + {0xFF77B1FCBEBCDC4F, -1034, -292}, + {0xBE5691EF416BD60C, -1007, -284}, + {0x8DD01FAD907FFC3C, -980, -276}, + {0xD3515C2831559A83, -954, -268}, + {0x9D71AC8FADA6C9B5, -927, -260}, + {0xEA9C227723EE8BCB, -901, -252}, + {0xAECC49914078536D, -874, -244}, + {0x823C12795DB6CE57, -847, -236}, + {0xC21094364DFB5637, -821, -228}, + {0x9096EA6F3848984F, -794, -220}, + {0xD77485CB25823AC7, -768, -212}, + {0xA086CFCD97BF97F4, -741, -204}, + {0xEF340A98172AACE5, -715, -196}, + {0xB23867FB2A35B28E, -688, -188}, + {0x84C8D4DFD2C63F3B, -661, -180}, + {0xC5DD44271AD3CDBA, -635, -172}, + 
{0x936B9FCEBB25C996, -608, -164}, + {0xDBAC6C247D62A584, -582, -156}, + {0xA3AB66580D5FDAF6, -555, -148}, + {0xF3E2F893DEC3F126, -529, -140}, + {0xB5B5ADA8AAFF80B8, -502, -132}, + {0x87625F056C7C4A8B, -475, -124}, + {0xC9BCFF6034C13053, -449, -116}, + {0x964E858C91BA2655, -422, -108}, + {0xDFF9772470297EBD, -396, -100}, + {0xA6DFBD9FB8E5B88F, -369, -92}, + {0xF8A95FCF88747D94, -343, -84}, + {0xB94470938FA89BCF, -316, -76}, + {0x8A08F0F8BF0F156B, -289, -68}, + {0xCDB02555653131B6, -263, -60}, + {0x993FE2C6D07B7FAC, -236, -52}, + {0xE45C10C42A2B3B06, -210, -44}, + {0xAA242499697392D3, -183, -36}, + {0xFD87B5F28300CA0E, -157, -28}, + {0xBCE5086492111AEB, -130, -20}, + {0x8CBCCC096F5088CC, -103, -12}, + {0xD1B71758E219652C, -77, -4}, + {0x9C40000000000000, -50, 4}, + {0xE8D4A51000000000, -24, 12}, + {0xAD78EBC5AC620000, 3, 20}, + {0x813F3978F8940984, 30, 28}, + {0xC097CE7BC90715B3, 56, 36}, + {0x8F7E32CE7BEA5C70, 83, 44}, + {0xD5D238A4ABE98068, 109, 52}, + {0x9F4F2726179A2245, 136, 60}, + {0xED63A231D4C4FB27, 162, 68}, + {0xB0DE65388CC8ADA8, 189, 76}, + {0x83C7088E1AAB65DB, 216, 84}, + {0xC45D1DF942711D9A, 242, 92}, + {0x924D692CA61BE758, 269, 100}, + {0xDA01EE641A708DEA, 295, 108}, + {0xA26DA3999AEF774A, 322, 116}, + {0xF209787BB47D6B85, 348, 124}, + {0xB454E4A179DD1877, 375, 132}, + {0x865B86925B9BC5C2, 402, 140}, + {0xC83553C5C8965D3D, 428, 148}, + {0x952AB45CFA97A0B3, 455, 156}, + {0xDE469FBD99A05FE3, 481, 164}, + {0xA59BC234DB398C25, 508, 172}, + {0xF6C69A72A3989F5C, 534, 180}, + {0xB7DCBF5354E9BECE, 561, 188}, + {0x88FCF317F22241E2, 588, 196}, + {0xCC20CE9BD35C78A5, 614, 204}, + {0x98165AF37B2153DF, 641, 212}, + {0xE2A0B5DC971F303A, 667, 220}, + {0xA8D9D1535CE3B396, 694, 228}, + {0xFB9B7CD9A4A7443C, 720, 236}, + {0xBB764C4CA7A44410, 747, 244}, + {0x8BAB8EEFB6409C1A, 774, 252}, + {0xD01FEF10A657842C, 800, 260}, + {0x9B10A4E5E9913129, 827, 268}, + {0xE7109BFBA19C0C9D, 853, 276}, + {0xAC2820D9623BF429, 880, 284}, + {0x80444B5E7AA7CF85, 907, 292}, + 
{0xBF21E44003ACDD2D, 933, 300}, + {0x8E679C2F5E44FF8F, 960, 308}, + {0xD433179D9C8CB841, 986, 316}, + {0x9E19DB92B4E31BA9, 1013, 324}, + {0xEB96BF6EBADF77D9, 1039, 332}, + {0xAF87023B9BF0EE6B, 1066, 340}, + } +) + +func getCachedPowerForBinaryExponentRange(min_exponent, max_exponent int) (power diyfp, decimal_exponent int) { + kQ := diyFpKSignificandSize + k := int(math.Ceil(float64(min_exponent+kQ-1) * kD_1_LOG2_10)) + index := (kCachedPowersOffset+k-1)/kDecimalExponentDistance + 1 + cached_power := cachedPowers[index] + _DCHECK(min_exponent <= int(cached_power.binary_exponent)) + _DCHECK(int(cached_power.binary_exponent) <= max_exponent) + decimal_exponent = int(cached_power.decimal_exponent) + power = diyfp{f: cached_power.significand, e: int(cached_power.binary_exponent)} + + return +} diff --git a/vendor/github.com/dop251/goja/ftoa/internal/fast/common.go b/vendor/github.com/dop251/goja/ftoa/internal/fast/common.go new file mode 100644 index 0000000000..6ffaaf92c2 --- /dev/null +++ b/vendor/github.com/dop251/goja/ftoa/internal/fast/common.go @@ -0,0 +1,18 @@ +/* +Package fast contains code ported from V8 (https://github.com/v8/v8/blob/master/src/numbers/fast-dtoa.cc) + +See LICENSE_V8 for the original copyright message and disclaimer. 
+*/ +package fast + +import "errors" + +var ( + dcheckFailure = errors.New("DCHECK assertion failed") +) + +func _DCHECK(f bool) { + if !f { + panic(dcheckFailure) + } +} diff --git a/vendor/github.com/dop251/goja/ftoa/internal/fast/diyfp.go b/vendor/github.com/dop251/goja/ftoa/internal/fast/diyfp.go new file mode 100644 index 0000000000..727a747223 --- /dev/null +++ b/vendor/github.com/dop251/goja/ftoa/internal/fast/diyfp.go @@ -0,0 +1,152 @@ +package fast + +import "math" + +const ( + diyFpKSignificandSize = 64 + kSignificandSize = 53 + kUint64MSB uint64 = 1 << 63 + + kSignificandMask = 0x000FFFFFFFFFFFFF + kHiddenBit = 0x0010000000000000 + kExponentMask = 0x7FF0000000000000 + + kPhysicalSignificandSize = 52 // Excludes the hidden bit. + kExponentBias = 0x3FF + kPhysicalSignificandSize + kDenormalExponent = -kExponentBias + 1 +) + +type double float64 + +type diyfp struct { + f uint64 + e int +} + +// f =- o. +// The exponents of both numbers must be the same and the significand of this +// must be bigger than the significand of other. +// The result will not be normalized. +func (f *diyfp) subtract(o diyfp) { + _DCHECK(f.e == o.e) + _DCHECK(f.f >= o.f) + f.f -= o.f +} + +// Returns f - o +// The exponents of both numbers must be the same and this must be bigger +// than other. The result will not be normalized. +func (f diyfp) minus(o diyfp) diyfp { + res := f + res.subtract(o) + return res +} + +// f *= o +func (f *diyfp) mul(o diyfp) { + // Simply "emulates" a 128 bit multiplication. + // However: the resulting number only contains 64 bits. The least + // significant 64 bits are only used for rounding the most significant 64 + // bits. + const kM32 uint64 = 0xFFFFFFFF + a := f.f >> 32 + b := f.f & kM32 + c := o.f >> 32 + d := o.f & kM32 + ac := a * c + bc := b * c + ad := a * d + bd := b * d + tmp := (bd >> 32) + (ad & kM32) + (bc & kM32) + // By adding 1U << 31 to tmp we round the final result. + // Halfway cases will be round up. 
+ tmp += 1 << 31 + result_f := ac + (ad >> 32) + (bc >> 32) + (tmp >> 32) + f.e += o.e + 64 + f.f = result_f +} + +// Returns f * o +func (f diyfp) times(o diyfp) diyfp { + res := f + res.mul(o) + return res +} + +func (f *diyfp) _normalize() { + f_, e := f.f, f.e + // This method is mainly called for normalizing boundaries. In general + // boundaries need to be shifted by 10 bits. We thus optimize for this case. + const k10MSBits uint64 = 0x3FF << 54 + for f_&k10MSBits == 0 { + f_ <<= 10 + e -= 10 + } + for f_&kUint64MSB == 0 { + f_ <<= 1 + e-- + } + f.f, f.e = f_, e +} + +func normalizeDiyfp(f diyfp) diyfp { + res := f + res._normalize() + return res +} + +// f must be strictly greater than 0. +func (d double) toNormalizedDiyfp() diyfp { + f, e := d.sigExp() + + // The current float could be a denormal. + for (f & kHiddenBit) == 0 { + f <<= 1 + e-- + } + // Do the final shifts in one go. + f <<= diyFpKSignificandSize - kSignificandSize + e -= diyFpKSignificandSize - kSignificandSize + return diyfp{f, e} +} + +// Returns the two boundaries of this. +// The bigger boundary (m_plus) is normalized. The lower boundary has the same +// exponent as m_plus. +// Precondition: the value encoded by this Double must be greater than 0. +func (d double) normalizedBoundaries() (m_minus, m_plus diyfp) { + v := d.toDiyFp() + significand_is_zero := v.f == kHiddenBit + m_plus = normalizeDiyfp(diyfp{f: (v.f << 1) + 1, e: v.e - 1}) + if significand_is_zero && v.e != kDenormalExponent { + // The boundary is closer. Think of v = 1000e10 and v- = 9999e9. + // Then the boundary (== (v - v-)/2) is not just at a distance of 1e9 but + // at a distance of 1e8. + // The only exception is for the smallest normal: the largest denormal is + // at the same distance as its successor. + // Note: denormals have the same exponent as the smallest normals. 
+ m_minus = diyfp{f: (v.f << 2) - 1, e: v.e - 2} + } else { + m_minus = diyfp{f: (v.f << 1) - 1, e: v.e - 1} + } + m_minus.f <<= m_minus.e - m_plus.e + m_minus.e = m_plus.e + return +} + +func (d double) toDiyFp() diyfp { + f, e := d.sigExp() + return diyfp{f: f, e: e} +} + +func (d double) sigExp() (significand uint64, exponent int) { + d64 := math.Float64bits(float64(d)) + significand = d64 & kSignificandMask + if d64&kExponentMask != 0 { // not denormal + significand += kHiddenBit + exponent = int((d64&kExponentMask)>>kPhysicalSignificandSize) - kExponentBias + } else { + exponent = kDenormalExponent + } + return +} diff --git a/vendor/github.com/dop251/goja/ftoa/internal/fast/dtoa.go b/vendor/github.com/dop251/goja/ftoa/internal/fast/dtoa.go new file mode 100644 index 0000000000..a82e31e996 --- /dev/null +++ b/vendor/github.com/dop251/goja/ftoa/internal/fast/dtoa.go @@ -0,0 +1,624 @@ +package fast + +import ( + "fmt" + "strconv" +) + +const ( + kMinimalTargetExponent = -60 + kMaximalTargetExponent = -32 + + kTen4 = 10000 + kTen5 = 100000 + kTen6 = 1000000 + kTen7 = 10000000 + kTen8 = 100000000 + kTen9 = 1000000000 +) + +type Mode int + +const ( + ModeShortest Mode = iota + ModePrecision +) + +// Adjusts the last digit of the generated number, and screens out generated +// solutions that may be inaccurate. A solution may be inaccurate if it is +// outside the safe interval, or if we cannot prove that it is closer to the +// input than a neighboring representation of the same length. +// +// Input: * buffer containing the digits of too_high / 10^kappa +// * distance_too_high_w == (too_high - w).f() * unit +// * unsafe_interval == (too_high - too_low).f() * unit +// * rest = (too_high - buffer * 10^kappa).f() * unit +// * ten_kappa = 10^kappa * unit +// * unit = the common multiplier +// Output: returns true if the buffer is guaranteed to contain the closest +// representable number to the input. 
+// Modifies the generated digits in the buffer to approach (round towards) w. +func roundWeed(buffer []byte, distance_too_high_w, unsafe_interval, rest, ten_kappa, unit uint64) bool { + small_distance := distance_too_high_w - unit + big_distance := distance_too_high_w + unit + + // Let w_low = too_high - big_distance, and + // w_high = too_high - small_distance. + // Note: w_low < w < w_high + // + // The real w (* unit) must lie somewhere inside the interval + // ]w_low; w_high[ (often written as "(w_low; w_high)") + + // Basically the buffer currently contains a number in the unsafe interval + // ]too_low; too_high[ with too_low < w < too_high + // + // too_high - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + // ^v 1 unit ^ ^ ^ ^ + // boundary_high --------------------- . . . . + // ^v 1 unit . . . . + // - - - - - - - - - - - - - - - - - - - + - - + - - - - - - . . + // . . ^ . . + // . big_distance . . . + // . . . . rest + // small_distance . . . . + // v . . . . + // w_high - - - - - - - - - - - - - - - - - - . . . . + // ^v 1 unit . . . . + // w ---------------------------------------- . . . . + // ^v 1 unit v . . . + // w_low - - - - - - - - - - - - - - - - - - - - - . . . + // . . v + // buffer --------------------------------------------------+-------+-------- + // . . + // safe_interval . + // v . + // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - . + // ^v 1 unit . + // boundary_low ------------------------- unsafe_interval + // ^v 1 unit v + // too_low - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + // + // + // Note that the value of buffer could lie anywhere inside the range too_low + // to too_high. + // + // boundary_low, boundary_high and w are approximations of the real boundaries + // and v (the input number). They are guaranteed to be precise up to one unit. + // In fact the error is guaranteed to be strictly less than one unit. 
+ // + // Anything that lies outside the unsafe interval is guaranteed not to round + // to v when read again. + // Anything that lies inside the safe interval is guaranteed to round to v + // when read again. + // If the number inside the buffer lies inside the unsafe interval but not + // inside the safe interval then we simply do not know and bail out (returning + // false). + // + // Similarly we have to take into account the imprecision of 'w' when finding + // the closest representation of 'w'. If we have two potential + // representations, and one is closer to both w_low and w_high, then we know + // it is closer to the actual value v. + // + // By generating the digits of too_high we got the largest (closest to + // too_high) buffer that is still in the unsafe interval. In the case where + // w_high < buffer < too_high we try to decrement the buffer. + // This way the buffer approaches (rounds towards) w. + // There are 3 conditions that stop the decrementation process: + // 1) the buffer is already below w_high + // 2) decrementing the buffer would make it leave the unsafe interval + // 3) decrementing the buffer would yield a number below w_high and farther + // away than the current number. In other words: + // (buffer{-1} < w_high) && w_high - buffer{-1} > buffer - w_high + // Instead of using the buffer directly we use its distance to too_high. + // Conceptually rest ~= too_high - buffer + // We need to do the following tests in this order to avoid over- and + // underflows. + _DCHECK(rest <= unsafe_interval) + for rest < small_distance && // Negated condition 1 + unsafe_interval-rest >= ten_kappa && // Negated condition 2 + (rest+ten_kappa < small_distance || // buffer{-1} > w_high + small_distance-rest >= rest+ten_kappa-small_distance) { + buffer[len(buffer)-1]-- + rest += ten_kappa + } + + // We have approached w+ as much as possible. We now test if approaching w- + // would require changing the buffer. 
If yes, then we have two possible + // representations close to w, but we cannot decide which one is closer. + if rest < big_distance && unsafe_interval-rest >= ten_kappa && + (rest+ten_kappa < big_distance || + big_distance-rest > rest+ten_kappa-big_distance) { + return false + } + + // Weeding test. + // The safe interval is [too_low + 2 ulp; too_high - 2 ulp] + // Since too_low = too_high - unsafe_interval this is equivalent to + // [too_high - unsafe_interval + 4 ulp; too_high - 2 ulp] + // Conceptually we have: rest ~= too_high - buffer + return (2*unit <= rest) && (rest <= unsafe_interval-4*unit) +} + +// Rounds the buffer upwards if the result is closer to v by possibly adding +// 1 to the buffer. If the precision of the calculation is not sufficient to +// round correctly, return false. +// The rounding might shift the whole buffer in which case the kappa is +// adjusted. For example "99", kappa = 3 might become "10", kappa = 4. +// +// If 2*rest > ten_kappa then the buffer needs to be round up. +// rest can have an error of +/- 1 unit. This function accounts for the +// imprecision and returns false, if the rounding direction cannot be +// unambiguously determined. +// +// Precondition: rest < ten_kappa. +func roundWeedCounted(buffer []byte, rest, ten_kappa, unit uint64, kappa *int) bool { + _DCHECK(rest < ten_kappa) + // The following tests are done in a specific order to avoid overflows. They + // will work correctly with any uint64 values of rest < ten_kappa and unit. + // + // If the unit is too big, then we don't know which way to round. For example + // a unit of 50 means that the real number lies within rest +/- 50. If + // 10^kappa == 40 then there is no way to tell which way to round. + if unit >= ten_kappa { + return false + } + // Even if unit is just half the size of 10^kappa we are already completely + // lost. (And after the previous test we know that the expression will not + // over/underflow.) 
+ if ten_kappa-unit <= unit { + return false + } + // If 2 * (rest + unit) <= 10^kappa we can safely round down. + if (ten_kappa-rest > rest) && (ten_kappa-2*rest >= 2*unit) { + return true + } + + // If 2 * (rest - unit) >= 10^kappa, then we can safely round up. + if (rest > unit) && (ten_kappa-(rest-unit) <= (rest - unit)) { + // Increment the last digit recursively until we find a non '9' digit. + buffer[len(buffer)-1]++ + for i := len(buffer) - 1; i > 0; i-- { + if buffer[i] != '0'+10 { + break + } + buffer[i] = '0' + buffer[i-1]++ + } + // If the first digit is now '0'+ 10 we had a buffer with all '9's. With the + // exception of the first digit all digits are now '0'. Simply switch the + // first digit to '1' and adjust the kappa. Example: "99" becomes "10" and + // the power (the kappa) is increased. + if buffer[0] == '0'+10 { + buffer[0] = '1' + *kappa += 1 + } + return true + } + return false +} + +// Returns the biggest power of ten that is less than or equal than the given +// number. We furthermore receive the maximum number of bits 'number' has. +// If number_bits == 0 then 0^-1 is returned +// The number of bits must be <= 32. +// Precondition: number < (1 << (number_bits + 1)). 
+func biggestPowerTen(number uint32, number_bits int) (power uint32, exponent int) { + switch number_bits { + case 32, 31, 30: + if kTen9 <= number { + power = kTen9 + exponent = 9 + break + } + fallthrough + case 29, 28, 27: + if kTen8 <= number { + power = kTen8 + exponent = 8 + break + } + fallthrough + case 26, 25, 24: + if kTen7 <= number { + power = kTen7 + exponent = 7 + break + } + fallthrough + case 23, 22, 21, 20: + if kTen6 <= number { + power = kTen6 + exponent = 6 + break + } + fallthrough + case 19, 18, 17: + if kTen5 <= number { + power = kTen5 + exponent = 5 + break + } + fallthrough + case 16, 15, 14: + if kTen4 <= number { + power = kTen4 + exponent = 4 + break + } + fallthrough + case 13, 12, 11, 10: + if 1000 <= number { + power = 1000 + exponent = 3 + break + } + fallthrough + case 9, 8, 7: + if 100 <= number { + power = 100 + exponent = 2 + break + } + fallthrough + case 6, 5, 4: + if 10 <= number { + power = 10 + exponent = 1 + break + } + fallthrough + case 3, 2, 1: + if 1 <= number { + power = 1 + exponent = 0 + break + } + fallthrough + case 0: + power = 0 + exponent = -1 + } + return +} + +// Generates the digits of input number w. +// w is a floating-point number (DiyFp), consisting of a significand and an +// exponent. Its exponent is bounded by kMinimalTargetExponent and +// kMaximalTargetExponent. +// Hence -60 <= w.e() <= -32. +// +// Returns false if it fails, in which case the generated digits in the buffer +// should not be used. +// Preconditions: +// * low, w and high are correct up to 1 ulp (unit in the last place). That +// is, their error must be less than a unit of their last digits. +// * low.e() == w.e() == high.e() +// * low < w < high, and taking into account their error: low~ <= high~ +// * kMinimalTargetExponent <= w.e() <= kMaximalTargetExponent +// Postconditions: returns false if procedure fails. +// otherwise: +// * buffer is not null-terminated, but len contains the number of digits. 
+// * buffer contains the shortest possible decimal digit-sequence +// such that LOW < buffer * 10^kappa < HIGH, where LOW and HIGH are the +// correct values of low and high (without their error). +// * if more than one decimal representation gives the minimal number of +// decimal digits then the one closest to W (where W is the correct value +// of w) is chosen. +// Remark: this procedure takes into account the imprecision of its input +// numbers. If the precision is not enough to guarantee all the postconditions +// then false is returned. This usually happens rarely (~0.5%). +// +// Say, for the sake of example, that +// w.e() == -48, and w.f() == 0x1234567890ABCDEF +// w's value can be computed by w.f() * 2^w.e() +// We can obtain w's integral digits by simply shifting w.f() by -w.e(). +// -> w's integral part is 0x1234 +// w's fractional part is therefore 0x567890ABCDEF. +// Printing w's integral part is easy (simply print 0x1234 in decimal). +// In order to print its fraction we repeatedly multiply the fraction by 10 and +// get each digit. Example the first digit after the point would be computed by +// (0x567890ABCDEF * 10) >> 48. -> 3 +// The whole thing becomes slightly more complicated because we want to stop +// once we have enough digits. That is, once the digits inside the buffer +// represent 'w' we can stop. Everything inside the interval low - high +// represents w. However we have to pay attention to low, high and w's +// imprecision. +func digitGen(low, w, high diyfp, buffer []byte) (kappa int, buf []byte, res bool) { + _DCHECK(low.e == w.e && w.e == high.e) + _DCHECK(low.f+1 <= high.f-1) + _DCHECK(kMinimalTargetExponent <= w.e && w.e <= kMaximalTargetExponent) + // low, w and high are imprecise, but by less than one ulp (unit in the last + // place). + // If we remove (resp. add) 1 ulp from low (resp. high) we are certain that + // the new numbers are outside of the interval we want the final + // representation to lie in. 
+ // Inversely adding (resp. removing) 1 ulp from low (resp. high) would yield + // numbers that are certain to lie in the interval. We will use this fact + // later on. + // We will now start by generating the digits within the uncertain + // interval. Later we will weed out representations that lie outside the safe + // interval and thus _might_ lie outside the correct interval. + unit := uint64(1) + too_low := diyfp{f: low.f - unit, e: low.e} + too_high := diyfp{f: high.f + unit, e: high.e} + // too_low and too_high are guaranteed to lie outside the interval we want the + // generated number in. + unsafe_interval := too_high.minus(too_low) + // We now cut the input number into two parts: the integral digits and the + // fractionals. We will not write any decimal separator though, but adapt + // kappa instead. + // Reminder: we are currently computing the digits (stored inside the buffer) + // such that: too_low < buffer * 10^kappa < too_high + // We use too_high for the digit_generation and stop as soon as possible. + // If we stop early we effectively round down. + one := diyfp{f: 1 << -w.e, e: w.e} + // Division by one is a shift. + integrals := uint32(too_high.f >> -one.e) + // Modulo by one is an and. + fractionals := too_high.f & (one.f - 1) + divisor, divisor_exponent := biggestPowerTen(integrals, diyFpKSignificandSize-(-one.e)) + kappa = divisor_exponent + 1 + buf = buffer + for kappa > 0 { + digit := int(integrals / divisor) + buf = append(buf, byte('0'+digit)) + integrals %= divisor + kappa-- + // Note that kappa now equals the exponent of the divisor and that the + // invariant thus holds again. + rest := uint64(integrals)<<-one.e + fractionals + // Invariant: too_high = buffer * 10^kappa + DiyFp(rest, one.e) + // Reminder: unsafe_interval.e == one.e + if rest < unsafe_interval.f { + // Rounding down (by not emitting the remaining digits) yields a number + // that lies within the unsafe interval. 
+ res = roundWeed(buf, too_high.minus(w).f, + unsafe_interval.f, rest, + uint64(divisor)<<-one.e, unit) + return + } + divisor /= 10 + } + // The integrals have been generated. We are at the point of the decimal + // separator. In the following loop we simply multiply the remaining digits by + // 10 and divide by one. We just need to pay attention to multiply associated + // data (like the interval or 'unit'), too. + // Note that the multiplication by 10 does not overflow, because w.e >= -60 + // and thus one.e >= -60. + _DCHECK(one.e >= -60) + _DCHECK(fractionals < one.f) + _DCHECK(0xFFFFFFFFFFFFFFFF/10 >= one.f) + for { + fractionals *= 10 + unit *= 10 + unsafe_interval.f *= 10 + // Integer division by one. + digit := byte(fractionals >> -one.e) + buf = append(buf, '0'+digit) + fractionals &= one.f - 1 // Modulo by one. + kappa-- + if fractionals < unsafe_interval.f { + res = roundWeed(buf, too_high.minus(w).f*unit, unsafe_interval.f, fractionals, one.f, unit) + return + } + } +} + +// Generates (at most) requested_digits of input number w. +// w is a floating-point number (DiyFp), consisting of a significand and an +// exponent. Its exponent is bounded by kMinimalTargetExponent and +// kMaximalTargetExponent. +// Hence -60 <= w.e() <= -32. +// +// Returns false if it fails, in which case the generated digits in the buffer +// should not be used. +// Preconditions: +// * w is correct up to 1 ulp (unit in the last place). That +// is, its error must be strictly less than a unit of its last digit. +// * kMinimalTargetExponent <= w.e() <= kMaximalTargetExponent +// +// Postconditions: returns false if procedure fails. +// otherwise: +// * buffer is not null-terminated, but length contains the number of +// digits. +// * the representation in buffer is the most precise representation of +// requested_digits digits. +// * buffer contains at most requested_digits digits of w. If there are less +// than requested_digits digits then some trailing '0's have been removed. 
+// * kappa is such that +// w = buffer * 10^kappa + eps with |eps| < 10^kappa / 2. +// +// Remark: This procedure takes into account the imprecision of its input +// numbers. If the precision is not enough to guarantee all the postconditions +// then false is returned. This usually happens rarely, but the failure-rate +// increases with higher requested_digits. +func digitGenCounted(w diyfp, requested_digits int, buffer []byte) (kappa int, buf []byte, res bool) { + _DCHECK(kMinimalTargetExponent <= w.e && w.e <= kMaximalTargetExponent) + + // w is assumed to have an error less than 1 unit. Whenever w is scaled we + // also scale its error. + w_error := uint64(1) + // We cut the input number into two parts: the integral digits and the + // fractional digits. We don't emit any decimal separator, but adapt kappa + // instead. Example: instead of writing "1.2" we put "12" into the buffer and + // increase kappa by 1. + one := diyfp{f: 1 << -w.e, e: w.e} + // Division by one is a shift. + integrals := uint32(w.f >> -one.e) + // Modulo by one is an and. + fractionals := w.f & (one.f - 1) + divisor, divisor_exponent := biggestPowerTen(integrals, diyFpKSignificandSize-(-one.e)) + kappa = divisor_exponent + 1 + buf = buffer + // Loop invariant: buffer = w / 10^kappa (integer division) + // The invariant holds for the first iteration: kappa has been initialized + // with the divisor exponent + 1. And the divisor is the biggest power of ten + // that is smaller than 'integrals'. + for kappa > 0 { + digit := byte(integrals / divisor) + buf = append(buf, '0'+digit) + requested_digits-- + integrals %= divisor + kappa-- + // Note that kappa now equals the exponent of the divisor and that the + // invariant thus holds again. 
+ if requested_digits == 0 { + break + } + divisor /= 10 + } + + if requested_digits == 0 { + rest := uint64(integrals)<<-one.e + fractionals + res = roundWeedCounted(buf, rest, uint64(divisor)<<-one.e, w_error, &kappa) + return + } + + // The integrals have been generated. We are at the point of the decimal + // separator. In the following loop we simply multiply the remaining digits by + // 10 and divide by one. We just need to pay attention to multiply associated + // data (the 'unit'), too. + // Note that the multiplication by 10 does not overflow, because w.e >= -60 + // and thus one.e >= -60. + _DCHECK(one.e >= -60) + _DCHECK(fractionals < one.f) + _DCHECK(0xFFFFFFFFFFFFFFFF/10 >= one.f) + for requested_digits > 0 && fractionals > w_error { + fractionals *= 10 + w_error *= 10 + // Integer division by one. + digit := byte(fractionals >> -one.e) + buf = append(buf, '0'+digit) + requested_digits-- + fractionals &= one.f - 1 // Modulo by one. + kappa-- + } + if requested_digits != 0 { + res = false + } else { + res = roundWeedCounted(buf, fractionals, one.f, w_error, &kappa) + } + return +} + +// Provides a decimal representation of v. +// Returns true if it succeeds, otherwise the result cannot be trusted. +// There will be *length digits inside the buffer (not null-terminated). +// If the function returns true then +// v == (double) (buffer * 10^decimal_exponent). +// The digits in the buffer are the shortest representation possible: no +// 0.09999999999999999 instead of 0.1. The shorter representation will even be +// chosen even if the longer one would be closer to v. +// The last digit will be closest to the actual v. That is, even if several +// digits might correctly yield 'v' when read again, the closest will be +// computed. 
+func grisu3(f float64, buffer []byte) (digits []byte, decimal_exponent int, result bool) { + v := double(f) + w := v.toNormalizedDiyfp() + + // boundary_minus and boundary_plus are the boundaries between v and its + // closest floating-point neighbors. Any number strictly between + // boundary_minus and boundary_plus will round to v when convert to a double. + // Grisu3 will never output representations that lie exactly on a boundary. + boundary_minus, boundary_plus := v.normalizedBoundaries() + ten_mk_minimal_binary_exponent := kMinimalTargetExponent - (w.e + diyFpKSignificandSize) + ten_mk_maximal_binary_exponent := kMaximalTargetExponent - (w.e + diyFpKSignificandSize) + ten_mk, mk := getCachedPowerForBinaryExponentRange(ten_mk_minimal_binary_exponent, ten_mk_maximal_binary_exponent) + + _DCHECK( + (kMinimalTargetExponent <= + w.e+ten_mk.e+diyFpKSignificandSize) && + (kMaximalTargetExponent >= w.e+ten_mk.e+diyFpKSignificandSize)) + // Note that ten_mk is only an approximation of 10^-k. A DiyFp only contains a + // 64 bit significand and ten_mk is thus only precise up to 64 bits. + + // The DiyFp::Times procedure rounds its result, and ten_mk is approximated + // too. The variable scaled_w (as well as scaled_boundary_minus/plus) are now + // off by a small amount. + // In fact: scaled_w - w*10^k < 1ulp (unit in the last place) of scaled_w. + // In other words: let f = scaled_w.f() and e = scaled_w.e(), then + // (f-1) * 2^e < w*10^k < (f+1) * 2^e + scaled_w := w.times(ten_mk) + _DCHECK(scaled_w.e == + boundary_plus.e+ten_mk.e+diyFpKSignificandSize) + // In theory it would be possible to avoid some recomputations by computing + // the difference between w and boundary_minus/plus (a power of 2) and to + // compute scaled_boundary_minus/plus by subtracting/adding from + // scaled_w. However the code becomes much less readable and the speed + // enhancements are not terrific. 
+ scaled_boundary_minus := boundary_minus.times(ten_mk) + scaled_boundary_plus := boundary_plus.times(ten_mk) + // DigitGen will generate the digits of scaled_w. Therefore we have + // v == (double) (scaled_w * 10^-mk). + // Set decimal_exponent == -mk and pass it to DigitGen. If scaled_w is not an + // integer than it will be updated. For instance if scaled_w == 1.23 then + // the buffer will be filled with "123" und the decimal_exponent will be + // decreased by 2. + var kappa int + kappa, digits, result = digitGen(scaled_boundary_minus, scaled_w, scaled_boundary_plus, buffer) + decimal_exponent = -mk + kappa + return +} + +// The "counted" version of grisu3 (see above) only generates requested_digits +// number of digits. This version does not generate the shortest representation, +// and with enough requested digits 0.1 will at some point print as 0.9999999... +// Grisu3 is too imprecise for real halfway cases (1.5 will not work) and +// therefore the rounding strategy for halfway cases is irrelevant. +func grisu3Counted(v float64, requested_digits int, buffer []byte) (digits []byte, decimal_exponent int, result bool) { + w := double(v).toNormalizedDiyfp() + ten_mk_minimal_binary_exponent := kMinimalTargetExponent - (w.e + diyFpKSignificandSize) + ten_mk_maximal_binary_exponent := kMaximalTargetExponent - (w.e + diyFpKSignificandSize) + ten_mk, mk := getCachedPowerForBinaryExponentRange(ten_mk_minimal_binary_exponent, ten_mk_maximal_binary_exponent) + + _DCHECK( + (kMinimalTargetExponent <= + w.e+ten_mk.e+diyFpKSignificandSize) && + (kMaximalTargetExponent >= w.e+ten_mk.e+diyFpKSignificandSize)) + // Note that ten_mk is only an approximation of 10^-k. A DiyFp only contains a + // 64 bit significand and ten_mk is thus only precise up to 64 bits. + + // The DiyFp::Times procedure rounds its result, and ten_mk is approximated + // too. The variable scaled_w (as well as scaled_boundary_minus/plus) are now + // off by a small amount. 
+ // In fact: scaled_w - w*10^k < 1ulp (unit in the last place) of scaled_w. + // In other words: let f = scaled_w.f() and e = scaled_w.e(), then + // (f-1) * 2^e < w*10^k < (f+1) * 2^e + scaled_w := w.times(ten_mk) + // We now have (double) (scaled_w * 10^-mk). + // DigitGen will generate the first requested_digits digits of scaled_w and + // return together with a kappa such that scaled_w ~= buffer * 10^kappa. (It + // will not always be exactly the same since DigitGenCounted only produces a + // limited number of digits.) + var kappa int + kappa, digits, result = digitGenCounted(scaled_w, requested_digits, buffer) + decimal_exponent = -mk + kappa + + return +} + +// v must be > 0 and must not be Inf or NaN +func Dtoa(v float64, mode Mode, requested_digits int, buffer []byte) (digits []byte, decimal_point int, result bool) { + defer func() { + if x := recover(); x != nil { + if x == dcheckFailure { + panic(fmt.Errorf("DCHECK assertion failed while formatting %s in mode %d", strconv.FormatFloat(v, 'e', 50, 64), mode)) + } + panic(x) + } + }() + var decimal_exponent int + startPos := len(buffer) + switch mode { + case ModeShortest: + digits, decimal_exponent, result = grisu3(v, buffer) + case ModePrecision: + digits, decimal_exponent, result = grisu3Counted(v, requested_digits, buffer) + } + if result { + decimal_point = len(digits) - startPos + decimal_exponent + } else { + digits = digits[:startPos] + } + return +} diff --git a/vendor/github.com/dop251/goja/func.go b/vendor/github.com/dop251/goja/func.go new file mode 100644 index 0000000000..1599d702d5 --- /dev/null +++ b/vendor/github.com/dop251/goja/func.go @@ -0,0 +1,280 @@ +package goja + +import ( + "reflect" + + "github.com/dop251/goja/unistring" +) + +type baseFuncObject struct { + baseObject + + lenProp valueProperty +} + +type funcObject struct { + baseFuncObject + + stash *stash + prg *Program + src string +} + +type nativeFuncObject struct { + baseFuncObject + + f func(FunctionCall) Value + construct 
func(args []Value, newTarget *Object) *Object +} + +type boundFuncObject struct { + nativeFuncObject + wrapped *Object +} + +func (f *nativeFuncObject) export(*objectExportCtx) interface{} { + return f.f +} + +func (f *nativeFuncObject) exportType() reflect.Type { + return reflect.TypeOf(f.f) +} + +func (f *funcObject) _addProto(n unistring.String) Value { + if n == "prototype" { + if _, exists := f.values[n]; !exists { + return f.addPrototype() + } + } + return nil +} + +func (f *funcObject) getStr(p unistring.String, receiver Value) Value { + return f.getStrWithOwnProp(f.getOwnPropStr(p), p, receiver) +} + +func (f *funcObject) getOwnPropStr(name unistring.String) Value { + if v := f._addProto(name); v != nil { + return v + } + + return f.baseObject.getOwnPropStr(name) +} + +func (f *funcObject) setOwnStr(name unistring.String, val Value, throw bool) bool { + f._addProto(name) + return f.baseObject.setOwnStr(name, val, throw) +} + +func (f *funcObject) setForeignStr(name unistring.String, val, receiver Value, throw bool) (bool, bool) { + return f._setForeignStr(name, f.getOwnPropStr(name), val, receiver, throw) +} + +func (f *funcObject) deleteStr(name unistring.String, throw bool) bool { + f._addProto(name) + return f.baseObject.deleteStr(name, throw) +} + +func (f *funcObject) addPrototype() Value { + proto := f.val.runtime.NewObject() + proto.self._putProp("constructor", f.val, true, false, true) + return f._putProp("prototype", proto, true, false, false) +} + +func (f *funcObject) hasOwnPropertyStr(name unistring.String) bool { + if r := f.baseObject.hasOwnPropertyStr(name); r { + return true + } + + if name == "prototype" { + return true + } + return false +} + +func (f *funcObject) ownKeys(all bool, accum []Value) []Value { + if all { + if _, exists := f.values["prototype"]; !exists { + accum = append(accum, asciiString("prototype")) + } + } + return f.baseFuncObject.ownKeys(all, accum) +} + +func (f *funcObject) construct(args []Value, newTarget *Object) 
*Object { + if newTarget == nil { + newTarget = f.val + } + proto := newTarget.self.getStr("prototype", nil) + var protoObj *Object + if p, ok := proto.(*Object); ok { + protoObj = p + } else { + protoObj = f.val.runtime.global.ObjectPrototype + } + + obj := f.val.runtime.newBaseObject(protoObj, classObject).val + ret := f.call(FunctionCall{ + This: obj, + Arguments: args, + }, newTarget) + + if ret, ok := ret.(*Object); ok { + return ret + } + return obj +} + +func (f *funcObject) Call(call FunctionCall) Value { + return f.call(call, nil) +} + +func (f *funcObject) call(call FunctionCall, newTarget Value) Value { + vm := f.val.runtime.vm + pc := vm.pc + + vm.stack.expand(vm.sp + len(call.Arguments) + 1) + vm.stack[vm.sp] = f.val + vm.sp++ + if call.This != nil { + vm.stack[vm.sp] = call.This + } else { + vm.stack[vm.sp] = _undefined + } + vm.sp++ + for _, arg := range call.Arguments { + if arg != nil { + vm.stack[vm.sp] = arg + } else { + vm.stack[vm.sp] = _undefined + } + vm.sp++ + } + + vm.pc = -1 + vm.pushCtx() + vm.args = len(call.Arguments) + vm.prg = f.prg + vm.stash = f.stash + vm.newTarget = newTarget + vm.pc = 0 + vm.run() + vm.pc = pc + vm.halt = false + return vm.pop() +} + +func (f *funcObject) export(*objectExportCtx) interface{} { + return f.Call +} + +func (f *funcObject) exportType() reflect.Type { + return reflect.TypeOf(f.Call) +} + +func (f *funcObject) assertCallable() (func(FunctionCall) Value, bool) { + return f.Call, true +} + +func (f *funcObject) assertConstructor() func(args []Value, newTarget *Object) *Object { + return f.construct +} + +func (f *baseFuncObject) init(name unistring.String, length int) { + f.baseObject.init() + + if name != "" { + f._putProp("name", stringValueFromRaw(name), false, false, true) + } + + f.lenProp.configurable = true + f.lenProp.value = valueInt(length) + f._put("length", &f.lenProp) +} + +func (f *baseFuncObject) hasInstance(v Value) bool { + if v, ok := v.(*Object); ok { + o := 
f.val.self.getStr("prototype", nil) + if o1, ok := o.(*Object); ok { + for { + v = v.self.proto() + if v == nil { + return false + } + if o1 == v { + return true + } + } + } else { + f.val.runtime.typeErrorResult(true, "prototype is not an object") + } + } + + return false +} + +func (f *nativeFuncObject) defaultConstruct(ccall func(ConstructorCall) *Object, args []Value, newTarget *Object) *Object { + proto := f.getStr("prototype", nil) + var protoObj *Object + if p, ok := proto.(*Object); ok { + protoObj = p + } else { + protoObj = f.val.runtime.global.ObjectPrototype + } + obj := f.val.runtime.newBaseObject(protoObj, classObject).val + ret := ccall(ConstructorCall{ + This: obj, + Arguments: args, + NewTarget: newTarget, + }) + + if ret != nil { + return ret + } + return obj +} + +func (f *nativeFuncObject) assertCallable() (func(FunctionCall) Value, bool) { + if f.f != nil { + return f.f, true + } + return nil, false +} + +func (f *nativeFuncObject) assertConstructor() func(args []Value, newTarget *Object) *Object { + return f.construct +} + +func (f *boundFuncObject) getStr(p unistring.String, receiver Value) Value { + return f.getStrWithOwnProp(f.getOwnPropStr(p), p, receiver) +} + +func (f *boundFuncObject) getOwnPropStr(name unistring.String) Value { + if name == "caller" || name == "arguments" { + return f.val.runtime.global.throwerProperty + } + + return f.nativeFuncObject.getOwnPropStr(name) +} + +func (f *boundFuncObject) deleteStr(name unistring.String, throw bool) bool { + if name == "caller" || name == "arguments" { + return true + } + return f.nativeFuncObject.deleteStr(name, throw) +} + +func (f *boundFuncObject) setOwnStr(name unistring.String, val Value, throw bool) bool { + if name == "caller" || name == "arguments" { + panic(f.val.runtime.NewTypeError("'caller' and 'arguments' are restricted function properties and cannot be accessed in this context.")) + } + return f.nativeFuncObject.setOwnStr(name, val, throw) +} + +func (f *boundFuncObject) 
setForeignStr(name unistring.String, val, receiver Value, throw bool) (bool, bool) { + return f._setForeignStr(name, f.getOwnPropStr(name), val, receiver, throw) +} + +func (f *boundFuncObject) hasInstance(v Value) bool { + return instanceOfOperator(v, f.wrapped) +} diff --git a/vendor/github.com/dop251/goja/go.mod b/vendor/github.com/dop251/goja/go.mod new file mode 100644 index 0000000000..64dae78b2c --- /dev/null +++ b/vendor/github.com/dop251/goja/go.mod @@ -0,0 +1,13 @@ +module github.com/dop251/goja + +go 1.14 + +require ( + github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91 + github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7 + github.com/go-sourcemap/sourcemap v2.1.3+incompatible + github.com/kr/text v0.2.0 // indirect + golang.org/x/text v0.3.6 + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect + gopkg.in/yaml.v2 v2.4.0 +) diff --git a/vendor/github.com/dop251/goja/go.sum b/vendor/github.com/dop251/goja/go.sum new file mode 100644 index 0000000000..552aa985d7 --- /dev/null +++ b/vendor/github.com/dop251/goja/go.sum @@ -0,0 +1,21 @@ +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91 h1:Izz0+t1Z5nI16/II7vuEo/nHjodOg0p7+OiDpjX5t1E= +github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= +github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7 h1:tYwu/z8Y0NkkzGEh3z21mSWggMg4LwLRFucLS7TjARg= +github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y= +github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU= +github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod 
h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= diff --git a/vendor/github.com/dop251/goja/ipow.go b/vendor/github.com/dop251/goja/ipow.go new file mode 100644 index 0000000000..2462a98cba --- /dev/null +++ b/vendor/github.com/dop251/goja/ipow.go @@ -0,0 +1,97 @@ +package goja + +// ported from https://gist.github.com/orlp/3551590 + +var highest_bit_set = [256]byte{ + 0, 1, 2, 2, 3, 3, 3, 3, + 4, 4, 4, 4, 4, 4, 4, 4, + 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, + 6, 6, 6, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 255, // anything past 63 is a guaranteed overflow with base > 1 + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 
255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, +} + +func ipow(base, exp int64) (result int64) { + result = 1 + + switch highest_bit_set[byte(exp)] { + case 255: // we use 255 as an overflow marker and return 0 on overflow/underflow + if base == 1 { + return 1 + } + + if base == -1 { + return 1 - 2*(exp&1) + } + + return 0 + case 6: + if exp&1 != 0 { + result *= base + } + exp >>= 1 + base *= base + fallthrough + case 5: + if exp&1 != 0 { + result *= base + } + exp >>= 1 + base *= base + fallthrough + case 4: + if exp&1 != 0 { + result *= base + } + exp >>= 1 + base *= base + fallthrough + case 3: + if exp&1 != 0 { + result *= base + } + exp >>= 1 + base *= base + fallthrough + case 2: + if exp&1 != 0 { + result *= base + } + exp >>= 1 + base *= base + fallthrough + case 1: + if exp&1 != 0 { + result *= base + } + fallthrough + default: + return result + } +} diff --git a/vendor/github.com/dop251/goja/map.go b/vendor/github.com/dop251/goja/map.go new file mode 100644 index 0000000000..b092b0d049 --- /dev/null +++ b/vendor/github.com/dop251/goja/map.go @@ -0,0 +1,169 @@ +package goja + +import ( + "hash/maphash" +) + +type mapEntry struct { + key, value Value + + iterPrev, iterNext *mapEntry + hNext *mapEntry +} + +type orderedMap struct { + hash *maphash.Hash + hashTable map[uint64]*mapEntry + iterFirst, iterLast 
*mapEntry + size int +} + +type orderedMapIter struct { + m *orderedMap + cur *mapEntry +} + +func (m *orderedMap) lookup(key Value) (h uint64, entry, hPrev *mapEntry) { + if key == _negativeZero { + key = intToValue(0) + } + h = key.hash(m.hash) + for entry = m.hashTable[h]; entry != nil && !entry.key.SameAs(key); hPrev, entry = entry, entry.hNext { + } + return +} + +func (m *orderedMap) set(key, value Value) { + h, entry, hPrev := m.lookup(key) + if entry != nil { + entry.value = value + } else { + if key == _negativeZero { + key = intToValue(0) + } + entry = &mapEntry{key: key, value: value} + if hPrev == nil { + m.hashTable[h] = entry + } else { + hPrev.hNext = entry + } + if m.iterLast != nil { + entry.iterPrev = m.iterLast + m.iterLast.iterNext = entry + } else { + m.iterFirst = entry + } + m.iterLast = entry + m.size++ + } +} + +func (m *orderedMap) get(key Value) Value { + _, entry, _ := m.lookup(key) + if entry != nil { + return entry.value + } + + return nil +} + +func (m *orderedMap) remove(key Value) bool { + h, entry, hPrev := m.lookup(key) + if entry != nil { + entry.key = nil + entry.value = nil + + // remove from the doubly-linked list + if entry.iterPrev != nil { + entry.iterPrev.iterNext = entry.iterNext + } else { + m.iterFirst = entry.iterNext + } + if entry.iterNext != nil { + entry.iterNext.iterPrev = entry.iterPrev + } else { + m.iterLast = entry.iterPrev + } + + // remove from the hashTable + if hPrev == nil { + if entry.hNext == nil { + delete(m.hashTable, h) + } else { + m.hashTable[h] = entry.hNext + } + } else { + hPrev.hNext = entry.hNext + } + + m.size-- + return true + } + + return false +} + +func (m *orderedMap) has(key Value) bool { + _, entry, _ := m.lookup(key) + return entry != nil +} + +func (iter *orderedMapIter) next() *mapEntry { + if iter.m == nil { + // closed iterator + return nil + } + + cur := iter.cur + // if the current item was deleted, track back to find the latest that wasn't + for cur != nil && cur.key == nil { + 
cur = cur.iterPrev + } + + if cur != nil { + cur = cur.iterNext + } else { + cur = iter.m.iterFirst + } + + if cur == nil { + iter.close() + } else { + iter.cur = cur + } + + return cur +} + +func (iter *orderedMapIter) close() { + iter.m = nil + iter.cur = nil +} + +func newOrderedMap(h *maphash.Hash) *orderedMap { + return &orderedMap{ + hash: h, + hashTable: make(map[uint64]*mapEntry), + } +} + +func (m *orderedMap) newIter() *orderedMapIter { + iter := &orderedMapIter{ + m: m, + } + return iter +} + +func (m *orderedMap) clear() { + for item := m.iterFirst; item != nil; item = item.iterNext { + item.key = nil + item.value = nil + if item.iterPrev != nil { + item.iterPrev.iterNext = nil + } + } + m.iterFirst = nil + m.iterLast = nil + m.hashTable = make(map[uint64]*mapEntry) + m.size = 0 +} diff --git a/vendor/github.com/dop251/goja/object.go b/vendor/github.com/dop251/goja/object.go new file mode 100644 index 0000000000..812dac44ca --- /dev/null +++ b/vendor/github.com/dop251/goja/object.go @@ -0,0 +1,1552 @@ +package goja + +import ( + "fmt" + "math" + "reflect" + "sort" + + "github.com/dop251/goja/unistring" +) + +const ( + classObject = "Object" + classArray = "Array" + classWeakSet = "WeakSet" + classWeakMap = "WeakMap" + classMap = "Map" + classMath = "Math" + classSet = "Set" + classFunction = "Function" + classNumber = "Number" + classString = "String" + classBoolean = "Boolean" + classError = "Error" + classRegExp = "RegExp" + classDate = "Date" + classJSON = "JSON" + classGlobal = "global" + + classArrayIterator = "Array Iterator" + classMapIterator = "Map Iterator" + classSetIterator = "Set Iterator" + classStringIterator = "String Iterator" + classRegExpStringIterator = "RegExp String Iterator" +) + +var ( + hintDefault Value = asciiString("default") + hintNumber Value = asciiString("number") + hintString Value = asciiString("string") +) + +type Object struct { + id uint64 + runtime *Runtime + self objectImpl + + weakRefs map[weakMap]Value +} + +type 
iterNextFunc func() (propIterItem, iterNextFunc) + +type PropertyDescriptor struct { + jsDescriptor *Object + + Value Value + + Writable, Configurable, Enumerable Flag + + Getter, Setter Value +} + +func (p *PropertyDescriptor) Empty() bool { + var empty PropertyDescriptor + return *p == empty +} + +func (p *PropertyDescriptor) IsAccessor() bool { + return p.Setter != nil || p.Getter != nil +} + +func (p *PropertyDescriptor) IsData() bool { + return p.Value != nil || p.Writable != FLAG_NOT_SET +} + +func (p *PropertyDescriptor) IsGeneric() bool { + return !p.IsAccessor() && !p.IsData() +} + +func (p *PropertyDescriptor) toValue(r *Runtime) Value { + if p.jsDescriptor != nil { + return p.jsDescriptor + } + if p.Empty() { + return _undefined + } + o := r.NewObject() + s := o.self + + if p.Value != nil { + s._putProp("value", p.Value, true, true, true) + } + + if p.Writable != FLAG_NOT_SET { + s._putProp("writable", valueBool(p.Writable.Bool()), true, true, true) + } + + if p.Enumerable != FLAG_NOT_SET { + s._putProp("enumerable", valueBool(p.Enumerable.Bool()), true, true, true) + } + + if p.Configurable != FLAG_NOT_SET { + s._putProp("configurable", valueBool(p.Configurable.Bool()), true, true, true) + } + + if p.Getter != nil { + s._putProp("get", p.Getter, true, true, true) + } + if p.Setter != nil { + s._putProp("set", p.Setter, true, true, true) + } + + return o +} + +func (p *PropertyDescriptor) complete() { + if p.Getter == nil && p.Setter == nil { + if p.Value == nil { + p.Value = _undefined + } + if p.Writable == FLAG_NOT_SET { + p.Writable = FLAG_FALSE + } + } else { + if p.Getter == nil { + p.Getter = _undefined + } + if p.Setter == nil { + p.Setter = _undefined + } + } + if p.Enumerable == FLAG_NOT_SET { + p.Enumerable = FLAG_FALSE + } + if p.Configurable == FLAG_NOT_SET { + p.Configurable = FLAG_FALSE + } +} + +type objectExportCacheItem map[reflect.Type]interface{} + +type objectExportCtx struct { + cache map[objectImpl]interface{} +} + +type objectImpl 
interface { + sortable + className() string + getStr(p unistring.String, receiver Value) Value + getIdx(p valueInt, receiver Value) Value + getSym(p *Symbol, receiver Value) Value + + getOwnPropStr(unistring.String) Value + getOwnPropIdx(valueInt) Value + getOwnPropSym(*Symbol) Value + + setOwnStr(p unistring.String, v Value, throw bool) bool + setOwnIdx(p valueInt, v Value, throw bool) bool + setOwnSym(p *Symbol, v Value, throw bool) bool + + setForeignStr(p unistring.String, v, receiver Value, throw bool) (res bool, handled bool) + setForeignIdx(p valueInt, v, receiver Value, throw bool) (res bool, handled bool) + setForeignSym(p *Symbol, v, receiver Value, throw bool) (res bool, handled bool) + + hasPropertyStr(unistring.String) bool + hasPropertyIdx(idx valueInt) bool + hasPropertySym(s *Symbol) bool + + hasOwnPropertyStr(unistring.String) bool + hasOwnPropertyIdx(valueInt) bool + hasOwnPropertySym(s *Symbol) bool + + defineOwnPropertyStr(name unistring.String, desc PropertyDescriptor, throw bool) bool + defineOwnPropertyIdx(name valueInt, desc PropertyDescriptor, throw bool) bool + defineOwnPropertySym(name *Symbol, desc PropertyDescriptor, throw bool) bool + + deleteStr(name unistring.String, throw bool) bool + deleteIdx(idx valueInt, throw bool) bool + deleteSym(s *Symbol, throw bool) bool + + toPrimitiveNumber() Value + toPrimitiveString() Value + toPrimitive() Value + assertCallable() (call func(FunctionCall) Value, ok bool) + assertConstructor() func(args []Value, newTarget *Object) *Object + proto() *Object + setProto(proto *Object, throw bool) bool + hasInstance(v Value) bool + isExtensible() bool + preventExtensions(throw bool) bool + enumerateOwnKeys() iterNextFunc + export(ctx *objectExportCtx) interface{} + exportType() reflect.Type + equal(objectImpl) bool + ownKeys(all bool, accum []Value) []Value + ownSymbols(all bool, accum []Value) []Value + ownPropertyKeys(all bool, accum []Value) []Value + + _putProp(name unistring.String, value Value, 
writable, enumerable, configurable bool) Value + _putSym(s *Symbol, prop Value) +} + +type baseObject struct { + class string + val *Object + prototype *Object + extensible bool + + values map[unistring.String]Value + propNames []unistring.String + + lastSortedPropLen, idxPropCount int + + symValues *orderedMap +} + +type guardedObject struct { + baseObject + guardedProps map[unistring.String]struct{} +} + +type primitiveValueObject struct { + baseObject + pValue Value +} + +func (o *primitiveValueObject) export(*objectExportCtx) interface{} { + return o.pValue.Export() +} + +func (o *primitiveValueObject) exportType() reflect.Type { + return o.pValue.ExportType() +} + +type FunctionCall struct { + This Value + Arguments []Value +} + +type ConstructorCall struct { + This *Object + Arguments []Value + NewTarget *Object +} + +func (f FunctionCall) Argument(idx int) Value { + if idx < len(f.Arguments) { + return f.Arguments[idx] + } + return _undefined +} + +func (f ConstructorCall) Argument(idx int) Value { + if idx < len(f.Arguments) { + return f.Arguments[idx] + } + return _undefined +} + +func (o *baseObject) init() { + o.values = make(map[unistring.String]Value) +} + +func (o *baseObject) className() string { + return o.class +} + +func (o *baseObject) hasPropertyStr(name unistring.String) bool { + if o.val.self.hasOwnPropertyStr(name) { + return true + } + if o.prototype != nil { + return o.prototype.self.hasPropertyStr(name) + } + return false +} + +func (o *baseObject) hasPropertyIdx(idx valueInt) bool { + return o.val.self.hasPropertyStr(idx.string()) +} + +func (o *baseObject) hasPropertySym(s *Symbol) bool { + if o.hasOwnPropertySym(s) { + return true + } + if o.prototype != nil { + return o.prototype.self.hasPropertySym(s) + } + return false +} + +func (o *baseObject) getWithOwnProp(prop, p, receiver Value) Value { + if prop == nil && o.prototype != nil { + if receiver == nil { + return o.prototype.get(p, o.val) + } + return o.prototype.get(p, receiver) + 
} + if prop, ok := prop.(*valueProperty); ok { + if receiver == nil { + return prop.get(o.val) + } + return prop.get(receiver) + } + return prop +} + +func (o *baseObject) getStrWithOwnProp(prop Value, name unistring.String, receiver Value) Value { + if prop == nil && o.prototype != nil { + if receiver == nil { + return o.prototype.self.getStr(name, o.val) + } + return o.prototype.self.getStr(name, receiver) + } + if prop, ok := prop.(*valueProperty); ok { + if receiver == nil { + return prop.get(o.val) + } + return prop.get(receiver) + } + return prop +} + +func (o *baseObject) getIdx(idx valueInt, receiver Value) Value { + return o.val.self.getStr(idx.string(), receiver) +} + +func (o *baseObject) getSym(s *Symbol, receiver Value) Value { + return o.getWithOwnProp(o.getOwnPropSym(s), s, receiver) +} + +func (o *baseObject) getStr(name unistring.String, receiver Value) Value { + prop := o.values[name] + if prop == nil { + if o.prototype != nil { + if receiver == nil { + return o.prototype.self.getStr(name, o.val) + } + return o.prototype.self.getStr(name, receiver) + } + } + if prop, ok := prop.(*valueProperty); ok { + if receiver == nil { + return prop.get(o.val) + } + return prop.get(receiver) + } + return prop +} + +func (o *baseObject) getOwnPropIdx(idx valueInt) Value { + return o.val.self.getOwnPropStr(idx.string()) +} + +func (o *baseObject) getOwnPropSym(s *Symbol) Value { + if o.symValues != nil { + return o.symValues.get(s) + } + return nil +} + +func (o *baseObject) getOwnPropStr(name unistring.String) Value { + return o.values[name] +} + +func (o *baseObject) checkDeleteProp(name unistring.String, prop *valueProperty, throw bool) bool { + if !prop.configurable { + o.val.runtime.typeErrorResult(throw, "Cannot delete property '%s' of %s", name, o.val.toString()) + return false + } + return true +} + +func (o *baseObject) checkDelete(name unistring.String, val Value, throw bool) bool { + if val, ok := val.(*valueProperty); ok { + return 
o.checkDeleteProp(name, val, throw) + } + return true +} + +func (o *baseObject) _delete(name unistring.String) { + delete(o.values, name) + for i, n := range o.propNames { + if n == name { + names := o.propNames + if namesMarkedForCopy(names) { + newNames := make([]unistring.String, len(names)-1, shrinkCap(len(names), cap(names))) + copy(newNames, names[:i]) + copy(newNames[i:], names[i+1:]) + o.propNames = newNames + } else { + copy(names[i:], names[i+1:]) + names[len(names)-1] = "" + o.propNames = names[:len(names)-1] + } + if i < o.lastSortedPropLen { + o.lastSortedPropLen-- + if i < o.idxPropCount { + o.idxPropCount-- + } + } + break + } + } +} + +func (o *baseObject) deleteIdx(idx valueInt, throw bool) bool { + return o.val.self.deleteStr(idx.string(), throw) +} + +func (o *baseObject) deleteSym(s *Symbol, throw bool) bool { + if o.symValues != nil { + if val := o.symValues.get(s); val != nil { + if !o.checkDelete(s.desc.string(), val, throw) { + return false + } + o.symValues.remove(s) + } + } + return true +} + +func (o *baseObject) deleteStr(name unistring.String, throw bool) bool { + if val, exists := o.values[name]; exists { + if !o.checkDelete(name, val, throw) { + return false + } + o._delete(name) + } + return true +} + +func (o *baseObject) setProto(proto *Object, throw bool) bool { + current := o.prototype + if current.SameAs(proto) { + return true + } + if !o.extensible { + o.val.runtime.typeErrorResult(throw, "%s is not extensible", o.val) + return false + } + for p := proto; p != nil; p = p.self.proto() { + if p.SameAs(o.val) { + o.val.runtime.typeErrorResult(throw, "Cyclic __proto__ value") + return false + } + if _, ok := p.self.(*proxyObject); ok { + break + } + } + o.prototype = proto + return true +} + +func (o *baseObject) setOwnStr(name unistring.String, val Value, throw bool) bool { + ownDesc := o.values[name] + if ownDesc == nil { + if proto := o.prototype; proto != nil { + // we know it's foreign because prototype loops are not allowed 
+ if res, handled := proto.self.setForeignStr(name, val, o.val, throw); handled { + return res + } + } + // new property + if !o.extensible { + o.val.runtime.typeErrorResult(throw, "Cannot add property %s, object is not extensible", name) + return false + } else { + o.values[name] = val + names := copyNamesIfNeeded(o.propNames, 1) + o.propNames = append(names, name) + } + return true + } + if prop, ok := ownDesc.(*valueProperty); ok { + if !prop.isWritable() { + o.val.runtime.typeErrorResult(throw, "Cannot assign to read only property '%s'", name) + return false + } else { + prop.set(o.val, val) + } + } else { + o.values[name] = val + } + return true +} + +func (o *baseObject) setOwnIdx(idx valueInt, val Value, throw bool) bool { + return o.val.self.setOwnStr(idx.string(), val, throw) +} + +func (o *baseObject) setOwnSym(name *Symbol, val Value, throw bool) bool { + var ownDesc Value + if o.symValues != nil { + ownDesc = o.symValues.get(name) + } + if ownDesc == nil { + if proto := o.prototype; proto != nil { + // we know it's foreign because prototype loops are not allowed + if res, handled := proto.self.setForeignSym(name, val, o.val, throw); handled { + return res + } + } + // new property + if !o.extensible { + o.val.runtime.typeErrorResult(throw, "Cannot add property %s, object is not extensible", name) + return false + } else { + if o.symValues == nil { + o.symValues = newOrderedMap(nil) + } + o.symValues.set(name, val) + } + return true + } + if prop, ok := ownDesc.(*valueProperty); ok { + if !prop.isWritable() { + o.val.runtime.typeErrorResult(throw, "Cannot assign to read only property '%s'", name) + return false + } else { + prop.set(o.val, val) + } + } else { + o.symValues.set(name, val) + } + return true +} + +func (o *baseObject) _setForeignStr(name unistring.String, prop, val, receiver Value, throw bool) (bool, bool) { + if prop != nil { + if prop, ok := prop.(*valueProperty); ok { + if !prop.isWritable() { + o.val.runtime.typeErrorResult(throw, 
"Cannot assign to read only property '%s'", name) + return false, true + } + if prop.setterFunc != nil { + prop.set(receiver, val) + return true, true + } + } + } else { + if proto := o.prototype; proto != nil { + if receiver != proto { + return proto.self.setForeignStr(name, val, receiver, throw) + } + return proto.self.setOwnStr(name, val, throw), true + } + } + return false, false +} + +func (o *baseObject) _setForeignIdx(idx valueInt, prop, val, receiver Value, throw bool) (bool, bool) { + if prop != nil { + if prop, ok := prop.(*valueProperty); ok { + if !prop.isWritable() { + o.val.runtime.typeErrorResult(throw, "Cannot assign to read only property '%d'", idx) + return false, true + } + if prop.setterFunc != nil { + prop.set(receiver, val) + return true, true + } + } + } else { + if proto := o.prototype; proto != nil { + if receiver != proto { + return proto.self.setForeignIdx(idx, val, receiver, throw) + } + return proto.self.setOwnIdx(idx, val, throw), true + } + } + return false, false +} + +func (o *baseObject) setForeignStr(name unistring.String, val, receiver Value, throw bool) (bool, bool) { + return o._setForeignStr(name, o.values[name], val, receiver, throw) +} + +func (o *baseObject) setForeignIdx(name valueInt, val, receiver Value, throw bool) (bool, bool) { + if idx := toIdx(name); idx != math.MaxUint32 { + if o.lastSortedPropLen != len(o.propNames) { + o.fixPropOrder() + } + if o.idxPropCount == 0 { + return o._setForeignIdx(name, name, nil, receiver, throw) + } + } + return o.setForeignStr(name.string(), val, receiver, throw) +} + +func (o *baseObject) setForeignSym(name *Symbol, val, receiver Value, throw bool) (bool, bool) { + var prop Value + if o.symValues != nil { + prop = o.symValues.get(name) + } + if prop != nil { + if prop, ok := prop.(*valueProperty); ok { + if !prop.isWritable() { + o.val.runtime.typeErrorResult(throw, "Cannot assign to read only property '%s'", name) + return false, true + } + if prop.setterFunc != nil { + 
prop.set(receiver, val) + return true, true + } + } + } else { + if proto := o.prototype; proto != nil { + if receiver != o.val { + return proto.self.setForeignSym(name, val, receiver, throw) + } + return proto.self.setOwnSym(name, val, throw), true + } + } + return false, false +} + +func (o *baseObject) hasOwnPropertySym(s *Symbol) bool { + if o.symValues != nil { + return o.symValues.has(s) + } + return false +} + +func (o *baseObject) hasOwnPropertyStr(name unistring.String) bool { + _, exists := o.values[name] + return exists +} + +func (o *baseObject) hasOwnPropertyIdx(idx valueInt) bool { + return o.val.self.hasOwnPropertyStr(idx.string()) +} + +func (o *baseObject) _defineOwnProperty(name unistring.String, existingValue Value, descr PropertyDescriptor, throw bool) (val Value, ok bool) { + + getterObj, _ := descr.Getter.(*Object) + setterObj, _ := descr.Setter.(*Object) + + var existing *valueProperty + + if existingValue == nil { + if !o.extensible { + o.val.runtime.typeErrorResult(throw, "Cannot define property %s, object is not extensible", name) + return nil, false + } + existing = &valueProperty{} + } else { + if existing, ok = existingValue.(*valueProperty); !ok { + existing = &valueProperty{ + writable: true, + enumerable: true, + configurable: true, + value: existingValue, + } + } + + if !existing.configurable { + if descr.Configurable == FLAG_TRUE { + goto Reject + } + if descr.Enumerable != FLAG_NOT_SET && descr.Enumerable.Bool() != existing.enumerable { + goto Reject + } + } + if existing.accessor && descr.Value != nil || !existing.accessor && (getterObj != nil || setterObj != nil) { + if !existing.configurable { + goto Reject + } + } else if !existing.accessor { + if !existing.configurable { + if !existing.writable { + if descr.Writable == FLAG_TRUE { + goto Reject + } + if descr.Value != nil && !descr.Value.SameAs(existing.value) { + goto Reject + } + } + } + } else { + if !existing.configurable { + if descr.Getter != nil && existing.getterFunc 
!= getterObj || descr.Setter != nil && existing.setterFunc != setterObj { + goto Reject + } + } + } + } + + if descr.Writable == FLAG_TRUE && descr.Enumerable == FLAG_TRUE && descr.Configurable == FLAG_TRUE && descr.Value != nil { + return descr.Value, true + } + + if descr.Writable != FLAG_NOT_SET { + existing.writable = descr.Writable.Bool() + } + if descr.Enumerable != FLAG_NOT_SET { + existing.enumerable = descr.Enumerable.Bool() + } + if descr.Configurable != FLAG_NOT_SET { + existing.configurable = descr.Configurable.Bool() + } + + if descr.Value != nil { + existing.value = descr.Value + existing.getterFunc = nil + existing.setterFunc = nil + } + + if descr.Value != nil || descr.Writable != FLAG_NOT_SET { + existing.accessor = false + } + + if descr.Getter != nil { + existing.getterFunc = propGetter(o.val, descr.Getter, o.val.runtime) + existing.value = nil + existing.accessor = true + } + + if descr.Setter != nil { + existing.setterFunc = propSetter(o.val, descr.Setter, o.val.runtime) + existing.value = nil + existing.accessor = true + } + + if !existing.accessor && existing.value == nil { + existing.value = _undefined + } + + return existing, true + +Reject: + o.val.runtime.typeErrorResult(throw, "Cannot redefine property: %s", name) + return nil, false + +} + +func (o *baseObject) defineOwnPropertyStr(name unistring.String, descr PropertyDescriptor, throw bool) bool { + existingVal := o.values[name] + if v, ok := o._defineOwnProperty(name, existingVal, descr, throw); ok { + o.values[name] = v + if existingVal == nil { + names := copyNamesIfNeeded(o.propNames, 1) + o.propNames = append(names, name) + } + return true + } + return false +} + +func (o *baseObject) defineOwnPropertyIdx(idx valueInt, desc PropertyDescriptor, throw bool) bool { + return o.val.self.defineOwnPropertyStr(idx.string(), desc, throw) +} + +func (o *baseObject) defineOwnPropertySym(s *Symbol, descr PropertyDescriptor, throw bool) bool { + var existingVal Value + if o.symValues != nil { 
+ existingVal = o.symValues.get(s) + } + if v, ok := o._defineOwnProperty(s.desc.string(), existingVal, descr, throw); ok { + if o.symValues == nil { + o.symValues = newOrderedMap(nil) + } + o.symValues.set(s, v) + return true + } + return false +} + +func (o *baseObject) _put(name unistring.String, v Value) { + if _, exists := o.values[name]; !exists { + names := copyNamesIfNeeded(o.propNames, 1) + o.propNames = append(names, name) + } + + o.values[name] = v +} + +func valueProp(value Value, writable, enumerable, configurable bool) Value { + if writable && enumerable && configurable { + return value + } + return &valueProperty{ + value: value, + writable: writable, + enumerable: enumerable, + configurable: configurable, + } +} + +func (o *baseObject) _putProp(name unistring.String, value Value, writable, enumerable, configurable bool) Value { + prop := valueProp(value, writable, enumerable, configurable) + o._put(name, prop) + return prop +} + +func (o *baseObject) _putSym(s *Symbol, prop Value) { + if o.symValues == nil { + o.symValues = newOrderedMap(nil) + } + o.symValues.set(s, prop) +} + +func (o *Object) tryPrimitive(methodName unistring.String) Value { + if method, ok := o.self.getStr(methodName, nil).(*Object); ok { + if call, ok := method.self.assertCallable(); ok { + v := call(FunctionCall{ + This: o, + }) + if _, fail := v.(*Object); !fail { + return v + } + } + } + return nil +} + +func (o *Object) genericToPrimitiveNumber() Value { + if v := o.tryPrimitive("valueOf"); v != nil { + return v + } + + if v := o.tryPrimitive("toString"); v != nil { + return v + } + + panic(o.runtime.NewTypeError("Could not convert %v to primitive", o.self)) +} + +func (o *baseObject) toPrimitiveNumber() Value { + return o.val.genericToPrimitiveNumber() +} + +func (o *Object) genericToPrimitiveString() Value { + if v := o.tryPrimitive("toString"); v != nil { + return v + } + + if v := o.tryPrimitive("valueOf"); v != nil { + return v + } + + 
panic(o.runtime.NewTypeError("Could not convert %v to primitive", o.self)) +} + +func (o *Object) genericToPrimitive() Value { + return o.genericToPrimitiveNumber() +} + +func (o *baseObject) toPrimitiveString() Value { + return o.val.genericToPrimitiveString() +} + +func (o *baseObject) toPrimitive() Value { + return o.val.genericToPrimitiveNumber() +} + +func (o *Object) tryExoticToPrimitive(hint Value) Value { + exoticToPrimitive := toMethod(o.self.getSym(SymToPrimitive, nil)) + if exoticToPrimitive != nil { + ret := exoticToPrimitive(FunctionCall{ + This: o, + Arguments: []Value{hint}, + }) + if _, fail := ret.(*Object); !fail { + return ret + } + panic(o.runtime.NewTypeError("Cannot convert object to primitive value")) + } + return nil +} + +func (o *Object) toPrimitiveNumber() Value { + if v := o.tryExoticToPrimitive(hintNumber); v != nil { + return v + } + + return o.self.toPrimitiveNumber() +} + +func (o *Object) toPrimitiveString() Value { + if v := o.tryExoticToPrimitive(hintString); v != nil { + return v + } + + return o.self.toPrimitiveString() +} + +func (o *Object) toPrimitive() Value { + if v := o.tryExoticToPrimitive(hintDefault); v != nil { + return v + } + return o.self.toPrimitive() +} + +func (o *baseObject) assertCallable() (func(FunctionCall) Value, bool) { + return nil, false +} + +func (o *baseObject) assertConstructor() func(args []Value, newTarget *Object) *Object { + return nil +} + +func (o *baseObject) proto() *Object { + return o.prototype +} + +func (o *baseObject) isExtensible() bool { + return o.extensible +} + +func (o *baseObject) preventExtensions(bool) bool { + o.extensible = false + return true +} + +func (o *baseObject) sortLen() int64 { + return toLength(o.val.self.getStr("length", nil)) +} + +func (o *baseObject) sortGet(i int64) Value { + return o.val.self.getIdx(valueInt(i), nil) +} + +func (o *baseObject) swap(i, j int64) { + ii := valueInt(i) + jj := valueInt(j) + + x := o.val.self.getIdx(ii, nil) + y := 
o.val.self.getIdx(jj, nil) + + o.val.self.setOwnIdx(ii, y, false) + o.val.self.setOwnIdx(jj, x, false) +} + +func (o *baseObject) export(ctx *objectExportCtx) interface{} { + if v, exists := ctx.get(o); exists { + return v + } + keys := o.ownKeys(false, nil) + m := make(map[string]interface{}, len(keys)) + ctx.put(o, m) + for _, itemName := range keys { + itemNameStr := itemName.String() + v := o.val.self.getStr(itemName.string(), nil) + if v != nil { + m[itemNameStr] = exportValue(v, ctx) + } else { + m[itemNameStr] = nil + } + } + + return m +} + +func (o *baseObject) exportType() reflect.Type { + return reflectTypeMap +} + +type enumerableFlag int + +const ( + _ENUM_UNKNOWN enumerableFlag = iota + _ENUM_FALSE + _ENUM_TRUE +) + +type propIterItem struct { + name unistring.String + value Value // set only when enumerable == _ENUM_UNKNOWN + enumerable enumerableFlag +} + +type objectPropIter struct { + o *baseObject + propNames []unistring.String + idx int +} + +type recursivePropIter struct { + o objectImpl + cur iterNextFunc + seen map[unistring.String]struct{} +} + +type enumerableIter struct { + wrapped iterNextFunc +} + +func (i *enumerableIter) next() (propIterItem, iterNextFunc) { + for { + var item propIterItem + item, i.wrapped = i.wrapped() + if i.wrapped == nil { + return item, nil + } + if item.enumerable == _ENUM_FALSE { + continue + } + if item.enumerable == _ENUM_UNKNOWN { + if prop, ok := item.value.(*valueProperty); ok { + if !prop.enumerable { + continue + } + } + } + return item, i.next + } +} + +func (i *recursivePropIter) next() (propIterItem, iterNextFunc) { + for { + var item propIterItem + item, i.cur = i.cur() + if i.cur == nil { + if proto := i.o.proto(); proto != nil { + i.cur = proto.self.enumerateOwnKeys() + i.o = proto.self + continue + } + return propIterItem{}, nil + } + if _, exists := i.seen[item.name]; !exists { + i.seen[item.name] = struct{}{} + return item, i.next + } + } +} + +func enumerateRecursive(o *Object) iterNextFunc { + 
return (&enumerableIter{ + wrapped: (&recursivePropIter{ + o: o.self, + cur: o.self.enumerateOwnKeys(), + seen: make(map[unistring.String]struct{}), + }).next, + }).next +} + +func (i *objectPropIter) next() (propIterItem, iterNextFunc) { + for i.idx < len(i.propNames) { + name := i.propNames[i.idx] + i.idx++ + prop := i.o.values[name] + if prop != nil { + return propIterItem{name: name, value: prop}, i.next + } + } + clearNamesCopyMarker(i.propNames) + return propIterItem{}, nil +} + +var copyMarker = unistring.String(" ") + +// Set a copy-on-write flag so that any subsequent modifications of anything below the current length +// trigger a copy. +// The marker is a special value put at the index position of cap-1. Capacity is set so that the marker is +// beyond the current length (therefore invisible to normal slice operations). +// This function is called before an iteration begins to avoid copying of the names array if +// there are no modifications within the iteration. +// Note that the copying also occurs in two cases: nested iterations (on the same object) and +// iterations after a previously abandoned iteration (because there is currently no mechanism to close an +// iterator). It is still better than copying every time. 
+func prepareNamesForCopy(names []unistring.String) []unistring.String { + if len(names) == 0 { + return names + } + if namesMarkedForCopy(names) || cap(names) == len(names) { + var newcap int + if cap(names) == len(names) { + newcap = growCap(len(names)+1, len(names), cap(names)) + } else { + newcap = cap(names) + } + newNames := make([]unistring.String, len(names), newcap) + copy(newNames, names) + names = newNames + } + names[cap(names)-1 : cap(names)][0] = copyMarker + return names +} + +func namesMarkedForCopy(names []unistring.String) bool { + return cap(names) > len(names) && names[cap(names)-1 : cap(names)][0] == copyMarker +} + +func clearNamesCopyMarker(names []unistring.String) { + if cap(names) > len(names) { + names[cap(names)-1 : cap(names)][0] = "" + } +} + +func copyNamesIfNeeded(names []unistring.String, extraCap int) []unistring.String { + if namesMarkedForCopy(names) && len(names)+extraCap >= cap(names) { + var newcap int + newsize := len(names) + extraCap + 1 + if newsize > cap(names) { + newcap = growCap(newsize, len(names), cap(names)) + } else { + newcap = cap(names) + } + newNames := make([]unistring.String, len(names), newcap) + copy(newNames, names) + return newNames + } + return names +} + +func (o *baseObject) enumerateOwnKeys() iterNextFunc { + if len(o.propNames) > o.lastSortedPropLen { + o.fixPropOrder() + } + propNames := prepareNamesForCopy(o.propNames) + o.propNames = propNames + return (&objectPropIter{ + o: o, + propNames: propNames, + }).next +} + +func (o *baseObject) equal(objectImpl) bool { + // Rely on parent reference comparison + return false +} + +// Reorder property names so that any integer properties are shifted to the beginning of the list +// in ascending order. This is to conform to https://262.ecma-international.org/#sec-ordinaryownpropertykeys. +// Personally I think this requirement is strange. 
I can sort of understand where they are coming from, +// this way arrays can be specified just as objects with a 'magic' length property. However, I think +// it's safe to assume most devs don't use Objects to store integer properties. Therefore, performing +// property type checks when adding (and potentially looking up) properties would be unreasonable. +// Instead, we keep insertion order and only change it when (if) the properties get enumerated. +func (o *baseObject) fixPropOrder() { + names := o.propNames + for i := o.lastSortedPropLen; i < len(names); i++ { + name := names[i] + if idx := strToArrayIdx(name); idx != math.MaxUint32 { + k := sort.Search(o.idxPropCount, func(j int) bool { + return strToArrayIdx(names[j]) >= idx + }) + if k < i { + if namesMarkedForCopy(names) { + newNames := make([]unistring.String, len(names), cap(names)) + copy(newNames[:k], names) + copy(newNames[k+1:i+1], names[k:i]) + copy(newNames[i+1:], names[i+1:]) + names = newNames + o.propNames = names + } else { + copy(names[k+1:i+1], names[k:i]) + } + names[k] = name + } + o.idxPropCount++ + } + } + o.lastSortedPropLen = len(names) +} + +func (o *baseObject) ownKeys(all bool, keys []Value) []Value { + if len(o.propNames) > o.lastSortedPropLen { + o.fixPropOrder() + } + if all { + for _, k := range o.propNames { + keys = append(keys, stringValueFromRaw(k)) + } + } else { + for _, k := range o.propNames { + prop := o.values[k] + if prop, ok := prop.(*valueProperty); ok && !prop.enumerable { + continue + } + keys = append(keys, stringValueFromRaw(k)) + } + } + return keys +} + +func (o *baseObject) ownSymbols(all bool, accum []Value) []Value { + if o.symValues != nil { + iter := o.symValues.newIter() + if all { + for { + entry := iter.next() + if entry == nil { + break + } + accum = append(accum, entry.key) + } + } else { + for { + entry := iter.next() + if entry == nil { + break + } + if prop, ok := entry.value.(*valueProperty); ok { + if !prop.enumerable { + continue + } + } + accum 
= append(accum, entry.key) + } + } + } + + return accum +} + +func (o *baseObject) ownPropertyKeys(all bool, accum []Value) []Value { + return o.ownSymbols(all, o.val.self.ownKeys(all, accum)) +} + +func (o *baseObject) hasInstance(Value) bool { + panic(o.val.runtime.NewTypeError("Expecting a function in instanceof check, but got %s", o.val.toString())) +} + +func toMethod(v Value) func(FunctionCall) Value { + if v == nil || IsUndefined(v) || IsNull(v) { + return nil + } + if obj, ok := v.(*Object); ok { + if call, ok := obj.self.assertCallable(); ok { + return call + } + } + panic(typeError(fmt.Sprintf("%s is not a method", v.String()))) +} + +func instanceOfOperator(o Value, c *Object) bool { + if instOfHandler := toMethod(c.self.getSym(SymHasInstance, c)); instOfHandler != nil { + return instOfHandler(FunctionCall{ + This: c, + Arguments: []Value{o}, + }).ToBoolean() + } + + return c.self.hasInstance(o) +} + +func (o *Object) get(p Value, receiver Value) Value { + switch p := p.(type) { + case valueInt: + return o.self.getIdx(p, receiver) + case *Symbol: + return o.self.getSym(p, receiver) + default: + return o.self.getStr(p.string(), receiver) + } +} + +func (o *Object) getOwnProp(p Value) Value { + switch p := p.(type) { + case valueInt: + return o.self.getOwnPropIdx(p) + case *Symbol: + return o.self.getOwnPropSym(p) + default: + return o.self.getOwnPropStr(p.string()) + } +} + +func (o *Object) hasOwnProperty(p Value) bool { + switch p := p.(type) { + case valueInt: + return o.self.hasOwnPropertyIdx(p) + case *Symbol: + return o.self.hasOwnPropertySym(p) + default: + return o.self.hasOwnPropertyStr(p.string()) + } +} + +func (o *Object) hasProperty(p Value) bool { + switch p := p.(type) { + case valueInt: + return o.self.hasPropertyIdx(p) + case *Symbol: + return o.self.hasPropertySym(p) + default: + return o.self.hasPropertyStr(p.string()) + } +} + +func (o *Object) setStr(name unistring.String, val, receiver Value, throw bool) bool { + if receiver == o { + 
return o.self.setOwnStr(name, val, throw) + } else { + if res, ok := o.self.setForeignStr(name, val, receiver, throw); !ok { + if robj, ok := receiver.(*Object); ok { + if prop := robj.self.getOwnPropStr(name); prop != nil { + if desc, ok := prop.(*valueProperty); ok { + if desc.accessor { + o.runtime.typeErrorResult(throw, "Receiver property %s is an accessor", name) + return false + } + if !desc.writable { + o.runtime.typeErrorResult(throw, "Cannot assign to read only property '%s'", name) + return false + } + } + robj.self.defineOwnPropertyStr(name, PropertyDescriptor{Value: val}, throw) + } else { + robj.self.defineOwnPropertyStr(name, PropertyDescriptor{ + Value: val, + Writable: FLAG_TRUE, + Configurable: FLAG_TRUE, + Enumerable: FLAG_TRUE, + }, throw) + } + } else { + o.runtime.typeErrorResult(throw, "Receiver is not an object: %v", receiver) + return false + } + } else { + return res + } + } + return true +} + +func (o *Object) set(name Value, val, receiver Value, throw bool) bool { + switch name := name.(type) { + case valueInt: + return o.setIdx(name, val, receiver, throw) + case *Symbol: + return o.setSym(name, val, receiver, throw) + default: + return o.setStr(name.string(), val, receiver, throw) + } +} + +func (o *Object) setOwn(name Value, val Value, throw bool) bool { + switch name := name.(type) { + case valueInt: + return o.self.setOwnIdx(name, val, throw) + case *Symbol: + return o.self.setOwnSym(name, val, throw) + default: + return o.self.setOwnStr(name.string(), val, throw) + } +} + +func (o *Object) setIdx(name valueInt, val, receiver Value, throw bool) bool { + if receiver == o { + return o.self.setOwnIdx(name, val, throw) + } else { + if res, ok := o.self.setForeignIdx(name, val, receiver, throw); !ok { + if robj, ok := receiver.(*Object); ok { + if prop := robj.self.getOwnPropIdx(name); prop != nil { + if desc, ok := prop.(*valueProperty); ok { + if desc.accessor { + o.runtime.typeErrorResult(throw, "Receiver property %s is an accessor", 
name) + return false + } + if !desc.writable { + o.runtime.typeErrorResult(throw, "Cannot assign to read only property '%s'", name) + return false + } + } + robj.self.defineOwnPropertyIdx(name, PropertyDescriptor{Value: val}, throw) + } else { + robj.self.defineOwnPropertyIdx(name, PropertyDescriptor{ + Value: val, + Writable: FLAG_TRUE, + Configurable: FLAG_TRUE, + Enumerable: FLAG_TRUE, + }, throw) + } + } else { + o.runtime.typeErrorResult(throw, "Receiver is not an object: %v", receiver) + return false + } + } else { + return res + } + } + return true +} + +func (o *Object) setSym(name *Symbol, val, receiver Value, throw bool) bool { + if receiver == o { + return o.self.setOwnSym(name, val, throw) + } else { + if res, ok := o.self.setForeignSym(name, val, receiver, throw); !ok { + if robj, ok := receiver.(*Object); ok { + if prop := robj.self.getOwnPropSym(name); prop != nil { + if desc, ok := prop.(*valueProperty); ok { + if desc.accessor { + o.runtime.typeErrorResult(throw, "Receiver property %s is an accessor", name) + return false + } + if !desc.writable { + o.runtime.typeErrorResult(throw, "Cannot assign to read only property '%s'", name) + return false + } + } + robj.self.defineOwnPropertySym(name, PropertyDescriptor{Value: val}, throw) + } else { + robj.self.defineOwnPropertySym(name, PropertyDescriptor{ + Value: val, + Writable: FLAG_TRUE, + Configurable: FLAG_TRUE, + Enumerable: FLAG_TRUE, + }, throw) + } + } else { + o.runtime.typeErrorResult(throw, "Receiver is not an object: %v", receiver) + return false + } + } else { + return res + } + } + return true +} + +func (o *Object) delete(n Value, throw bool) bool { + switch n := n.(type) { + case valueInt: + return o.self.deleteIdx(n, throw) + case *Symbol: + return o.self.deleteSym(n, throw) + default: + return o.self.deleteStr(n.string(), throw) + } +} + +func (o *Object) defineOwnProperty(n Value, desc PropertyDescriptor, throw bool) bool { + switch n := n.(type) { + case valueInt: + return 
o.self.defineOwnPropertyIdx(n, desc, throw) + case *Symbol: + return o.self.defineOwnPropertySym(n, desc, throw) + default: + return o.self.defineOwnPropertyStr(n.string(), desc, throw) + } +} + +func (o *Object) getWeakRefs() map[weakMap]Value { + refs := o.weakRefs + if refs == nil { + refs = make(map[weakMap]Value) + o.weakRefs = refs + } + return refs +} + +func (o *Object) getId() uint64 { + id := o.id + if id == 0 { + id = o.runtime.genId() + o.id = id + } + return id +} + +func (o *guardedObject) guard(props ...unistring.String) { + if o.guardedProps == nil { + o.guardedProps = make(map[unistring.String]struct{}) + } + for _, p := range props { + o.guardedProps[p] = struct{}{} + } +} + +func (o *guardedObject) check(p unistring.String) { + if _, exists := o.guardedProps[p]; exists { + o.val.self = &o.baseObject + } +} + +func (o *guardedObject) setOwnStr(p unistring.String, v Value, throw bool) bool { + res := o.baseObject.setOwnStr(p, v, throw) + if res { + o.check(p) + } + return res +} + +func (o *guardedObject) defineOwnPropertyStr(name unistring.String, desc PropertyDescriptor, throw bool) bool { + res := o.baseObject.defineOwnPropertyStr(name, desc, throw) + if res { + o.check(name) + } + return res +} + +func (o *guardedObject) deleteStr(name unistring.String, throw bool) bool { + res := o.baseObject.deleteStr(name, throw) + if res { + o.check(name) + } + return res +} + +func (ctx *objectExportCtx) get(key objectImpl) (interface{}, bool) { + if v, exists := ctx.cache[key]; exists { + if item, ok := v.(objectExportCacheItem); ok { + r, exists := item[key.exportType()] + return r, exists + } else { + return v, true + } + } + return nil, false +} + +func (ctx *objectExportCtx) getTyped(key objectImpl, typ reflect.Type) (interface{}, bool) { + if v, exists := ctx.cache[key]; exists { + if item, ok := v.(objectExportCacheItem); ok { + r, exists := item[typ] + return r, exists + } else { + if reflect.TypeOf(v) == typ { + return v, true + } + } + } + return 
nil, false +} + +func (ctx *objectExportCtx) put(key objectImpl, value interface{}) { + if ctx.cache == nil { + ctx.cache = make(map[objectImpl]interface{}) + } + if item, ok := ctx.cache[key].(objectExportCacheItem); ok { + item[key.exportType()] = value + } else { + ctx.cache[key] = value + } +} + +func (ctx *objectExportCtx) putTyped(key objectImpl, typ reflect.Type, value interface{}) { + if ctx.cache == nil { + ctx.cache = make(map[objectImpl]interface{}) + } + v, exists := ctx.cache[key] + if exists { + if item, ok := ctx.cache[key].(objectExportCacheItem); ok { + item[typ] = value + } else { + m := make(objectExportCacheItem, 2) + m[key.exportType()] = v + m[typ] = value + ctx.cache[key] = m + } + } else { + m := make(objectExportCacheItem) + m[typ] = value + ctx.cache[key] = m + } +} diff --git a/vendor/github.com/dop251/goja/object_args.go b/vendor/github.com/dop251/goja/object_args.go new file mode 100644 index 0000000000..3cc705853c --- /dev/null +++ b/vendor/github.com/dop251/goja/object_args.go @@ -0,0 +1,139 @@ +package goja + +import "github.com/dop251/goja/unistring" + +type argumentsObject struct { + baseObject + length int +} + +type mappedProperty struct { + valueProperty + v *Value +} + +func (a *argumentsObject) getStr(name unistring.String, receiver Value) Value { + return a.getStrWithOwnProp(a.getOwnPropStr(name), name, receiver) +} + +func (a *argumentsObject) getOwnPropStr(name unistring.String) Value { + if mapped, ok := a.values[name].(*mappedProperty); ok { + if mapped.writable && mapped.enumerable && mapped.configurable { + return *mapped.v + } + return &valueProperty{ + value: *mapped.v, + writable: mapped.writable, + configurable: mapped.configurable, + enumerable: mapped.enumerable, + } + } + + return a.baseObject.getOwnPropStr(name) +} + +func (a *argumentsObject) init() { + a.baseObject.init() + a._putProp("length", intToValue(int64(a.length)), true, false, true) +} + +func (a *argumentsObject) setOwnStr(name unistring.String, val 
Value, throw bool) bool { + if prop, ok := a.values[name].(*mappedProperty); ok { + if !prop.writable { + a.val.runtime.typeErrorResult(throw, "Property is not writable: %s", name) + return false + } + *prop.v = val + return true + } + return a.baseObject.setOwnStr(name, val, throw) +} + +func (a *argumentsObject) setForeignStr(name unistring.String, val, receiver Value, throw bool) (bool, bool) { + return a._setForeignStr(name, a.getOwnPropStr(name), val, receiver, throw) +} + +func (a *argumentsObject) deleteStr(name unistring.String, throw bool) bool { + if prop, ok := a.values[name].(*mappedProperty); ok { + if !a.checkDeleteProp(name, &prop.valueProperty, throw) { + return false + } + a._delete(name) + return true + } + + return a.baseObject.deleteStr(name, throw) +} + +type argumentsPropIter struct { + wrapped iterNextFunc +} + +func (i *argumentsPropIter) next() (propIterItem, iterNextFunc) { + var item propIterItem + item, i.wrapped = i.wrapped() + if i.wrapped == nil { + return propIterItem{}, nil + } + if prop, ok := item.value.(*mappedProperty); ok { + item.value = *prop.v + } + return item, i.next +} + +func (a *argumentsObject) enumerateOwnKeys() iterNextFunc { + return (&argumentsPropIter{ + wrapped: a.baseObject.enumerateOwnKeys(), + }).next +} + +func (a *argumentsObject) defineOwnPropertyStr(name unistring.String, descr PropertyDescriptor, throw bool) bool { + if mapped, ok := a.values[name].(*mappedProperty); ok { + existing := &valueProperty{ + configurable: mapped.configurable, + writable: true, + enumerable: mapped.enumerable, + value: *mapped.v, + } + + val, ok := a.baseObject._defineOwnProperty(name, existing, descr, throw) + if !ok { + return false + } + + if prop, ok := val.(*valueProperty); ok { + if !prop.accessor { + *mapped.v = prop.value + } + if prop.accessor || !prop.writable { + a._put(name, prop) + return true + } + mapped.configurable = prop.configurable + mapped.enumerable = prop.enumerable + } else { + *mapped.v = val + 
mapped.configurable = true + mapped.enumerable = true + } + + return true + } + + return a.baseObject.defineOwnPropertyStr(name, descr, throw) +} + +func (a *argumentsObject) export(ctx *objectExportCtx) interface{} { + if v, exists := ctx.get(a); exists { + return v + } + arr := make([]interface{}, a.length) + ctx.put(a, arr) + for i := range arr { + v := a.getIdx(valueInt(int64(i)), nil) + if v != nil { + arr[i] = exportValue(v, ctx) + } + } + return arr +} diff --git a/vendor/github.com/dop251/goja/object_dynamic.go b/vendor/github.com/dop251/goja/object_dynamic.go new file mode 100644 index 0000000000..6cf97d67f6 --- /dev/null +++ b/vendor/github.com/dop251/goja/object_dynamic.go @@ -0,0 +1,739 @@ +package goja + +import ( + "fmt" + "reflect" + "strconv" + + "github.com/dop251/goja/unistring" +) + +/* +DynamicObject is an interface representing a handler for a dynamic Object. Such an object can be created +using the Runtime.NewDynamicObject() method. + +Note that Runtime.ToValue() does not have any special treatment for DynamicObject. The only way to create +a dynamic object is by using the Runtime.NewDynamicObject() method. This is done deliberately to avoid +silent code breaks when this interface changes. +*/ +type DynamicObject interface { + // Get a property value for the key. May return nil if the property does not exist. + Get(key string) Value + // Set a property value for the key. Return true if success, false otherwise. + Set(key string, val Value) bool + // Has should return true if and only if the property exists. + Has(key string) bool + // Delete the property for the key. Returns true on success (note, that includes missing property). + Delete(key string) bool + // Keys returns a list of all existing property keys. 
There are no checks for duplicates or to make sure + // that the order conforms to https://262.ecma-international.org/#sec-ordinaryownpropertykeys + Keys() []string +} + +/* +DynamicArray is an interface representing a handler for a dynamic array Object. Such an object can be created +using the Runtime.NewDynamicArray() method. + +Any integer property key or a string property key that can be parsed into an int value (including negative +ones) is treated as an index and passed to the trap methods of the DynamicArray. Note this is different from +the regular ECMAScript arrays which only support positive indexes up to 2^32-1. + +DynamicArray cannot be sparse, i.e. hasOwnProperty(num) will return true for num >= 0 && num < Len(). Deleting +such a property is equivalent to setting it to undefined. Note that this creates a slight peculiarity because +hasOwnProperty() will still return true, even after deletion. + +Note that Runtime.ToValue() does not have any special treatment for DynamicArray. The only way to create +a dynamic array is by using the Runtime.NewDynamicArray() method. This is done deliberately to avoid +silent code breaks when this interface changes. +*/ +type DynamicArray interface { + // Len returns the current array length. + Len() int + // Get an item at index idx. Note that idx may be any integer, negative or beyond the current length. + Get(idx int) Value + // Set an item at index idx. Note that idx may be any integer, negative or beyond the current length. + // The expected behaviour when it's beyond length is that the array's length is increased to accommodate + // the item. All elements in the 'new' section of the array should be zeroed. + Set(idx int, val Value) bool + // SetLen is called when the array's 'length' property is changed. If the length is increased all elements in the + // 'new' section of the array should be zeroed. 
+ SetLen(int) bool +} + +type baseDynamicObject struct { + val *Object + prototype *Object +} + +type dynamicObject struct { + baseDynamicObject + d DynamicObject +} + +type dynamicArray struct { + baseDynamicObject + a DynamicArray +} + +/* +NewDynamicObject creates an Object backed by the provided DynamicObject handler. + +All properties of this Object are Writable, Enumerable and Configurable data properties. Any attempt to define +a property that does not conform to this will fail. + +The Object is always extensible and cannot be made non-extensible. Object.preventExtensions() will fail. + +The Object's prototype is initially set to Object.prototype, but can be changed using regular mechanisms +(Object.SetPrototype() in Go or Object.setPrototypeOf() in JS). + +The Object cannot have own Symbol properties, however its prototype can. If you need an iterator support for +example, you could create a regular object, set Symbol.iterator on that object and then use it as a +prototype. See TestDynamicObjectCustomProto for more details. + +Export() returns the original DynamicObject. + +This mechanism is similar to ECMAScript Proxy, however because all properties are enumerable and the object +is always extensible there is no need for invariant checks which removes the need to have a target object and +makes it a lot more efficient. +*/ +func (r *Runtime) NewDynamicObject(d DynamicObject) *Object { + v := &Object{runtime: r} + o := &dynamicObject{ + d: d, + baseDynamicObject: baseDynamicObject{ + val: v, + prototype: r.global.ObjectPrototype, + }, + } + v.self = o + return v +} + +/* +NewDynamicArray creates an array Object backed by the provided DynamicArray handler. +It is similar to NewDynamicObject, the differences are: + +- the Object is an array (i.e. Array.isArray() will return true and it will have the length property). + +- the prototype will be initially set to Array.prototype. + +- the Object cannot have any own string properties except for the 'length'. 
+*/ +func (r *Runtime) NewDynamicArray(a DynamicArray) *Object { + v := &Object{runtime: r} + o := &dynamicArray{ + a: a, + baseDynamicObject: baseDynamicObject{ + val: v, + prototype: r.global.ArrayPrototype, + }, + } + v.self = o + return v +} + +func (*dynamicObject) sortLen() int64 { + return 0 +} + +func (*dynamicObject) sortGet(i int64) Value { + return nil +} + +func (*dynamicObject) swap(i int64, i2 int64) { +} + +func (*dynamicObject) className() string { + return classObject +} + +func (o *baseDynamicObject) getParentStr(p unistring.String, receiver Value) Value { + if proto := o.prototype; proto != nil { + if receiver == nil { + return proto.self.getStr(p, o.val) + } + return proto.self.getStr(p, receiver) + } + return nil +} + +func (o *dynamicObject) getStr(p unistring.String, receiver Value) Value { + prop := o.d.Get(p.String()) + if prop == nil { + return o.getParentStr(p, receiver) + } + return prop +} + +func (o *baseDynamicObject) getParentIdx(p valueInt, receiver Value) Value { + if proto := o.prototype; proto != nil { + if receiver == nil { + return proto.self.getIdx(p, o.val) + } + return proto.self.getIdx(p, receiver) + } + return nil +} + +func (o *dynamicObject) getIdx(p valueInt, receiver Value) Value { + prop := o.d.Get(p.String()) + if prop == nil { + return o.getParentIdx(p, receiver) + } + return prop +} + +func (o *baseDynamicObject) getSym(p *Symbol, receiver Value) Value { + if proto := o.prototype; proto != nil { + if receiver == nil { + return proto.self.getSym(p, o.val) + } + return proto.self.getSym(p, receiver) + } + return nil +} + +func (o *dynamicObject) getOwnPropStr(u unistring.String) Value { + return o.d.Get(u.String()) +} + +func (o *dynamicObject) getOwnPropIdx(v valueInt) Value { + return o.d.Get(v.String()) +} + +func (*baseDynamicObject) getOwnPropSym(*Symbol) Value { + return nil +} + +func (o *dynamicObject) _set(prop string, v Value, throw bool) bool { + if o.d.Set(prop, v) { + return true + } + 
o.val.runtime.typeErrorResult(throw, "'Set' on a dynamic object returned false") + return false +} + +func (o *baseDynamicObject) _setSym(throw bool) { + o.val.runtime.typeErrorResult(throw, "Dynamic objects do not support Symbol properties") +} + +func (o *dynamicObject) setOwnStr(p unistring.String, v Value, throw bool) bool { + prop := p.String() + if !o.d.Has(prop) { + if proto := o.prototype; proto != nil { + // we know it's foreign because prototype loops are not allowed + if res, handled := proto.self.setForeignStr(p, v, o.val, throw); handled { + return res + } + } + } + return o._set(prop, v, throw) +} + +func (o *dynamicObject) setOwnIdx(p valueInt, v Value, throw bool) bool { + prop := p.String() + if !o.d.Has(prop) { + if proto := o.prototype; proto != nil { + // we know it's foreign because prototype loops are not allowed + if res, handled := proto.self.setForeignIdx(p, v, o.val, throw); handled { + return res + } + } + } + return o._set(prop, v, throw) +} + +func (o *baseDynamicObject) setOwnSym(s *Symbol, v Value, throw bool) bool { + if proto := o.prototype; proto != nil { + // we know it's foreign because prototype loops are not allowed + if res, handled := proto.self.setForeignSym(s, v, o.val, throw); handled { + return res + } + } + o._setSym(throw) + return false +} + +func (o *baseDynamicObject) setParentForeignStr(p unistring.String, v, receiver Value, throw bool) (res bool, handled bool) { + if proto := o.prototype; proto != nil { + if receiver != proto { + return proto.self.setForeignStr(p, v, receiver, throw) + } + return proto.self.setOwnStr(p, v, throw), true + } + return false, false +} + +func (o *dynamicObject) setForeignStr(p unistring.String, v, receiver Value, throw bool) (res bool, handled bool) { + prop := p.String() + if !o.d.Has(prop) { + return o.setParentForeignStr(p, v, receiver, throw) + } + return false, false +} + +func (o *baseDynamicObject) setParentForeignIdx(p valueInt, v, receiver Value, throw bool) (res bool, handled 
bool) { + if proto := o.prototype; proto != nil { + if receiver != proto { + return proto.self.setForeignIdx(p, v, receiver, throw) + } + return proto.self.setOwnIdx(p, v, throw), true + } + return false, false +} + +func (o *dynamicObject) setForeignIdx(p valueInt, v, receiver Value, throw bool) (res bool, handled bool) { + prop := p.String() + if !o.d.Has(prop) { + return o.setParentForeignIdx(p, v, receiver, throw) + } + return false, false +} + +func (o *baseDynamicObject) setForeignSym(p *Symbol, v, receiver Value, throw bool) (res bool, handled bool) { + if proto := o.prototype; proto != nil { + if receiver != proto { + return proto.self.setForeignSym(p, v, receiver, throw) + } + return proto.self.setOwnSym(p, v, throw), true + } + return false, false +} + +func (o *dynamicObject) hasPropertyStr(u unistring.String) bool { + if o.hasOwnPropertyStr(u) { + return true + } + if proto := o.prototype; proto != nil { + return proto.self.hasPropertyStr(u) + } + return false +} + +func (o *dynamicObject) hasPropertyIdx(idx valueInt) bool { + if o.hasOwnPropertyIdx(idx) { + return true + } + if proto := o.prototype; proto != nil { + return proto.self.hasPropertyIdx(idx) + } + return false +} + +func (o *baseDynamicObject) hasPropertySym(s *Symbol) bool { + if proto := o.prototype; proto != nil { + return proto.self.hasPropertySym(s) + } + return false +} + +func (o *dynamicObject) hasOwnPropertyStr(u unistring.String) bool { + return o.d.Has(u.String()) +} + +func (o *dynamicObject) hasOwnPropertyIdx(v valueInt) bool { + return o.d.Has(v.String()) +} + +func (*baseDynamicObject) hasOwnPropertySym(_ *Symbol) bool { + return false +} + +func (o *baseDynamicObject) checkDynamicObjectPropertyDescr(name fmt.Stringer, descr PropertyDescriptor, throw bool) bool { + if descr.Getter != nil || descr.Setter != nil { + o.val.runtime.typeErrorResult(throw, "Dynamic objects do not support accessor properties") + return false + } + if descr.Writable == FLAG_FALSE { + 
o.val.runtime.typeErrorResult(throw, "Dynamic object field %q cannot be made read-only", name.String()) + return false + } + if descr.Enumerable == FLAG_FALSE { + o.val.runtime.typeErrorResult(throw, "Dynamic object field %q cannot be made non-enumerable", name.String()) + return false + } + if descr.Configurable == FLAG_FALSE { + o.val.runtime.typeErrorResult(throw, "Dynamic object field %q cannot be made non-configurable", name.String()) + return false + } + return true +} + +func (o *dynamicObject) defineOwnPropertyStr(name unistring.String, desc PropertyDescriptor, throw bool) bool { + if o.checkDynamicObjectPropertyDescr(name, desc, throw) { + return o._set(name.String(), desc.Value, throw) + } + return false +} + +func (o *dynamicObject) defineOwnPropertyIdx(name valueInt, desc PropertyDescriptor, throw bool) bool { + if o.checkDynamicObjectPropertyDescr(name, desc, throw) { + return o._set(name.String(), desc.Value, throw) + } + return false +} + +func (o *baseDynamicObject) defineOwnPropertySym(name *Symbol, desc PropertyDescriptor, throw bool) bool { + o._setSym(throw) + return false +} + +func (o *dynamicObject) _delete(prop string, throw bool) bool { + if o.d.Delete(prop) { + return true + } + o.val.runtime.typeErrorResult(throw, "Could not delete property %q of a dynamic object", prop) + return false +} + +func (o *dynamicObject) deleteStr(name unistring.String, throw bool) bool { + return o._delete(name.String(), throw) +} + +func (o *dynamicObject) deleteIdx(idx valueInt, throw bool) bool { + return o._delete(idx.String(), throw) +} + +func (*baseDynamicObject) deleteSym(_ *Symbol, _ bool) bool { + return true +} + +func (o *baseDynamicObject) toPrimitiveNumber() Value { + return o.val.genericToPrimitiveNumber() +} + +func (o *baseDynamicObject) toPrimitiveString() Value { + return o.val.genericToPrimitiveString() +} + +func (o *baseDynamicObject) toPrimitive() Value { + return o.val.genericToPrimitive() +} + +func (o *baseDynamicObject) 
assertCallable() (call func(FunctionCall) Value, ok bool) { + return nil, false +} + +func (*baseDynamicObject) assertConstructor() func(args []Value, newTarget *Object) *Object { + return nil +} + +func (o *baseDynamicObject) proto() *Object { + return o.prototype +} + +func (o *baseDynamicObject) setProto(proto *Object, throw bool) bool { + o.prototype = proto + return true +} + +func (o *baseDynamicObject) hasInstance(v Value) bool { + panic(o.val.runtime.NewTypeError("Expecting a function in instanceof check, but got a dynamic object")) +} + +func (*baseDynamicObject) isExtensible() bool { + return true +} + +func (o *baseDynamicObject) preventExtensions(throw bool) bool { + o.val.runtime.typeErrorResult(throw, "Cannot make a dynamic object non-extensible") + return false +} + +type dynamicObjectPropIter struct { + o *dynamicObject + propNames []string + idx int +} + +func (i *dynamicObjectPropIter) next() (propIterItem, iterNextFunc) { + for i.idx < len(i.propNames) { + name := i.propNames[i.idx] + i.idx++ + if i.o.d.Has(name) { + return propIterItem{name: unistring.NewFromString(name), enumerable: _ENUM_TRUE}, i.next + } + } + return propIterItem{}, nil +} + +func (o *dynamicObject) enumerateOwnKeys() iterNextFunc { + keys := o.d.Keys() + return (&dynamicObjectPropIter{ + o: o, + propNames: keys, + }).next +} + +func (o *dynamicObject) export(ctx *objectExportCtx) interface{} { + return o.d +} + +func (o *dynamicObject) exportType() reflect.Type { + return reflect.TypeOf(o.d) +} + +func (o *dynamicObject) equal(impl objectImpl) bool { + if other, ok := impl.(*dynamicObject); ok { + return o.d == other.d + } + return false +} + +func (o *dynamicObject) ownKeys(all bool, accum []Value) []Value { + keys := o.d.Keys() + if l := len(accum) + len(keys); l > cap(accum) { + oldAccum := accum + accum = make([]Value, len(accum), l) + copy(accum, oldAccum) + } + for _, key := range keys { + accum = append(accum, newStringValue(key)) + } + return accum +} + +func 
(*baseDynamicObject) ownSymbols(all bool, accum []Value) []Value { + return accum +} + +func (o *dynamicObject) ownPropertyKeys(all bool, accum []Value) []Value { + return o.ownKeys(all, accum) +} + +func (*baseDynamicObject) _putProp(name unistring.String, value Value, writable, enumerable, configurable bool) Value { + return nil +} + +func (*baseDynamicObject) _putSym(s *Symbol, prop Value) { +} + +func (a *dynamicArray) sortLen() int64 { + return int64(a.a.Len()) +} + +func (a *dynamicArray) sortGet(i int64) Value { + return a.a.Get(int(i)) +} + +func (a *dynamicArray) swap(i int64, j int64) { + x := a.sortGet(i) + y := a.sortGet(j) + a.a.Set(int(i), y) + a.a.Set(int(j), x) +} + +func (a *dynamicArray) className() string { + return classArray +} + +func (a *dynamicArray) getStr(p unistring.String, receiver Value) Value { + if p == "length" { + return intToValue(int64(a.a.Len())) + } + if idx, ok := strToInt(p); ok { + return a.a.Get(idx) + } + return a.getParentStr(p, receiver) +} + +func (a *dynamicArray) getIdx(p valueInt, receiver Value) Value { + if val := a.getOwnPropIdx(p); val != nil { + return val + } + return a.getParentIdx(p, receiver) +} + +func (a *dynamicArray) getOwnPropStr(u unistring.String) Value { + if u == "length" { + return &valueProperty{ + value: intToValue(int64(a.a.Len())), + writable: true, + } + } + if idx, ok := strToInt(u); ok { + return a.a.Get(idx) + } + return nil +} + +func (a *dynamicArray) getOwnPropIdx(v valueInt) Value { + return a.a.Get(toIntStrict(int64(v))) +} + +func (a *dynamicArray) _setLen(v Value, throw bool) bool { + if a.a.SetLen(toIntStrict(v.ToInteger())) { + return true + } + a.val.runtime.typeErrorResult(throw, "'SetLen' on a dynamic array returned false") + return false +} + +func (a *dynamicArray) setOwnStr(p unistring.String, v Value, throw bool) bool { + if p == "length" { + return a._setLen(v, throw) + } + if idx, ok := strToInt(p); ok { + return a._setIdx(idx, v, throw) + } + 
a.val.runtime.typeErrorResult(throw, "Cannot set property %q on a dynamic array", p.String()) + return false +} + +func (a *dynamicArray) _setIdx(idx int, v Value, throw bool) bool { + if a.a.Set(idx, v) { + return true + } + a.val.runtime.typeErrorResult(throw, "'Set' on a dynamic array returned false") + return false +} + +func (a *dynamicArray) setOwnIdx(p valueInt, v Value, throw bool) bool { + return a._setIdx(toIntStrict(int64(p)), v, throw) +} + +func (a *dynamicArray) setForeignStr(p unistring.String, v, receiver Value, throw bool) (res bool, handled bool) { + return a.setParentForeignStr(p, v, receiver, throw) +} + +func (a *dynamicArray) setForeignIdx(p valueInt, v, receiver Value, throw bool) (res bool, handled bool) { + return a.setParentForeignIdx(p, v, receiver, throw) +} + +func (a *dynamicArray) hasPropertyStr(u unistring.String) bool { + if a.hasOwnPropertyStr(u) { + return true + } + if proto := a.prototype; proto != nil { + return proto.self.hasPropertyStr(u) + } + return false +} + +func (a *dynamicArray) hasPropertyIdx(idx valueInt) bool { + if a.hasOwnPropertyIdx(idx) { + return true + } + if proto := a.prototype; proto != nil { + return proto.self.hasPropertyIdx(idx) + } + return false +} + +func (a *dynamicArray) _has(idx int) bool { + return idx >= 0 && idx < a.a.Len() +} + +func (a *dynamicArray) hasOwnPropertyStr(u unistring.String) bool { + if u == "length" { + return true + } + if idx, ok := strToInt(u); ok { + return a._has(idx) + } + return false +} + +func (a *dynamicArray) hasOwnPropertyIdx(v valueInt) bool { + return a._has(toIntStrict(int64(v))) +} + +func (a *dynamicArray) defineOwnPropertyStr(name unistring.String, desc PropertyDescriptor, throw bool) bool { + if a.checkDynamicObjectPropertyDescr(name, desc, throw) { + if idx, ok := strToInt(name); ok { + return a._setIdx(idx, desc.Value, throw) + } + a.val.runtime.typeErrorResult(throw, "Cannot define property %q on a dynamic array", name.String()) + } + return false +} + +func 
(a *dynamicArray) defineOwnPropertyIdx(name valueInt, desc PropertyDescriptor, throw bool) bool { + if a.checkDynamicObjectPropertyDescr(name, desc, throw) { + return a._setIdx(toIntStrict(int64(name)), desc.Value, throw) + } + return false +} + +func (a *dynamicArray) _delete(idx int, throw bool) bool { + if a._has(idx) { + a._setIdx(idx, _undefined, throw) + } + return true +} + +func (a *dynamicArray) deleteStr(name unistring.String, throw bool) bool { + if idx, ok := strToInt(name); ok { + return a._delete(idx, throw) + } + if a.hasOwnPropertyStr(name) { + a.val.runtime.typeErrorResult(throw, "Cannot delete property %q on a dynamic array", name.String()) + return false + } + return true +} + +func (a *dynamicArray) deleteIdx(idx valueInt, throw bool) bool { + return a._delete(toIntStrict(int64(idx)), throw) +} + +type dynArrayPropIter struct { + a DynamicArray + idx, limit int +} + +func (i *dynArrayPropIter) next() (propIterItem, iterNextFunc) { + if i.idx < i.limit && i.idx < i.a.Len() { + name := strconv.Itoa(i.idx) + i.idx++ + return propIterItem{name: unistring.String(name), enumerable: _ENUM_TRUE}, i.next + } + + return propIterItem{}, nil +} + +func (a *dynamicArray) enumerateOwnKeys() iterNextFunc { + return (&dynArrayPropIter{ + a: a.a, + limit: a.a.Len(), + }).next +} + +func (a *dynamicArray) export(ctx *objectExportCtx) interface{} { + return a.a +} + +func (a *dynamicArray) exportType() reflect.Type { + return reflect.TypeOf(a.a) +} + +func (a *dynamicArray) equal(impl objectImpl) bool { + if other, ok := impl.(*dynamicArray); ok { + return a == other + } + return false +} + +func (a *dynamicArray) ownKeys(all bool, accum []Value) []Value { + al := a.a.Len() + l := len(accum) + al + if all { + l++ + } + if l > cap(accum) { + oldAccum := accum + accum = make([]Value, len(oldAccum), l) + copy(accum, oldAccum) + } + for i := 0; i < al; i++ { + accum = append(accum, asciiString(strconv.Itoa(i))) + } + if all { + accum = append(accum, 
asciiString("length")) + } + return accum +} + +func (a *dynamicArray) ownPropertyKeys(all bool, accum []Value) []Value { + return a.ownKeys(all, accum) +} diff --git a/vendor/github.com/dop251/goja/object_gomap.go b/vendor/github.com/dop251/goja/object_gomap.go new file mode 100644 index 0000000000..b578b768b1 --- /dev/null +++ b/vendor/github.com/dop251/goja/object_gomap.go @@ -0,0 +1,176 @@ +package goja + +import ( + "reflect" + + "github.com/dop251/goja/unistring" +) + +type objectGoMapSimple struct { + baseObject + data map[string]interface{} +} + +func (o *objectGoMapSimple) init() { + o.baseObject.init() + o.prototype = o.val.runtime.global.ObjectPrototype + o.class = classObject + o.extensible = true +} + +func (o *objectGoMapSimple) _getStr(name string) Value { + v, exists := o.data[name] + if !exists { + return nil + } + return o.val.runtime.ToValue(v) +} + +func (o *objectGoMapSimple) getStr(name unistring.String, receiver Value) Value { + if v := o._getStr(name.String()); v != nil { + return v + } + return o.baseObject.getStr(name, receiver) +} + +func (o *objectGoMapSimple) getOwnPropStr(name unistring.String) Value { + if v := o._getStr(name.String()); v != nil { + return v + } + return nil +} + +func (o *objectGoMapSimple) setOwnStr(name unistring.String, val Value, throw bool) bool { + n := name.String() + if _, exists := o.data[n]; exists { + o.data[n] = val.Export() + return true + } + if proto := o.prototype; proto != nil { + // we know it's foreign because prototype loops are not allowed + if res, ok := proto.self.setForeignStr(name, val, o.val, throw); ok { + return res + } + } + // new property + if !o.extensible { + o.val.runtime.typeErrorResult(throw, "Cannot add property %s, object is not extensible", name) + return false + } else { + o.data[n] = val.Export() + } + return true +} + +func trueValIfPresent(present bool) Value { + if present { + return valueTrue + } + return nil +} + +func (o *objectGoMapSimple) setForeignStr(name 
unistring.String, val, receiver Value, throw bool) (bool, bool) { + return o._setForeignStr(name, trueValIfPresent(o._hasStr(name.String())), val, receiver, throw) +} + +func (o *objectGoMapSimple) _hasStr(name string) bool { + _, exists := o.data[name] + return exists +} + +func (o *objectGoMapSimple) hasOwnPropertyStr(name unistring.String) bool { + return o._hasStr(name.String()) +} + +func (o *objectGoMapSimple) defineOwnPropertyStr(name unistring.String, descr PropertyDescriptor, throw bool) bool { + if !o.val.runtime.checkHostObjectPropertyDescr(name, descr, throw) { + return false + } + + n := name.String() + if o.extensible || o._hasStr(n) { + o.data[n] = descr.Value.Export() + return true + } + + o.val.runtime.typeErrorResult(throw, "Cannot define property %s, object is not extensible", n) + return false +} + +/* +func (o *objectGoMapSimple) toPrimitiveNumber() Value { + return o.toPrimitiveString() +} + +func (o *objectGoMapSimple) toPrimitiveString() Value { + return stringObjectObject +} + +func (o *objectGoMapSimple) toPrimitive() Value { + return o.toPrimitiveString() +} + +func (o *objectGoMapSimple) assertCallable() (call func(FunctionCall) Value, ok bool) { + return nil, false +} +*/ + +func (o *objectGoMapSimple) deleteStr(name unistring.String, _ bool) bool { + delete(o.data, name.String()) + return true +} + +type gomapPropIter struct { + o *objectGoMapSimple + propNames []string + idx int +} + +func (i *gomapPropIter) next() (propIterItem, iterNextFunc) { + for i.idx < len(i.propNames) { + name := i.propNames[i.idx] + i.idx++ + if _, exists := i.o.data[name]; exists { + return propIterItem{name: unistring.NewFromString(name), enumerable: _ENUM_TRUE}, i.next + } + } + + return propIterItem{}, nil +} + +func (o *objectGoMapSimple) enumerateOwnKeys() iterNextFunc { + propNames := make([]string, len(o.data)) + i := 0 + for key := range o.data { + propNames[i] = key + i++ + } + + return (&gomapPropIter{ + o: o, + propNames: propNames, + }).next +} + 
+func (o *objectGoMapSimple) ownKeys(_ bool, accum []Value) []Value { + // all own keys are enumerable + for key := range o.data { + accum = append(accum, newStringValue(key)) + } + return accum +} + +func (o *objectGoMapSimple) export(*objectExportCtx) interface{} { + return o.data +} + +func (o *objectGoMapSimple) exportType() reflect.Type { + return reflectTypeMap +} + +func (o *objectGoMapSimple) equal(other objectImpl) bool { + if other, ok := other.(*objectGoMapSimple); ok { + return o == other + } + return false +} diff --git a/vendor/github.com/dop251/goja/object_gomap_reflect.go b/vendor/github.com/dop251/goja/object_gomap_reflect.go new file mode 100644 index 0000000000..29f9dfb92b --- /dev/null +++ b/vendor/github.com/dop251/goja/object_gomap_reflect.go @@ -0,0 +1,273 @@ +package goja + +import ( + "reflect" + + "github.com/dop251/goja/unistring" +) + +type objectGoMapReflect struct { + objectGoReflect + + keyType, valueType reflect.Type +} + +func (o *objectGoMapReflect) init() { + o.objectGoReflect.init() + o.keyType = o.value.Type().Key() + o.valueType = o.value.Type().Elem() +} + +func (o *objectGoMapReflect) toKey(n Value, throw bool) reflect.Value { + key := reflect.New(o.keyType).Elem() + err := o.val.runtime.toReflectValue(n, key, &objectExportCtx{}) + if err != nil { + o.val.runtime.typeErrorResult(throw, "map key conversion error: %v", err) + return reflect.Value{} + } + return key +} + +func (o *objectGoMapReflect) strToKey(name string, throw bool) reflect.Value { + if o.keyType.Kind() == reflect.String { + return reflect.ValueOf(name).Convert(o.keyType) + } + return o.toKey(newStringValue(name), throw) +} + +func (o *objectGoMapReflect) _get(n Value) Value { + key := o.toKey(n, false) + if !key.IsValid() { + return nil + } + if v := o.value.MapIndex(key); v.IsValid() { + return o.val.runtime.ToValue(v.Interface()) + } + + return nil +} + +func (o *objectGoMapReflect) _getStr(name string) Value { + key := o.strToKey(name, false) + if 
!key.IsValid() { + return nil + } + if v := o.value.MapIndex(key); v.IsValid() { + return o.val.runtime.ToValue(v.Interface()) + } + + return nil +} + +func (o *objectGoMapReflect) getStr(name unistring.String, receiver Value) Value { + if v := o._getStr(name.String()); v != nil { + return v + } + return o.objectGoReflect.getStr(name, receiver) +} + +func (o *objectGoMapReflect) getIdx(idx valueInt, receiver Value) Value { + if v := o._get(idx); v != nil { + return v + } + return o.objectGoReflect.getIdx(idx, receiver) +} + +func (o *objectGoMapReflect) getOwnPropStr(name unistring.String) Value { + if v := o._getStr(name.String()); v != nil { + return &valueProperty{ + value: v, + writable: true, + enumerable: true, + } + } + return o.objectGoReflect.getOwnPropStr(name) +} + +func (o *objectGoMapReflect) getOwnPropIdx(idx valueInt) Value { + if v := o._get(idx); v != nil { + return &valueProperty{ + value: v, + writable: true, + enumerable: true, + } + } + return o.objectGoReflect.getOwnPropStr(idx.string()) +} + +func (o *objectGoMapReflect) toValue(val Value, throw bool) (reflect.Value, bool) { + v := reflect.New(o.valueType).Elem() + err := o.val.runtime.toReflectValue(val, v, &objectExportCtx{}) + if err != nil { + o.val.runtime.typeErrorResult(throw, "map value conversion error: %v", err) + return reflect.Value{}, false + } + + return v, true +} + +func (o *objectGoMapReflect) _put(key reflect.Value, val Value, throw bool) bool { + if key.IsValid() { + if o.extensible || o.value.MapIndex(key).IsValid() { + v, ok := o.toValue(val, throw) + if !ok { + return false + } + o.value.SetMapIndex(key, v) + } else { + o.val.runtime.typeErrorResult(throw, "Cannot set property %s, object is not extensible", key.String()) + return false + } + return true + } + return false +} + +func (o *objectGoMapReflect) setOwnStr(name unistring.String, val Value, throw bool) bool { + n := name.String() + key := o.strToKey(n, false) + if !key.IsValid() || 
!o.value.MapIndex(key).IsValid() { + if proto := o.prototype; proto != nil { + // we know it's foreign because prototype loops are not allowed + if res, ok := proto.self.setForeignStr(name, val, o.val, throw); ok { + return res + } + } + // new property + if !o.extensible { + o.val.runtime.typeErrorResult(throw, "Cannot add property %s, object is not extensible", n) + return false + } else { + if throw && !key.IsValid() { + o.strToKey(n, true) + return false + } + } + } + o._put(key, val, throw) + return true +} + +func (o *objectGoMapReflect) setOwnIdx(idx valueInt, val Value, throw bool) bool { + key := o.toKey(idx, false) + if !key.IsValid() || !o.value.MapIndex(key).IsValid() { + if proto := o.prototype; proto != nil { + // we know it's foreign because prototype loops are not allowed + if res, ok := proto.self.setForeignIdx(idx, val, o.val, throw); ok { + return res + } + } + // new property + if !o.extensible { + o.val.runtime.typeErrorResult(throw, "Cannot add property %d, object is not extensible", idx) + return false + } else { + if throw && !key.IsValid() { + o.toKey(idx, true) + return false + } + } + } + o._put(key, val, throw) + return true +} + +func (o *objectGoMapReflect) setForeignStr(name unistring.String, val, receiver Value, throw bool) (bool, bool) { + return o._setForeignStr(name, trueValIfPresent(o.hasOwnPropertyStr(name)), val, receiver, throw) +} + +func (o *objectGoMapReflect) setForeignIdx(idx valueInt, val, receiver Value, throw bool) (bool, bool) { + return o._setForeignIdx(idx, trueValIfPresent(o.hasOwnPropertyIdx(idx)), val, receiver, throw) +} + +func (o *objectGoMapReflect) defineOwnPropertyStr(name unistring.String, descr PropertyDescriptor, throw bool) bool { + if !o.val.runtime.checkHostObjectPropertyDescr(name, descr, throw) { + return false + } + + return o._put(o.strToKey(name.String(), throw), descr.Value, throw) +} + +func (o *objectGoMapReflect) defineOwnPropertyIdx(idx valueInt, descr PropertyDescriptor, throw bool) bool { 
+ if !o.val.runtime.checkHostObjectPropertyDescr(idx.string(), descr, throw) { + return false + } + + return o._put(o.toKey(idx, throw), descr.Value, throw) +} + +func (o *objectGoMapReflect) hasOwnPropertyStr(name unistring.String) bool { + key := o.strToKey(name.String(), false) + if key.IsValid() && o.value.MapIndex(key).IsValid() { + return true + } + return false +} + +func (o *objectGoMapReflect) hasOwnPropertyIdx(idx valueInt) bool { + key := o.toKey(idx, false) + if key.IsValid() && o.value.MapIndex(key).IsValid() { + return true + } + return false +} + +func (o *objectGoMapReflect) deleteStr(name unistring.String, throw bool) bool { + key := o.strToKey(name.String(), throw) + if !key.IsValid() { + return false + } + o.value.SetMapIndex(key, reflect.Value{}) + return true +} + +func (o *objectGoMapReflect) deleteIdx(idx valueInt, throw bool) bool { + key := o.toKey(idx, throw) + if !key.IsValid() { + return false + } + o.value.SetMapIndex(key, reflect.Value{}) + return true +} + +type gomapReflectPropIter struct { + o *objectGoMapReflect + keys []reflect.Value + idx int +} + +func (i *gomapReflectPropIter) next() (propIterItem, iterNextFunc) { + for i.idx < len(i.keys) { + key := i.keys[i.idx] + v := i.o.value.MapIndex(key) + i.idx++ + if v.IsValid() { + return propIterItem{name: unistring.NewFromString(key.String()), enumerable: _ENUM_TRUE}, i.next + } + } + + return propIterItem{}, nil +} + +func (o *objectGoMapReflect) enumerateOwnKeys() iterNextFunc { + return (&gomapReflectPropIter{ + o: o, + keys: o.value.MapKeys(), + }).next +} + +func (o *objectGoMapReflect) ownKeys(_ bool, accum []Value) []Value { + // all own keys are enumerable + for _, key := range o.value.MapKeys() { + accum = append(accum, newStringValue(key.String())) + } + + return accum +} + +func (o *objectGoMapReflect) equal(other objectImpl) bool { + if other, ok := other.(*objectGoMapReflect); ok { + return o.value.Interface() == other.value.Interface() + } + return false +} diff --git 
a/vendor/github.com/dop251/goja/object_goreflect.go b/vendor/github.com/dop251/goja/object_goreflect.go new file mode 100644 index 0000000000..6ac9b86ba7 --- /dev/null +++ b/vendor/github.com/dop251/goja/object_goreflect.go @@ -0,0 +1,539 @@ +package goja + +import ( + "fmt" + "go/ast" + "reflect" + "strings" + + "github.com/dop251/goja/parser" + "github.com/dop251/goja/unistring" +) + +// JsonEncodable allows custom JSON encoding by JSON.stringify() +// Note that if the returned value itself also implements JsonEncodable, it won't have any effect. +type JsonEncodable interface { + JsonEncodable() interface{} +} + +// FieldNameMapper provides custom mapping between Go and JavaScript property names. +type FieldNameMapper interface { + // FieldName returns a JavaScript name for the given struct field in the given type. + // If this method returns "" the field becomes hidden. + FieldName(t reflect.Type, f reflect.StructField) string + + // MethodName returns a JavaScript name for the given method in the given type. + // If this method returns "" the method becomes hidden. 
+ MethodName(t reflect.Type, m reflect.Method) string +} + +type tagFieldNameMapper struct { + tagName string + uncapMethods bool +} + +func (tfm tagFieldNameMapper) FieldName(_ reflect.Type, f reflect.StructField) string { + tag := f.Tag.Get(tfm.tagName) + if idx := strings.IndexByte(tag, ','); idx != -1 { + tag = tag[:idx] + } + if parser.IsIdentifier(tag) { + return tag + } + return "" +} + +func uncapitalize(s string) string { + return strings.ToLower(s[0:1]) + s[1:] +} + +func (tfm tagFieldNameMapper) MethodName(_ reflect.Type, m reflect.Method) string { + if tfm.uncapMethods { + return uncapitalize(m.Name) + } + return m.Name +} + +type uncapFieldNameMapper struct { +} + +func (u uncapFieldNameMapper) FieldName(_ reflect.Type, f reflect.StructField) string { + return uncapitalize(f.Name) +} + +func (u uncapFieldNameMapper) MethodName(_ reflect.Type, m reflect.Method) string { + return uncapitalize(m.Name) +} + +type reflectFieldInfo struct { + Index []int + Anonymous bool +} + +type reflectTypeInfo struct { + Fields map[string]reflectFieldInfo + Methods map[string]int + FieldNames, MethodNames []string +} + +type objectGoReflect struct { + baseObject + origValue, value reflect.Value + + valueTypeInfo, origValueTypeInfo *reflectTypeInfo + + toJson func() interface{} +} + +func (o *objectGoReflect) init() { + o.baseObject.init() + switch o.value.Kind() { + case reflect.Bool: + o.class = classBoolean + o.prototype = o.val.runtime.global.BooleanPrototype + case reflect.String: + o.class = classString + o.prototype = o.val.runtime.global.StringPrototype + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float32, reflect.Float64: + + o.class = classNumber + o.prototype = o.val.runtime.global.NumberPrototype + default: + o.class = classObject + o.prototype = o.val.runtime.global.ObjectPrototype + } + o.extensible = true + + 
o.baseObject._putProp("toString", o.val.runtime.newNativeFunc(o.toStringFunc, nil, "toString", nil, 0), true, false, true) + o.baseObject._putProp("valueOf", o.val.runtime.newNativeFunc(o.valueOfFunc, nil, "valueOf", nil, 0), true, false, true) + + o.valueTypeInfo = o.val.runtime.typeInfo(o.value.Type()) + o.origValueTypeInfo = o.val.runtime.typeInfo(o.origValue.Type()) + + if j, ok := o.origValue.Interface().(JsonEncodable); ok { + o.toJson = j.JsonEncodable + } +} + +func (o *objectGoReflect) toStringFunc(FunctionCall) Value { + return o.toPrimitiveString() +} + +func (o *objectGoReflect) valueOfFunc(FunctionCall) Value { + return o.toPrimitive() +} + +func (o *objectGoReflect) getStr(name unistring.String, receiver Value) Value { + if v := o._get(name.String()); v != nil { + return v + } + return o.baseObject.getStr(name, receiver) +} + +func (o *objectGoReflect) _getField(jsName string) reflect.Value { + if info, exists := o.valueTypeInfo.Fields[jsName]; exists { + v := o.value.FieldByIndex(info.Index) + return v + } + + return reflect.Value{} +} + +func (o *objectGoReflect) _getMethod(jsName string) reflect.Value { + if idx, exists := o.origValueTypeInfo.Methods[jsName]; exists { + return o.origValue.Method(idx) + } + + return reflect.Value{} +} + +func (o *objectGoReflect) getAddr(v reflect.Value) reflect.Value { + if (v.Kind() == reflect.Struct || v.Kind() == reflect.Slice) && v.CanAddr() { + return v.Addr() + } + return v +} + +func (o *objectGoReflect) _get(name string) Value { + if o.value.Kind() == reflect.Struct { + if v := o._getField(name); v.IsValid() { + return o.val.runtime.ToValue(o.getAddr(v).Interface()) + } + } + + if v := o._getMethod(name); v.IsValid() { + return o.val.runtime.ToValue(v.Interface()) + } + + return nil +} + +func (o *objectGoReflect) getOwnPropStr(name unistring.String) Value { + n := name.String() + if o.value.Kind() == reflect.Struct { + if v := o._getField(n); v.IsValid() { + return &valueProperty{ + value: 
o.val.runtime.ToValue(o.getAddr(v).Interface()), + writable: v.CanSet(), + enumerable: true, + } + } + } + + if v := o._getMethod(n); v.IsValid() { + return &valueProperty{ + value: o.val.runtime.ToValue(v.Interface()), + enumerable: true, + } + } + + return nil +} + +func (o *objectGoReflect) setOwnStr(name unistring.String, val Value, throw bool) bool { + has, ok := o._put(name.String(), val, throw) + if !has { + if res, ok := o._setForeignStr(name, nil, val, o.val, throw); !ok { + o.val.runtime.typeErrorResult(throw, "Cannot assign to property %s of a host object", name) + return false + } else { + return res + } + } + return ok +} + +func (o *objectGoReflect) setForeignStr(name unistring.String, val, receiver Value, throw bool) (bool, bool) { + return o._setForeignStr(name, trueValIfPresent(o._has(name.String())), val, receiver, throw) +} + +func (o *objectGoReflect) setForeignIdx(idx valueInt, val, receiver Value, throw bool) (bool, bool) { + return o._setForeignIdx(idx, nil, val, receiver, throw) +} + +func (o *objectGoReflect) _put(name string, val Value, throw bool) (has, ok bool) { + if o.value.Kind() == reflect.Struct { + if v := o._getField(name); v.IsValid() { + if !v.CanSet() { + o.val.runtime.typeErrorResult(throw, "Cannot assign to a non-addressable or read-only property %s of a host object", name) + return true, false + } + err := o.val.runtime.toReflectValue(val, v, &objectExportCtx{}) + if err != nil { + o.val.runtime.typeErrorResult(throw, "Go struct conversion error: %v", err) + return true, false + } + return true, true + } + } + return false, false +} + +func (o *objectGoReflect) _putProp(name unistring.String, value Value, writable, enumerable, configurable bool) Value { + if _, ok := o._put(name.String(), value, false); ok { + return value + } + return o.baseObject._putProp(name, value, writable, enumerable, configurable) +} + +func (r *Runtime) checkHostObjectPropertyDescr(name unistring.String, descr PropertyDescriptor, throw bool) bool { 
+ if descr.Getter != nil || descr.Setter != nil { + r.typeErrorResult(throw, "Host objects do not support accessor properties") + return false + } + if descr.Writable == FLAG_FALSE { + r.typeErrorResult(throw, "Host object field %s cannot be made read-only", name) + return false + } + if descr.Configurable == FLAG_TRUE { + r.typeErrorResult(throw, "Host object field %s cannot be made configurable", name) + return false + } + return true +} + +func (o *objectGoReflect) defineOwnPropertyStr(name unistring.String, descr PropertyDescriptor, throw bool) bool { + if o.val.runtime.checkHostObjectPropertyDescr(name, descr, throw) { + n := name.String() + if has, ok := o._put(n, descr.Value, throw); !has { + o.val.runtime.typeErrorResult(throw, "Cannot define property '%s' on a host object", n) + return false + } else { + return ok + } + } + return false +} + +func (o *objectGoReflect) _has(name string) bool { + if o.value.Kind() == reflect.Struct { + if v := o._getField(name); v.IsValid() { + return true + } + } + if v := o._getMethod(name); v.IsValid() { + return true + } + return false +} + +func (o *objectGoReflect) hasOwnPropertyStr(name unistring.String) bool { + return o._has(name.String()) +} + +func (o *objectGoReflect) _toNumber() Value { + switch o.value.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return intToValue(o.value.Int()) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return intToValue(int64(o.value.Uint())) + case reflect.Bool: + if o.value.Bool() { + return intToValue(1) + } else { + return intToValue(0) + } + case reflect.Float32, reflect.Float64: + return floatToValue(o.value.Float()) + } + return nil +} + +func (o *objectGoReflect) _toString() Value { + switch o.value.Kind() { + case reflect.String: + return newStringValue(o.value.String()) + case reflect.Bool: + if o.value.Interface().(bool) { + return stringTrue + } else { + return stringFalse + } + } + switch v := 
o.origValue.Interface().(type) { + case fmt.Stringer: + return newStringValue(v.String()) + case error: + return newStringValue(v.Error()) + } + + return stringObjectObject +} + +func (o *objectGoReflect) toPrimitiveNumber() Value { + if v := o._toNumber(); v != nil { + return v + } + return o._toString() +} + +func (o *objectGoReflect) toPrimitiveString() Value { + if v := o._toNumber(); v != nil { + return v.toString() + } + return o._toString() +} + +func (o *objectGoReflect) toPrimitive() Value { + if o.prototype == o.val.runtime.global.NumberPrototype { + return o.toPrimitiveNumber() + } + return o.toPrimitiveString() +} + +func (o *objectGoReflect) deleteStr(name unistring.String, throw bool) bool { + n := name.String() + if o._has(n) { + o.val.runtime.typeErrorResult(throw, "Cannot delete property %s from a Go type", n) + return false + } + return o.baseObject.deleteStr(name, throw) +} + +type goreflectPropIter struct { + o *objectGoReflect + idx int +} + +func (i *goreflectPropIter) nextField() (propIterItem, iterNextFunc) { + names := i.o.valueTypeInfo.FieldNames + if i.idx < len(names) { + name := names[i.idx] + i.idx++ + return propIterItem{name: unistring.NewFromString(name), enumerable: _ENUM_TRUE}, i.nextField + } + + i.idx = 0 + return i.nextMethod() +} + +func (i *goreflectPropIter) nextMethod() (propIterItem, iterNextFunc) { + names := i.o.origValueTypeInfo.MethodNames + if i.idx < len(names) { + name := names[i.idx] + i.idx++ + return propIterItem{name: unistring.NewFromString(name), enumerable: _ENUM_TRUE}, i.nextMethod + } + + return propIterItem{}, nil +} + +func (o *objectGoReflect) enumerateOwnKeys() iterNextFunc { + r := &goreflectPropIter{ + o: o, + } + if o.value.Kind() == reflect.Struct { + return r.nextField + } + + return r.nextMethod +} + +func (o *objectGoReflect) ownKeys(_ bool, accum []Value) []Value { + // all own keys are enumerable + for _, name := range o.valueTypeInfo.FieldNames { + accum = append(accum, newStringValue(name)) + 
} + + for _, name := range o.valueTypeInfo.MethodNames { + accum = append(accum, newStringValue(name)) + } + + return accum +} + +func (o *objectGoReflect) export(*objectExportCtx) interface{} { + return o.origValue.Interface() +} + +func (o *objectGoReflect) exportType() reflect.Type { + return o.origValue.Type() +} + +func (o *objectGoReflect) equal(other objectImpl) bool { + if other, ok := other.(*objectGoReflect); ok { + return o.value.Interface() == other.value.Interface() + } + return false +} + +func (r *Runtime) buildFieldInfo(t reflect.Type, index []int, info *reflectTypeInfo) { + n := t.NumField() + for i := 0; i < n; i++ { + field := t.Field(i) + name := field.Name + if !ast.IsExported(name) { + continue + } + if r.fieldNameMapper != nil { + name = r.fieldNameMapper.FieldName(t, field) + } + + if name != "" { + if inf, exists := info.Fields[name]; !exists { + info.FieldNames = append(info.FieldNames, name) + } else { + if len(inf.Index) <= len(index) { + continue + } + } + } + + if name != "" || field.Anonymous { + idx := make([]int, len(index)+1) + copy(idx, index) + idx[len(idx)-1] = i + + if name != "" { + info.Fields[name] = reflectFieldInfo{ + Index: idx, + Anonymous: field.Anonymous, + } + } + if field.Anonymous { + typ := field.Type + for typ.Kind() == reflect.Ptr { + typ = typ.Elem() + } + if typ.Kind() == reflect.Struct { + r.buildFieldInfo(typ, idx, info) + } + } + } + } +} + +func (r *Runtime) buildTypeInfo(t reflect.Type) (info *reflectTypeInfo) { + info = new(reflectTypeInfo) + if t.Kind() == reflect.Struct { + info.Fields = make(map[string]reflectFieldInfo) + n := t.NumField() + info.FieldNames = make([]string, 0, n) + r.buildFieldInfo(t, nil, info) + } + + info.Methods = make(map[string]int) + n := t.NumMethod() + info.MethodNames = make([]string, 0, n) + for i := 0; i < n; i++ { + method := t.Method(i) + name := method.Name + if !ast.IsExported(name) { + continue + } + if r.fieldNameMapper != nil { + name = 
r.fieldNameMapper.MethodName(t, method) + if name == "" { + continue + } + } + + if _, exists := info.Methods[name]; !exists { + info.MethodNames = append(info.MethodNames, name) + } + + info.Methods[name] = i + } + return +} + +func (r *Runtime) typeInfo(t reflect.Type) (info *reflectTypeInfo) { + var exists bool + if info, exists = r.typeInfoCache[t]; !exists { + info = r.buildTypeInfo(t) + if r.typeInfoCache == nil { + r.typeInfoCache = make(map[reflect.Type]*reflectTypeInfo) + } + r.typeInfoCache[t] = info + } + + return +} + +// SetFieldNameMapper sets a custom field name mapper for Go types. It can be called at any time, however +// the mapping for any given value is fixed at the point of creation. +// Setting this to nil restores the default behaviour which is all exported fields and methods are mapped to their +// original unchanged names. +func (r *Runtime) SetFieldNameMapper(mapper FieldNameMapper) { + r.fieldNameMapper = mapper + r.typeInfoCache = nil +} + +// TagFieldNameMapper returns a FieldNameMapper that uses the given tagName for struct fields and optionally +// uncapitalises (making the first letter lower case) method names. +// The common tag value syntax is supported (name[,options]), however options are ignored. +// Setting name to anything other than a valid ECMAScript identifier makes the field hidden. +func TagFieldNameMapper(tagName string, uncapMethods bool) FieldNameMapper { + return tagFieldNameMapper{ + tagName: tagName, + uncapMethods: uncapMethods, + } +} + +// UncapFieldNameMapper returns a FieldNameMapper that uncapitalises struct field and method names +// making the first letter lower case. 
+func UncapFieldNameMapper() FieldNameMapper { + return uncapFieldNameMapper{} +} diff --git a/vendor/github.com/dop251/goja/object_goslice.go b/vendor/github.com/dop251/goja/object_goslice.go new file mode 100644 index 0000000000..0c2b2322b3 --- /dev/null +++ b/vendor/github.com/dop251/goja/object_goslice.go @@ -0,0 +1,325 @@ +package goja + +import ( + "reflect" + "strconv" + + "github.com/dop251/goja/unistring" +) + +type objectGoSlice struct { + baseObject + data *[]interface{} + lengthProp valueProperty +} + +func (o *objectGoSlice) init() { + o.baseObject.init() + o.class = classArray + o.prototype = o.val.runtime.global.ArrayPrototype + o.lengthProp.writable = true + o.extensible = true + o.updateLen() + o.baseObject._put("length", &o.lengthProp) +} + +func (o *objectGoSlice) updateLen() { + o.lengthProp.value = intToValue(int64(len(*o.data))) +} + +func (o *objectGoSlice) getStr(name unistring.String, receiver Value) Value { + var ownProp Value + if idx := strToGoIdx(name); idx >= 0 && idx < len(*o.data) { + v := (*o.data)[idx] + ownProp = o.val.runtime.ToValue(v) + } else if name == "length" { + ownProp = &o.lengthProp + } + + return o.getStrWithOwnProp(ownProp, name, receiver) +} + +func (o *objectGoSlice) getIdx(idx valueInt, receiver Value) Value { + if idx := int64(idx); idx >= 0 && idx < int64(len(*o.data)) { + v := (*o.data)[idx] + return o.val.runtime.ToValue(v) + } + if o.prototype != nil { + if receiver == nil { + return o.prototype.self.getIdx(idx, o.val) + } + return o.prototype.self.getIdx(idx, receiver) + } + return nil +} + +func (o *objectGoSlice) getOwnPropStr(name unistring.String) Value { + if idx := strToGoIdx(name); idx >= 0 { + if idx < len(*o.data) { + v := o.val.runtime.ToValue((*o.data)[idx]) + return &valueProperty{ + value: v, + writable: true, + enumerable: true, + } + } + return nil + } + if name == "length" { + return &o.lengthProp + } + return nil +} + +func (o *objectGoSlice) getOwnPropIdx(idx valueInt) Value { + if idx := 
int64(idx); idx >= 0 && idx < int64(len(*o.data)) { + v := o.val.runtime.ToValue((*o.data)[idx]) + return &valueProperty{ + value: v, + writable: true, + enumerable: true, + } + } + return nil +} + +func (o *objectGoSlice) grow(size int) { + oldcap := cap(*o.data) + if oldcap < size { + n := make([]interface{}, size, growCap(size, len(*o.data), oldcap)) + copy(n, *o.data) + *o.data = n + } else { + tail := (*o.data)[len(*o.data):size] + for k := range tail { + tail[k] = nil + } + *o.data = (*o.data)[:size] + } + o.updateLen() +} + +func (o *objectGoSlice) shrink(size int) { + tail := (*o.data)[size:] + for k := range tail { + tail[k] = nil + } + *o.data = (*o.data)[:size] + o.updateLen() +} + +func (o *objectGoSlice) putIdx(idx int, v Value, throw bool) { + if idx >= len(*o.data) { + o.grow(idx + 1) + } + (*o.data)[idx] = v.Export() +} + +func (o *objectGoSlice) putLength(v Value, throw bool) bool { + newLen := toIntStrict(toLength(v)) + curLen := len(*o.data) + if newLen > curLen { + o.grow(newLen) + } else if newLen < curLen { + o.shrink(newLen) + } + return true +} + +func (o *objectGoSlice) setOwnIdx(idx valueInt, val Value, throw bool) bool { + if i := toIntStrict(int64(idx)); i >= 0 { + if i >= len(*o.data) { + if res, ok := o._setForeignIdx(idx, nil, val, o.val, throw); ok { + return res + } + } + o.putIdx(i, val, throw) + } else { + name := idx.string() + if res, ok := o._setForeignStr(name, nil, val, o.val, throw); !ok { + o.val.runtime.typeErrorResult(throw, "Can't set property '%s' on Go slice", name) + return false + } else { + return res + } + } + return true +} + +func (o *objectGoSlice) setOwnStr(name unistring.String, val Value, throw bool) bool { + if idx := strToGoIdx(name); idx >= 0 { + if idx >= len(*o.data) { + if res, ok := o._setForeignStr(name, nil, val, o.val, throw); ok { + return res + } + } + o.putIdx(idx, val, throw) + } else { + if name == "length" { + return o.putLength(val, throw) + } + if res, ok := o._setForeignStr(name, nil, val, 
o.val, throw); !ok { + o.val.runtime.typeErrorResult(throw, "Can't set property '%s' on Go slice", name) + return false + } else { + return res + } + } + return true +} + +func (o *objectGoSlice) setForeignIdx(idx valueInt, val, receiver Value, throw bool) (bool, bool) { + return o._setForeignIdx(idx, trueValIfPresent(o.hasOwnPropertyIdx(idx)), val, receiver, throw) +} + +func (o *objectGoSlice) setForeignStr(name unistring.String, val, receiver Value, throw bool) (bool, bool) { + return o._setForeignStr(name, trueValIfPresent(o.hasOwnPropertyStr(name)), val, receiver, throw) +} + +func (o *objectGoSlice) hasOwnPropertyIdx(idx valueInt) bool { + if idx := int64(idx); idx >= 0 { + return idx < int64(len(*o.data)) + } + return false +} + +func (o *objectGoSlice) hasOwnPropertyStr(name unistring.String) bool { + if idx := strToIdx64(name); idx >= 0 { + return idx < int64(len(*o.data)) + } + return false +} + +func (o *objectGoSlice) defineOwnPropertyIdx(idx valueInt, descr PropertyDescriptor, throw bool) bool { + if i := toIntStrict(int64(idx)); i >= 0 { + if !o.val.runtime.checkHostObjectPropertyDescr(idx.string(), descr, throw) { + return false + } + val := descr.Value + if val == nil { + val = _undefined + } + o.putIdx(i, val, throw) + return true + } + o.val.runtime.typeErrorResult(throw, "Cannot define property '%d' on a Go slice", idx) + return false +} + +func (o *objectGoSlice) defineOwnPropertyStr(name unistring.String, descr PropertyDescriptor, throw bool) bool { + if idx := strToGoIdx(name); idx >= 0 { + if !o.val.runtime.checkHostObjectPropertyDescr(name, descr, throw) { + return false + } + val := descr.Value + if val == nil { + val = _undefined + } + o.putIdx(idx, val, throw) + return true + } + if name == "length" { + return o.val.runtime.defineArrayLength(&o.lengthProp, descr, o.putLength, throw) + } + o.val.runtime.typeErrorResult(throw, "Cannot define property '%s' on a Go slice", name) + return false +} + +func (o *objectGoSlice) toPrimitiveNumber() 
Value { + return o.toPrimitiveString() +} + +func (o *objectGoSlice) toPrimitiveString() Value { + return o.val.runtime.arrayproto_join(FunctionCall{ + This: o.val, + }) +} + +func (o *objectGoSlice) toPrimitive() Value { + return o.toPrimitiveString() +} + +func (o *objectGoSlice) _deleteIdx(idx int64) { + if idx < int64(len(*o.data)) { + (*o.data)[idx] = nil + } +} + +func (o *objectGoSlice) deleteStr(name unistring.String, throw bool) bool { + if idx := strToIdx64(name); idx >= 0 { + o._deleteIdx(idx) + return true + } + return o.baseObject.deleteStr(name, throw) +} + +func (o *objectGoSlice) deleteIdx(i valueInt, throw bool) bool { + idx := int64(i) + if idx >= 0 { + o._deleteIdx(idx) + } + return true +} + +type goslicePropIter struct { + o *objectGoSlice + idx, limit int +} + +func (i *goslicePropIter) next() (propIterItem, iterNextFunc) { + if i.idx < i.limit && i.idx < len(*i.o.data) { + name := strconv.Itoa(i.idx) + i.idx++ + return propIterItem{name: unistring.String(name), enumerable: _ENUM_TRUE}, i.next + } + + return propIterItem{}, nil +} + +func (o *objectGoSlice) enumerateOwnKeys() iterNextFunc { + return (&goslicePropIter{ + o: o, + limit: len(*o.data), + }).next +} + +func (o *objectGoSlice) ownKeys(_ bool, accum []Value) []Value { + for i := range *o.data { + accum = append(accum, asciiString(strconv.Itoa(i))) + } + + return accum +} + +func (o *objectGoSlice) export(*objectExportCtx) interface{} { + return *o.data +} + +func (o *objectGoSlice) exportType() reflect.Type { + return reflectTypeArray +} + +func (o *objectGoSlice) equal(other objectImpl) bool { + if other, ok := other.(*objectGoSlice); ok { + return o.data == other.data + } + return false +} + +func (o *objectGoSlice) sortLen() int64 { + return int64(len(*o.data)) +} + +func (o *objectGoSlice) sortGet(i int64) Value { + return o.getIdx(valueInt(i), nil) +} + +func (o *objectGoSlice) swap(i, j int64) { + ii := valueInt(i) + jj := valueInt(j) + x := o.getIdx(ii, nil) + y := 
o.getIdx(jj, nil) + + o.setOwnIdx(ii, y, false) + o.setOwnIdx(jj, x, false) +} diff --git a/vendor/github.com/dop251/goja/object_goslice_reflect.go b/vendor/github.com/dop251/goja/object_goslice_reflect.go new file mode 100644 index 0000000000..658c65f9b5 --- /dev/null +++ b/vendor/github.com/dop251/goja/object_goslice_reflect.go @@ -0,0 +1,335 @@ +package goja + +import ( + "reflect" + "strconv" + + "github.com/dop251/goja/unistring" +) + +type objectGoSliceReflect struct { + objectGoReflect + lengthProp valueProperty +} + +func (o *objectGoSliceReflect) init() { + o.objectGoReflect.init() + o.class = classArray + o.prototype = o.val.runtime.global.ArrayPrototype + if !o.value.CanSet() { + value := reflect.Indirect(reflect.New(o.value.Type())) + value.Set(o.value) + o.value = value + } + o.lengthProp.writable = true + o.updateLen() + o.baseObject._put("length", &o.lengthProp) +} + +func (o *objectGoSliceReflect) updateLen() { + o.lengthProp.value = intToValue(int64(o.value.Len())) +} + +func (o *objectGoSliceReflect) _hasIdx(idx valueInt) bool { + if idx := int64(idx); idx >= 0 && idx < int64(o.value.Len()) { + return true + } + return false +} + +func (o *objectGoSliceReflect) _hasStr(name unistring.String) bool { + if idx := strToIdx64(name); idx >= 0 && idx < int64(o.value.Len()) { + return true + } + return false +} + +func (o *objectGoSliceReflect) _getIdx(idx int) Value { + v := o.value.Index(idx) + if (v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface) && v.IsNil() { + return _null + } + return o.val.runtime.ToValue(v.Interface()) +} + +func (o *objectGoSliceReflect) getIdx(idx valueInt, receiver Value) Value { + if idx := toIntStrict(int64(idx)); idx >= 0 && idx < o.value.Len() { + return o._getIdx(idx) + } + return o.objectGoReflect.getStr(idx.string(), receiver) +} + +func (o *objectGoSliceReflect) getStr(name unistring.String, receiver Value) Value { + var ownProp Value + if idx := strToGoIdx(name); idx >= 0 && idx < o.value.Len() { + ownProp = 
o._getIdx(idx) + } else if name == "length" { + ownProp = &o.lengthProp + } else { + ownProp = o.objectGoReflect.getOwnPropStr(name) + } + return o.getStrWithOwnProp(ownProp, name, receiver) +} + +func (o *objectGoSliceReflect) getOwnPropStr(name unistring.String) Value { + if idx := strToGoIdx(name); idx >= 0 { + if idx < o.value.Len() { + return &valueProperty{ + value: o._getIdx(idx), + writable: true, + enumerable: true, + } + } + return nil + } + if name == "length" { + return &o.lengthProp + } + return o.objectGoReflect.getOwnPropStr(name) +} + +func (o *objectGoSliceReflect) getOwnPropIdx(idx valueInt) Value { + if idx := toIntStrict(int64(idx)); idx >= 0 && idx < o.value.Len() { + return &valueProperty{ + value: o._getIdx(idx), + writable: true, + enumerable: true, + } + } + return nil +} + +func (o *objectGoSliceReflect) putIdx(idx int, v Value, throw bool) bool { + if idx >= o.value.Len() { + o.grow(idx + 1) + } + err := o.val.runtime.toReflectValue(v, o.value.Index(idx), &objectExportCtx{}) + if err != nil { + o.val.runtime.typeErrorResult(throw, "Go type conversion error: %v", err) + return false + } + return true +} + +func (o *objectGoSliceReflect) grow(size int) { + oldcap := o.value.Cap() + if oldcap < size { + n := reflect.MakeSlice(o.value.Type(), size, growCap(size, o.value.Len(), oldcap)) + reflect.Copy(n, o.value) + o.value.Set(n) + } else { + tail := o.value.Slice(o.value.Len(), size) + zero := reflect.Zero(o.value.Type().Elem()) + for i := 0; i < tail.Len(); i++ { + tail.Index(i).Set(zero) + } + o.value.SetLen(size) + } + o.updateLen() +} + +func (o *objectGoSliceReflect) shrink(size int) { + tail := o.value.Slice(size, o.value.Len()) + zero := reflect.Zero(o.value.Type().Elem()) + for i := 0; i < tail.Len(); i++ { + tail.Index(i).Set(zero) + } + o.value.SetLen(size) + o.updateLen() +} + +func (o *objectGoSliceReflect) putLength(v Value, throw bool) bool { + newLen := toIntStrict(toLength(v)) + curLen := o.value.Len() + if newLen > curLen { + 
o.grow(newLen) + } else if newLen < curLen { + o.shrink(newLen) + } + return true +} + +func (o *objectGoSliceReflect) setOwnIdx(idx valueInt, val Value, throw bool) bool { + if i := toIntStrict(int64(idx)); i >= 0 { + if i >= o.value.Len() { + if res, ok := o._setForeignIdx(idx, nil, val, o.val, throw); ok { + return res + } + } + o.putIdx(i, val, throw) + } else { + name := idx.string() + if res, ok := o._setForeignStr(name, nil, val, o.val, throw); !ok { + o.val.runtime.typeErrorResult(throw, "Can't set property '%s' on Go slice", name) + return false + } else { + return res + } + } + return true +} + +func (o *objectGoSliceReflect) setOwnStr(name unistring.String, val Value, throw bool) bool { + if idx := strToGoIdx(name); idx >= 0 { + if idx >= o.value.Len() { + if res, ok := o._setForeignStr(name, nil, val, o.val, throw); ok { + return res + } + } + o.putIdx(idx, val, throw) + } else { + if name == "length" { + return o.putLength(val, throw) + } + if res, ok := o._setForeignStr(name, nil, val, o.val, throw); !ok { + o.val.runtime.typeErrorResult(throw, "Can't set property '%s' on Go slice", name) + return false + } else { + return res + } + } + return true +} + +func (o *objectGoSliceReflect) setForeignIdx(idx valueInt, val, receiver Value, throw bool) (bool, bool) { + return o._setForeignIdx(idx, trueValIfPresent(o._hasIdx(idx)), val, receiver, throw) +} + +func (o *objectGoSliceReflect) setForeignStr(name unistring.String, val, receiver Value, throw bool) (bool, bool) { + return o._setForeignStr(name, trueValIfPresent(o._hasStr(name)), val, receiver, throw) +} + +func (o *objectGoSliceReflect) hasOwnPropertyIdx(idx valueInt) bool { + return o._hasIdx(idx) +} + +func (o *objectGoSliceReflect) hasOwnPropertyStr(name unistring.String) bool { + if o._hasStr(name) { + return true + } + return o.objectGoReflect._has(name.String()) +} + +func (o *objectGoSliceReflect) defineOwnPropertyIdx(idx valueInt, descr PropertyDescriptor, throw bool) bool { + if i := 
toIntStrict(int64(idx)); i >= 0 { + if !o.val.runtime.checkHostObjectPropertyDescr(idx.string(), descr, throw) { + return false + } + val := descr.Value + if val == nil { + val = _undefined + } + o.putIdx(i, val, throw) + return true + } + o.val.runtime.typeErrorResult(throw, "Cannot define property '%d' on a Go slice", idx) + return false +} + +func (o *objectGoSliceReflect) defineOwnPropertyStr(name unistring.String, descr PropertyDescriptor, throw bool) bool { + if idx := strToGoIdx(name); idx >= 0 { + if !o.val.runtime.checkHostObjectPropertyDescr(name, descr, throw) { + return false + } + val := descr.Value + if val == nil { + val = _undefined + } + o.putIdx(idx, val, throw) + return true + } + o.val.runtime.typeErrorResult(throw, "Cannot define property '%s' on a Go slice", name) + return false +} + +func (o *objectGoSliceReflect) toPrimitiveNumber() Value { + return o.toPrimitiveString() +} + +func (o *objectGoSliceReflect) toPrimitiveString() Value { + return o.val.runtime.arrayproto_join(FunctionCall{ + This: o.val, + }) +} + +func (o *objectGoSliceReflect) toPrimitive() Value { + return o.toPrimitiveString() +} + +func (o *objectGoSliceReflect) _deleteIdx(idx int) { + if idx < o.value.Len() { + o.value.Index(idx).Set(reflect.Zero(o.value.Type().Elem())) + } +} + +func (o *objectGoSliceReflect) deleteStr(name unistring.String, throw bool) bool { + if idx := strToGoIdx(name); idx >= 0 { + o._deleteIdx(idx) + return true + } + + return o.objectGoReflect.deleteStr(name, throw) +} + +func (o *objectGoSliceReflect) deleteIdx(i valueInt, throw bool) bool { + idx := toIntStrict(int64(i)) + if idx >= 0 { + o._deleteIdx(idx) + } + return true +} + +type gosliceReflectPropIter struct { + o *objectGoSliceReflect + idx, limit int +} + +func (i *gosliceReflectPropIter) next() (propIterItem, iterNextFunc) { + if i.idx < i.limit && i.idx < i.o.value.Len() { + name := strconv.Itoa(i.idx) + i.idx++ + return propIterItem{name: unistring.String(name), enumerable: 
_ENUM_TRUE}, i.next + } + + return i.o.objectGoReflect.enumerateOwnKeys()() +} + +func (o *objectGoSliceReflect) ownKeys(all bool, accum []Value) []Value { + for i := 0; i < o.value.Len(); i++ { + accum = append(accum, asciiString(strconv.Itoa(i))) + } + + return o.objectGoReflect.ownKeys(all, accum) +} + +func (o *objectGoSliceReflect) enumerateOwnKeys() iterNextFunc { + return (&gosliceReflectPropIter{ + o: o, + limit: o.value.Len(), + }).next +} + +func (o *objectGoSliceReflect) equal(other objectImpl) bool { + if other, ok := other.(*objectGoSliceReflect); ok { + return o.value.Interface() == other.value.Interface() + } + return false +} + +func (o *objectGoSliceReflect) sortLen() int64 { + return int64(o.value.Len()) +} + +func (o *objectGoSliceReflect) sortGet(i int64) Value { + return o.getIdx(valueInt(i), nil) +} + +func (o *objectGoSliceReflect) swap(i, j int64) { + ii := valueInt(i) + jj := valueInt(j) + x := o.getIdx(ii, nil) + y := o.getIdx(jj, nil) + + o.setOwnIdx(ii, y, false) + o.setOwnIdx(jj, x, false) +} diff --git a/vendor/github.com/dop251/goja/object_lazy.go b/vendor/github.com/dop251/goja/object_lazy.go new file mode 100644 index 0000000000..71ec77ffbc --- /dev/null +++ b/vendor/github.com/dop251/goja/object_lazy.go @@ -0,0 +1,290 @@ +package goja + +import ( + "reflect" + + "github.com/dop251/goja/unistring" +) + +type lazyObject struct { + val *Object + create func(*Object) objectImpl +} + +func (o *lazyObject) className() string { + obj := o.create(o.val) + o.val.self = obj + return obj.className() +} + +func (o *lazyObject) getIdx(p valueInt, receiver Value) Value { + obj := o.create(o.val) + o.val.self = obj + return obj.getIdx(p, receiver) +} + +func (o *lazyObject) getSym(p *Symbol, receiver Value) Value { + obj := o.create(o.val) + o.val.self = obj + return obj.getSym(p, receiver) +} + +func (o *lazyObject) getOwnPropIdx(idx valueInt) Value { + obj := o.create(o.val) + o.val.self = obj + return obj.getOwnPropIdx(idx) +} + +func (o 
*lazyObject) getOwnPropSym(s *Symbol) Value { + obj := o.create(o.val) + o.val.self = obj + return obj.getOwnPropSym(s) +} + +func (o *lazyObject) hasPropertyIdx(idx valueInt) bool { + obj := o.create(o.val) + o.val.self = obj + return obj.hasPropertyIdx(idx) +} + +func (o *lazyObject) hasPropertySym(s *Symbol) bool { + obj := o.create(o.val) + o.val.self = obj + return obj.hasPropertySym(s) +} + +func (o *lazyObject) hasOwnPropertyIdx(idx valueInt) bool { + obj := o.create(o.val) + o.val.self = obj + return obj.hasOwnPropertyIdx(idx) +} + +func (o *lazyObject) hasOwnPropertySym(s *Symbol) bool { + obj := o.create(o.val) + o.val.self = obj + return obj.hasOwnPropertySym(s) +} + +func (o *lazyObject) defineOwnPropertyStr(name unistring.String, desc PropertyDescriptor, throw bool) bool { + obj := o.create(o.val) + o.val.self = obj + return obj.defineOwnPropertyStr(name, desc, throw) +} + +func (o *lazyObject) defineOwnPropertyIdx(name valueInt, desc PropertyDescriptor, throw bool) bool { + obj := o.create(o.val) + o.val.self = obj + return obj.defineOwnPropertyIdx(name, desc, throw) +} + +func (o *lazyObject) defineOwnPropertySym(name *Symbol, desc PropertyDescriptor, throw bool) bool { + obj := o.create(o.val) + o.val.self = obj + return obj.defineOwnPropertySym(name, desc, throw) +} + +func (o *lazyObject) deleteIdx(idx valueInt, throw bool) bool { + obj := o.create(o.val) + o.val.self = obj + return obj.deleteIdx(idx, throw) +} + +func (o *lazyObject) deleteSym(s *Symbol, throw bool) bool { + obj := o.create(o.val) + o.val.self = obj + return obj.deleteSym(s, throw) +} + +func (o *lazyObject) getStr(name unistring.String, receiver Value) Value { + obj := o.create(o.val) + o.val.self = obj + return obj.getStr(name, receiver) +} + +func (o *lazyObject) getOwnPropStr(name unistring.String) Value { + obj := o.create(o.val) + o.val.self = obj + return obj.getOwnPropStr(name) +} + +func (o *lazyObject) setOwnStr(p unistring.String, v Value, throw bool) bool { + obj := 
o.create(o.val) + o.val.self = obj + return obj.setOwnStr(p, v, throw) +} + +func (o *lazyObject) setOwnIdx(p valueInt, v Value, throw bool) bool { + obj := o.create(o.val) + o.val.self = obj + return obj.setOwnIdx(p, v, throw) +} + +func (o *lazyObject) setOwnSym(p *Symbol, v Value, throw bool) bool { + obj := o.create(o.val) + o.val.self = obj + return obj.setOwnSym(p, v, throw) +} + +func (o *lazyObject) setForeignStr(p unistring.String, v, receiver Value, throw bool) (bool, bool) { + obj := o.create(o.val) + o.val.self = obj + return obj.setForeignStr(p, v, receiver, throw) +} + +func (o *lazyObject) setForeignIdx(p valueInt, v, receiver Value, throw bool) (bool, bool) { + obj := o.create(o.val) + o.val.self = obj + return obj.setForeignIdx(p, v, receiver, throw) +} + +func (o *lazyObject) setForeignSym(p *Symbol, v, receiver Value, throw bool) (bool, bool) { + obj := o.create(o.val) + o.val.self = obj + return obj.setForeignSym(p, v, receiver, throw) +} + +func (o *lazyObject) hasPropertyStr(name unistring.String) bool { + obj := o.create(o.val) + o.val.self = obj + return obj.hasPropertyStr(name) +} + +func (o *lazyObject) hasOwnPropertyStr(name unistring.String) bool { + obj := o.create(o.val) + o.val.self = obj + return obj.hasOwnPropertyStr(name) +} + +func (o *lazyObject) _putProp(unistring.String, Value, bool, bool, bool) Value { + panic("cannot use _putProp() in lazy object") +} + +func (o *lazyObject) _putSym(*Symbol, Value) { + panic("cannot use _putSym() in lazy object") +} + +func (o *lazyObject) toPrimitiveNumber() Value { + obj := o.create(o.val) + o.val.self = obj + return obj.toPrimitiveNumber() +} + +func (o *lazyObject) toPrimitiveString() Value { + obj := o.create(o.val) + o.val.self = obj + return obj.toPrimitiveString() +} + +func (o *lazyObject) toPrimitive() Value { + obj := o.create(o.val) + o.val.self = obj + return obj.toPrimitive() +} + +func (o *lazyObject) assertCallable() (call func(FunctionCall) Value, ok bool) { + obj := 
o.create(o.val) + o.val.self = obj + return obj.assertCallable() +} + +func (o *lazyObject) assertConstructor() func(args []Value, newTarget *Object) *Object { + obj := o.create(o.val) + o.val.self = obj + return obj.assertConstructor() +} + +func (o *lazyObject) deleteStr(name unistring.String, throw bool) bool { + obj := o.create(o.val) + o.val.self = obj + return obj.deleteStr(name, throw) +} + +func (o *lazyObject) proto() *Object { + obj := o.create(o.val) + o.val.self = obj + return obj.proto() +} + +func (o *lazyObject) hasInstance(v Value) bool { + obj := o.create(o.val) + o.val.self = obj + return obj.hasInstance(v) +} + +func (o *lazyObject) isExtensible() bool { + obj := o.create(o.val) + o.val.self = obj + return obj.isExtensible() +} + +func (o *lazyObject) preventExtensions(throw bool) bool { + obj := o.create(o.val) + o.val.self = obj + return obj.preventExtensions(throw) +} + +func (o *lazyObject) enumerateOwnKeys() iterNextFunc { + obj := o.create(o.val) + o.val.self = obj + return obj.enumerateOwnKeys() +} + +func (o *lazyObject) export(ctx *objectExportCtx) interface{} { + obj := o.create(o.val) + o.val.self = obj + return obj.export(ctx) +} + +func (o *lazyObject) exportType() reflect.Type { + obj := o.create(o.val) + o.val.self = obj + return obj.exportType() +} + +func (o *lazyObject) equal(other objectImpl) bool { + obj := o.create(o.val) + o.val.self = obj + return obj.equal(other) +} + +func (o *lazyObject) ownKeys(all bool, accum []Value) []Value { + obj := o.create(o.val) + o.val.self = obj + return obj.ownKeys(all, accum) +} + +func (o *lazyObject) ownSymbols(all bool, accum []Value) []Value { + obj := o.create(o.val) + o.val.self = obj + return obj.ownSymbols(all, accum) +} + +func (o *lazyObject) ownPropertyKeys(all bool, accum []Value) []Value { + obj := o.create(o.val) + o.val.self = obj + return obj.ownPropertyKeys(all, accum) +} + +func (o *lazyObject) setProto(proto *Object, throw bool) bool { + obj := o.create(o.val) + o.val.self 
= obj + return obj.setProto(proto, throw) +} + +func (o *lazyObject) sortLen() int64 { + obj := o.create(o.val) + o.val.self = obj + return obj.sortLen() +} + +func (o *lazyObject) sortGet(i int64) Value { + obj := o.create(o.val) + o.val.self = obj + return obj.sortGet(i) +} + +func (o *lazyObject) swap(i, j int64) { + obj := o.create(o.val) + o.val.self = obj + obj.swap(i, j) +} diff --git a/vendor/github.com/dop251/goja/parser/README.markdown b/vendor/github.com/dop251/goja/parser/README.markdown new file mode 100644 index 0000000000..ec1186d463 --- /dev/null +++ b/vendor/github.com/dop251/goja/parser/README.markdown @@ -0,0 +1,184 @@ +# parser +-- + import "github.com/dop251/goja/parser" + +Package parser implements a parser for JavaScript. Borrowed from https://github.com/robertkrimen/otto/tree/master/parser + + import ( + "github.com/dop251/goja/parser" + ) + +Parse and return an AST + + filename := "" // A filename is optional + src := ` + // Sample xyzzy example + (function(){ + if (3.14159 > 0) { + console.log("Hello, World."); + return; + } + + var xyzzy = NaN; + console.log("Nothing happens."); + return xyzzy; + })(); + ` + + // Parse some JavaScript, yielding a *ast.Program and/or an ErrorList + program, err := parser.ParseFile(nil, filename, src, 0) + + +### Warning + +The parser and AST interfaces are still works-in-progress (particularly where +node types are concerned) and may change in the future. + +## Usage + +#### func ParseFile + +```go +func ParseFile(fileSet *file.FileSet, filename string, src interface{}, mode Mode) (*ast.Program, error) +``` +ParseFile parses the source code of a single JavaScript/ECMAScript source file +and returns the corresponding ast.Program node. + +If fileSet == nil, ParseFile parses source without a FileSet. If fileSet != nil, +ParseFile first adds filename and src to fileSet. + +The filename argument is optional and is used for labelling errors, etc. 
+ +src may be a string, a byte slice, a bytes.Buffer, or an io.Reader, but it MUST +always be in UTF-8. + + // Parse some JavaScript, yielding a *ast.Program and/or an ErrorList + program, err := parser.ParseFile(nil, "", `if (abc > 1) {}`, 0) + +#### func ParseFunction + +```go +func ParseFunction(parameterList, body string) (*ast.FunctionLiteral, error) +``` +ParseFunction parses a given parameter list and body as a function and returns +the corresponding ast.FunctionLiteral node. + +The parameter list, if any, should be a comma-separated list of identifiers. + +#### func ReadSource + +```go +func ReadSource(filename string, src interface{}) ([]byte, error) +``` + +#### func TransformRegExp + +```go +func TransformRegExp(pattern string) (string, error) +``` +TransformRegExp transforms a JavaScript pattern into a Go "regexp" pattern. + +re2 (Go) cannot do backtracking, so the presence of a lookahead (?=) (?!) or +backreference (\1, \2, ...) will cause an error. + +re2 (Go) has a different definition for \s: [\t\n\f\r ]. The JavaScript +definition, on the other hand, also includes \v, Unicode "Separator, Space", +etc. + +If the pattern is invalid (not valid even in JavaScript), then this function +returns the empty string and an error. + +If the pattern is valid, but incompatible (contains a lookahead or +backreference), then this function returns the transformation (a non-empty +string) AND an error. + +#### type Error + +```go +type Error struct { + Position file.Position + Message string +} +``` + +An Error represents a parsing error. It includes the position where the error +occurred and a message/description. + +#### func (Error) Error + +```go +func (self Error) Error() string +``` + +#### type ErrorList + +```go +type ErrorList []*Error +``` + +ErrorList is a list of *Errors. + +#### func (*ErrorList) Add + +```go +func (self *ErrorList) Add(position file.Position, msg string) +``` +Add adds an Error with given position and message to an ErrorList. 
+ +#### func (ErrorList) Err + +```go +func (self ErrorList) Err() error +``` +Err returns an error equivalent to this ErrorList. If the list is empty, Err +returns nil. + +#### func (ErrorList) Error + +```go +func (self ErrorList) Error() string +``` +Error implements the Error interface. + +#### func (ErrorList) Len + +```go +func (self ErrorList) Len() int +``` + +#### func (ErrorList) Less + +```go +func (self ErrorList) Less(i, j int) bool +``` + +#### func (*ErrorList) Reset + +```go +func (self *ErrorList) Reset() +``` +Reset resets an ErrorList to no errors. + +#### func (ErrorList) Sort + +```go +func (self ErrorList) Sort() +``` + +#### func (ErrorList) Swap + +```go +func (self ErrorList) Swap(i, j int) +``` + +#### type Mode + +```go +type Mode uint +``` + +A Mode value is a set of flags (or 0). They control optional parser +functionality. + +-- +**godocdown** http://github.com/robertkrimen/godocdown diff --git a/vendor/github.com/dop251/goja/parser/error.go b/vendor/github.com/dop251/goja/parser/error.go new file mode 100644 index 0000000000..bfa33f250a --- /dev/null +++ b/vendor/github.com/dop251/goja/parser/error.go @@ -0,0 +1,175 @@ +package parser + +import ( + "fmt" + "sort" + + "github.com/dop251/goja/file" + "github.com/dop251/goja/token" +) + +const ( + err_UnexpectedToken = "Unexpected token %v" + err_UnexpectedEndOfInput = "Unexpected end of input" + err_UnexpectedEscape = "Unexpected escape" +) + +// UnexpectedNumber: 'Unexpected number', +// UnexpectedString: 'Unexpected string', +// UnexpectedIdentifier: 'Unexpected identifier', +// UnexpectedReserved: 'Unexpected reserved word', +// NewlineAfterThrow: 'Illegal newline after throw', +// InvalidRegExp: 'Invalid regular expression', +// UnterminatedRegExp: 'Invalid regular expression: missing /', +// InvalidLHSInAssignment: 'Invalid left-hand side in assignment', +// InvalidLHSInForIn: 'Invalid left-hand side in for-in', +// MultipleDefaultsInSwitch: 'More than one default clause in switch 
statement', +// NoCatchOrFinally: 'Missing catch or finally after try', +// UnknownLabel: 'Undefined label \'%0\'', +// Redeclaration: '%0 \'%1\' has already been declared', +// IllegalContinue: 'Illegal continue statement', +// IllegalBreak: 'Illegal break statement', +// IllegalReturn: 'Illegal return statement', +// StrictModeWith: 'Strict mode code may not include a with statement', +// StrictCatchVariable: 'Catch variable may not be eval or arguments in strict mode', +// StrictVarName: 'Variable name may not be eval or arguments in strict mode', +// StrictParamName: 'Parameter name eval or arguments is not allowed in strict mode', +// StrictParamDupe: 'Strict mode function may not have duplicate parameter names', +// StrictFunctionName: 'Function name may not be eval or arguments in strict mode', +// StrictOctalLiteral: 'Octal literals are not allowed in strict mode.', +// StrictDelete: 'Delete of an unqualified identifier in strict mode.', +// StrictDuplicateProperty: 'Duplicate data property in object literal not allowed in strict mode', +// AccessorDataProperty: 'Object literal may not have data and accessor property with the same name', +// AccessorGetSet: 'Object literal may not have multiple get/set accessors with the same name', +// StrictLHSAssignment: 'Assignment to eval or arguments is not allowed in strict mode', +// StrictLHSPostfix: 'Postfix increment/decrement may not have eval or arguments operand in strict mode', +// StrictLHSPrefix: 'Prefix increment/decrement may not have eval or arguments operand in strict mode', +// StrictReservedWord: 'Use of future reserved word in strict mode' + +// A SyntaxError is a description of an ECMAScript syntax error. + +// An Error represents a parsing error. It includes the position where the error occurred and a message/description. +type Error struct { + Position file.Position + Message string +} + +// FIXME Should this be "SyntaxError"? 
+ +func (self Error) Error() string { + filename := self.Position.Filename + if filename == "" { + filename = "(anonymous)" + } + return fmt.Sprintf("%s: Line %d:%d %s", + filename, + self.Position.Line, + self.Position.Column, + self.Message, + ) +} + +func (self *_parser) error(place interface{}, msg string, msgValues ...interface{}) *Error { + idx := file.Idx(0) + switch place := place.(type) { + case int: + idx = self.idxOf(place) + case file.Idx: + if place == 0 { + idx = self.idxOf(self.chrOffset) + } else { + idx = place + } + default: + panic(fmt.Errorf("error(%T, ...)", place)) + } + + position := self.position(idx) + msg = fmt.Sprintf(msg, msgValues...) + self.errors.Add(position, msg) + return self.errors[len(self.errors)-1] +} + +func (self *_parser) errorUnexpected(idx file.Idx, chr rune) error { + if chr == -1 { + return self.error(idx, err_UnexpectedEndOfInput) + } + return self.error(idx, err_UnexpectedToken, token.ILLEGAL) +} + +func (self *_parser) errorUnexpectedToken(tkn token.Token) error { + switch tkn { + case token.EOF: + return self.error(file.Idx(0), err_UnexpectedEndOfInput) + } + value := tkn.String() + switch tkn { + case token.BOOLEAN, token.NULL: + value = self.literal + case token.IDENTIFIER: + return self.error(self.idx, "Unexpected identifier") + case token.KEYWORD: + // TODO Might be a future reserved word + return self.error(self.idx, "Unexpected reserved word") + case token.NUMBER: + return self.error(self.idx, "Unexpected number") + case token.STRING: + return self.error(self.idx, "Unexpected string") + } + return self.error(self.idx, err_UnexpectedToken, value) +} + +// ErrorList is a list of *Errors. +// +type ErrorList []*Error + +// Add adds an Error with given position and message to an ErrorList. +func (self *ErrorList) Add(position file.Position, msg string) { + *self = append(*self, &Error{position, msg}) +} + +// Reset resets an ErrorList to no errors. 
+func (self *ErrorList) Reset() { *self = (*self)[0:0] } + +func (self ErrorList) Len() int { return len(self) } +func (self ErrorList) Swap(i, j int) { self[i], self[j] = self[j], self[i] } +func (self ErrorList) Less(i, j int) bool { + x := &self[i].Position + y := &self[j].Position + if x.Filename < y.Filename { + return true + } + if x.Filename == y.Filename { + if x.Line < y.Line { + return true + } + if x.Line == y.Line { + return x.Column < y.Column + } + } + return false +} + +func (self ErrorList) Sort() { + sort.Sort(self) +} + +// Error implements the Error interface. +func (self ErrorList) Error() string { + switch len(self) { + case 0: + return "no errors" + case 1: + return self[0].Error() + } + return fmt.Sprintf("%s (and %d more errors)", self[0].Error(), len(self)-1) +} + +// Err returns an error equivalent to this ErrorList. +// If the list is empty, Err returns nil. +func (self ErrorList) Err() error { + if len(self) == 0 { + return nil + } + return self +} diff --git a/vendor/github.com/dop251/goja/parser/expression.go b/vendor/github.com/dop251/goja/parser/expression.go new file mode 100644 index 0000000000..21ac15d8dd --- /dev/null +++ b/vendor/github.com/dop251/goja/parser/expression.go @@ -0,0 +1,1154 @@ +package parser + +import ( + "strings" + + "github.com/dop251/goja/ast" + "github.com/dop251/goja/file" + "github.com/dop251/goja/token" + "github.com/dop251/goja/unistring" +) + +func (self *_parser) parseIdentifier() *ast.Identifier { + literal := self.parsedLiteral + idx := self.idx + self.next() + return &ast.Identifier{ + Name: literal, + Idx: idx, + } +} + +func (self *_parser) parsePrimaryExpression() ast.Expression { + literal, parsedLiteral := self.literal, self.parsedLiteral + idx := self.idx + switch self.token { + case token.IDENTIFIER: + self.next() + if len(literal) > 1 { + tkn, strict := token.IsKeyword(literal) + if tkn == token.KEYWORD { + if !strict { + self.error(idx, "Unexpected reserved word") + } + } + } + return 
&ast.Identifier{ + Name: parsedLiteral, + Idx: idx, + } + case token.NULL: + self.next() + return &ast.NullLiteral{ + Idx: idx, + Literal: literal, + } + case token.BOOLEAN: + self.next() + value := false + switch parsedLiteral { + case "true": + value = true + case "false": + value = false + default: + self.error(idx, "Illegal boolean literal") + } + return &ast.BooleanLiteral{ + Idx: idx, + Literal: literal, + Value: value, + } + case token.STRING: + self.next() + return &ast.StringLiteral{ + Idx: idx, + Literal: literal, + Value: parsedLiteral, + } + case token.NUMBER: + self.next() + value, err := parseNumberLiteral(literal) + if err != nil { + self.error(idx, err.Error()) + value = 0 + } + return &ast.NumberLiteral{ + Idx: idx, + Literal: literal, + Value: value, + } + case token.SLASH, token.QUOTIENT_ASSIGN: + return self.parseRegExpLiteral() + case token.LEFT_BRACE: + return self.parseObjectLiteral() + case token.LEFT_BRACKET: + return self.parseArrayLiteral() + case token.LEFT_PARENTHESIS: + self.expect(token.LEFT_PARENTHESIS) + expression := self.parseExpression() + self.expect(token.RIGHT_PARENTHESIS) + return expression + case token.THIS: + self.next() + return &ast.ThisExpression{ + Idx: idx, + } + case token.FUNCTION: + return self.parseFunction(false) + } + + self.errorUnexpectedToken(self.token) + self.nextStatement() + return &ast.BadExpression{From: idx, To: self.idx} +} + +func (self *_parser) parseRegExpLiteral() *ast.RegExpLiteral { + + offset := self.chrOffset - 1 // Opening slash already gotten + if self.token == token.QUOTIENT_ASSIGN { + offset -= 1 // = + } + idx := self.idxOf(offset) + + pattern, _, err := self.scanString(offset, false) + endOffset := self.chrOffset + + if err == nil { + pattern = pattern[1 : len(pattern)-1] + } + + flags := "" + if !isLineTerminator(self.chr) && !isLineWhiteSpace(self.chr) { + self.next() + + if self.token == token.IDENTIFIER { // gim + + flags = self.literal + self.next() + endOffset = self.chrOffset - 1 
+ } + } else { + self.next() + } + + literal := self.str[offset:endOffset] + + return &ast.RegExpLiteral{ + Idx: idx, + Literal: literal, + Pattern: pattern, + Flags: flags, + } +} + +func (self *_parser) parseBindingTarget() (target ast.BindingTarget) { + if self.token == token.LET { + self.token = token.IDENTIFIER + } + switch self.token { + case token.IDENTIFIER: + target = &ast.Identifier{ + Name: self.parsedLiteral, + Idx: self.idx, + } + self.next() + case token.LEFT_BRACKET: + target = self.parseArrayBindingPattern() + case token.LEFT_BRACE: + target = self.parseObjectBindingPattern() + default: + idx := self.expect(token.IDENTIFIER) + self.nextStatement() + target = &ast.BadExpression{From: idx, To: self.idx} + } + + return +} + +func (self *_parser) parseVariableDeclaration(declarationList *[]*ast.Binding) ast.Expression { + node := &ast.Binding{ + Target: self.parseBindingTarget(), + } + + if declarationList != nil { + *declarationList = append(*declarationList, node) + } + + if self.token == token.ASSIGN { + self.next() + node.Initializer = self.parseAssignmentExpression() + } + + return node +} + +func (self *_parser) parseVariableDeclarationList() (declarationList []*ast.Binding) { + for { + self.parseVariableDeclaration(&declarationList) + if self.token != token.COMMA { + break + } + self.next() + } + return +} + +func (self *_parser) parseVarDeclarationList(var_ file.Idx) []*ast.Binding { + declarationList := self.parseVariableDeclarationList() + + self.scope.declare(&ast.VariableDeclaration{ + Var: var_, + List: declarationList, + }) + + return declarationList +} + +func (self *_parser) parseObjectPropertyKey() (unistring.String, ast.Expression, token.Token) { + if self.token == token.LEFT_BRACKET { + self.next() + expr := self.parseAssignmentExpression() + self.expect(token.RIGHT_BRACKET) + return "", expr, token.ILLEGAL + } + idx, tkn, literal, parsedLiteral := self.idx, self.token, self.literal, self.parsedLiteral + var value ast.Expression + 
self.next() + switch tkn { + case token.IDENTIFIER: + value = &ast.StringLiteral{ + Idx: idx, + Literal: literal, + Value: unistring.String(literal), + } + case token.NUMBER: + num, err := parseNumberLiteral(literal) + if err != nil { + self.error(idx, err.Error()) + } else { + value = &ast.NumberLiteral{ + Idx: idx, + Literal: literal, + Value: num, + } + } + case token.STRING: + value = &ast.StringLiteral{ + Idx: idx, + Literal: literal, + Value: parsedLiteral, + } + default: + // null, false, class, etc. + if isId(tkn) { + value = &ast.StringLiteral{ + Idx: idx, + Literal: literal, + Value: unistring.String(literal), + } + tkn = token.KEYWORD + } + } + return parsedLiteral, value, tkn +} + +func (self *_parser) parseObjectProperty() ast.Property { + if self.token == token.ELLIPSIS { + self.next() + return &ast.SpreadElement{ + Expression: self.parseAssignmentExpression(), + } + } + literal, value, tkn := self.parseObjectPropertyKey() + if tkn == token.IDENTIFIER || tkn == token.STRING || tkn == token.KEYWORD || tkn == token.ILLEGAL { + switch { + case self.token == token.LEFT_PARENTHESIS: + idx := self.idx + parameterList := self.parseFunctionParameterList() + + node := &ast.FunctionLiteral{ + Function: idx, + ParameterList: parameterList, + } + self.parseFunctionBlock(node) + + return &ast.PropertyKeyed{ + Key: value, + Kind: ast.PropertyKindMethod, + Value: node, + } + case self.token == token.COMMA || self.token == token.RIGHT_BRACE || self.token == token.ASSIGN: // shorthand property + if tkn == token.IDENTIFIER || tkn == token.KEYWORD && literal == "let" { + var initializer ast.Expression + if self.token == token.ASSIGN { + // allow the initializer syntax here in case the object literal + // needs to be reinterpreted as an assignment pattern, enforce later if it doesn't. 
+ self.next() + initializer = self.parseAssignmentExpression() + } + return &ast.PropertyShort{ + Name: ast.Identifier{ + Name: literal, + Idx: value.Idx0(), + }, + Initializer: initializer, + } + } + case literal == "get" && self.token != token.COLON: + idx := self.idx + _, value, _ := self.parseObjectPropertyKey() + idx1 := self.idx + parameterList := self.parseFunctionParameterList() + if len(parameterList.List) > 0 || parameterList.Rest != nil { + self.error(idx1, "Getter must not have any formal parameters.") + } + node := &ast.FunctionLiteral{ + Function: idx, + ParameterList: parameterList, + } + self.parseFunctionBlock(node) + return &ast.PropertyKeyed{ + Key: value, + Kind: ast.PropertyKindGet, + Value: node, + } + case literal == "set" && self.token != token.COLON: + idx := self.idx + _, value, _ := self.parseObjectPropertyKey() + parameterList := self.parseFunctionParameterList() + + node := &ast.FunctionLiteral{ + Function: idx, + ParameterList: parameterList, + } + + self.parseFunctionBlock(node) + + return &ast.PropertyKeyed{ + Key: value, + Kind: ast.PropertyKindSet, + Value: node, + } + } + } + + self.expect(token.COLON) + + return &ast.PropertyKeyed{ + Key: value, + Kind: ast.PropertyKindValue, + Value: self.parseAssignmentExpression(), + } +} + +func (self *_parser) parseObjectLiteral() *ast.ObjectLiteral { + var value []ast.Property + idx0 := self.expect(token.LEFT_BRACE) + for self.token != token.RIGHT_BRACE && self.token != token.EOF { + property := self.parseObjectProperty() + value = append(value, property) + if self.token != token.RIGHT_BRACE { + self.expect(token.COMMA) + } else { + break + } + } + idx1 := self.expect(token.RIGHT_BRACE) + + return &ast.ObjectLiteral{ + LeftBrace: idx0, + RightBrace: idx1, + Value: value, + } +} + +func (self *_parser) parseArrayLiteral() *ast.ArrayLiteral { + + idx0 := self.expect(token.LEFT_BRACKET) + var value []ast.Expression + for self.token != token.RIGHT_BRACKET && self.token != token.EOF { + if 
self.token == token.COMMA { + self.next() + value = append(value, nil) + continue + } + if self.token == token.ELLIPSIS { + self.next() + value = append(value, &ast.SpreadElement{ + Expression: self.parseAssignmentExpression(), + }) + } else { + value = append(value, self.parseAssignmentExpression()) + } + if self.token != token.RIGHT_BRACKET { + self.expect(token.COMMA) + } + } + idx1 := self.expect(token.RIGHT_BRACKET) + + return &ast.ArrayLiteral{ + LeftBracket: idx0, + RightBracket: idx1, + Value: value, + } +} + +func (self *_parser) parseArgumentList() (argumentList []ast.Expression, idx0, idx1 file.Idx) { + idx0 = self.expect(token.LEFT_PARENTHESIS) + if self.token != token.RIGHT_PARENTHESIS { + for { + var item ast.Expression + if self.token == token.ELLIPSIS { + self.next() + item = &ast.SpreadElement{ + Expression: self.parseAssignmentExpression(), + } + } else { + item = self.parseAssignmentExpression() + } + argumentList = append(argumentList, item) + if self.token != token.COMMA { + break + } + self.next() + } + } + idx1 = self.expect(token.RIGHT_PARENTHESIS) + return +} + +func (self *_parser) parseCallExpression(left ast.Expression) ast.Expression { + argumentList, idx0, idx1 := self.parseArgumentList() + return &ast.CallExpression{ + Callee: left, + LeftParenthesis: idx0, + ArgumentList: argumentList, + RightParenthesis: idx1, + } +} + +func (self *_parser) parseDotMember(left ast.Expression) ast.Expression { + period := self.expect(token.PERIOD) + + literal := self.parsedLiteral + idx := self.idx + + if self.token != token.IDENTIFIER && !isId(self.token) { + self.expect(token.IDENTIFIER) + self.nextStatement() + return &ast.BadExpression{From: period, To: self.idx} + } + + self.next() + + return &ast.DotExpression{ + Left: left, + Identifier: ast.Identifier{ + Idx: idx, + Name: literal, + }, + } +} + +func (self *_parser) parseBracketMember(left ast.Expression) ast.Expression { + idx0 := self.expect(token.LEFT_BRACKET) + member := 
self.parseExpression() + idx1 := self.expect(token.RIGHT_BRACKET) + return &ast.BracketExpression{ + LeftBracket: idx0, + Left: left, + Member: member, + RightBracket: idx1, + } +} + +func (self *_parser) parseNewExpression() ast.Expression { + idx := self.expect(token.NEW) + if self.token == token.PERIOD { + self.next() + prop := self.parseIdentifier() + if prop.Name == "target" { + if !self.scope.inFunction { + self.error(idx, "new.target expression is not allowed here") + } + return &ast.MetaProperty{ + Meta: &ast.Identifier{ + Name: unistring.String(token.NEW.String()), + Idx: idx, + }, + Property: prop, + } + } + self.errorUnexpectedToken(token.IDENTIFIER) + } + callee := self.parseLeftHandSideExpression() + node := &ast.NewExpression{ + New: idx, + Callee: callee, + } + if self.token == token.LEFT_PARENTHESIS { + argumentList, idx0, idx1 := self.parseArgumentList() + node.ArgumentList = argumentList + node.LeftParenthesis = idx0 + node.RightParenthesis = idx1 + } + return node +} + +func (self *_parser) parseLeftHandSideExpression() ast.Expression { + + var left ast.Expression + if self.token == token.NEW { + left = self.parseNewExpression() + } else { + left = self.parsePrimaryExpression() + } + + for { + if self.token == token.PERIOD { + left = self.parseDotMember(left) + } else if self.token == token.LEFT_BRACKET { + left = self.parseBracketMember(left) + } else { + break + } + } + + return left +} + +func (self *_parser) parseLeftHandSideExpressionAllowCall() ast.Expression { + + allowIn := self.scope.allowIn + self.scope.allowIn = true + defer func() { + self.scope.allowIn = allowIn + }() + + var left ast.Expression + if self.token == token.NEW { + left = self.parseNewExpression() + } else { + left = self.parsePrimaryExpression() + } + + for { + if self.token == token.PERIOD { + left = self.parseDotMember(left) + } else if self.token == token.LEFT_BRACKET { + left = self.parseBracketMember(left) + } else if self.token == token.LEFT_PARENTHESIS { + left = 
self.parseCallExpression(left) + } else { + break + } + } + + return left +} + +func (self *_parser) parsePostfixExpression() ast.Expression { + operand := self.parseLeftHandSideExpressionAllowCall() + + switch self.token { + case token.INCREMENT, token.DECREMENT: + // Make sure there is no line terminator here + if self.implicitSemicolon { + break + } + tkn := self.token + idx := self.idx + self.next() + switch operand.(type) { + case *ast.Identifier, *ast.DotExpression, *ast.BracketExpression: + default: + self.error(idx, "Invalid left-hand side in assignment") + self.nextStatement() + return &ast.BadExpression{From: idx, To: self.idx} + } + return &ast.UnaryExpression{ + Operator: tkn, + Idx: idx, + Operand: operand, + Postfix: true, + } + } + + return operand +} + +func (self *_parser) parseUnaryExpression() ast.Expression { + + switch self.token { + case token.PLUS, token.MINUS, token.NOT, token.BITWISE_NOT: + fallthrough + case token.DELETE, token.VOID, token.TYPEOF: + tkn := self.token + idx := self.idx + self.next() + return &ast.UnaryExpression{ + Operator: tkn, + Idx: idx, + Operand: self.parseUnaryExpression(), + } + case token.INCREMENT, token.DECREMENT: + tkn := self.token + idx := self.idx + self.next() + operand := self.parseUnaryExpression() + switch operand.(type) { + case *ast.Identifier, *ast.DotExpression, *ast.BracketExpression: + default: + self.error(idx, "Invalid left-hand side in assignment") + self.nextStatement() + return &ast.BadExpression{From: idx, To: self.idx} + } + return &ast.UnaryExpression{ + Operator: tkn, + Idx: idx, + Operand: operand, + } + } + + return self.parsePostfixExpression() +} + +func (self *_parser) parseMultiplicativeExpression() ast.Expression { + next := self.parseUnaryExpression + left := next() + + for self.token == token.MULTIPLY || self.token == token.SLASH || + self.token == token.REMAINDER { + tkn := self.token + self.next() + left = &ast.BinaryExpression{ + Operator: tkn, + Left: left, + Right: next(), + } 
+ } + + return left +} + +func (self *_parser) parseAdditiveExpression() ast.Expression { + next := self.parseMultiplicativeExpression + left := next() + + for self.token == token.PLUS || self.token == token.MINUS { + tkn := self.token + self.next() + left = &ast.BinaryExpression{ + Operator: tkn, + Left: left, + Right: next(), + } + } + + return left +} + +func (self *_parser) parseShiftExpression() ast.Expression { + next := self.parseAdditiveExpression + left := next() + + for self.token == token.SHIFT_LEFT || self.token == token.SHIFT_RIGHT || + self.token == token.UNSIGNED_SHIFT_RIGHT { + tkn := self.token + self.next() + left = &ast.BinaryExpression{ + Operator: tkn, + Left: left, + Right: next(), + } + } + + return left +} + +func (self *_parser) parseRelationalExpression() ast.Expression { + next := self.parseShiftExpression + left := next() + + allowIn := self.scope.allowIn + self.scope.allowIn = true + defer func() { + self.scope.allowIn = allowIn + }() + + switch self.token { + case token.LESS, token.LESS_OR_EQUAL, token.GREATER, token.GREATER_OR_EQUAL: + tkn := self.token + self.next() + return &ast.BinaryExpression{ + Operator: tkn, + Left: left, + Right: self.parseRelationalExpression(), + Comparison: true, + } + case token.INSTANCEOF: + tkn := self.token + self.next() + return &ast.BinaryExpression{ + Operator: tkn, + Left: left, + Right: self.parseRelationalExpression(), + } + case token.IN: + if !allowIn { + return left + } + tkn := self.token + self.next() + return &ast.BinaryExpression{ + Operator: tkn, + Left: left, + Right: self.parseRelationalExpression(), + } + } + + return left +} + +func (self *_parser) parseEqualityExpression() ast.Expression { + next := self.parseRelationalExpression + left := next() + + for self.token == token.EQUAL || self.token == token.NOT_EQUAL || + self.token == token.STRICT_EQUAL || self.token == token.STRICT_NOT_EQUAL { + tkn := self.token + self.next() + left = &ast.BinaryExpression{ + Operator: tkn, + Left: 
left, + Right: next(), + Comparison: true, + } + } + + return left +} + +func (self *_parser) parseBitwiseAndExpression() ast.Expression { + next := self.parseEqualityExpression + left := next() + + for self.token == token.AND { + tkn := self.token + self.next() + left = &ast.BinaryExpression{ + Operator: tkn, + Left: left, + Right: next(), + } + } + + return left +} + +func (self *_parser) parseBitwiseExclusiveOrExpression() ast.Expression { + next := self.parseBitwiseAndExpression + left := next() + + for self.token == token.EXCLUSIVE_OR { + tkn := self.token + self.next() + left = &ast.BinaryExpression{ + Operator: tkn, + Left: left, + Right: next(), + } + } + + return left +} + +func (self *_parser) parseBitwiseOrExpression() ast.Expression { + next := self.parseBitwiseExclusiveOrExpression + left := next() + + for self.token == token.OR { + tkn := self.token + self.next() + left = &ast.BinaryExpression{ + Operator: tkn, + Left: left, + Right: next(), + } + } + + return left +} + +func (self *_parser) parseLogicalAndExpression() ast.Expression { + next := self.parseBitwiseOrExpression + left := next() + + for self.token == token.LOGICAL_AND { + tkn := self.token + self.next() + left = &ast.BinaryExpression{ + Operator: tkn, + Left: left, + Right: next(), + } + } + + return left +} + +func (self *_parser) parseLogicalOrExpression() ast.Expression { + next := self.parseLogicalAndExpression + left := next() + + for self.token == token.LOGICAL_OR { + tkn := self.token + self.next() + left = &ast.BinaryExpression{ + Operator: tkn, + Left: left, + Right: next(), + } + } + + return left +} + +func (self *_parser) parseConditionlExpression() ast.Expression { + left := self.parseLogicalOrExpression() + + if self.token == token.QUESTION_MARK { + self.next() + consequent := self.parseAssignmentExpression() + self.expect(token.COLON) + return &ast.ConditionalExpression{ + Test: left, + Consequent: consequent, + Alternate: self.parseAssignmentExpression(), + } + } + + 
return left +} + +func (self *_parser) parseAssignmentExpression() ast.Expression { + parenthesis := false + if self.token == token.LET { + self.token = token.IDENTIFIER + } else if self.token == token.LEFT_PARENTHESIS { + parenthesis = true + } + left := self.parseConditionlExpression() + var operator token.Token + switch self.token { + case token.ASSIGN: + operator = self.token + case token.ADD_ASSIGN: + operator = token.PLUS + case token.SUBTRACT_ASSIGN: + operator = token.MINUS + case token.MULTIPLY_ASSIGN: + operator = token.MULTIPLY + case token.QUOTIENT_ASSIGN: + operator = token.SLASH + case token.REMAINDER_ASSIGN: + operator = token.REMAINDER + case token.AND_ASSIGN: + operator = token.AND + case token.OR_ASSIGN: + operator = token.OR + case token.EXCLUSIVE_OR_ASSIGN: + operator = token.EXCLUSIVE_OR + case token.SHIFT_LEFT_ASSIGN: + operator = token.SHIFT_LEFT + case token.SHIFT_RIGHT_ASSIGN: + operator = token.SHIFT_RIGHT + case token.UNSIGNED_SHIFT_RIGHT_ASSIGN: + operator = token.UNSIGNED_SHIFT_RIGHT + } + + if operator != 0 { + idx := self.idx + self.next() + ok := false + switch l := left.(type) { + case *ast.Identifier, *ast.DotExpression, *ast.BracketExpression: + ok = true + case *ast.ArrayLiteral: + if !parenthesis && operator == token.ASSIGN { + left = self.reinterpretAsArrayAssignmentPattern(l) + ok = true + } + case *ast.ObjectLiteral: + if !parenthesis && operator == token.ASSIGN { + left = self.reinterpretAsObjectAssignmentPattern(l) + ok = true + } + } + if ok { + return &ast.AssignExpression{ + Left: left, + Operator: operator, + Right: self.parseAssignmentExpression(), + } + } + self.error(left.Idx0(), "Invalid left-hand side in assignment") + self.nextStatement() + return &ast.BadExpression{From: idx, To: self.idx} + } + + return left +} + +func (self *_parser) parseExpression() ast.Expression { + if self.token == token.LET { + self.token = token.IDENTIFIER + } + next := self.parseAssignmentExpression + left := next() + + if self.token == 
token.COMMA { + sequence := []ast.Expression{left} + for { + if self.token != token.COMMA { + break + } + self.next() + sequence = append(sequence, next()) + } + return &ast.SequenceExpression{ + Sequence: sequence, + } + } + + return left +} + +func (self *_parser) checkComma(from, to file.Idx) { + if pos := strings.IndexByte(self.str[int(from)-self.base:int(to)-self.base], ','); pos >= 0 { + self.error(from+file.Idx(pos), "Comma is not allowed here") + } +} + +func (self *_parser) reinterpretAsArrayAssignmentPattern(left *ast.ArrayLiteral) *ast.ArrayPattern { + value := left.Value + var rest ast.Expression + for i, item := range value { + if spread, ok := item.(*ast.SpreadElement); ok { + if i != len(value)-1 { + self.error(item.Idx0(), "Rest element must be last element") + return nil + } + self.checkComma(spread.Expression.Idx1(), left.RightBracket) + rest = self.reinterpretAsDestructAssignTarget(spread.Expression) + value = value[:len(value)-1] + } else { + value[i] = self.reinterpretAsAssignmentElement(item) + } + } + return &ast.ArrayPattern{ + LeftBracket: left.LeftBracket, + RightBracket: left.RightBracket, + Elements: value, + Rest: rest, + } +} + +func (self *_parser) reinterpretArrayAssignPatternAsBinding(pattern *ast.ArrayPattern) *ast.ArrayPattern { + for i, item := range pattern.Elements { + pattern.Elements[i] = self.reinterpretAsDestructBindingTarget(item) + } + if pattern.Rest != nil { + pattern.Rest = self.reinterpretAsDestructBindingTarget(pattern.Rest) + } + return pattern +} + +func (self *_parser) reinterpretAsArrayBindingPattern(left *ast.ArrayLiteral) *ast.ArrayPattern { + value := left.Value + var rest ast.Expression + for i, item := range value { + if spread, ok := item.(*ast.SpreadElement); ok { + if i != len(value)-1 { + self.error(item.Idx0(), "Rest element must be last element") + return nil + } + self.checkComma(spread.Expression.Idx1(), left.RightBracket) + rest = self.reinterpretAsDestructBindingTarget(spread.Expression) + value = 
value[:len(value)-1] + } else { + value[i] = self.reinterpretAsBindingElement(item) + } + } + return &ast.ArrayPattern{ + LeftBracket: left.LeftBracket, + RightBracket: left.RightBracket, + Elements: value, + Rest: rest, + } +} + +func (self *_parser) parseArrayBindingPattern() *ast.ArrayPattern { + return self.reinterpretAsArrayBindingPattern(self.parseArrayLiteral()) +} + +func (self *_parser) parseObjectBindingPattern() *ast.ObjectPattern { + return self.reinterpretAsObjectBindingPattern(self.parseObjectLiteral()) +} + +func (self *_parser) reinterpretArrayObjectPatternAsBinding(pattern *ast.ObjectPattern) *ast.ObjectPattern { + for _, prop := range pattern.Properties { + if keyed, ok := prop.(*ast.PropertyKeyed); ok { + keyed.Value = self.reinterpretAsBindingElement(keyed.Value) + } + } + if pattern.Rest != nil { + pattern.Rest = self.reinterpretAsBindingRestElement(pattern.Rest) + } + return pattern +} + +func (self *_parser) reinterpretAsObjectBindingPattern(expr *ast.ObjectLiteral) *ast.ObjectPattern { + var rest ast.Expression + value := expr.Value + for i, prop := range value { + ok := false + switch prop := prop.(type) { + case *ast.PropertyKeyed: + if prop.Kind == ast.PropertyKindValue { + prop.Value = self.reinterpretAsBindingElement(prop.Value) + ok = true + } + case *ast.PropertyShort: + ok = true + case *ast.SpreadElement: + if i != len(expr.Value)-1 { + self.error(prop.Idx0(), "Rest element must be last element") + return nil + } + // TODO make sure there is no trailing comma + rest = self.reinterpretAsBindingRestElement(prop.Expression) + value = value[:i] + ok = true + } + if !ok { + self.error(prop.Idx0(), "Invalid destructuring binding target") + return nil + } + } + return &ast.ObjectPattern{ + LeftBrace: expr.LeftBrace, + RightBrace: expr.RightBrace, + Properties: value, + Rest: rest, + } +} + +func (self *_parser) reinterpretAsObjectAssignmentPattern(l *ast.ObjectLiteral) *ast.ObjectPattern { + var rest ast.Expression + value := l.Value + for 
i, prop := range value { + ok := false + switch prop := prop.(type) { + case *ast.PropertyKeyed: + if prop.Kind == ast.PropertyKindValue { + prop.Value = self.reinterpretAsAssignmentElement(prop.Value) + ok = true + } + case *ast.PropertyShort: + ok = true + case *ast.SpreadElement: + if i != len(l.Value)-1 { + self.error(prop.Idx0(), "Rest element must be last element") + return nil + } + // TODO make sure there is no trailing comma + rest = prop.Expression + value = value[:i] + ok = true + } + if !ok { + self.error(prop.Idx0(), "Invalid destructuring assignment target") + return nil + } + } + return &ast.ObjectPattern{ + LeftBrace: l.LeftBrace, + RightBrace: l.RightBrace, + Properties: value, + Rest: rest, + } +} + +func (self *_parser) reinterpretAsAssignmentElement(expr ast.Expression) ast.Expression { + switch expr := expr.(type) { + case *ast.AssignExpression: + if expr.Operator == token.ASSIGN { + expr.Left = self.reinterpretAsDestructAssignTarget(expr.Left) + return expr + } else { + self.error(expr.Idx0(), "Invalid destructuring assignment target") + return &ast.BadExpression{From: expr.Idx0(), To: expr.Idx1()} + } + default: + return self.reinterpretAsDestructAssignTarget(expr) + } +} + +func (self *_parser) reinterpretAsBindingElement(expr ast.Expression) ast.Expression { + switch expr := expr.(type) { + case *ast.AssignExpression: + if expr.Operator == token.ASSIGN { + expr.Left = self.reinterpretAsDestructBindingTarget(expr.Left) + return expr + } else { + self.error(expr.Idx0(), "Invalid destructuring assignment target") + return &ast.BadExpression{From: expr.Idx0(), To: expr.Idx1()} + } + default: + return self.reinterpretAsDestructBindingTarget(expr) + } +} + +func (self *_parser) reinterpretAsDestructAssignTarget(item ast.Expression) ast.Expression { + switch item := item.(type) { + case nil: + return nil + case *ast.ArrayLiteral: + return self.reinterpretAsArrayAssignmentPattern(item) + case *ast.ObjectLiteral: + return 
self.reinterpretAsObjectAssignmentPattern(item) + case ast.Pattern, *ast.Identifier, *ast.DotExpression, *ast.BracketExpression: + return item + } + self.error(item.Idx0(), "Invalid destructuring assignment target") + return &ast.BadExpression{From: item.Idx0(), To: item.Idx1()} +} + +func (self *_parser) reinterpretAsDestructBindingTarget(item ast.Expression) ast.BindingTarget { + switch item := item.(type) { + case nil: + return nil + case *ast.ArrayPattern: + return self.reinterpretArrayAssignPatternAsBinding(item) + case *ast.ObjectPattern: + return self.reinterpretArrayObjectPatternAsBinding(item) + case *ast.ArrayLiteral: + return self.reinterpretAsArrayBindingPattern(item) + case *ast.ObjectLiteral: + return self.reinterpretAsObjectBindingPattern(item) + case *ast.Identifier: + return item + } + self.error(item.Idx0(), "Invalid destructuring binding target") + return &ast.BadExpression{From: item.Idx0(), To: item.Idx1()} +} + +func (self *_parser) reinterpretAsBindingRestElement(expr ast.Expression) ast.Expression { + if _, ok := expr.(*ast.Identifier); ok { + return expr + } + self.error(expr.Idx0(), "Invalid binding rest") + return &ast.BadExpression{From: expr.Idx0(), To: expr.Idx1()} +} diff --git a/vendor/github.com/dop251/goja/parser/lexer.go b/vendor/github.com/dop251/goja/parser/lexer.go new file mode 100644 index 0000000000..ab6fed7f40 --- /dev/null +++ b/vendor/github.com/dop251/goja/parser/lexer.go @@ -0,0 +1,961 @@ +package parser + +import ( + "errors" + "fmt" + "regexp" + "strconv" + "strings" + "unicode" + "unicode/utf16" + "unicode/utf8" + + "github.com/dop251/goja/file" + "github.com/dop251/goja/token" + "github.com/dop251/goja/unistring" +) + +var matchIdentifier = regexp.MustCompile(`^[$_\p{L}][$_\p{L}\d}]*$`) + +func isDecimalDigit(chr rune) bool { + return '0' <= chr && chr <= '9' +} + +func IsIdentifier(s string) bool { + return matchIdentifier.MatchString(s) +} + +func digitValue(chr rune) int { + switch { + case '0' <= chr && chr <= 
'9': + return int(chr - '0') + case 'a' <= chr && chr <= 'f': + return int(chr - 'a' + 10) + case 'A' <= chr && chr <= 'F': + return int(chr - 'A' + 10) + } + return 16 // Larger than any legal digit value +} + +func isDigit(chr rune, base int) bool { + return digitValue(chr) < base +} + +func isIdentifierStart(chr rune) bool { + return chr == '$' || chr == '_' || chr == '\\' || + 'a' <= chr && chr <= 'z' || 'A' <= chr && chr <= 'Z' || + chr >= utf8.RuneSelf && unicode.IsLetter(chr) +} + +func isIdentifierPart(chr rune) bool { + return chr == '$' || chr == '_' || chr == '\\' || + 'a' <= chr && chr <= 'z' || 'A' <= chr && chr <= 'Z' || + '0' <= chr && chr <= '9' || + chr >= utf8.RuneSelf && (unicode.IsLetter(chr) || unicode.IsDigit(chr)) +} + +func (self *_parser) scanIdentifier() (string, unistring.String, bool, error) { + offset := self.chrOffset + hasEscape := false + isUnicode := false + length := 0 + for isIdentifierPart(self.chr) { + r := self.chr + length++ + if r == '\\' { + hasEscape = true + distance := self.chrOffset - offset + self.read() + if self.chr != 'u' { + return "", "", false, fmt.Errorf("Invalid identifier escape character: %c (%s)", self.chr, string(self.chr)) + } + var value rune + for j := 0; j < 4; j++ { + self.read() + decimal, ok := hex2decimal(byte(self.chr)) + if !ok { + return "", "", false, fmt.Errorf("Invalid identifier escape character: %c (%s)", self.chr, string(self.chr)) + } + value = value<<4 | decimal + } + if value == '\\' { + return "", "", false, fmt.Errorf("Invalid identifier escape value: %c (%s)", value, string(value)) + } else if distance == 0 { + if !isIdentifierStart(value) { + return "", "", false, fmt.Errorf("Invalid identifier escape value: %c (%s)", value, string(value)) + } + } else if distance > 0 { + if !isIdentifierPart(value) { + return "", "", false, fmt.Errorf("Invalid identifier escape value: %c (%s)", value, string(value)) + } + } + r = value + } + if r >= utf8.RuneSelf { + isUnicode = true + if r > 0xFFFF 
{ + length++ + } + } + self.read() + } + + literal := self.str[offset:self.chrOffset] + var parsed unistring.String + if hasEscape || isUnicode { + var err error + parsed, err = parseStringLiteral1(literal, length, isUnicode) + if err != nil { + return "", "", false, err + } + } else { + parsed = unistring.String(literal) + } + + return literal, parsed, hasEscape, nil +} + +// 7.2 +func isLineWhiteSpace(chr rune) bool { + switch chr { + case '\u0009', '\u000b', '\u000c', '\u0020', '\u00a0', '\ufeff': + return true + case '\u000a', '\u000d', '\u2028', '\u2029': + return false + case '\u0085': + return false + } + return unicode.IsSpace(chr) +} + +// 7.3 +func isLineTerminator(chr rune) bool { + switch chr { + case '\u000a', '\u000d', '\u2028', '\u2029': + return true + } + return false +} + +func isId(tkn token.Token) bool { + switch tkn { + case token.KEYWORD, + token.BOOLEAN, + token.NULL, + token.THIS, + token.IF, + token.IN, + token.OF, + token.DO, + + token.VAR, + token.LET, + token.FOR, + token.NEW, + token.TRY, + + token.ELSE, + token.CASE, + token.VOID, + token.WITH, + + token.CONST, + token.WHILE, + token.BREAK, + token.CATCH, + token.THROW, + + token.RETURN, + token.TYPEOF, + token.DELETE, + token.SWITCH, + + token.DEFAULT, + token.FINALLY, + + token.FUNCTION, + token.CONTINUE, + token.DEBUGGER, + + token.INSTANCEOF: + + return true + } + return false +} + +func (self *_parser) peek() token.Token { + implicitSemicolon, insertSemicolon, chr, chrOffset, offset := self.implicitSemicolon, self.insertSemicolon, self.chr, self.chrOffset, self.offset + tok, _, _, _ := self.scan() + self.implicitSemicolon, self.insertSemicolon, self.chr, self.chrOffset, self.offset = implicitSemicolon, insertSemicolon, chr, chrOffset, offset + return tok +} + +func (self *_parser) scan() (tkn token.Token, literal string, parsedLiteral unistring.String, idx file.Idx) { + + self.implicitSemicolon = false + + for { + self.skipWhiteSpace() + + idx = self.idxOf(self.chrOffset) + 
insertSemicolon := false + + switch chr := self.chr; { + case isIdentifierStart(chr): + var err error + var hasEscape bool + literal, parsedLiteral, hasEscape, err = self.scanIdentifier() + if err != nil { + tkn = token.ILLEGAL + break + } + if len(parsedLiteral) > 1 { + // Keywords are longer than 1 character, avoid lookup otherwise + var strict bool + tkn, strict = token.IsKeyword(string(parsedLiteral)) + + switch tkn { + + case 0: // Not a keyword + if parsedLiteral == "true" || parsedLiteral == "false" { + if hasEscape { + tkn = token.STRING + return + } + self.insertSemicolon = true + tkn = token.BOOLEAN + return + } else if parsedLiteral == "null" { + if hasEscape { + tkn = token.STRING + return + } + self.insertSemicolon = true + tkn = token.NULL + return + } + + case token.KEYWORD: + if hasEscape { + tkn = token.STRING + return + } + tkn = token.KEYWORD + if strict { + // TODO If strict and in strict mode, then this is not a break + break + } + return + + case + token.THIS, + token.BREAK, + token.THROW, // A newline after a throw is not allowed, but we need to detect it + token.RETURN, + token.CONTINUE, + token.DEBUGGER: + if hasEscape { + tkn = token.STRING + return + } + self.insertSemicolon = true + return + + default: + if hasEscape { + tkn = token.STRING + } + return + + } + } + self.insertSemicolon = true + tkn = token.IDENTIFIER + return + case '0' <= chr && chr <= '9': + self.insertSemicolon = true + tkn, literal = self.scanNumericLiteral(false) + return + default: + self.read() + switch chr { + case -1: + if self.insertSemicolon { + self.insertSemicolon = false + self.implicitSemicolon = true + } + tkn = token.EOF + case '\r', '\n', '\u2028', '\u2029': + self.insertSemicolon = false + self.implicitSemicolon = true + continue + case ':': + tkn = token.COLON + case '.': + if digitValue(self.chr) < 10 { + insertSemicolon = true + tkn, literal = self.scanNumericLiteral(true) + } else { + if self.chr == '.' { + self.read() + if self.chr == '.' 
{ + self.read() + tkn = token.ELLIPSIS + } else { + tkn = token.ILLEGAL + } + } else { + tkn = token.PERIOD + } + } + case ',': + tkn = token.COMMA + case ';': + tkn = token.SEMICOLON + case '(': + tkn = token.LEFT_PARENTHESIS + case ')': + tkn = token.RIGHT_PARENTHESIS + insertSemicolon = true + case '[': + tkn = token.LEFT_BRACKET + case ']': + tkn = token.RIGHT_BRACKET + insertSemicolon = true + case '{': + tkn = token.LEFT_BRACE + case '}': + tkn = token.RIGHT_BRACE + insertSemicolon = true + case '+': + tkn = self.switch3(token.PLUS, token.ADD_ASSIGN, '+', token.INCREMENT) + if tkn == token.INCREMENT { + insertSemicolon = true + } + case '-': + tkn = self.switch3(token.MINUS, token.SUBTRACT_ASSIGN, '-', token.DECREMENT) + if tkn == token.DECREMENT { + insertSemicolon = true + } + case '*': + tkn = self.switch2(token.MULTIPLY, token.MULTIPLY_ASSIGN) + case '/': + if self.chr == '/' { + self.skipSingleLineComment() + continue + } else if self.chr == '*' { + self.skipMultiLineComment() + continue + } else { + // Could be division, could be RegExp literal + tkn = self.switch2(token.SLASH, token.QUOTIENT_ASSIGN) + insertSemicolon = true + } + case '%': + tkn = self.switch2(token.REMAINDER, token.REMAINDER_ASSIGN) + case '^': + tkn = self.switch2(token.EXCLUSIVE_OR, token.EXCLUSIVE_OR_ASSIGN) + case '<': + tkn = self.switch4(token.LESS, token.LESS_OR_EQUAL, '<', token.SHIFT_LEFT, token.SHIFT_LEFT_ASSIGN) + case '>': + tkn = self.switch6(token.GREATER, token.GREATER_OR_EQUAL, '>', token.SHIFT_RIGHT, token.SHIFT_RIGHT_ASSIGN, '>', token.UNSIGNED_SHIFT_RIGHT, token.UNSIGNED_SHIFT_RIGHT_ASSIGN) + case '=': + if self.chr == '>' { + self.read() + tkn = token.ARROW + } else { + tkn = self.switch2(token.ASSIGN, token.EQUAL) + if tkn == token.EQUAL && self.chr == '=' { + self.read() + tkn = token.STRICT_EQUAL + } + } + case '!': + tkn = self.switch2(token.NOT, token.NOT_EQUAL) + if tkn == token.NOT_EQUAL && self.chr == '=' { + self.read() + tkn = token.STRICT_NOT_EQUAL + } + 
case '&': + tkn = self.switch3(token.AND, token.AND_ASSIGN, '&', token.LOGICAL_AND) + case '|': + tkn = self.switch3(token.OR, token.OR_ASSIGN, '|', token.LOGICAL_OR) + case '~': + tkn = token.BITWISE_NOT + case '?': + tkn = token.QUESTION_MARK + case '"', '\'': + insertSemicolon = true + tkn = token.STRING + var err error + literal, parsedLiteral, err = self.scanString(self.chrOffset-1, true) + if err != nil { + tkn = token.ILLEGAL + } + default: + self.errorUnexpected(idx, chr) + tkn = token.ILLEGAL + } + } + self.insertSemicolon = insertSemicolon + return + } +} + +func (self *_parser) switch2(tkn0, tkn1 token.Token) token.Token { + if self.chr == '=' { + self.read() + return tkn1 + } + return tkn0 +} + +func (self *_parser) switch3(tkn0, tkn1 token.Token, chr2 rune, tkn2 token.Token) token.Token { + if self.chr == '=' { + self.read() + return tkn1 + } + if self.chr == chr2 { + self.read() + return tkn2 + } + return tkn0 +} + +func (self *_parser) switch4(tkn0, tkn1 token.Token, chr2 rune, tkn2, tkn3 token.Token) token.Token { + if self.chr == '=' { + self.read() + return tkn1 + } + if self.chr == chr2 { + self.read() + if self.chr == '=' { + self.read() + return tkn3 + } + return tkn2 + } + return tkn0 +} + +func (self *_parser) switch6(tkn0, tkn1 token.Token, chr2 rune, tkn2, tkn3 token.Token, chr3 rune, tkn4, tkn5 token.Token) token.Token { + if self.chr == '=' { + self.read() + return tkn1 + } + if self.chr == chr2 { + self.read() + if self.chr == '=' { + self.read() + return tkn3 + } + if self.chr == chr3 { + self.read() + if self.chr == '=' { + self.read() + return tkn5 + } + return tkn4 + } + return tkn2 + } + return tkn0 +} + +func (self *_parser) _peek() rune { + if self.offset < self.length { + return rune(self.str[self.offset]) + } + return -1 +} + +func (self *_parser) read() { + if self.offset < self.length { + self.chrOffset = self.offset + chr, width := rune(self.str[self.offset]), 1 + if chr >= utf8.RuneSelf { // !ASCII + chr, width = 
utf8.DecodeRuneInString(self.str[self.offset:]) + if chr == utf8.RuneError && width == 1 { + self.error(self.chrOffset, "Invalid UTF-8 character") + } + } + self.offset += width + self.chr = chr + } else { + self.chrOffset = self.length + self.chr = -1 // EOF + } +} + +func (self *_parser) skipSingleLineComment() { + for self.chr != -1 { + self.read() + if isLineTerminator(self.chr) { + return + } + } +} + +func (self *_parser) skipMultiLineComment() { + self.read() + for self.chr >= 0 { + chr := self.chr + self.read() + if chr == '*' && self.chr == '/' { + self.read() + return + } + } + + self.errorUnexpected(0, self.chr) +} + +func (self *_parser) skipWhiteSpace() { + for { + switch self.chr { + case ' ', '\t', '\f', '\v', '\u00a0', '\ufeff': + self.read() + continue + case '\r': + if self._peek() == '\n' { + self.read() + } + fallthrough + case '\u2028', '\u2029', '\n': + if self.insertSemicolon { + return + } + self.read() + continue + } + if self.chr >= utf8.RuneSelf { + if unicode.IsSpace(self.chr) { + self.read() + continue + } + } + break + } +} + +func (self *_parser) skipLineWhiteSpace() { + for isLineWhiteSpace(self.chr) { + self.read() + } +} + +func (self *_parser) scanMantissa(base int) { + for digitValue(self.chr) < base { + self.read() + } +} + +func (self *_parser) scanEscape(quote rune) (int, bool) { + + var length, base uint32 + chr := self.chr + switch chr { + case '0', '1', '2', '3', '4', '5', '6', '7': + // Octal: + length, base = 3, 8 + case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"', '\'': + self.read() + return 1, false + case '\r': + self.read() + if self.chr == '\n' { + self.read() + return 2, false + } + return 1, false + case '\n': + self.read() + return 1, false + case '\u2028', '\u2029': + self.read() + return 1, true + case 'x': + self.read() + length, base = 2, 16 + case 'u': + self.read() + length, base = 4, 16 + default: + self.read() // Always make progress + } + + if length > 0 { + var value uint32 + for ; length > 0 && 
self.chr != quote && self.chr >= 0; length-- { + digit := uint32(digitValue(self.chr)) + if digit >= base { + break + } + value = value*base + digit + self.read() + } + chr = rune(value) + } + if chr >= utf8.RuneSelf { + if chr > 0xFFFF { + return 2, true + } + return 1, true + } + return 1, false +} + +func (self *_parser) scanString(offset int, parse bool) (literal string, parsed unistring.String, err error) { + // " ' / + quote := rune(self.str[offset]) + length := 0 + isUnicode := false + for self.chr != quote { + chr := self.chr + if chr == '\n' || chr == '\r' || chr == '\u2028' || chr == '\u2029' || chr < 0 { + goto newline + } + self.read() + if chr == '\\' { + if self.chr == '\n' || self.chr == '\r' || self.chr == '\u2028' || self.chr == '\u2029' || self.chr < 0 { + if quote == '/' { + goto newline + } + self.scanNewline() + } else { + l, u := self.scanEscape(quote) + length += l + if u { + isUnicode = true + } + } + continue + } else if chr == '[' && quote == '/' { + // Allow a slash (/) in a bracket character class ([...]) + // TODO Fix this, this is hacky... 
+ quote = -1 + } else if chr == ']' && quote == -1 { + quote = '/' + } + if chr >= utf8.RuneSelf { + isUnicode = true + if chr > 0xFFFF { + length++ + } + } + length++ + } + + // " ' / + self.read() + literal = self.str[offset:self.chrOffset] + if parse { + parsed, err = parseStringLiteral1(literal[1:len(literal)-1], length, isUnicode) + } + return + +newline: + self.scanNewline() + errStr := "String not terminated" + if quote == '/' { + errStr = "Invalid regular expression: missing /" + self.error(self.idxOf(offset), errStr) + } + return "", "", errors.New(errStr) +} + +func (self *_parser) scanNewline() { + if self.chr == '\r' { + self.read() + if self.chr != '\n' { + return + } + } + self.read() +} + +func hex2decimal(chr byte) (value rune, ok bool) { + { + chr := rune(chr) + switch { + case '0' <= chr && chr <= '9': + return chr - '0', true + case 'a' <= chr && chr <= 'f': + return chr - 'a' + 10, true + case 'A' <= chr && chr <= 'F': + return chr - 'A' + 10, true + } + return + } +} + +func parseNumberLiteral(literal string) (value interface{}, err error) { + // TODO Is Uint okay? What about -MAX_UINT + value, err = strconv.ParseInt(literal, 0, 64) + if err == nil { + return + } + + parseIntErr := err // Save this first error, just in case + + value, err = strconv.ParseFloat(literal, 64) + if err == nil { + return + } else if err.(*strconv.NumError).Err == strconv.ErrRange { + // Infinity, etc. + return value, nil + } + + err = parseIntErr + + if err.(*strconv.NumError).Err == strconv.ErrRange { + if len(literal) > 2 && literal[0] == '0' && (literal[1] == 'X' || literal[1] == 'x') { + // Could just be a very large number (e.g. 
0x8000000000000000) + var value float64 + literal = literal[2:] + for _, chr := range literal { + digit := digitValue(chr) + if digit >= 16 { + goto error + } + value = value*16 + float64(digit) + } + return value, nil + } + } + +error: + return nil, errors.New("Illegal numeric literal") +} + +func parseStringLiteral1(literal string, length int, unicode bool) (unistring.String, error) { + var sb strings.Builder + var chars []uint16 + if unicode { + chars = make([]uint16, 1, length+1) + chars[0] = unistring.BOM + } else { + sb.Grow(length) + } + str := literal + for len(str) > 0 { + switch chr := str[0]; { + // We do not explicitly handle the case of the quote + // value, which can be: " ' / + // This assumes we're already passed a partially well-formed literal + case chr >= utf8.RuneSelf: + chr, size := utf8.DecodeRuneInString(str) + if chr <= 0xFFFF { + chars = append(chars, uint16(chr)) + } else { + first, second := utf16.EncodeRune(chr) + chars = append(chars, uint16(first), uint16(second)) + } + str = str[size:] + continue + case chr != '\\': + if unicode { + chars = append(chars, uint16(chr)) + } else { + sb.WriteByte(chr) + } + str = str[1:] + continue + } + + if len(str) <= 1 { + panic("len(str) <= 1") + } + chr := str[1] + var value rune + if chr >= utf8.RuneSelf { + str = str[1:] + var size int + value, size = utf8.DecodeRuneInString(str) + str = str[size:] // \ + + if value == '\u2028' || value == '\u2029' { + continue + } + } else { + str = str[2:] // \ + switch chr { + case 'b': + value = '\b' + case 'f': + value = '\f' + case 'n': + value = '\n' + case 'r': + value = '\r' + case 't': + value = '\t' + case 'v': + value = '\v' + case 'x', 'u': + size := 0 + switch chr { + case 'x': + size = 2 + case 'u': + size = 4 + } + if len(str) < size { + return "", fmt.Errorf("invalid escape: \\%s: len(%q) != %d", string(chr), str, size) + } + for j := 0; j < size; j++ { + decimal, ok := hex2decimal(str[j]) + if !ok { + return "", fmt.Errorf("invalid escape: \\%s: 
%q", string(chr), str[:size]) + } + value = value<<4 | decimal + } + str = str[size:] + if chr == 'x' { + break + } + if value > utf8.MaxRune { + panic("value > utf8.MaxRune") + } + case '0': + if len(str) == 0 || '0' > str[0] || str[0] > '7' { + value = 0 + break + } + fallthrough + case '1', '2', '3', '4', '5', '6', '7': + // TODO strict + value = rune(chr) - '0' + j := 0 + for ; j < 2; j++ { + if len(str) < j+1 { + break + } + chr := str[j] + if '0' > chr || chr > '7' { + break + } + decimal := rune(str[j]) - '0' + value = (value << 3) | decimal + } + str = str[j:] + case '\\': + value = '\\' + case '\'', '"': + value = rune(chr) + case '\r': + if len(str) > 0 { + if str[0] == '\n' { + str = str[1:] + } + } + fallthrough + case '\n': + continue + default: + value = rune(chr) + } + } + if unicode { + if value <= 0xFFFF { + chars = append(chars, uint16(value)) + } else { + first, second := utf16.EncodeRune(value) + chars = append(chars, uint16(first), uint16(second)) + } + } else { + if value >= utf8.RuneSelf { + return "", fmt.Errorf("Unexpected unicode character") + } + sb.WriteByte(byte(value)) + } + } + + if unicode { + if len(chars) != length+1 { + panic(fmt.Errorf("unexpected unicode length while parsing '%s'", literal)) + } + return unistring.FromUtf16(chars), nil + } + if sb.Len() != length { + panic(fmt.Errorf("unexpected length while parsing '%s'", literal)) + } + return unistring.String(sb.String()), nil +} + +func (self *_parser) scanNumericLiteral(decimalPoint bool) (token.Token, string) { + + offset := self.chrOffset + tkn := token.NUMBER + + if decimalPoint { + offset-- + self.scanMantissa(10) + goto exponent + } + + if self.chr == '0' { + offset := self.chrOffset + self.read() + if self.chr == 'x' || self.chr == 'X' { + // Hexadecimal + self.read() + if isDigit(self.chr, 16) { + self.read() + } else { + return token.ILLEGAL, self.str[offset:self.chrOffset] + } + self.scanMantissa(16) + + if self.chrOffset-offset <= 2 { + // Only "0x" or "0X" + 
self.error(0, "Illegal hexadecimal number") + } + + goto hexadecimal + } else if self.chr == '.' { + // Float + goto float + } else { + // Octal, Float + if self.chr == 'e' || self.chr == 'E' { + goto exponent + } + self.scanMantissa(8) + if self.chr == '8' || self.chr == '9' { + return token.ILLEGAL, self.str[offset:self.chrOffset] + } + goto octal + } + } + + self.scanMantissa(10) + +float: + if self.chr == '.' { + self.read() + self.scanMantissa(10) + } + +exponent: + if self.chr == 'e' || self.chr == 'E' { + self.read() + if self.chr == '-' || self.chr == '+' { + self.read() + } + if isDecimalDigit(self.chr) { + self.read() + self.scanMantissa(10) + } else { + return token.ILLEGAL, self.str[offset:self.chrOffset] + } + } + +hexadecimal: +octal: + if isIdentifierStart(self.chr) || isDecimalDigit(self.chr) { + return token.ILLEGAL, self.str[offset:self.chrOffset] + } + + return tkn, self.str[offset:self.chrOffset] +} diff --git a/vendor/github.com/dop251/goja/parser/parser.go b/vendor/github.com/dop251/goja/parser/parser.go new file mode 100644 index 0000000000..b1bc74bb19 --- /dev/null +++ b/vendor/github.com/dop251/goja/parser/parser.go @@ -0,0 +1,269 @@ +/* +Package parser implements a parser for JavaScript. + + import ( + "github.com/dop251/goja/parser" + ) + +Parse and return an AST + + filename := "" // A filename is optional + src := ` + // Sample xyzzy example + (function(){ + if (3.14159 > 0) { + console.log("Hello, World."); + return; + } + + var xyzzy = NaN; + console.log("Nothing happens."); + return xyzzy; + })(); + ` + + // Parse some JavaScript, yielding a *ast.Program and/or an ErrorList + program, err := parser.ParseFile(nil, filename, src, 0) + +Warning + +The parser and AST interfaces are still works-in-progress (particularly where +node types are concerned) and may change in the future. 
+ +*/ +package parser + +import ( + "bytes" + "errors" + "io" + "io/ioutil" + + "github.com/dop251/goja/ast" + "github.com/dop251/goja/file" + "github.com/dop251/goja/token" + "github.com/dop251/goja/unistring" +) + +// A Mode value is a set of flags (or 0). They control optional parser functionality. +type Mode uint + +const ( + IgnoreRegExpErrors Mode = 1 << iota // Ignore RegExp compatibility errors (allow backtracking) +) + +type options struct { + disableSourceMaps bool + sourceMapLoader func(path string) ([]byte, error) +} + +// Option represents one of the options for the parser to use in the Parse methods. Currently supported are: +// WithDisableSourceMaps and WithSourceMapLoader. +type Option func(*options) + +// WithDisableSourceMaps is an option to disable source maps support. May save a bit of time when source maps +// are not in use. +func WithDisableSourceMaps(opts *options) { + opts.disableSourceMaps = true +} + +// WithSourceMapLoader is an option to set a custom source map loader. The loader will be given a path or a +// URL from the sourceMappingURL. If sourceMappingURL is not absolute it is resolved relatively to the name +// of the file being parsed. Any error returned by the loader will fail the parsing. +// Note that setting this to nil does not disable source map support, there is a default loader which reads +// from the filesystem. Use WithDisableSourceMaps to disable source map support. 
+func WithSourceMapLoader(loader func(path string) ([]byte, error)) Option { + return func(opts *options) { + opts.sourceMapLoader = loader + } +} + +type _parser struct { + str string + length int + base int + + chr rune // The current character + chrOffset int // The offset of current character + offset int // The offset after current character (may be greater than 1) + + idx file.Idx // The index of token + token token.Token // The token + literal string // The literal of the token, if any + parsedLiteral unistring.String + + scope *_scope + insertSemicolon bool // If we see a newline, then insert an implicit semicolon + implicitSemicolon bool // An implicit semicolon exists + + errors ErrorList + + recover struct { + // Scratch when trying to seek to the next statement, etc. + idx file.Idx + count int + } + + mode Mode + opts options + + file *file.File +} + +func _newParser(filename, src string, base int, opts ...Option) *_parser { + p := &_parser{ + chr: ' ', // This is set so we can start scanning by skipping whitespace + str: src, + length: len(src), + base: base, + file: file.NewFile(filename, src, base), + } + for _, opt := range opts { + opt(&p.opts) + } + return p +} + +func newParser(filename, src string) *_parser { + return _newParser(filename, src, 1) +} + +func ReadSource(filename string, src interface{}) ([]byte, error) { + if src != nil { + switch src := src.(type) { + case string: + return []byte(src), nil + case []byte: + return src, nil + case *bytes.Buffer: + if src != nil { + return src.Bytes(), nil + } + case io.Reader: + var bfr bytes.Buffer + if _, err := io.Copy(&bfr, src); err != nil { + return nil, err + } + return bfr.Bytes(), nil + } + return nil, errors.New("invalid source") + } + return ioutil.ReadFile(filename) +} + +// ParseFile parses the source code of a single JavaScript/ECMAScript source file and returns +// the corresponding ast.Program node. +// +// If fileSet == nil, ParseFile parses source without a FileSet. 
+// If fileSet != nil, ParseFile first adds filename and src to fileSet. +// +// The filename argument is optional and is used for labelling errors, etc. +// +// src may be a string, a byte slice, a bytes.Buffer, or an io.Reader, but it MUST always be in UTF-8. +// +// // Parse some JavaScript, yielding a *ast.Program and/or an ErrorList +// program, err := parser.ParseFile(nil, "", `if (abc > 1) {}`, 0) +// +func ParseFile(fileSet *file.FileSet, filename string, src interface{}, mode Mode, options ...Option) (*ast.Program, error) { + str, err := ReadSource(filename, src) + if err != nil { + return nil, err + } + { + str := string(str) + + base := 1 + if fileSet != nil { + base = fileSet.AddFile(filename, str) + } + + parser := _newParser(filename, str, base, options...) + parser.mode = mode + return parser.parse() + } +} + +// ParseFunction parses a given parameter list and body as a function and returns the +// corresponding ast.FunctionLiteral node. +// +// The parameter list, if any, should be a comma-separated list of identifiers. +// +func ParseFunction(parameterList, body string, options ...Option) (*ast.FunctionLiteral, error) { + + src := "(function(" + parameterList + ") {\n" + body + "\n})" + + parser := _newParser("", src, 1, options...) 
+ program, err := parser.parse() + if err != nil { + return nil, err + } + + return program.Body[0].(*ast.ExpressionStatement).Expression.(*ast.FunctionLiteral), nil +} + +func (self *_parser) slice(idx0, idx1 file.Idx) string { + from := int(idx0) - self.base + to := int(idx1) - self.base + if from >= 0 && to <= len(self.str) { + return self.str[from:to] + } + + return "" +} + +func (self *_parser) parse() (*ast.Program, error) { + self.next() + program := self.parseProgram() + if false { + self.errors.Sort() + } + return program, self.errors.Err() +} + +func (self *_parser) next() { + self.token, self.literal, self.parsedLiteral, self.idx = self.scan() +} + +func (self *_parser) optionalSemicolon() { + if self.token == token.SEMICOLON { + self.next() + return + } + + if self.implicitSemicolon { + self.implicitSemicolon = false + return + } + + if self.token != token.EOF && self.token != token.RIGHT_BRACE { + self.expect(token.SEMICOLON) + } +} + +func (self *_parser) semicolon() { + if self.token != token.RIGHT_PARENTHESIS && self.token != token.RIGHT_BRACE { + if self.implicitSemicolon { + self.implicitSemicolon = false + return + } + + self.expect(token.SEMICOLON) + } +} + +func (self *_parser) idxOf(offset int) file.Idx { + return file.Idx(self.base + offset) +} + +func (self *_parser) expect(value token.Token) file.Idx { + idx := self.idx + if self.token != value { + self.errorUnexpectedToken(self.token) + } + self.next() + return idx +} + +func (self *_parser) position(idx file.Idx) file.Position { + return self.file.Position(int(idx) - self.base) +} diff --git a/vendor/github.com/dop251/goja/parser/regexp.go b/vendor/github.com/dop251/goja/parser/regexp.go new file mode 100644 index 0000000000..12f0cb8b3a --- /dev/null +++ b/vendor/github.com/dop251/goja/parser/regexp.go @@ -0,0 +1,447 @@ +package parser + +import ( + "fmt" + "strconv" + "strings" + "unicode/utf8" +) + +const ( + WhitespaceChars = " 
\f\n\r\t\v\u00a0\u1680\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200a\u2028\u2029\u202f\u205f\u3000\ufeff" +) + +type regexpParseError struct { + offset int + err string +} + +type RegexpErrorIncompatible struct { + regexpParseError +} +type RegexpSyntaxError struct { + regexpParseError +} + +func (s regexpParseError) Error() string { + return s.err +} + +type _RegExp_parser struct { + str string + length int + + chr rune // The current character + chrOffset int // The offset of current character + offset int // The offset after current character (may be greater than 1) + + err error + + goRegexp strings.Builder + passOffset int +} + +// TransformRegExp transforms a JavaScript pattern into a Go "regexp" pattern. +// +// re2 (Go) cannot do backtracking, so the presence of a lookahead (?=) (?!) or +// backreference (\1, \2, ...) will cause an error. +// +// re2 (Go) has a different definition for \s: [\t\n\f\r ]. +// The JavaScript definition, on the other hand, also includes \v, Unicode "Separator, Space", etc. +// +// If the pattern is valid, but incompatible (contains a lookahead or backreference), +// then this function returns an empty string an error of type RegexpErrorIncompatible. +// +// If the pattern is invalid (not valid even in JavaScript), then this function +// returns an empty string and a generic error. 
+func TransformRegExp(pattern string) (transformed string, err error) { + + if pattern == "" { + return "", nil + } + + parser := _RegExp_parser{ + str: pattern, + length: len(pattern), + } + err = parser.parse() + if err != nil { + return "", err + } + + return parser.ResultString(), nil +} + +func (self *_RegExp_parser) ResultString() string { + if self.passOffset != -1 { + return self.str[:self.passOffset] + } + return self.goRegexp.String() +} + +func (self *_RegExp_parser) parse() (err error) { + self.read() // Pull in the first character + self.scan() + return self.err +} + +func (self *_RegExp_parser) read() { + if self.offset < self.length { + self.chrOffset = self.offset + chr, width := rune(self.str[self.offset]), 1 + if chr >= utf8.RuneSelf { // !ASCII + chr, width = utf8.DecodeRuneInString(self.str[self.offset:]) + if chr == utf8.RuneError && width == 1 { + self.error(true, "Invalid UTF-8 character") + return + } + } + self.offset += width + self.chr = chr + } else { + self.chrOffset = self.length + self.chr = -1 // EOF + } +} + +func (self *_RegExp_parser) stopPassing() { + self.goRegexp.Grow(3 * len(self.str) / 2) + self.goRegexp.WriteString(self.str[:self.passOffset]) + self.passOffset = -1 +} + +func (self *_RegExp_parser) write(p []byte) { + if self.passOffset != -1 { + self.stopPassing() + } + self.goRegexp.Write(p) +} + +func (self *_RegExp_parser) writeByte(b byte) { + if self.passOffset != -1 { + self.stopPassing() + } + self.goRegexp.WriteByte(b) +} + +func (self *_RegExp_parser) writeString(s string) { + if self.passOffset != -1 { + self.stopPassing() + } + self.goRegexp.WriteString(s) +} + +func (self *_RegExp_parser) scan() { + for self.chr != -1 { + switch self.chr { + case '\\': + self.read() + self.scanEscape(false) + case '(': + self.pass() + self.scanGroup() + case '[': + self.scanBracket() + case ')': + self.error(true, "Unmatched ')'") + return + case '.': + self.writeString("[^\\r\\n]") + self.read() + default: + self.pass() + } + } 
+} + +// (...) +func (self *_RegExp_parser) scanGroup() { + str := self.str[self.chrOffset:] + if len(str) > 1 { // A possibility of (?= or (?! + if str[0] == '?' { + ch := str[1] + switch { + case ch == '=' || ch == '!': + self.error(false, "re2: Invalid (%s) ", self.str[self.chrOffset:self.chrOffset+2]) + return + case ch == '<': + self.error(false, "re2: Invalid (%s) ", self.str[self.chrOffset:self.chrOffset+2]) + return + case ch != ':': + self.error(true, "Invalid group") + return + } + } + } + for self.chr != -1 && self.chr != ')' { + switch self.chr { + case '\\': + self.read() + self.scanEscape(false) + case '(': + self.pass() + self.scanGroup() + case '[': + self.scanBracket() + case '.': + self.writeString("[^\\r\\n]") + self.read() + default: + self.pass() + continue + } + } + if self.chr != ')' { + self.error(true, "Unterminated group") + return + } + self.pass() +} + +// [...] +func (self *_RegExp_parser) scanBracket() { + str := self.str[self.chrOffset:] + if strings.HasPrefix(str, "[]") { + // [] -- Empty character class + self.writeString("[^\u0000-\U0001FFFF]") + self.offset += 1 + self.read() + return + } + + if strings.HasPrefix(str, "[^]") { + self.writeString("[\u0000-\U0001FFFF]") + self.offset += 2 + self.read() + return + } + + self.pass() + for self.chr != -1 { + if self.chr == ']' { + break + } else if self.chr == '\\' { + self.read() + self.scanEscape(true) + continue + } + self.pass() + } + if self.chr != ']' { + self.error(true, "Unterminated character class") + return + } + self.pass() +} + +// \... 
+func (self *_RegExp_parser) scanEscape(inClass bool) { + offset := self.chrOffset + + var length, base uint32 + switch self.chr { + + case '0', '1', '2', '3', '4', '5', '6', '7': + var value int64 + size := 0 + for { + digit := int64(digitValue(self.chr)) + if digit >= 8 { + // Not a valid digit + break + } + value = value*8 + digit + self.read() + size += 1 + } + if size == 1 { // The number of characters read + if value != 0 { + // An invalid backreference + self.error(false, "re2: Invalid \\%d ", value) + return + } + self.passString(offset-1, self.chrOffset) + return + } + tmp := []byte{'\\', 'x', '0', 0} + if value >= 16 { + tmp = tmp[0:2] + } else { + tmp = tmp[0:3] + } + tmp = strconv.AppendInt(tmp, value, 16) + self.write(tmp) + return + + case '8', '9': + self.read() + self.error(false, "re2: Invalid \\%s ", self.str[offset:self.chrOffset]) + return + + case 'x': + self.read() + length, base = 2, 16 + + case 'u': + self.read() + length, base = 4, 16 + + case 'b': + if inClass { + self.write([]byte{'\\', 'x', '0', '8'}) + self.read() + return + } + fallthrough + + case 'B': + fallthrough + + case 'd', 'D', 'w', 'W': + // This is slightly broken, because ECMAScript + // includes \v in \s, \S, while re2 does not + fallthrough + + case '\\': + fallthrough + + case 'f', 'n', 'r', 't', 'v': + self.passString(offset-1, self.offset) + self.read() + return + + case 'c': + self.read() + var value int64 + if 'a' <= self.chr && self.chr <= 'z' { + value = int64(self.chr - 'a' + 1) + } else if 'A' <= self.chr && self.chr <= 'Z' { + value = int64(self.chr - 'A' + 1) + } else { + self.writeByte('c') + return + } + tmp := []byte{'\\', 'x', '0', 0} + if value >= 16 { + tmp = tmp[0:2] + } else { + tmp = tmp[0:3] + } + tmp = strconv.AppendInt(tmp, value, 16) + self.write(tmp) + self.read() + return + case 's': + if inClass { + self.writeString(WhitespaceChars) + } else { + self.writeString("[" + WhitespaceChars + "]") + } + self.read() + return + case 'S': + if inClass { + 
self.error(false, "S in class") + return + } else { + self.writeString("[^" + WhitespaceChars + "]") + } + self.read() + return + default: + // $ is an identifier character, so we have to have + // a special case for it here + if self.chr == '$' || self.chr < utf8.RuneSelf && !isIdentifierPart(self.chr) { + // A non-identifier character needs escaping + self.passString(offset-1, self.offset) + self.read() + return + } + // Unescape the character for re2 + self.pass() + return + } + + // Otherwise, we're a \u.... or \x... + valueOffset := self.chrOffset + + var value uint32 + { + length := length + for ; length > 0; length-- { + digit := uint32(digitValue(self.chr)) + if digit >= base { + // Not a valid digit + goto skip + } + value = value*base + digit + self.read() + } + } + + if length == 4 { + self.write([]byte{ + '\\', + 'x', + '{', + self.str[valueOffset+0], + self.str[valueOffset+1], + self.str[valueOffset+2], + self.str[valueOffset+3], + '}', + }) + } else if length == 2 { + self.passString(offset-1, valueOffset+2) + } else { + // Should never, ever get here... 
+ self.error(true, "re2: Illegal branch in scanEscape") + return + } + + return + +skip: + self.passString(offset, self.chrOffset) +} + +func (self *_RegExp_parser) pass() { + if self.passOffset == self.chrOffset { + self.passOffset = self.offset + } else { + if self.passOffset != -1 { + self.stopPassing() + } + if self.chr != -1 { + self.goRegexp.WriteRune(self.chr) + } + } + self.read() +} + +func (self *_RegExp_parser) passString(start, end int) { + if self.passOffset == start { + self.passOffset = end + return + } + if self.passOffset != -1 { + self.stopPassing() + } + self.goRegexp.WriteString(self.str[start:end]) +} + +func (self *_RegExp_parser) error(fatal bool, msg string, msgValues ...interface{}) { + if self.err != nil { + return + } + e := regexpParseError{ + offset: self.offset, + err: fmt.Sprintf(msg, msgValues...), + } + if fatal { + self.err = RegexpSyntaxError{e} + } else { + self.err = RegexpErrorIncompatible{e} + } + self.offset = self.length + self.chr = -1 +} diff --git a/vendor/github.com/dop251/goja/parser/scope.go b/vendor/github.com/dop251/goja/parser/scope.go new file mode 100644 index 0000000000..321dd90ede --- /dev/null +++ b/vendor/github.com/dop251/goja/parser/scope.go @@ -0,0 +1,46 @@ +package parser + +import ( + "github.com/dop251/goja/ast" + "github.com/dop251/goja/unistring" +) + +type _scope struct { + outer *_scope + allowIn bool + allowLet bool + inIteration bool + inSwitch bool + inFunction bool + declarationList []*ast.VariableDeclaration + + labels []unistring.String +} + +func (self *_parser) openScope() { + self.scope = &_scope{ + outer: self.scope, + allowIn: true, + } +} + +func (self *_parser) closeScope() { + self.scope = self.scope.outer +} + +func (self *_scope) declare(declaration *ast.VariableDeclaration) { + self.declarationList = append(self.declarationList, declaration) +} + +func (self *_scope) hasLabel(name unistring.String) bool { + for _, label := range self.labels { + if label == name { + return true + } + 
} + if self.outer != nil && !self.inFunction { + // Crossing a function boundary to look for a label is verboten + return self.outer.hasLabel(name) + } + return false +} diff --git a/vendor/github.com/dop251/goja/parser/statement.go b/vendor/github.com/dop251/goja/parser/statement.go new file mode 100644 index 0000000000..cc3cb0ce2e --- /dev/null +++ b/vendor/github.com/dop251/goja/parser/statement.go @@ -0,0 +1,847 @@ +package parser + +import ( + "encoding/base64" + "fmt" + "github.com/dop251/goja/ast" + "github.com/dop251/goja/file" + "github.com/dop251/goja/token" + "github.com/go-sourcemap/sourcemap" + "io/ioutil" + "net/url" + "path" + "strings" +) + +func (self *_parser) parseBlockStatement() *ast.BlockStatement { + node := &ast.BlockStatement{} + node.LeftBrace = self.expect(token.LEFT_BRACE) + node.List = self.parseStatementList() + node.RightBrace = self.expect(token.RIGHT_BRACE) + + return node +} + +func (self *_parser) parseEmptyStatement() ast.Statement { + idx := self.expect(token.SEMICOLON) + return &ast.EmptyStatement{Semicolon: idx} +} + +func (self *_parser) parseStatementList() (list []ast.Statement) { + for self.token != token.RIGHT_BRACE && self.token != token.EOF { + self.scope.allowLet = true + list = append(list, self.parseStatement()) + } + + return +} + +func (self *_parser) parseStatement() ast.Statement { + + if self.token == token.EOF { + self.errorUnexpectedToken(self.token) + return &ast.BadStatement{From: self.idx, To: self.idx + 1} + } + + switch self.token { + case token.SEMICOLON: + return self.parseEmptyStatement() + case token.LEFT_BRACE: + return self.parseBlockStatement() + case token.IF: + return self.parseIfStatement() + case token.DO: + return self.parseDoWhileStatement() + case token.WHILE: + return self.parseWhileStatement() + case token.FOR: + return self.parseForOrForInStatement() + case token.BREAK: + return self.parseBreakStatement() + case token.CONTINUE: + return self.parseContinueStatement() + case token.DEBUGGER: 
+ return self.parseDebuggerStatement() + case token.WITH: + return self.parseWithStatement() + case token.VAR: + return self.parseVariableStatement() + case token.LET: + tok := self.peek() + if tok == token.LEFT_BRACKET || self.scope.allowLet && (tok == token.IDENTIFIER || tok == token.LET || tok == token.LEFT_BRACE) { + return self.parseLexicalDeclaration(self.token) + } + self.insertSemicolon = true + case token.CONST: + return self.parseLexicalDeclaration(self.token) + case token.FUNCTION: + return &ast.FunctionDeclaration{ + Function: self.parseFunction(true), + } + case token.SWITCH: + return self.parseSwitchStatement() + case token.RETURN: + return self.parseReturnStatement() + case token.THROW: + return self.parseThrowStatement() + case token.TRY: + return self.parseTryStatement() + } + + expression := self.parseExpression() + + if identifier, isIdentifier := expression.(*ast.Identifier); isIdentifier && self.token == token.COLON { + // LabelledStatement + colon := self.idx + self.next() // : + label := identifier.Name + for _, value := range self.scope.labels { + if label == value { + self.error(identifier.Idx0(), "Label '%s' already exists", label) + } + } + self.scope.labels = append(self.scope.labels, label) // Push the label + self.scope.allowLet = false + statement := self.parseStatement() + self.scope.labels = self.scope.labels[:len(self.scope.labels)-1] // Pop the label + return &ast.LabelledStatement{ + Label: identifier, + Colon: colon, + Statement: statement, + } + } + + self.optionalSemicolon() + + return &ast.ExpressionStatement{ + Expression: expression, + } +} + +func (self *_parser) parseTryStatement() ast.Statement { + + node := &ast.TryStatement{ + Try: self.expect(token.TRY), + Body: self.parseBlockStatement(), + } + + if self.token == token.CATCH { + catch := self.idx + self.next() + var parameter ast.BindingTarget + if self.token == token.LEFT_PARENTHESIS { + self.next() + parameter = self.parseBindingTarget() + 
self.expect(token.RIGHT_PARENTHESIS) + } + node.Catch = &ast.CatchStatement{ + Catch: catch, + Parameter: parameter, + Body: self.parseBlockStatement(), + } + } + + if self.token == token.FINALLY { + self.next() + node.Finally = self.parseBlockStatement() + } + + if node.Catch == nil && node.Finally == nil { + self.error(node.Try, "Missing catch or finally after try") + return &ast.BadStatement{From: node.Try, To: node.Body.Idx1()} + } + + return node +} + +func (self *_parser) parseFunctionParameterList() *ast.ParameterList { + opening := self.expect(token.LEFT_PARENTHESIS) + var list []*ast.Binding + var rest ast.Expression + for self.token != token.RIGHT_PARENTHESIS && self.token != token.EOF { + if self.token == token.ELLIPSIS { + self.next() + rest = self.reinterpretAsDestructBindingTarget(self.parseAssignmentExpression()) + break + } + self.parseVariableDeclaration(&list) + if self.token != token.RIGHT_PARENTHESIS { + self.expect(token.COMMA) + } + } + closing := self.expect(token.RIGHT_PARENTHESIS) + + return &ast.ParameterList{ + Opening: opening, + List: list, + Rest: rest, + Closing: closing, + } +} + +func (self *_parser) parseFunction(declaration bool) *ast.FunctionLiteral { + + node := &ast.FunctionLiteral{ + Function: self.expect(token.FUNCTION), + } + + var name *ast.Identifier + if self.token == token.IDENTIFIER { + name = self.parseIdentifier() + } else if declaration { + // Use expect error handling + self.expect(token.IDENTIFIER) + } + node.Name = name + node.ParameterList = self.parseFunctionParameterList() + self.parseFunctionBlock(node) + node.Source = self.slice(node.Idx0(), node.Idx1()) + + return node +} + +func (self *_parser) parseFunctionBlock(node *ast.FunctionLiteral) { + { + self.openScope() + inFunction := self.scope.inFunction + self.scope.inFunction = true + defer func() { + self.scope.inFunction = inFunction + self.closeScope() + }() + node.Body = self.parseBlockStatement() + node.DeclarationList = self.scope.declarationList + } 
+} + +func (self *_parser) parseDebuggerStatement() ast.Statement { + idx := self.expect(token.DEBUGGER) + + node := &ast.DebuggerStatement{ + Debugger: idx, + } + + self.semicolon() + + return node +} + +func (self *_parser) parseReturnStatement() ast.Statement { + idx := self.expect(token.RETURN) + + if !self.scope.inFunction { + self.error(idx, "Illegal return statement") + self.nextStatement() + return &ast.BadStatement{From: idx, To: self.idx} + } + + node := &ast.ReturnStatement{ + Return: idx, + } + + if !self.implicitSemicolon && self.token != token.SEMICOLON && self.token != token.RIGHT_BRACE && self.token != token.EOF { + node.Argument = self.parseExpression() + } + + self.semicolon() + + return node +} + +func (self *_parser) parseThrowStatement() ast.Statement { + idx := self.expect(token.THROW) + + if self.implicitSemicolon { + if self.chr == -1 { // Hackish + self.error(idx, "Unexpected end of input") + } else { + self.error(idx, "Illegal newline after throw") + } + self.nextStatement() + return &ast.BadStatement{From: idx, To: self.idx} + } + + node := &ast.ThrowStatement{ + Argument: self.parseExpression(), + } + + self.semicolon() + + return node +} + +func (self *_parser) parseSwitchStatement() ast.Statement { + self.expect(token.SWITCH) + self.expect(token.LEFT_PARENTHESIS) + node := &ast.SwitchStatement{ + Discriminant: self.parseExpression(), + Default: -1, + } + self.expect(token.RIGHT_PARENTHESIS) + + self.expect(token.LEFT_BRACE) + + inSwitch := self.scope.inSwitch + self.scope.inSwitch = true + defer func() { + self.scope.inSwitch = inSwitch + }() + + for index := 0; self.token != token.EOF; index++ { + if self.token == token.RIGHT_BRACE { + self.next() + break + } + + clause := self.parseCaseStatement() + if clause.Test == nil { + if node.Default != -1 { + self.error(clause.Case, "Already saw a default in switch") + } + node.Default = index + } + node.Body = append(node.Body, clause) + } + + return node +} + +func (self *_parser) 
parseWithStatement() ast.Statement { + self.expect(token.WITH) + self.expect(token.LEFT_PARENTHESIS) + node := &ast.WithStatement{ + Object: self.parseExpression(), + } + self.expect(token.RIGHT_PARENTHESIS) + self.scope.allowLet = false + node.Body = self.parseStatement() + + return node +} + +func (self *_parser) parseCaseStatement() *ast.CaseStatement { + + node := &ast.CaseStatement{ + Case: self.idx, + } + if self.token == token.DEFAULT { + self.next() + } else { + self.expect(token.CASE) + node.Test = self.parseExpression() + } + self.expect(token.COLON) + + for { + if self.token == token.EOF || + self.token == token.RIGHT_BRACE || + self.token == token.CASE || + self.token == token.DEFAULT { + break + } + node.Consequent = append(node.Consequent, self.parseStatement()) + + } + + return node +} + +func (self *_parser) parseIterationStatement() ast.Statement { + inIteration := self.scope.inIteration + self.scope.inIteration = true + defer func() { + self.scope.inIteration = inIteration + }() + self.scope.allowLet = false + return self.parseStatement() +} + +func (self *_parser) parseForIn(idx file.Idx, into ast.ForInto) *ast.ForInStatement { + + // Already have consumed " in" + + source := self.parseExpression() + self.expect(token.RIGHT_PARENTHESIS) + + return &ast.ForInStatement{ + For: idx, + Into: into, + Source: source, + Body: self.parseIterationStatement(), + } +} + +func (self *_parser) parseForOf(idx file.Idx, into ast.ForInto) *ast.ForOfStatement { + + // Already have consumed " of" + + source := self.parseAssignmentExpression() + self.expect(token.RIGHT_PARENTHESIS) + + return &ast.ForOfStatement{ + For: idx, + Into: into, + Source: source, + Body: self.parseIterationStatement(), + } +} + +func (self *_parser) parseFor(idx file.Idx, initializer ast.ForLoopInitializer) *ast.ForStatement { + + // Already have consumed " ;" + + var test, update ast.Expression + + if self.token != token.SEMICOLON { + test = self.parseExpression() + } + 
self.expect(token.SEMICOLON) + + if self.token != token.RIGHT_PARENTHESIS { + update = self.parseExpression() + } + self.expect(token.RIGHT_PARENTHESIS) + + return &ast.ForStatement{ + For: idx, + Initializer: initializer, + Test: test, + Update: update, + Body: self.parseIterationStatement(), + } +} + +func (self *_parser) parseForOrForInStatement() ast.Statement { + idx := self.expect(token.FOR) + self.expect(token.LEFT_PARENTHESIS) + + var initializer ast.ForLoopInitializer + + forIn := false + forOf := false + var into ast.ForInto + if self.token != token.SEMICOLON { + + allowIn := self.scope.allowIn + self.scope.allowIn = false + tok := self.token + if tok == token.LET { + switch self.peek() { + case token.IDENTIFIER, token.LEFT_BRACKET, token.LEFT_BRACE: + default: + tok = token.IDENTIFIER + } + } + if tok == token.VAR || tok == token.LET || tok == token.CONST { + idx := self.idx + self.next() + var list []*ast.Binding + if tok == token.VAR { + list = self.parseVarDeclarationList(idx) + } else { + list = self.parseVariableDeclarationList() + } + if len(list) == 1 { + if self.token == token.IN { + self.next() // in + forIn = true + } else if self.token == token.IDENTIFIER && self.literal == "of" { + self.next() + forOf = true + } + } + if forIn || forOf { + if tok == token.VAR { + into = &ast.ForIntoVar{ + Binding: list[0], + } + } else { + if list[0].Initializer != nil { + self.error(list[0].Initializer.Idx0(), "for-in loop variable declaration may not have an initializer") + } + into = &ast.ForDeclaration{ + Idx: idx, + IsConst: tok == token.CONST, + Target: list[0].Target, + } + } + } else { + self.ensurePatternInit(list) + if tok == token.VAR { + initializer = &ast.ForLoopInitializerVarDeclList{ + List: list, + } + } else { + initializer = &ast.ForLoopInitializerLexicalDecl{ + LexicalDeclaration: ast.LexicalDeclaration{ + Idx: idx, + Token: tok, + List: list, + }, + } + } + } + } else { + expr := self.parseExpression() + if self.token == token.IN { + 
self.next() + forIn = true + } else if self.token == token.IDENTIFIER && self.literal == "of" { + self.next() + forOf = true + } + if forIn || forOf { + switch e := expr.(type) { + case *ast.Identifier, *ast.DotExpression, *ast.BracketExpression, *ast.Binding: + // These are all acceptable + case *ast.ObjectLiteral: + expr = self.reinterpretAsObjectAssignmentPattern(e) + case *ast.ArrayLiteral: + expr = self.reinterpretAsArrayAssignmentPattern(e) + default: + self.error(idx, "Invalid left-hand side in for-in or for-of") + self.nextStatement() + return &ast.BadStatement{From: idx, To: self.idx} + } + into = &ast.ForIntoExpression{ + Expression: expr, + } + } else { + initializer = &ast.ForLoopInitializerExpression{ + Expression: expr, + } + } + } + self.scope.allowIn = allowIn + } + + if forIn { + return self.parseForIn(idx, into) + } + if forOf { + return self.parseForOf(idx, into) + } + + self.expect(token.SEMICOLON) + return self.parseFor(idx, initializer) +} + +func (self *_parser) ensurePatternInit(list []*ast.Binding) { + for _, item := range list { + if _, ok := item.Target.(ast.Pattern); ok { + if item.Initializer == nil { + self.error(item.Idx1(), "Missing initializer in destructuring declaration") + break + } + } + } +} + +func (self *_parser) parseVariableStatement() *ast.VariableStatement { + + idx := self.expect(token.VAR) + + list := self.parseVarDeclarationList(idx) + self.ensurePatternInit(list) + self.semicolon() + + return &ast.VariableStatement{ + Var: idx, + List: list, + } +} + +func (self *_parser) parseLexicalDeclaration(tok token.Token) *ast.LexicalDeclaration { + idx := self.expect(tok) + if !self.scope.allowLet { + self.error(idx, "Lexical declaration cannot appear in a single-statement context") + } + + list := self.parseVariableDeclarationList() + self.ensurePatternInit(list) + self.semicolon() + + return &ast.LexicalDeclaration{ + Idx: idx, + Token: tok, + List: list, + } +} + +func (self *_parser) parseDoWhileStatement() ast.Statement { 
+ inIteration := self.scope.inIteration + self.scope.inIteration = true + defer func() { + self.scope.inIteration = inIteration + }() + + self.expect(token.DO) + node := &ast.DoWhileStatement{} + if self.token == token.LEFT_BRACE { + node.Body = self.parseBlockStatement() + } else { + self.scope.allowLet = false + node.Body = self.parseStatement() + } + + self.expect(token.WHILE) + self.expect(token.LEFT_PARENTHESIS) + node.Test = self.parseExpression() + self.expect(token.RIGHT_PARENTHESIS) + if self.token == token.SEMICOLON { + self.next() + } + + return node +} + +func (self *_parser) parseWhileStatement() ast.Statement { + self.expect(token.WHILE) + self.expect(token.LEFT_PARENTHESIS) + node := &ast.WhileStatement{ + Test: self.parseExpression(), + } + self.expect(token.RIGHT_PARENTHESIS) + node.Body = self.parseIterationStatement() + + return node +} + +func (self *_parser) parseIfStatement() ast.Statement { + self.expect(token.IF) + self.expect(token.LEFT_PARENTHESIS) + node := &ast.IfStatement{ + Test: self.parseExpression(), + } + self.expect(token.RIGHT_PARENTHESIS) + + if self.token == token.LEFT_BRACE { + node.Consequent = self.parseBlockStatement() + } else { + self.scope.allowLet = false + node.Consequent = self.parseStatement() + } + + if self.token == token.ELSE { + self.next() + self.scope.allowLet = false + node.Alternate = self.parseStatement() + } + + return node +} + +func (self *_parser) parseSourceElements() (body []ast.Statement) { + for self.token != token.EOF { + self.scope.allowLet = true + body = append(body, self.parseStatement()) + } + + return body +} + +func (self *_parser) parseProgram() *ast.Program { + self.openScope() + defer self.closeScope() + prg := &ast.Program{ + Body: self.parseSourceElements(), + DeclarationList: self.scope.declarationList, + File: self.file, + } + self.file.SetSourceMap(self.parseSourceMap()) + return prg +} + +func extractSourceMapLine(str string) string { + for { + p := strings.LastIndexByte(str, '\n') + 
line := str[p+1:] + if line != "" && line != "})" { + if strings.HasPrefix(line, "//# sourceMappingURL=") { + return line + } + break + } + if p >= 0 { + str = str[:p] + } else { + break + } + } + return "" +} + +func (self *_parser) parseSourceMap() *sourcemap.Consumer { + if self.opts.disableSourceMaps { + return nil + } + if smLine := extractSourceMapLine(self.str); smLine != "" { + urlIndex := strings.Index(smLine, "=") + urlStr := smLine[urlIndex+1:] + + var data []byte + var err error + if strings.HasPrefix(urlStr, "data:application/json") { + b64Index := strings.Index(urlStr, ",") + b64 := urlStr[b64Index+1:] + data, err = base64.StdEncoding.DecodeString(b64) + } else { + var smUrl *url.URL + if smUrl, err = url.Parse(urlStr); err == nil { + p := smUrl.Path + if !path.IsAbs(p) { + baseName := self.file.Name() + baseUrl, err1 := url.Parse(baseName) + if err1 == nil && baseUrl.Scheme != "" { + baseUrl.Path = path.Join(path.Dir(baseUrl.Path), p) + p = baseUrl.String() + } else { + p = path.Join(path.Dir(baseName), p) + } + } + if self.opts.sourceMapLoader != nil { + data, err = self.opts.sourceMapLoader(p) + } else { + if smUrl.Scheme == "" || smUrl.Scheme == "file" { + data, err = ioutil.ReadFile(p) + } else { + err = fmt.Errorf("unsupported source map URL scheme: %s", smUrl.Scheme) + } + } + } + } + + if err != nil { + self.error(file.Idx(0), "Could not load source map: %v", err) + return nil + } + if data == nil { + return nil + } + + if sm, err := sourcemap.Parse(self.file.Name(), data); err == nil { + return sm + } else { + self.error(file.Idx(0), "Could not parse source map: %v", err) + } + } + return nil +} + +func (self *_parser) parseBreakStatement() ast.Statement { + idx := self.expect(token.BREAK) + semicolon := self.implicitSemicolon + if self.token == token.SEMICOLON { + semicolon = true + self.next() + } + + if semicolon || self.token == token.RIGHT_BRACE { + self.implicitSemicolon = false + if !self.scope.inIteration && !self.scope.inSwitch { + 
goto illegal + } + return &ast.BranchStatement{ + Idx: idx, + Token: token.BREAK, + } + } + + if self.token == token.IDENTIFIER { + identifier := self.parseIdentifier() + if !self.scope.hasLabel(identifier.Name) { + self.error(idx, "Undefined label '%s'", identifier.Name) + return &ast.BadStatement{From: idx, To: identifier.Idx1()} + } + self.semicolon() + return &ast.BranchStatement{ + Idx: idx, + Token: token.BREAK, + Label: identifier, + } + } + + self.expect(token.IDENTIFIER) + +illegal: + self.error(idx, "Illegal break statement") + self.nextStatement() + return &ast.BadStatement{From: idx, To: self.idx} +} + +func (self *_parser) parseContinueStatement() ast.Statement { + idx := self.expect(token.CONTINUE) + semicolon := self.implicitSemicolon + if self.token == token.SEMICOLON { + semicolon = true + self.next() + } + + if semicolon || self.token == token.RIGHT_BRACE { + self.implicitSemicolon = false + if !self.scope.inIteration { + goto illegal + } + return &ast.BranchStatement{ + Idx: idx, + Token: token.CONTINUE, + } + } + + if self.token == token.IDENTIFIER { + identifier := self.parseIdentifier() + if !self.scope.hasLabel(identifier.Name) { + self.error(idx, "Undefined label '%s'", identifier.Name) + return &ast.BadStatement{From: idx, To: identifier.Idx1()} + } + if !self.scope.inIteration { + goto illegal + } + self.semicolon() + return &ast.BranchStatement{ + Idx: idx, + Token: token.CONTINUE, + Label: identifier, + } + } + + self.expect(token.IDENTIFIER) + +illegal: + self.error(idx, "Illegal continue statement") + self.nextStatement() + return &ast.BadStatement{From: idx, To: self.idx} +} + +// Find the next statement after an error (recover) +func (self *_parser) nextStatement() { + for { + switch self.token { + case token.BREAK, token.CONTINUE, + token.FOR, token.IF, token.RETURN, token.SWITCH, + token.VAR, token.DO, token.TRY, token.WITH, + token.WHILE, token.THROW, token.CATCH, token.FINALLY: + // Return only if parser made some progress since 
last + // sync or if it has not reached 10 next calls without + // progress. Otherwise consume at least one token to + // avoid an endless parser loop + if self.idx == self.recover.idx && self.recover.count < 10 { + self.recover.count++ + return + } + if self.idx > self.recover.idx { + self.recover.idx = self.idx + self.recover.count = 0 + return + } + // Reaching here indicates a parser bug, likely an + // incorrect token list in this function, but it only + // leads to skipping of possibly correct code if a + // previous error is present, and thus is preferred + // over a non-terminating parse. + case token.EOF: + return + } + self.next() + } +} diff --git a/vendor/github.com/dop251/goja/proxy.go b/vendor/github.com/dop251/goja/proxy.go new file mode 100644 index 0000000000..de4901315e --- /dev/null +++ b/vendor/github.com/dop251/goja/proxy.go @@ -0,0 +1,1046 @@ +package goja + +import ( + "fmt" + "reflect" + + "github.com/dop251/goja/unistring" +) + +// Proxy is a Go wrapper around ECMAScript Proxy. Calling Runtime.ToValue() on it +// returns the underlying Proxy. Calling Export() on an ECMAScript Proxy returns a wrapper. +// Use Runtime.NewProxy() to create one. 
+type Proxy struct { + proxy *proxyObject +} + +var ( + proxyType = reflect.TypeOf(Proxy{}) +) + +type proxyPropIter struct { + p *proxyObject + names []Value + idx int +} + +func (i *proxyPropIter) next() (propIterItem, iterNextFunc) { + for i.idx < len(i.names) { + name := i.names[i.idx] + i.idx++ + if prop := i.p.val.getOwnProp(name); prop != nil { + return propIterItem{name: name.string(), value: prop}, i.next + } + } + return propIterItem{}, nil +} + +func (r *Runtime) newProxyObject(target, handler, proto *Object) *proxyObject { + if p, ok := target.self.(*proxyObject); ok { + if p.handler == nil { + panic(r.NewTypeError("Cannot create proxy with a revoked proxy as target")) + } + } + if p, ok := handler.self.(*proxyObject); ok { + if p.handler == nil { + panic(r.NewTypeError("Cannot create proxy with a revoked proxy as handler")) + } + } + return r._newProxyObject(target, &jsProxyHandler{handler: handler}, proto) +} + +func (r *Runtime) _newProxyObject(target *Object, handler proxyHandler, proto *Object) *proxyObject { + v := &Object{runtime: r} + p := &proxyObject{} + v.self = p + p.val = v + p.class = classObject + if proto == nil { + p.prototype = r.global.ObjectPrototype + } else { + p.prototype = proto + } + p.extensible = false + p.init() + p.target = target + p.handler = handler + if call, ok := target.self.assertCallable(); ok { + p.call = call + } + if ctor := target.self.assertConstructor(); ctor != nil { + p.ctor = ctor + } + return p +} + +func (p Proxy) Revoke() { + p.proxy.revoke() +} + +func (p Proxy) Handler() *Object { + if handler := p.proxy.handler; handler != nil { + return handler.toObject(p.proxy.val.runtime) + } + return nil +} + +func (p Proxy) Target() *Object { + return p.proxy.target +} + +func (p Proxy) toValue(r *Runtime) Value { + if p.proxy == nil { + return _null + } + proxy := p.proxy.val + if proxy.runtime != r { + panic(r.NewTypeError("Illegal runtime transition of a Proxy")) + } + return proxy +} + +type proxyTrap string + 
+const ( + proxy_trap_getPrototypeOf = "getPrototypeOf" + proxy_trap_setPrototypeOf = "setPrototypeOf" + proxy_trap_isExtensible = "isExtensible" + proxy_trap_preventExtensions = "preventExtensions" + proxy_trap_getOwnPropertyDescriptor = "getOwnPropertyDescriptor" + proxy_trap_defineProperty = "defineProperty" + proxy_trap_has = "has" + proxy_trap_get = "get" + proxy_trap_set = "set" + proxy_trap_deleteProperty = "deleteProperty" + proxy_trap_ownKeys = "ownKeys" + proxy_trap_apply = "apply" + proxy_trap_construct = "construct" +) + +func (p proxyTrap) String() (name string) { + return string(p) +} + +type proxyHandler interface { + getPrototypeOf(target *Object) (Value, bool) + setPrototypeOf(target *Object, proto *Object) (bool, bool) + isExtensible(target *Object) (bool, bool) + preventExtensions(target *Object) (bool, bool) + + getOwnPropertyDescriptorStr(target *Object, prop unistring.String) (Value, bool) + getOwnPropertyDescriptorIdx(target *Object, prop valueInt) (Value, bool) + getOwnPropertyDescriptorSym(target *Object, prop *Symbol) (Value, bool) + + definePropertyStr(target *Object, prop unistring.String, desc PropertyDescriptor) (bool, bool) + definePropertyIdx(target *Object, prop valueInt, desc PropertyDescriptor) (bool, bool) + definePropertySym(target *Object, prop *Symbol, desc PropertyDescriptor) (bool, bool) + + hasStr(target *Object, prop unistring.String) (bool, bool) + hasIdx(target *Object, prop valueInt) (bool, bool) + hasSym(target *Object, prop *Symbol) (bool, bool) + + getStr(target *Object, prop unistring.String, receiver Value) (Value, bool) + getIdx(target *Object, prop valueInt, receiver Value) (Value, bool) + getSym(target *Object, prop *Symbol, receiver Value) (Value, bool) + + setStr(target *Object, prop unistring.String, value Value, receiver Value) (bool, bool) + setIdx(target *Object, prop valueInt, value Value, receiver Value) (bool, bool) + setSym(target *Object, prop *Symbol, value Value, receiver Value) (bool, bool) + + 
deleteStr(target *Object, prop unistring.String) (bool, bool) + deleteIdx(target *Object, prop valueInt) (bool, bool) + deleteSym(target *Object, prop *Symbol) (bool, bool) + + ownKeys(target *Object) (*Object, bool) + apply(target *Object, this Value, args []Value) (Value, bool) + construct(target *Object, args []Value, newTarget *Object) (Value, bool) + + toObject(*Runtime) *Object +} + +type jsProxyHandler struct { + handler *Object +} + +func (h *jsProxyHandler) toObject(*Runtime) *Object { + return h.handler +} + +func (h *jsProxyHandler) proxyCall(trap proxyTrap, args ...Value) (Value, bool) { + r := h.handler.runtime + + if m := toMethod(r.getVStr(h.handler, unistring.String(trap.String()))); m != nil { + return m(FunctionCall{ + This: h.handler, + Arguments: args, + }), true + } + + return nil, false +} + +func (h *jsProxyHandler) boolProxyCall(trap proxyTrap, args ...Value) (bool, bool) { + if v, ok := h.proxyCall(trap, args...); ok { + return v.ToBoolean(), true + } + return false, false +} + +func (h *jsProxyHandler) getPrototypeOf(target *Object) (Value, bool) { + return h.proxyCall(proxy_trap_getPrototypeOf, target) +} + +func (h *jsProxyHandler) setPrototypeOf(target *Object, proto *Object) (bool, bool) { + var protoVal Value + if proto != nil { + protoVal = proto + } else { + protoVal = _null + } + return h.boolProxyCall(proxy_trap_setPrototypeOf, target, protoVal) +} + +func (h *jsProxyHandler) isExtensible(target *Object) (bool, bool) { + return h.boolProxyCall(proxy_trap_isExtensible, target) +} + +func (h *jsProxyHandler) preventExtensions(target *Object) (bool, bool) { + return h.boolProxyCall(proxy_trap_preventExtensions, target) +} + +func (h *jsProxyHandler) getOwnPropertyDescriptorStr(target *Object, prop unistring.String) (Value, bool) { + return h.proxyCall(proxy_trap_getOwnPropertyDescriptor, target, stringValueFromRaw(prop)) +} + +func (h *jsProxyHandler) getOwnPropertyDescriptorIdx(target *Object, prop valueInt) (Value, bool) { + return 
h.proxyCall(proxy_trap_getOwnPropertyDescriptor, target, prop.toString()) +} + +func (h *jsProxyHandler) getOwnPropertyDescriptorSym(target *Object, prop *Symbol) (Value, bool) { + return h.proxyCall(proxy_trap_getOwnPropertyDescriptor, target, prop) +} + +func (h *jsProxyHandler) definePropertyStr(target *Object, prop unistring.String, desc PropertyDescriptor) (bool, bool) { + return h.boolProxyCall(proxy_trap_defineProperty, target, stringValueFromRaw(prop), desc.toValue(h.handler.runtime)) +} + +func (h *jsProxyHandler) definePropertyIdx(target *Object, prop valueInt, desc PropertyDescriptor) (bool, bool) { + return h.boolProxyCall(proxy_trap_defineProperty, target, prop.toString(), desc.toValue(h.handler.runtime)) +} + +func (h *jsProxyHandler) definePropertySym(target *Object, prop *Symbol, desc PropertyDescriptor) (bool, bool) { + return h.boolProxyCall(proxy_trap_defineProperty, target, prop, desc.toValue(h.handler.runtime)) +} + +func (h *jsProxyHandler) hasStr(target *Object, prop unistring.String) (bool, bool) { + return h.boolProxyCall(proxy_trap_has, target, stringValueFromRaw(prop)) +} + +func (h *jsProxyHandler) hasIdx(target *Object, prop valueInt) (bool, bool) { + return h.boolProxyCall(proxy_trap_has, target, prop.toString()) +} + +func (h *jsProxyHandler) hasSym(target *Object, prop *Symbol) (bool, bool) { + return h.boolProxyCall(proxy_trap_has, target, prop) +} + +func (h *jsProxyHandler) getStr(target *Object, prop unistring.String, receiver Value) (Value, bool) { + return h.proxyCall(proxy_trap_get, target, stringValueFromRaw(prop), receiver) +} + +func (h *jsProxyHandler) getIdx(target *Object, prop valueInt, receiver Value) (Value, bool) { + return h.proxyCall(proxy_trap_get, target, prop.toString(), receiver) +} + +func (h *jsProxyHandler) getSym(target *Object, prop *Symbol, receiver Value) (Value, bool) { + return h.proxyCall(proxy_trap_get, target, prop, receiver) +} + +func (h *jsProxyHandler) setStr(target *Object, prop 
unistring.String, value Value, receiver Value) (bool, bool) { + return h.boolProxyCall(proxy_trap_set, target, stringValueFromRaw(prop), value, receiver) +} + +func (h *jsProxyHandler) setIdx(target *Object, prop valueInt, value Value, receiver Value) (bool, bool) { + return h.boolProxyCall(proxy_trap_set, target, prop.toString(), value, receiver) +} + +func (h *jsProxyHandler) setSym(target *Object, prop *Symbol, value Value, receiver Value) (bool, bool) { + return h.boolProxyCall(proxy_trap_set, target, prop, value, receiver) +} + +func (h *jsProxyHandler) deleteStr(target *Object, prop unistring.String) (bool, bool) { + return h.boolProxyCall(proxy_trap_deleteProperty, target, stringValueFromRaw(prop)) +} + +func (h *jsProxyHandler) deleteIdx(target *Object, prop valueInt) (bool, bool) { + return h.boolProxyCall(proxy_trap_deleteProperty, target, prop.toString()) +} + +func (h *jsProxyHandler) deleteSym(target *Object, prop *Symbol) (bool, bool) { + return h.boolProxyCall(proxy_trap_deleteProperty, target, prop) +} + +func (h *jsProxyHandler) ownKeys(target *Object) (*Object, bool) { + if v, ok := h.proxyCall(proxy_trap_ownKeys, target); ok { + return h.handler.runtime.toObject(v), true + } + return nil, false +} + +func (h *jsProxyHandler) apply(target *Object, this Value, args []Value) (Value, bool) { + return h.proxyCall(proxy_trap_apply, target, this, h.handler.runtime.newArrayValues(args)) +} + +func (h *jsProxyHandler) construct(target *Object, args []Value, newTarget *Object) (Value, bool) { + return h.proxyCall(proxy_trap_construct, target, h.handler.runtime.newArrayValues(args), newTarget) +} + +type proxyObject struct { + baseObject + target *Object + handler proxyHandler + call func(FunctionCall) Value + ctor func(args []Value, newTarget *Object) *Object +} + +func (p *proxyObject) checkHandler() proxyHandler { + r := p.val.runtime + if handler := p.handler; handler != nil { + return handler + } + panic(r.NewTypeError("Proxy already revoked")) +} + 
+func (p *proxyObject) proto() *Object { + target := p.target + if v, ok := p.checkHandler().getPrototypeOf(target); ok { + var handlerProto *Object + if v != _null { + handlerProto = p.val.runtime.toObject(v) + } + if !target.self.isExtensible() && !p.__sameValue(handlerProto, target.self.proto()) { + panic(p.val.runtime.NewTypeError("'getPrototypeOf' on proxy: proxy target is non-extensible but the trap did not return its actual prototype")) + } + return handlerProto + } + + return target.self.proto() +} + +func (p *proxyObject) setProto(proto *Object, throw bool) bool { + target := p.target + if v, ok := p.checkHandler().setPrototypeOf(target, proto); ok { + if v { + if !target.self.isExtensible() && !p.__sameValue(proto, target.self.proto()) { + panic(p.val.runtime.NewTypeError("'setPrototypeOf' on proxy: trap returned truish for setting a new prototype on the non-extensible proxy target")) + } + return true + } else { + p.val.runtime.typeErrorResult(throw, "'setPrototypeOf' on proxy: trap returned falsish") + return false + } + } + + return target.self.setProto(proto, throw) +} + +func (p *proxyObject) isExtensible() bool { + target := p.target + if booleanTrapResult, ok := p.checkHandler().isExtensible(p.target); ok { + if te := target.self.isExtensible(); booleanTrapResult != te { + panic(p.val.runtime.NewTypeError("'isExtensible' on proxy: trap result does not reflect extensibility of proxy target (which is '%v')", te)) + } + return booleanTrapResult + } + + return target.self.isExtensible() +} + +func (p *proxyObject) preventExtensions(throw bool) bool { + target := p.target + if booleanTrapResult, ok := p.checkHandler().preventExtensions(target); ok { + if !booleanTrapResult { + p.val.runtime.typeErrorResult(throw, "'preventExtensions' on proxy: trap returned falsish") + return false + } + if te := target.self.isExtensible(); booleanTrapResult && te { + panic(p.val.runtime.NewTypeError("'preventExtensions' on proxy: trap returned truish but the proxy 
target is extensible")) + } + } + + return target.self.preventExtensions(throw) +} + +func propToValueProp(v Value) *valueProperty { + if v == nil { + return nil + } + if v, ok := v.(*valueProperty); ok { + return v + } + return &valueProperty{ + value: v, + writable: true, + configurable: true, + enumerable: true, + } +} + +func (p *proxyObject) proxyDefineOwnPropertyPreCheck(trapResult, throw bool) bool { + if !trapResult { + p.val.runtime.typeErrorResult(throw, "'defineProperty' on proxy: trap returned falsish") + return false + } + return true +} + +func (p *proxyObject) proxyDefineOwnPropertyPostCheck(prop Value, target *Object, descr PropertyDescriptor) { + targetDesc := propToValueProp(prop) + extensibleTarget := target.self.isExtensible() + settingConfigFalse := descr.Configurable == FLAG_FALSE + if targetDesc == nil { + if !extensibleTarget { + panic(p.val.runtime.NewTypeError()) + } + if settingConfigFalse { + panic(p.val.runtime.NewTypeError()) + } + } else { + if !p.__isCompatibleDescriptor(extensibleTarget, &descr, targetDesc) { + panic(p.val.runtime.NewTypeError()) + } + if settingConfigFalse && targetDesc.configurable { + panic(p.val.runtime.NewTypeError()) + } + if targetDesc.value != nil && !targetDesc.configurable && targetDesc.writable { + if descr.Writable == FLAG_FALSE { + panic(p.val.runtime.NewTypeError()) + } + } + } +} + +func (p *proxyObject) defineOwnPropertyStr(name unistring.String, descr PropertyDescriptor, throw bool) bool { + target := p.target + if booleanTrapResult, ok := p.checkHandler().definePropertyStr(target, name, descr); ok { + if !p.proxyDefineOwnPropertyPreCheck(booleanTrapResult, throw) { + return false + } + p.proxyDefineOwnPropertyPostCheck(target.self.getOwnPropStr(name), target, descr) + return true + } + return target.self.defineOwnPropertyStr(name, descr, throw) +} + +func (p *proxyObject) defineOwnPropertyIdx(idx valueInt, descr PropertyDescriptor, throw bool) bool { + target := p.target + if booleanTrapResult, ok 
:= p.checkHandler().definePropertyIdx(target, idx, descr); ok { + if !p.proxyDefineOwnPropertyPreCheck(booleanTrapResult, throw) { + return false + } + p.proxyDefineOwnPropertyPostCheck(target.self.getOwnPropIdx(idx), target, descr) + return true + } + + return target.self.defineOwnPropertyIdx(idx, descr, throw) +} + +func (p *proxyObject) defineOwnPropertySym(s *Symbol, descr PropertyDescriptor, throw bool) bool { + target := p.target + if booleanTrapResult, ok := p.checkHandler().definePropertySym(target, s, descr); ok { + if !p.proxyDefineOwnPropertyPreCheck(booleanTrapResult, throw) { + return false + } + p.proxyDefineOwnPropertyPostCheck(target.self.getOwnPropSym(s), target, descr) + return true + } + + return target.self.defineOwnPropertySym(s, descr, throw) +} + +func (p *proxyObject) proxyHasChecks(targetProp Value, target *Object, name fmt.Stringer) { + targetDesc := propToValueProp(targetProp) + if targetDesc != nil { + if !targetDesc.configurable { + panic(p.val.runtime.NewTypeError("'has' on proxy: trap returned falsish for property '%s' which exists in the proxy target as non-configurable", name.String())) + } + if !target.self.isExtensible() { + panic(p.val.runtime.NewTypeError("'has' on proxy: trap returned falsish for property '%s' but the proxy target is not extensible", name.String())) + } + } +} + +func (p *proxyObject) hasPropertyStr(name unistring.String) bool { + target := p.target + if b, ok := p.checkHandler().hasStr(target, name); ok { + if !b { + p.proxyHasChecks(target.self.getOwnPropStr(name), target, name) + } + return b + } + + return target.self.hasPropertyStr(name) +} + +func (p *proxyObject) hasPropertyIdx(idx valueInt) bool { + target := p.target + if b, ok := p.checkHandler().hasIdx(target, idx); ok { + if !b { + p.proxyHasChecks(target.self.getOwnPropIdx(idx), target, idx) + } + return b + } + + return target.self.hasPropertyIdx(idx) +} + +func (p *proxyObject) hasPropertySym(s *Symbol) bool { + target := p.target + if b, ok := 
p.checkHandler().hasSym(target, s); ok { + if !b { + p.proxyHasChecks(target.self.getOwnPropSym(s), target, s) + } + return b + } + + return target.self.hasPropertySym(s) +} + +func (p *proxyObject) hasOwnPropertyStr(name unistring.String) bool { + return p.getOwnPropStr(name) != nil +} + +func (p *proxyObject) hasOwnPropertyIdx(idx valueInt) bool { + return p.getOwnPropIdx(idx) != nil +} + +func (p *proxyObject) hasOwnPropertySym(s *Symbol) bool { + return p.getOwnPropSym(s) != nil +} + +func (p *proxyObject) proxyGetOwnPropertyDescriptor(targetProp Value, target *Object, trapResult Value, name fmt.Stringer) Value { + r := p.val.runtime + targetDesc := propToValueProp(targetProp) + var trapResultObj *Object + if trapResult != nil && trapResult != _undefined { + if obj, ok := trapResult.(*Object); ok { + trapResultObj = obj + } else { + panic(r.NewTypeError("'getOwnPropertyDescriptor' on proxy: trap returned neither object nor undefined for property '%s'", name.String())) + } + } + if trapResultObj == nil { + if targetDesc == nil { + return nil + } + if !targetDesc.configurable { + panic(r.NewTypeError()) + } + if !target.self.isExtensible() { + panic(r.NewTypeError()) + } + return nil + } + extensibleTarget := target.self.isExtensible() + resultDesc := r.toPropertyDescriptor(trapResultObj) + resultDesc.complete() + if !p.__isCompatibleDescriptor(extensibleTarget, &resultDesc, targetDesc) { + panic(r.NewTypeError("'getOwnPropertyDescriptor' on proxy: trap returned descriptor for property '%s' that is incompatible with the existing property in the proxy target", name.String())) + } + + if resultDesc.Configurable == FLAG_FALSE { + if targetDesc == nil { + panic(r.NewTypeError("'getOwnPropertyDescriptor' on proxy: trap reported non-configurability for property '%s' which is non-existent in the proxy target", name.String())) + } + + if targetDesc.configurable { + panic(r.NewTypeError("'getOwnPropertyDescriptor' on proxy: trap reported non-configurability for property 
'%s' which is configurable in the proxy target", name.String())) + } + + if resultDesc.Writable == FLAG_FALSE && targetDesc.writable { + panic(r.NewTypeError("'getOwnPropertyDescriptor' on proxy: trap reported non-configurable and writable for property '%s' which is non-configurable, non-writable in the proxy target", name.String())) + } + } + + if resultDesc.Writable == FLAG_TRUE && resultDesc.Configurable == FLAG_TRUE && + resultDesc.Enumerable == FLAG_TRUE { + return resultDesc.Value + } + return r.toValueProp(trapResultObj) +} + +func (p *proxyObject) getOwnPropStr(name unistring.String) Value { + target := p.target + if v, ok := p.checkHandler().getOwnPropertyDescriptorStr(target, name); ok { + return p.proxyGetOwnPropertyDescriptor(target.self.getOwnPropStr(name), target, v, name) + } + + return target.self.getOwnPropStr(name) +} + +func (p *proxyObject) getOwnPropIdx(idx valueInt) Value { + target := p.target + if v, ok := p.checkHandler().getOwnPropertyDescriptorIdx(target, idx); ok { + return p.proxyGetOwnPropertyDescriptor(target.self.getOwnPropIdx(idx), target, v, idx) + } + + return target.self.getOwnPropIdx(idx) +} + +func (p *proxyObject) getOwnPropSym(s *Symbol) Value { + target := p.target + if v, ok := p.checkHandler().getOwnPropertyDescriptorSym(target, s); ok { + return p.proxyGetOwnPropertyDescriptor(target.self.getOwnPropSym(s), target, v, s) + } + + return target.self.getOwnPropSym(s) +} + +func (p *proxyObject) proxyGetChecks(targetProp, trapResult Value, name fmt.Stringer) { + if targetDesc, ok := targetProp.(*valueProperty); ok { + if !targetDesc.accessor { + if !targetDesc.writable && !targetDesc.configurable && !trapResult.SameAs(targetDesc.value) { + panic(p.val.runtime.NewTypeError("'get' on proxy: property '%s' is a read-only and non-configurable data property on the proxy target but the proxy did not return its actual value (expected '%s' but got '%s')", name.String(), nilSafe(targetDesc.value), ret)) + } + } else { + if 
!targetDesc.configurable && targetDesc.getterFunc == nil && trapResult != _undefined { + panic(p.val.runtime.NewTypeError("'get' on proxy: property '%s' is a non-configurable accessor property on the proxy target and does not have a getter function, but the trap did not return 'undefined' (got '%s')", name.String(), ret)) + } + } + } +} + +func (p *proxyObject) getStr(name unistring.String, receiver Value) Value { + target := p.target + if receiver == nil { + receiver = p.val + } + if v, ok := p.checkHandler().getStr(target, name, receiver); ok { + p.proxyGetChecks(target.self.getOwnPropStr(name), v, name) + return v + } + return target.self.getStr(name, receiver) +} + +func (p *proxyObject) getIdx(idx valueInt, receiver Value) Value { + target := p.target + if receiver == nil { + receiver = p.val + } + if v, ok := p.checkHandler().getIdx(target, idx, receiver); ok { + p.proxyGetChecks(target.self.getOwnPropIdx(idx), v, idx) + return v + } + return target.self.getIdx(idx, receiver) +} + +func (p *proxyObject) getSym(s *Symbol, receiver Value) Value { + target := p.target + if receiver == nil { + receiver = p.val + } + if v, ok := p.checkHandler().getSym(target, s, receiver); ok { + p.proxyGetChecks(target.self.getOwnPropSym(s), v, s) + return v + } + + return target.self.getSym(s, receiver) +} + +func (p *proxyObject) proxySetPreCheck(trapResult, throw bool, name fmt.Stringer) bool { + if !trapResult { + p.val.runtime.typeErrorResult(throw, "'set' on proxy: trap returned falsish for property '%s'", name.String()) + } + return trapResult +} + +func (p *proxyObject) proxySetPostCheck(targetProp, value Value, name fmt.Stringer) { + if prop, ok := targetProp.(*valueProperty); ok { + if prop.accessor { + if !prop.configurable && prop.setterFunc == nil { + panic(p.val.runtime.NewTypeError("'set' on proxy: trap returned truish for property '%s' which exists in the proxy target as a non-configurable and non-writable accessor property without a setter", name.String())) + } 
+ } else if !prop.configurable && !prop.writable && !p.__sameValue(prop.value, value) { + panic(p.val.runtime.NewTypeError("'set' on proxy: trap returned truish for property '%s' which exists in the proxy target as a non-configurable and non-writable data property with a different value", name.String())) + } + } +} + +func (p *proxyObject) proxySetStr(name unistring.String, value, receiver Value, throw bool) bool { + target := p.target + if v, ok := p.checkHandler().setStr(target, name, value, receiver); ok { + if p.proxySetPreCheck(v, throw, name) { + p.proxySetPostCheck(target.self.getOwnPropStr(name), value, name) + return true + } + return false + } + return target.setStr(name, value, receiver, throw) +} + +func (p *proxyObject) proxySetIdx(idx valueInt, value, receiver Value, throw bool) bool { + target := p.target + if v, ok := p.checkHandler().setIdx(target, idx, value, receiver); ok { + if p.proxySetPreCheck(v, throw, idx) { + p.proxySetPostCheck(target.self.getOwnPropIdx(idx), value, idx) + return true + } + return false + } + return target.setIdx(idx, value, receiver, throw) +} + +func (p *proxyObject) proxySetSym(s *Symbol, value, receiver Value, throw bool) bool { + target := p.target + if v, ok := p.checkHandler().setSym(target, s, value, receiver); ok { + if p.proxySetPreCheck(v, throw, s) { + p.proxySetPostCheck(target.self.getOwnPropSym(s), value, s) + return true + } + return false + } + return target.setSym(s, value, receiver, throw) +} + +func (p *proxyObject) setOwnStr(name unistring.String, v Value, throw bool) bool { + return p.proxySetStr(name, v, p.val, throw) +} + +func (p *proxyObject) setOwnIdx(idx valueInt, v Value, throw bool) bool { + return p.proxySetIdx(idx, v, p.val, throw) +} + +func (p *proxyObject) setOwnSym(s *Symbol, v Value, throw bool) bool { + return p.proxySetSym(s, v, p.val, throw) +} + +func (p *proxyObject) setForeignStr(name unistring.String, v, receiver Value, throw bool) (bool, bool) { + return p.proxySetStr(name, v, 
receiver, throw), true +} + +func (p *proxyObject) setForeignIdx(idx valueInt, v, receiver Value, throw bool) (bool, bool) { + return p.proxySetIdx(idx, v, receiver, throw), true +} + +func (p *proxyObject) setForeignSym(s *Symbol, v, receiver Value, throw bool) (bool, bool) { + return p.proxySetSym(s, v, receiver, throw), true +} + +func (p *proxyObject) proxyDeleteCheck(trapResult bool, targetProp Value, name fmt.Stringer, target *Object) { + if trapResult { + if targetProp == nil { + return + } + if targetDesc, ok := targetProp.(*valueProperty); ok { + if !targetDesc.configurable { + panic(p.val.runtime.NewTypeError("'deleteProperty' on proxy: property '%s' is a non-configurable property but the trap returned truish", name.String())) + } + } + if !target.self.isExtensible() { + panic(p.val.runtime.NewTypeError("'deleteProperty' on proxy: trap returned truish for property '%s' but the proxy target is non-extensible", name.String())) + } + } +} + +func (p *proxyObject) deleteStr(name unistring.String, throw bool) bool { + target := p.target + if v, ok := p.checkHandler().deleteStr(target, name); ok { + p.proxyDeleteCheck(v, target.self.getOwnPropStr(name), name, target) + return v + } + + return target.self.deleteStr(name, throw) +} + +func (p *proxyObject) deleteIdx(idx valueInt, throw bool) bool { + target := p.target + if v, ok := p.checkHandler().deleteIdx(target, idx); ok { + p.proxyDeleteCheck(v, target.self.getOwnPropIdx(idx), idx, target) + return v + } + + return target.self.deleteIdx(idx, throw) +} + +func (p *proxyObject) deleteSym(s *Symbol, throw bool) bool { + target := p.target + if v, ok := p.checkHandler().deleteSym(target, s); ok { + p.proxyDeleteCheck(v, target.self.getOwnPropSym(s), s, target) + return v + } + + return target.self.deleteSym(s, throw) +} + +func (p *proxyObject) ownPropertyKeys(all bool, _ []Value) []Value { + if v, ok := p.proxyOwnKeys(); ok { + if !all { + k := 0 + for i, key := range v { + prop := p.val.getOwnProp(key) + if 
prop == nil { + continue + } + if prop, ok := prop.(*valueProperty); ok && !prop.enumerable { + continue + } + if k != i { + v[k] = v[i] + } + k++ + } + v = v[:k] + } + return v + } + return p.target.self.ownPropertyKeys(all, nil) +} + +func (p *proxyObject) proxyOwnKeys() ([]Value, bool) { + target := p.target + if v, ok := p.checkHandler().ownKeys(target); ok { + keys := p.val.runtime.toObject(v) + var keyList []Value + keySet := make(map[Value]struct{}) + l := toLength(keys.self.getStr("length", nil)) + for k := int64(0); k < l; k++ { + item := keys.self.getIdx(valueInt(k), nil) + if _, ok := item.(valueString); !ok { + if _, ok := item.(*Symbol); !ok { + panic(p.val.runtime.NewTypeError("%s is not a valid property name", item.String())) + } + } + if _, exists := keySet[item]; exists { + panic(p.val.runtime.NewTypeError("'ownKeys' on proxy: trap returned duplicate entries")) + } + keyList = append(keyList, item) + keySet[item] = struct{}{} + } + ext := target.self.isExtensible() + for _, itemName := range target.self.ownPropertyKeys(true, nil) { + if _, exists := keySet[itemName]; exists { + delete(keySet, itemName) + } else { + if !ext { + panic(p.val.runtime.NewTypeError("'ownKeys' on proxy: trap result did not include '%s'", itemName.String())) + } + prop := target.getOwnProp(itemName) + if prop, ok := prop.(*valueProperty); ok && !prop.configurable { + panic(p.val.runtime.NewTypeError("'ownKeys' on proxy: trap result did not include non-configurable '%s'", itemName.String())) + } + } + } + if !ext && len(keyList) > 0 && len(keySet) > 0 { + panic(p.val.runtime.NewTypeError("'ownKeys' on proxy: trap returned extra keys but proxy target is non-extensible")) + } + + return keyList, true + } + + return nil, false +} + +func (p *proxyObject) enumerateOwnKeys() iterNextFunc { + return (&proxyPropIter{ + p: p, + names: p.ownKeys(true, nil), + }).next +} + +func (p *proxyObject) assertCallable() (call func(FunctionCall) Value, ok bool) { + if p.call != nil { + return 
func(call FunctionCall) Value { + return p.apply(call) + }, true + } + return nil, false +} + +func (p *proxyObject) assertConstructor() func(args []Value, newTarget *Object) *Object { + if p.ctor != nil { + return p.construct + } + return nil +} + +func (p *proxyObject) apply(call FunctionCall) Value { + if p.call == nil { + panic(p.val.runtime.NewTypeError("proxy target is not a function")) + } + if v, ok := p.checkHandler().apply(p.target, nilSafe(call.This), call.Arguments); ok { + return v + } + return p.call(call) +} + +func (p *proxyObject) construct(args []Value, newTarget *Object) *Object { + if p.ctor == nil { + panic(p.val.runtime.NewTypeError("proxy target is not a constructor")) + } + if newTarget == nil { + newTarget = p.val + } + if v, ok := p.checkHandler().construct(p.target, args, newTarget); ok { + return p.val.runtime.toObject(v) + } + return p.ctor(args, newTarget) +} + +func (p *proxyObject) __isCompatibleDescriptor(extensible bool, desc *PropertyDescriptor, current *valueProperty) bool { + if current == nil { + return extensible + } + + /*if desc.Empty() { + return true + }*/ + + /*if p.__isEquivalentDescriptor(desc, current) { + return true + }*/ + + if !current.configurable { + if desc.Configurable == FLAG_TRUE { + return false + } + + if desc.Enumerable != FLAG_NOT_SET && desc.Enumerable.Bool() != current.enumerable { + return false + } + + if desc.IsGeneric() { + return true + } + + if desc.IsData() != !current.accessor { + return desc.Configurable != FLAG_FALSE + } + + if desc.IsData() && !current.accessor { + if !current.configurable { + if desc.Writable == FLAG_TRUE && !current.writable { + return false + } + if !current.writable { + if desc.Value != nil && !desc.Value.SameAs(current.value) { + return false + } + } + } + return true + } + if desc.IsAccessor() && current.accessor { + if !current.configurable { + if desc.Setter != nil && desc.Setter.SameAs(current.setterFunc) { + return false + } + if desc.Getter != nil && 
desc.Getter.SameAs(current.getterFunc) { + return false + } + } + } + } + return true +} + +func (p *proxyObject) __sameValue(val1, val2 Value) bool { + if val1 == nil && val2 == nil { + return true + } + if val1 != nil { + return val1.SameAs(val2) + } + return false +} + +func (p *proxyObject) filterKeys(vals []Value, all, symbols bool) []Value { + if !all { + k := 0 + for i, val := range vals { + var prop Value + if symbols { + if s, ok := val.(*Symbol); ok { + prop = p.getOwnPropSym(s) + } else { + continue + } + } else { + if _, ok := val.(*Symbol); !ok { + prop = p.getOwnPropStr(val.string()) + } else { + continue + } + } + if prop == nil { + continue + } + if prop, ok := prop.(*valueProperty); ok && !prop.enumerable { + continue + } + if k != i { + vals[k] = vals[i] + } + k++ + } + vals = vals[:k] + } else { + k := 0 + for i, val := range vals { + if _, ok := val.(*Symbol); ok != symbols { + continue + } + if k != i { + vals[k] = vals[i] + } + k++ + } + vals = vals[:k] + } + return vals +} + +func (p *proxyObject) ownKeys(all bool, _ []Value) []Value { // we can assume accum is empty + if vals, ok := p.proxyOwnKeys(); ok { + return p.filterKeys(vals, all, false) + } + + return p.target.self.ownKeys(all, nil) +} + +func (p *proxyObject) ownSymbols(all bool, accum []Value) []Value { + if vals, ok := p.proxyOwnKeys(); ok { + res := p.filterKeys(vals, all, true) + if accum == nil { + return res + } + accum = append(accum, res...) 
+ return accum + } + + return p.target.self.ownSymbols(all, accum) +} + +func (p *proxyObject) className() string { + if p.target == nil { + panic(p.val.runtime.NewTypeError("proxy has been revoked")) + } + if p.call != nil || p.ctor != nil { + return classFunction + } + return classObject +} + +func (p *proxyObject) exportType() reflect.Type { + return proxyType +} + +func (p *proxyObject) export(*objectExportCtx) interface{} { + return Proxy{ + proxy: p, + } +} + +func (p *proxyObject) revoke() { + p.handler = nil + p.target = nil +} diff --git a/vendor/github.com/dop251/goja/regexp.go b/vendor/github.com/dop251/goja/regexp.go new file mode 100644 index 0000000000..e98cd28a7c --- /dev/null +++ b/vendor/github.com/dop251/goja/regexp.go @@ -0,0 +1,647 @@ +package goja + +import ( + "fmt" + "github.com/dlclark/regexp2" + "github.com/dop251/goja/unistring" + "io" + "regexp" + "sort" + "strings" + "unicode/utf16" +) + +type regexp2MatchCache struct { + target valueString + runes []rune + posMap []int +} + +// Not goroutine-safe. Use regexp2Wrapper.clone() +type regexp2Wrapper struct { + rx *regexp2.Regexp + cache *regexp2MatchCache +} + +type regexpWrapper regexp.Regexp + +type positionMapItem struct { + src, dst int +} +type positionMap []positionMapItem + +func (m positionMap) get(src int) int { + if src <= 0 { + return src + } + res := sort.Search(len(m), func(n int) bool { return m[n].src >= src }) + if res >= len(m) || m[res].src != src { + panic("index not found") + } + return m[res].dst +} + +type arrayRuneReader struct { + runes []rune + pos int +} + +func (rd *arrayRuneReader) ReadRune() (r rune, size int, err error) { + if rd.pos < len(rd.runes) { + r = rd.runes[rd.pos] + size = 1 + rd.pos++ + } else { + err = io.EOF + } + return +} + +// Not goroutine-safe. 
Use regexpPattern.clone() +type regexpPattern struct { + src string + + global, ignoreCase, multiline, sticky, unicode bool + + regexpWrapper *regexpWrapper + regexp2Wrapper *regexp2Wrapper +} + +func compileRegexp2(src string, multiline, ignoreCase bool) (*regexp2Wrapper, error) { + var opts regexp2.RegexOptions = regexp2.ECMAScript + if multiline { + opts |= regexp2.Multiline + } + if ignoreCase { + opts |= regexp2.IgnoreCase + } + regexp2Pattern, err1 := regexp2.Compile(src, opts) + if err1 != nil { + return nil, fmt.Errorf("Invalid regular expression (regexp2): %s (%v)", src, err1) + } + + return ®exp2Wrapper{rx: regexp2Pattern}, nil +} + +func (p *regexpPattern) createRegexp2() { + if p.regexp2Wrapper != nil { + return + } + rx, err := compileRegexp2(p.src, p.multiline, p.ignoreCase) + if err != nil { + // At this point the regexp should have been successfully converted to re2, if it fails now, it's a bug. + panic(err) + } + p.regexp2Wrapper = rx +} + +func buildUTF8PosMap(s valueString) (positionMap, string) { + pm := make(positionMap, 0, s.length()) + rd := s.reader(0) + sPos, utf8Pos := 0, 0 + var sb strings.Builder + for { + r, size, err := rd.ReadRune() + if err == io.EOF { + break + } + if err != nil { + // the string contains invalid UTF-16, bailing out + return nil, "" + } + utf8Size, _ := sb.WriteRune(r) + sPos += size + utf8Pos += utf8Size + pm = append(pm, positionMapItem{src: utf8Pos, dst: sPos}) + } + return pm, sb.String() +} + +func (p *regexpPattern) findSubmatchIndex(s valueString, start int) []int { + if p.regexpWrapper == nil { + return p.regexp2Wrapper.findSubmatchIndex(s, start, p.unicode, p.global || p.sticky) + } + if start != 0 { + // Unfortunately Go's regexp library does not allow starting from an arbitrary position. + // If we just drop the first _start_ characters of the string the assertions (^, $, \b and \B) will not + // work correctly. 
+ p.createRegexp2() + return p.regexp2Wrapper.findSubmatchIndex(s, start, p.unicode, p.global || p.sticky) + } + return p.regexpWrapper.findSubmatchIndex(s, p.unicode) +} + +func (p *regexpPattern) findAllSubmatchIndex(s valueString, start int, limit int, sticky bool) [][]int { + if p.regexpWrapper == nil { + return p.regexp2Wrapper.findAllSubmatchIndex(s, start, limit, sticky, p.unicode) + } + if start == 0 { + if s, ok := s.(asciiString); ok { + return p.regexpWrapper.findAllSubmatchIndex(s.String(), limit, sticky) + } + if limit == 1 { + result := p.regexpWrapper.findSubmatchIndexUnicode(s.(unicodeString), p.unicode) + if result == nil { + return nil + } + return [][]int{result} + } + // Unfortunately Go's regexp library lacks FindAllReaderSubmatchIndex(), so we have to use a UTF-8 string as an + // input. + if p.unicode { + // Try to convert s to UTF-8. If it does not contain any invalid UTF-16 we can do the matching in UTF-8. + pm, str := buildUTF8PosMap(s) + if pm != nil { + res := p.regexpWrapper.findAllSubmatchIndex(str, limit, sticky) + for _, result := range res { + for i, idx := range result { + result[i] = pm.get(idx) + } + } + return res + } + } + } + + p.createRegexp2() + return p.regexp2Wrapper.findAllSubmatchIndex(s, start, limit, sticky, p.unicode) +} + +// clone creates a copy of the regexpPattern which can be used concurrently. 
+func (p *regexpPattern) clone() *regexpPattern { + ret := ®expPattern{ + src: p.src, + global: p.global, + ignoreCase: p.ignoreCase, + multiline: p.multiline, + sticky: p.sticky, + unicode: p.unicode, + } + if p.regexpWrapper != nil { + ret.regexpWrapper = p.regexpWrapper.clone() + } + if p.regexp2Wrapper != nil { + ret.regexp2Wrapper = p.regexp2Wrapper.clone() + } + return ret +} + +type regexpObject struct { + baseObject + pattern *regexpPattern + source valueString + + standard bool +} + +func (r *regexp2Wrapper) findSubmatchIndex(s valueString, start int, fullUnicode, doCache bool) (result []int) { + if fullUnicode { + return r.findSubmatchIndexUnicode(s, start, doCache) + } + return r.findSubmatchIndexUTF16(s, start, doCache) +} + +func (r *regexp2Wrapper) findUTF16Cached(s valueString, start int, doCache bool) (match *regexp2.Match, runes []rune, err error) { + wrapped := r.rx + cache := r.cache + if cache != nil && cache.posMap == nil && cache.target.SameAs(s) { + runes = cache.runes + } else { + runes = s.utf16Runes() + cache = nil + } + match, err = wrapped.FindRunesMatchStartingAt(runes, start) + if doCache && match != nil && err == nil { + if cache == nil { + if r.cache == nil { + r.cache = new(regexp2MatchCache) + } + *r.cache = regexp2MatchCache{ + target: s, + runes: runes, + } + } + } else { + r.cache = nil + } + return +} + +func (r *regexp2Wrapper) findSubmatchIndexUTF16(s valueString, start int, doCache bool) (result []int) { + match, _, err := r.findUTF16Cached(s, start, doCache) + if err != nil { + return + } + + if match == nil { + return + } + groups := match.Groups() + + result = make([]int, 0, len(groups)<<1) + for _, group := range groups { + if len(group.Captures) > 0 { + result = append(result, group.Index, group.Index+group.Length) + } else { + result = append(result, -1, 0) + } + } + return +} + +func (r *regexp2Wrapper) findUnicodeCached(s valueString, start int, doCache bool) (match *regexp2.Match, posMap []int, err error) { + var ( 
+ runes []rune + mappedStart int + splitPair bool + savedRune rune + ) + wrapped := r.rx + cache := r.cache + if cache != nil && cache.posMap != nil && cache.target.SameAs(s) { + runes, posMap = cache.runes, cache.posMap + mappedStart, splitPair = posMapReverseLookup(posMap, start) + } else { + posMap, runes, mappedStart, splitPair = buildPosMap(&lenientUtf16Decoder{utf16Reader: s.utf16Reader(0)}, s.length(), start) + cache = nil + } + if splitPair { + // temporarily set the rune at mappedStart to the second code point of the pair + _, second := utf16.EncodeRune(runes[mappedStart]) + savedRune, runes[mappedStart] = runes[mappedStart], second + } + match, err = wrapped.FindRunesMatchStartingAt(runes, mappedStart) + if doCache && match != nil && err == nil { + if splitPair { + runes[mappedStart] = savedRune + } + if cache == nil { + if r.cache == nil { + r.cache = new(regexp2MatchCache) + } + *r.cache = regexp2MatchCache{ + target: s, + runes: runes, + posMap: posMap, + } + } + } else { + r.cache = nil + } + + return +} + +func (r *regexp2Wrapper) findSubmatchIndexUnicode(s valueString, start int, doCache bool) (result []int) { + match, posMap, err := r.findUnicodeCached(s, start, doCache) + if match == nil || err != nil { + return + } + + groups := match.Groups() + + result = make([]int, 0, len(groups)<<1) + for _, group := range groups { + if len(group.Captures) > 0 { + result = append(result, posMap[group.Index], posMap[group.Index+group.Length]) + } else { + result = append(result, -1, 0) + } + } + return +} + +func (r *regexp2Wrapper) findAllSubmatchIndexUTF16(s valueString, start, limit int, sticky bool) [][]int { + wrapped := r.rx + match, runes, err := r.findUTF16Cached(s, start, false) + if match == nil || err != nil { + return nil + } + if limit < 0 { + limit = len(runes) + 1 + } + results := make([][]int, 0, limit) + for match != nil { + groups := match.Groups() + + result := make([]int, 0, len(groups)<<1) + + for _, group := range groups { + if 
len(group.Captures) > 0 { + startPos := group.Index + endPos := group.Index + group.Length + result = append(result, startPos, endPos) + } else { + result = append(result, -1, 0) + } + } + + if sticky && len(result) > 1 { + if result[0] != start { + break + } + start = result[1] + } + + results = append(results, result) + limit-- + if limit <= 0 { + break + } + match, err = wrapped.FindNextMatch(match) + if err != nil { + return nil + } + } + return results +} + +func buildPosMap(rd io.RuneReader, l, start int) (posMap []int, runes []rune, mappedStart int, splitPair bool) { + posMap = make([]int, 0, l+1) + curPos := 0 + runes = make([]rune, 0, l) + startFound := false + for { + if !startFound { + if curPos == start { + mappedStart = len(runes) + startFound = true + } + if curPos > start { + // start position splits a surrogate pair + mappedStart = len(runes) - 1 + splitPair = true + startFound = true + } + } + rn, size, err := rd.ReadRune() + if err != nil { + break + } + runes = append(runes, rn) + posMap = append(posMap, curPos) + curPos += size + } + posMap = append(posMap, curPos) + return +} + +func posMapReverseLookup(posMap []int, pos int) (int, bool) { + mapped := sort.SearchInts(posMap, pos) + if mapped < len(posMap) && posMap[mapped] != pos { + return mapped - 1, true + } + return mapped, false +} + +func (r *regexp2Wrapper) findAllSubmatchIndexUnicode(s unicodeString, start, limit int, sticky bool) [][]int { + wrapped := r.rx + if limit < 0 { + limit = len(s) + 1 + } + results := make([][]int, 0, limit) + match, posMap, err := r.findUnicodeCached(s, start, false) + if err != nil { + return nil + } + for match != nil { + groups := match.Groups() + + result := make([]int, 0, len(groups)<<1) + + for _, group := range groups { + if len(group.Captures) > 0 { + start := posMap[group.Index] + end := posMap[group.Index+group.Length] + result = append(result, start, end) + } else { + result = append(result, -1, 0) + } + } + + if sticky && len(result) > 1 { + if 
result[0] != start { + break + } + start = result[1] + } + + results = append(results, result) + match, err = wrapped.FindNextMatch(match) + if err != nil { + return nil + } + } + return results +} + +func (r *regexp2Wrapper) findAllSubmatchIndex(s valueString, start, limit int, sticky, fullUnicode bool) [][]int { + switch s := s.(type) { + case asciiString: + return r.findAllSubmatchIndexUTF16(s, start, limit, sticky) + case unicodeString: + if fullUnicode { + return r.findAllSubmatchIndexUnicode(s, start, limit, sticky) + } + return r.findAllSubmatchIndexUTF16(s, start, limit, sticky) + default: + panic("Unsupported string type") + } +} + +func (r *regexp2Wrapper) clone() *regexp2Wrapper { + return ®exp2Wrapper{ + rx: r.rx, + } +} + +func (r *regexpWrapper) findAllSubmatchIndex(s string, limit int, sticky bool) (results [][]int) { + wrapped := (*regexp.Regexp)(r) + results = wrapped.FindAllStringSubmatchIndex(s, limit) + pos := 0 + if sticky { + for i, result := range results { + if len(result) > 1 { + if result[0] != pos { + return results[:i] + } + pos = result[1] + } + } + } + return +} + +func (r *regexpWrapper) findSubmatchIndex(s valueString, fullUnicode bool) []int { + switch s := s.(type) { + case asciiString: + return r.findSubmatchIndexASCII(string(s)) + case unicodeString: + return r.findSubmatchIndexUnicode(s, fullUnicode) + default: + panic("Unsupported string type") + } +} + +func (r *regexpWrapper) findSubmatchIndexASCII(s string) []int { + wrapped := (*regexp.Regexp)(r) + return wrapped.FindStringSubmatchIndex(s) +} + +func (r *regexpWrapper) findSubmatchIndexUnicode(s unicodeString, fullUnicode bool) (result []int) { + wrapped := (*regexp.Regexp)(r) + if fullUnicode { + posMap, runes, _, _ := buildPosMap(&lenientUtf16Decoder{utf16Reader: s.utf16Reader(0)}, s.length(), 0) + res := wrapped.FindReaderSubmatchIndex(&arrayRuneReader{runes: runes}) + for i, item := range res { + if item >= 0 { + res[i] = posMap[item] + } + } + return res + } + return 
wrapped.FindReaderSubmatchIndex(s.utf16Reader(0)) +} + +func (r *regexpWrapper) clone() *regexpWrapper { + return r +} + +func (r *regexpObject) execResultToArray(target valueString, result []int) Value { + captureCount := len(result) >> 1 + valueArray := make([]Value, captureCount) + matchIndex := result[0] + lowerBound := matchIndex + for index := 0; index < captureCount; index++ { + offset := index << 1 + if result[offset] >= lowerBound { + valueArray[index] = target.substring(result[offset], result[offset+1]) + lowerBound = result[offset] + } else { + valueArray[index] = _undefined + } + } + match := r.val.runtime.newArrayValues(valueArray) + match.self.setOwnStr("input", target, false) + match.self.setOwnStr("index", intToValue(int64(matchIndex)), false) + return match +} + +func (r *regexpObject) getLastIndex() int64 { + lastIndex := toLength(r.getStr("lastIndex", nil)) + if !r.pattern.global && !r.pattern.sticky { + return 0 + } + return lastIndex +} + +func (r *regexpObject) updateLastIndex(index int64, firstResult, lastResult []int) bool { + if r.pattern.sticky { + if firstResult == nil || int64(firstResult[0]) != index { + r.setOwnStr("lastIndex", intToValue(0), true) + return false + } + } else { + if firstResult == nil { + if r.pattern.global { + r.setOwnStr("lastIndex", intToValue(0), true) + } + return false + } + } + + if r.pattern.global || r.pattern.sticky { + r.setOwnStr("lastIndex", intToValue(int64(lastResult[1])), true) + } + return true +} + +func (r *regexpObject) execRegexp(target valueString) (match bool, result []int) { + index := r.getLastIndex() + if index >= 0 && index <= int64(target.length()) { + result = r.pattern.findSubmatchIndex(target, int(index)) + } + match = r.updateLastIndex(index, result, result) + return +} + +func (r *regexpObject) exec(target valueString) Value { + match, result := r.execRegexp(target) + if match { + return r.execResultToArray(target, result) + } + return _null +} + +func (r *regexpObject) test(target 
valueString) bool { + match, _ := r.execRegexp(target) + return match +} + +func (r *regexpObject) clone() *regexpObject { + r1 := r.val.runtime.newRegexpObject(r.prototype) + r1.source = r.source + r1.pattern = r.pattern + + return r1 +} + +func (r *regexpObject) init() { + r.baseObject.init() + r.standard = true + r._putProp("lastIndex", intToValue(0), true, false, false) +} + +func (r *regexpObject) setProto(proto *Object, throw bool) bool { + res := r.baseObject.setProto(proto, throw) + if res { + r.standard = false + } + return res +} + +func (r *regexpObject) defineOwnPropertyStr(name unistring.String, desc PropertyDescriptor, throw bool) bool { + res := r.baseObject.defineOwnPropertyStr(name, desc, throw) + if res { + r.standard = false + } + return res +} + +func (r *regexpObject) defineOwnPropertySym(name *Symbol, desc PropertyDescriptor, throw bool) bool { + res := r.baseObject.defineOwnPropertySym(name, desc, throw) + if res && r.standard { + switch name { + case SymMatch, SymMatchAll, SymSearch, SymSplit, SymReplace: + r.standard = false + } + } + return res +} + +func (r *regexpObject) deleteStr(name unistring.String, throw bool) bool { + res := r.baseObject.deleteStr(name, throw) + if res { + r.standard = false + } + return res +} + +func (r *regexpObject) setOwnStr(name unistring.String, value Value, throw bool) bool { + res := r.baseObject.setOwnStr(name, value, throw) + if res && r.standard && name == "exec" { + r.standard = false + } + return res +} + +func (r *regexpObject) setOwnSym(name *Symbol, value Value, throw bool) bool { + res := r.baseObject.setOwnSym(name, value, throw) + if res && r.standard { + switch name { + case SymMatch, SymMatchAll, SymSearch, SymSplit, SymReplace: + r.standard = false + } + } + return res +} diff --git a/vendor/github.com/dop251/goja/runtime.go b/vendor/github.com/dop251/goja/runtime.go new file mode 100644 index 0000000000..03ace1eb2d --- /dev/null +++ b/vendor/github.com/dop251/goja/runtime.go @@ -0,0 +1,2678 
@@ +package goja + +import ( + "bytes" + "errors" + "fmt" + "github.com/dop251/goja/file" + "go/ast" + "hash/maphash" + "math" + "math/bits" + "math/rand" + "reflect" + "runtime" + "strconv" + "time" + + "golang.org/x/text/collate" + + js_ast "github.com/dop251/goja/ast" + "github.com/dop251/goja/parser" + "github.com/dop251/goja/unistring" +) + +const ( + sqrt1_2 float64 = math.Sqrt2 / 2 + + deoptimiseRegexp = false +) + +var ( + typeCallable = reflect.TypeOf(Callable(nil)) + typeValue = reflect.TypeOf((*Value)(nil)).Elem() + typeObject = reflect.TypeOf((*Object)(nil)) + typeTime = reflect.TypeOf(time.Time{}) +) + +type iterationKind int + +const ( + iterationKindKey iterationKind = iota + iterationKindValue + iterationKindKeyValue +) + +type global struct { + stash stash + varNames map[unistring.String]struct{} + + Object *Object + Array *Object + Function *Object + String *Object + Number *Object + Boolean *Object + RegExp *Object + Date *Object + Symbol *Object + Proxy *Object + + ArrayBuffer *Object + DataView *Object + TypedArray *Object + Uint8Array *Object + Uint8ClampedArray *Object + Int8Array *Object + Uint16Array *Object + Int16Array *Object + Uint32Array *Object + Int32Array *Object + Float32Array *Object + Float64Array *Object + + WeakSet *Object + WeakMap *Object + Map *Object + Set *Object + + Error *Object + TypeError *Object + ReferenceError *Object + SyntaxError *Object + RangeError *Object + EvalError *Object + URIError *Object + + GoError *Object + + ObjectPrototype *Object + ArrayPrototype *Object + NumberPrototype *Object + StringPrototype *Object + BooleanPrototype *Object + FunctionPrototype *Object + RegExpPrototype *Object + DatePrototype *Object + SymbolPrototype *Object + + ArrayBufferPrototype *Object + DataViewPrototype *Object + TypedArrayPrototype *Object + WeakSetPrototype *Object + WeakMapPrototype *Object + MapPrototype *Object + SetPrototype *Object + + IteratorPrototype *Object + ArrayIteratorPrototype *Object + 
MapIteratorPrototype *Object + SetIteratorPrototype *Object + StringIteratorPrototype *Object + RegExpStringIteratorPrototype *Object + + ErrorPrototype *Object + TypeErrorPrototype *Object + SyntaxErrorPrototype *Object + RangeErrorPrototype *Object + ReferenceErrorPrototype *Object + EvalErrorPrototype *Object + URIErrorPrototype *Object + + GoErrorPrototype *Object + + Eval *Object + + thrower *Object + throwerProperty Value + + stdRegexpProto *guardedObject + + weakSetAdder *Object + weakMapAdder *Object + mapAdder *Object + setAdder *Object + arrayValues *Object + arrayToString *Object +} + +type Flag int + +const ( + FLAG_NOT_SET Flag = iota + FLAG_FALSE + FLAG_TRUE +) + +func (f Flag) Bool() bool { + return f == FLAG_TRUE +} + +func ToFlag(b bool) Flag { + if b { + return FLAG_TRUE + } + return FLAG_FALSE +} + +type RandSource func() float64 + +type Now func() time.Time + +type Runtime struct { + global global + globalObject *Object + stringSingleton *stringObject + rand RandSource + now Now + _collator *collate.Collator + parserOptions []parser.Option + + symbolRegistry map[unistring.String]*Symbol + + typeInfoCache map[reflect.Type]*reflectTypeInfo + fieldNameMapper FieldNameMapper + + vm *vm + hash *maphash.Hash + idSeq uint64 +} + +type StackFrame struct { + prg *Program + funcName unistring.String + pc int +} + +func (f *StackFrame) SrcName() string { + if f.prg == nil { + return "" + } + return f.prg.src.Name() +} + +func (f *StackFrame) FuncName() string { + if f.funcName == "" && f.prg == nil { + return "" + } + if f.funcName == "" { + return "" + } + return f.funcName.String() +} + +func (f *StackFrame) Position() file.Position { + if f.prg == nil || f.prg.src == nil { + return file.Position{} + } + return f.prg.src.Position(f.prg.sourceOffset(f.pc)) +} + +func (f *StackFrame) Write(b *bytes.Buffer) { + if f.prg != nil { + if n := f.prg.funcName; n != "" { + b.WriteString(n.String()) + b.WriteString(" (") + } + p := f.Position() + if p.Filename != 
"" { + b.WriteString(p.Filename) + } else { + b.WriteString("") + } + b.WriteByte(':') + b.WriteString(strconv.Itoa(p.Line)) + b.WriteByte(':') + b.WriteString(strconv.Itoa(p.Column)) + b.WriteByte('(') + b.WriteString(strconv.Itoa(f.pc)) + b.WriteByte(')') + if f.prg.funcName != "" { + b.WriteByte(')') + } + } else { + if f.funcName != "" { + b.WriteString(f.funcName.String()) + b.WriteString(" (") + } + b.WriteString("native") + if f.funcName != "" { + b.WriteByte(')') + } + } +} + +type Exception struct { + val Value + stack []StackFrame +} + +type uncatchableException struct { + stack *[]StackFrame + err error +} + +type InterruptedError struct { + Exception + iface interface{} +} + +type StackOverflowError struct { + Exception +} + +func (e *InterruptedError) Value() interface{} { + return e.iface +} + +func (e *InterruptedError) String() string { + if e == nil { + return "" + } + var b bytes.Buffer + if e.iface != nil { + b.WriteString(fmt.Sprint(e.iface)) + b.WriteByte('\n') + } + e.writeFullStack(&b) + return b.String() +} + +func (e *InterruptedError) Error() string { + if e == nil || e.iface == nil { + return "" + } + var b bytes.Buffer + b.WriteString(fmt.Sprint(e.iface)) + e.writeShortStack(&b) + return b.String() +} + +func (e *Exception) writeFullStack(b *bytes.Buffer) { + for _, frame := range e.stack { + b.WriteString("\tat ") + frame.Write(b) + b.WriteByte('\n') + } +} + +func (e *Exception) writeShortStack(b *bytes.Buffer) { + if len(e.stack) > 0 && (e.stack[0].prg != nil || e.stack[0].funcName != "") { + b.WriteString(" at ") + e.stack[0].Write(b) + } +} + +func (e *Exception) String() string { + if e == nil { + return "" + } + var b bytes.Buffer + if e.val != nil { + b.WriteString(e.val.String()) + b.WriteByte('\n') + } + e.writeFullStack(&b) + return b.String() +} + +func (e *Exception) Error() string { + if e == nil || e.val == nil { + return "" + } + var b bytes.Buffer + b.WriteString(e.val.String()) + e.writeShortStack(&b) + return 
b.String() +} + +func (e *Exception) Value() Value { + return e.val +} + +func (r *Runtime) addToGlobal(name string, value Value) { + r.globalObject.self._putProp(unistring.String(name), value, true, false, true) +} + +func (r *Runtime) createIterProto(val *Object) objectImpl { + o := newBaseObjectObj(val, r.global.ObjectPrototype, classObject) + + o._putSym(SymIterator, valueProp(r.newNativeFunc(r.returnThis, nil, "[Symbol.iterator]", nil, 0), true, false, true)) + return o +} + +func (r *Runtime) init() { + r.rand = rand.Float64 + r.now = time.Now + r.global.ObjectPrototype = r.newBaseObject(nil, classObject).val + r.globalObject = r.NewObject() + + r.vm = &vm{ + r: r, + } + r.vm.init() + + r.global.FunctionPrototype = r.newNativeFunc(func(FunctionCall) Value { + return _undefined + }, nil, " ", nil, 0) + + r.global.IteratorPrototype = r.newLazyObject(r.createIterProto) + + r.initObject() + r.initFunction() + r.initArray() + r.initString() + r.initGlobalObject() + r.initNumber() + r.initRegExp() + r.initDate() + r.initBoolean() + r.initProxy() + r.initReflect() + + r.initErrors() + + r.global.Eval = r.newNativeFunc(r.builtin_eval, nil, "eval", nil, 1) + r.addToGlobal("eval", r.global.Eval) + + r.initMath() + r.initJSON() + + r.initTypedArrays() + r.initSymbol() + r.initWeakSet() + r.initWeakMap() + r.initMap() + r.initSet() + + r.global.thrower = r.newNativeFunc(r.builtin_thrower, nil, "thrower", nil, 0) + r.global.throwerProperty = &valueProperty{ + getterFunc: r.global.thrower, + setterFunc: r.global.thrower, + accessor: true, + } +} + +func (r *Runtime) typeErrorResult(throw bool, args ...interface{}) { + if throw { + panic(r.NewTypeError(args...)) + } +} + +func (r *Runtime) newError(typ *Object, format string, args ...interface{}) Value { + msg := fmt.Sprintf(format, args...) 
+ return r.builtin_new(typ, []Value{newStringValue(msg)}) +} + +func (r *Runtime) throwReferenceError(name unistring.String) { + panic(r.newError(r.global.ReferenceError, "%s is not defined", name)) +} + +func (r *Runtime) newSyntaxError(msg string, offset int) Value { + return r.builtin_new(r.global.SyntaxError, []Value{newStringValue(msg)}) +} + +func newBaseObjectObj(obj, proto *Object, class string) *baseObject { + o := &baseObject{ + class: class, + val: obj, + extensible: true, + prototype: proto, + } + obj.self = o + o.init() + return o +} + +func newGuardedObj(proto *Object, class string) *guardedObject { + return &guardedObject{ + baseObject: baseObject{ + class: class, + extensible: true, + prototype: proto, + }, + } +} + +func (r *Runtime) newBaseObject(proto *Object, class string) (o *baseObject) { + v := &Object{runtime: r} + return newBaseObjectObj(v, proto, class) +} + +func (r *Runtime) newGuardedObject(proto *Object, class string) (o *guardedObject) { + v := &Object{runtime: r} + o = newGuardedObj(proto, class) + v.self = o + o.val = v + o.init() + return +} + +func (r *Runtime) NewObject() (v *Object) { + return r.newBaseObject(r.global.ObjectPrototype, classObject).val +} + +// CreateObject creates an object with given prototype. Equivalent of Object.create(proto). +func (r *Runtime) CreateObject(proto *Object) *Object { + return r.newBaseObject(proto, classObject).val +} + +func (r *Runtime) NewArray(items ...interface{}) *Object { + values := make([]Value, len(items)) + for i, item := range items { + values[i] = r.ToValue(item) + } + return r.newArrayValues(values) +} + +func (r *Runtime) NewTypeError(args ...interface{}) *Object { + msg := "" + if len(args) > 0 { + f, _ := args[0].(string) + msg = fmt.Sprintf(f, args[1:]...) 
+ } + return r.builtin_new(r.global.TypeError, []Value{newStringValue(msg)}) +} + +func (r *Runtime) NewGoError(err error) *Object { + e := r.newError(r.global.GoError, err.Error()).(*Object) + e.Set("value", err) + return e +} + +func (r *Runtime) newFunc(name unistring.String, len int, strict bool) (f *funcObject) { + v := &Object{runtime: r} + + f = &funcObject{} + f.class = classFunction + f.val = v + f.extensible = true + v.self = f + f.prototype = r.global.FunctionPrototype + f.init(name, len) + if strict { + f._put("caller", r.global.throwerProperty) + f._put("arguments", r.global.throwerProperty) + } + return +} + +func (r *Runtime) newNativeFuncObj(v *Object, call func(FunctionCall) Value, construct func(args []Value, proto *Object) *Object, name unistring.String, proto *Object, length int) *nativeFuncObject { + f := &nativeFuncObject{ + baseFuncObject: baseFuncObject{ + baseObject: baseObject{ + class: classFunction, + val: v, + extensible: true, + prototype: r.global.FunctionPrototype, + }, + }, + f: call, + construct: r.wrapNativeConstruct(construct, proto), + } + v.self = f + f.init(name, length) + if proto != nil { + f._putProp("prototype", proto, false, false, false) + } + return f +} + +func (r *Runtime) newNativeConstructor(call func(ConstructorCall) *Object, name unistring.String, length int) *Object { + v := &Object{runtime: r} + + f := &nativeFuncObject{ + baseFuncObject: baseFuncObject{ + baseObject: baseObject{ + class: classFunction, + val: v, + extensible: true, + prototype: r.global.FunctionPrototype, + }, + }, + } + + f.f = func(c FunctionCall) Value { + thisObj, _ := c.This.(*Object) + if thisObj != nil { + res := call(ConstructorCall{ + This: thisObj, + Arguments: c.Arguments, + }) + if res == nil { + return _undefined + } + return res + } + return f.defaultConstruct(call, c.Arguments, nil) + } + + f.construct = func(args []Value, newTarget *Object) *Object { + return f.defaultConstruct(call, args, newTarget) + } + + v.self = f + 
f.init(name, length) + + proto := r.NewObject() + proto.self._putProp("constructor", v, true, false, true) + f._putProp("prototype", proto, true, false, false) + + return v +} + +func (r *Runtime) newNativeConstructOnly(v *Object, ctor func(args []Value, newTarget *Object) *Object, defaultProto *Object, name unistring.String, length int) *nativeFuncObject { + if v == nil { + v = &Object{runtime: r} + } + + f := &nativeFuncObject{ + baseFuncObject: baseFuncObject{ + baseObject: baseObject{ + class: classFunction, + val: v, + extensible: true, + prototype: r.global.FunctionPrototype, + }, + }, + f: func(call FunctionCall) Value { + return ctor(call.Arguments, nil) + }, + construct: func(args []Value, newTarget *Object) *Object { + if newTarget == nil { + newTarget = v + } + return ctor(args, newTarget) + }, + } + v.self = f + f.init(name, length) + if defaultProto != nil { + f._putProp("prototype", defaultProto, false, false, false) + } + + return f +} + +func (r *Runtime) newNativeFunc(call func(FunctionCall) Value, construct func(args []Value, proto *Object) *Object, name unistring.String, proto *Object, length int) *Object { + v := &Object{runtime: r} + + f := &nativeFuncObject{ + baseFuncObject: baseFuncObject{ + baseObject: baseObject{ + class: classFunction, + val: v, + extensible: true, + prototype: r.global.FunctionPrototype, + }, + }, + f: call, + construct: r.wrapNativeConstruct(construct, proto), + } + v.self = f + f.init(name, length) + if proto != nil { + f._putProp("prototype", proto, false, false, false) + proto.self._putProp("constructor", v, true, false, true) + } + return v +} + +func (r *Runtime) newNativeFuncConstructObj(v *Object, construct func(args []Value, proto *Object) *Object, name unistring.String, proto *Object, length int) *nativeFuncObject { + f := &nativeFuncObject{ + baseFuncObject: baseFuncObject{ + baseObject: baseObject{ + class: classFunction, + val: v, + extensible: true, + prototype: r.global.FunctionPrototype, + }, + }, + f: 
r.constructToCall(construct, proto), + construct: r.wrapNativeConstruct(construct, proto), + } + + f.init(name, length) + if proto != nil { + f._putProp("prototype", proto, false, false, false) + } + return f +} + +func (r *Runtime) newNativeFuncConstruct(construct func(args []Value, proto *Object) *Object, name unistring.String, prototype *Object, length int) *Object { + return r.newNativeFuncConstructProto(construct, name, prototype, r.global.FunctionPrototype, length) +} + +func (r *Runtime) newNativeFuncConstructProto(construct func(args []Value, proto *Object) *Object, name unistring.String, prototype, proto *Object, length int) *Object { + v := &Object{runtime: r} + + f := &nativeFuncObject{} + f.class = classFunction + f.val = v + f.extensible = true + v.self = f + f.prototype = proto + f.f = r.constructToCall(construct, prototype) + f.construct = r.wrapNativeConstruct(construct, prototype) + f.init(name, length) + if prototype != nil { + f._putProp("prototype", prototype, false, false, false) + prototype.self._putProp("constructor", v, true, false, true) + } + return v +} + +func (r *Runtime) newPrimitiveObject(value Value, proto *Object, class string) *Object { + v := &Object{runtime: r} + + o := &primitiveValueObject{} + o.class = class + o.val = v + o.extensible = true + v.self = o + o.prototype = proto + o.pValue = value + o.init() + return v +} + +func (r *Runtime) builtin_Number(call FunctionCall) Value { + if len(call.Arguments) > 0 { + return call.Arguments[0].ToNumber() + } else { + return valueInt(0) + } +} + +func (r *Runtime) builtin_newNumber(args []Value, proto *Object) *Object { + var v Value + if len(args) > 0 { + v = args[0].ToNumber() + } else { + v = intToValue(0) + } + return r.newPrimitiveObject(v, proto, classNumber) +} + +func (r *Runtime) builtin_Boolean(call FunctionCall) Value { + if len(call.Arguments) > 0 { + if call.Arguments[0].ToBoolean() { + return valueTrue + } else { + return valueFalse + } + } else { + return valueFalse + 
} +} + +func (r *Runtime) builtin_newBoolean(args []Value, proto *Object) *Object { + var v Value + if len(args) > 0 { + if args[0].ToBoolean() { + v = valueTrue + } else { + v = valueFalse + } + } else { + v = valueFalse + } + return r.newPrimitiveObject(v, proto, classBoolean) +} + +func (r *Runtime) error_toString(call FunctionCall) Value { + var nameStr, msgStr valueString + obj := call.This.ToObject(r).self + name := obj.getStr("name", nil) + if name == nil || name == _undefined { + nameStr = asciiString("Error") + } else { + nameStr = name.toString() + } + msg := obj.getStr("message", nil) + if msg == nil || msg == _undefined { + msgStr = stringEmpty + } else { + msgStr = msg.toString() + } + if nameStr.length() == 0 { + return msgStr + } + if msgStr.length() == 0 { + return nameStr + } + var sb valueStringBuilder + sb.WriteString(nameStr) + sb.WriteString(asciiString(": ")) + sb.WriteString(msgStr) + return sb.String() +} + +func (r *Runtime) builtin_Error(args []Value, proto *Object) *Object { + obj := r.newBaseObject(proto, classError) + if len(args) > 0 && args[0] != _undefined { + obj._putProp("message", args[0], true, false, true) + } + return obj.val +} + +func (r *Runtime) builtin_new(construct *Object, args []Value) *Object { + return r.toConstructor(construct)(args, nil) +} + +func (r *Runtime) throw(e Value) { + panic(e) +} + +func (r *Runtime) builtin_thrower(FunctionCall) Value { + r.typeErrorResult(true, "'caller', 'callee', and 'arguments' properties may not be accessed on strict mode functions or the arguments objects for calls to them") + return nil +} + +func (r *Runtime) eval(srcVal valueString, direct, strict bool, this Value) Value { + src := escapeInvalidUtf16(srcVal) + vm := r.vm + inGlobal := true + if direct { + for s := vm.stash; s != nil; s = s.outer { + if s.variable { + inGlobal = false + break + } + } + } + p, err := r.compile("", src, strict, true, inGlobal) + if err != nil { + panic(err) + } + + vm.pushCtx() + vm.prg = p + 
	// NOTE(review): this is the tail of a function whose head is outside this
	// chunk (appears to be the eval implementation, given that builtin_eval
	// below delegates to r.eval — confirm against the full file).
	vm.pc = 0
	vm.args = 0
	vm.result = _undefined
	if !direct {
		// A non-direct eval always executes in the global scope.
		vm.stash = &r.global.stash
	}
	vm.sb = vm.sp
	vm.push(this)
	vm.run()
	retval := vm.result
	vm.popCtx()
	vm.halt = false
	vm.sp -= 1 // drop the 'this' value pushed above
	return retval
}

// builtin_eval implements the global eval() function: a missing argument
// yields undefined, a string argument is compiled and executed as an
// indirect eval in the global scope, and any other value is returned
// unchanged (per ECMAScript PerformEval).
func (r *Runtime) builtin_eval(call FunctionCall) Value {
	if len(call.Arguments) == 0 {
		return _undefined
	}
	if str, ok := call.Arguments[0].(valueString); ok {
		return r.eval(str, false, false, r.globalObject)
	}
	return call.Arguments[0]
}

// constructToCall adapts a native constructor so it can also be invoked as a
// plain function call; the fixed proto is always used as the prototype.
func (r *Runtime) constructToCall(construct func(args []Value, proto *Object) *Object, proto *Object) func(call FunctionCall) Value {
	return func(call FunctionCall) Value {
		return construct(call.Arguments, proto)
	}
}

// wrapNativeConstruct wraps a native constructor so that the prototype of the
// created instance is taken from newTarget.prototype when it is an object,
// falling back to the supplied proto otherwise. Returns nil if c is nil.
func (r *Runtime) wrapNativeConstruct(c func(args []Value, proto *Object) *Object, proto *Object) func(args []Value, newTarget *Object) *Object {
	if c == nil {
		return nil
	}
	return func(args []Value, newTarget *Object) *Object {
		var p *Object
		if newTarget != nil {
			if pp, ok := newTarget.self.getStr("prototype", nil).(*Object); ok {
				p = pp
			}
		}
		if p == nil {
			p = proto
		}
		return c(args, p)
	}
}

// toCallable returns the call handler of v, throwing a TypeError if v is not
// callable.
func (r *Runtime) toCallable(v Value) func(FunctionCall) Value {
	if call, ok := r.toObject(v).self.assertCallable(); ok {
		return call
	}
	r.typeErrorResult(true, "Value is not callable: %s", v.toString())
	return nil
}

// checkObjectCoercible throws a TypeError if v is undefined or null
// (ECMAScript RequireObjectCoercible).
func (r *Runtime) checkObjectCoercible(v Value) {
	switch v.(type) {
	case valueUndefined, valueNull:
		r.typeErrorResult(true, "Value is not object coercible")
	}
}

// toInt8 implements ECMAScript ToInt8: NaN and ±Inf map to 0; finite values
// are truncated toward zero and reduced modulo 2^8 (Go's integer narrowing).
func toInt8(v Value) int8 {
	v = v.ToNumber()
	if i, ok := v.(valueInt); ok {
		return int8(i)
	}

	if f, ok := v.(valueFloat); ok {
		f := float64(f)
		if !math.IsNaN(f) && !math.IsInf(f, 0) {
			return int8(int64(f))
		}
	}
	return 0
}

// toUint8 implements ECMAScript ToUint8: NaN and ±Inf map to 0; finite values
// are truncated and reduced modulo 2^8.
func toUint8(v Value) uint8 {
	v = v.ToNumber()
	if i, ok := v.(valueInt); ok {
		return uint8(i)
	}

	if f, ok := v.(valueFloat); ok {
		f := float64(f)
		if !math.IsNaN(f) && !math.IsInf(f, 0) {
			return uint8(int64(f))
		}
	}
	return 0
}

// toUint8Clamp implements ECMAScript ToUint8Clamp: the value is clamped to
// [0, 255]; exact halves round to the nearest even integer.
func toUint8Clamp(v Value) uint8 {
	v = v.ToNumber()
	if i, ok := v.(valueInt); ok {
		if i < 0 {
			return 0
		}
		if i <= 255 {
			return uint8(i)
		}
		return 255
	}

	if num, ok := v.(valueFloat); ok {
		num := float64(num)
		if !math.IsNaN(num) {
			if num < 0 {
				return 0
			}
			if num > 255 {
				return 255
			}
			f := math.Floor(num)
			f1 := f + 0.5
			if f1 < num {
				return uint8(f + 1)
			}
			if f1 > num {
				return uint8(f)
			}
			// num is exactly halfway between f and f+1: round half to even.
			r := uint8(f)
			if r&1 != 0 {
				return r + 1
			}
			return r
		}
	}
	return 0
}

// toInt16 implements ECMAScript ToInt16 (see toInt8 for the convention).
func toInt16(v Value) int16 {
	v = v.ToNumber()
	if i, ok := v.(valueInt); ok {
		return int16(i)
	}

	if f, ok := v.(valueFloat); ok {
		f := float64(f)
		if !math.IsNaN(f) && !math.IsInf(f, 0) {
			return int16(int64(f))
		}
	}
	return 0
}

// toUint16 implements ECMAScript ToUint16.
func toUint16(v Value) uint16 {
	v = v.ToNumber()
	if i, ok := v.(valueInt); ok {
		return uint16(i)
	}

	if f, ok := v.(valueFloat); ok {
		f := float64(f)
		if !math.IsNaN(f) && !math.IsInf(f, 0) {
			return uint16(int64(f))
		}
	}
	return 0
}

// toInt32 implements ECMAScript ToInt32.
func toInt32(v Value) int32 {
	v = v.ToNumber()
	if i, ok := v.(valueInt); ok {
		return int32(i)
	}

	if f, ok := v.(valueFloat); ok {
		f := float64(f)
		if !math.IsNaN(f) && !math.IsInf(f, 0) {
			return int32(int64(f))
		}
	}
	return 0
}

// toUint32 implements ECMAScript ToUint32.
func toUint32(v Value) uint32 {
	v = v.ToNumber()
	if i, ok := v.(valueInt); ok {
		return uint32(i)
	}

	if f, ok := v.(valueFloat); ok {
		f := float64(f)
		if !math.IsNaN(f) && !math.IsInf(f, 0) {
			return uint32(int64(f))
		}
	}
	return 0
}

// toInt64 converts v to int64; NaN and ±Inf map to 0, finite floats are
// truncated toward zero.
func toInt64(v Value) int64 {
	v = v.ToNumber()
	if i, ok := v.(valueInt); ok {
		return int64(i)
	}

	if f, ok := v.(valueFloat); ok {
		f := float64(f)
		if !math.IsNaN(f) && !math.IsInf(f, 0) {
			return int64(f)
		}
	}
	return 0
}

// toUint64 converts v to uint64 via int64 truncation (so negative values wrap
// in two's complement); NaN and ±Inf map to 0.
func toUint64(v Value) uint64 {
	v = v.ToNumber()
	if i, ok := v.(valueInt); ok {
		return uint64(i)
	}

	if f, ok := v.(valueFloat); ok {
		f := float64(f)
		if !math.IsNaN(f) && !math.IsInf(f, 0) {
			return uint64(int64(f))
		}
	}
	return 0
}

// toInt converts v to the platform int; NaN and ±Inf map to 0.
func toInt(v Value) int {
	v = v.ToNumber()
	if i, ok := v.(valueInt); ok {
		return int(i)
	}

	if f, ok := v.(valueFloat); ok {
		f := float64(f)
		if !math.IsNaN(f) && !math.IsInf(f, 0) {
			return int(f)
		}
	}
	return 0
}

// toUint converts v to the platform uint via int64 truncation; NaN and ±Inf
// map to 0.
func toUint(v Value) uint {
	v = v.ToNumber()
	if i, ok := v.(valueInt); ok {
		return uint(i)
	}

	if f, ok := v.(valueFloat); ok {
		f := float64(f)
		if !math.IsNaN(f) && !math.IsInf(f, 0) {
			return uint(int64(f))
		}
	}
	return 0
}

// toFloat32 narrows v to float32 using the standard float conversion.
func toFloat32(v Value) float32 {
	return float32(v.ToFloat())
}

// toLength implements ECMAScript ToLength: nil and negative values map to 0,
// and the result is capped just below maxInt so it is always a valid length.
func toLength(v Value) int64 {
	if v == nil {
		return 0
	}
	i := v.ToInteger()
	if i < 0 {
		return 0
	}
	if i >= maxInt {
		return maxInt - 1
	}
	return i
}

// toIntStrict converts an int64 to int, panicking with a RangeError on
// 32-bit platforms if the value does not fit.
func toIntStrict(i int64) int {
	if bits.UintSize == 32 {
		if i > math.MaxInt32 || i < math.MinInt32 {
			panic(rangeError("Integer value overflows 32-bit int"))
		}
	}
	return int(i)
}

// toIntClamp converts an int64 to int, saturating at the int32 limits on
// 32-bit platforms instead of panicking.
func toIntClamp(i int64) int {
	if bits.UintSize == 32 {
		if i > math.MaxInt32 {
			return math.MaxInt32
		}
		if i < math.MinInt32 {
			return math.MinInt32
		}
	}
	return int(i)
}

// toIndex implements ECMAScript ToIndex, throwing a RangeError for negative,
// too-large or (on 32-bit platforms) int-overflowing values.
func (r *Runtime) toIndex(v Value) int {
	intIdx := v.ToInteger()
	if intIdx >= 0 && intIdx < maxInt {
		if bits.UintSize == 32 && intIdx >= math.MaxInt32 {
			panic(r.newError(r.global.RangeError, "Index %s overflows int", v.String()))
		}
		return int(intIdx)
	}
	panic(r.newError(r.global.RangeError, "Invalid index %s", v.String()))
}

// toBoolean converts a Go bool to the corresponding JS boolean singleton.
func (r *Runtime) toBoolean(b bool) Value {
	if b {
		return valueTrue
	} else {
		return valueFalse
	}
}

// New creates an instance of a Javascript runtime that can be used to run code. Multiple instances may be created and
// used simultaneously, however it is not possible to pass JS values across runtimes.
+func New() *Runtime { + r := &Runtime{} + r.init() + return r +} + +// Compile creates an internal representation of the JavaScript code that can be later run using the Runtime.RunProgram() +// method. This representation is not linked to a runtime in any way and can be run in multiple runtimes (possibly +// at the same time). +func Compile(name, src string, strict bool) (*Program, error) { + return compile(name, src, strict, false, true) +} + +// CompileAST creates an internal representation of the JavaScript code that can be later run using the Runtime.RunProgram() +// method. This representation is not linked to a runtime in any way and can be run in multiple runtimes (possibly +// at the same time). +func CompileAST(prg *js_ast.Program, strict bool) (*Program, error) { + return compileAST(prg, strict, false, true) +} + +// MustCompile is like Compile but panics if the code cannot be compiled. +// It simplifies safe initialization of global variables holding compiled JavaScript code. +func MustCompile(name, src string, strict bool) *Program { + prg, err := Compile(name, src, strict) + if err != nil { + panic(err) + } + + return prg +} + +// Parse takes a source string and produces a parsed AST. Use this function if you want to pass options +// to the parser, e.g.: +// +// p, err := Parse("test.js", "var a = true", parser.WithDisableSourceMaps) +// if err != nil { /* ... */ } +// prg, err := CompileAST(p, true) +// // ... +// +// Otherwise use Compile which combines both steps. +func Parse(name, src string, options ...parser.Option) (prg *js_ast.Program, err error) { + prg, err1 := parser.ParseFile(nil, name, src, 0, options...) + if err1 != nil { + // FIXME offset + err = &CompilerSyntaxError{ + CompilerError: CompilerError{ + Message: err1.Error(), + }, + } + } + return +} + +func compile(name, src string, strict, eval, inGlobal bool, parserOptions ...parser.Option) (p *Program, err error) { + prg, err := Parse(name, src, parserOptions...) 
+ if err != nil { + return + } + + return compileAST(prg, strict, eval, inGlobal) +} + +func compileAST(prg *js_ast.Program, strict, eval, inGlobal bool) (p *Program, err error) { + c := newCompiler() + + defer func() { + if x := recover(); x != nil { + p = nil + switch x1 := x.(type) { + case *CompilerSyntaxError: + err = x1 + default: + panic(x) + } + } + }() + + c.compile(prg, strict, eval, inGlobal) + p = c.p + return +} + +func (r *Runtime) compile(name, src string, strict, eval, inGlobal bool) (p *Program, err error) { + p, err = compile(name, src, strict, eval, inGlobal, r.parserOptions...) + if err != nil { + switch x1 := err.(type) { + case *CompilerSyntaxError: + err = &Exception{ + val: r.builtin_new(r.global.SyntaxError, []Value{newStringValue(x1.Error())}), + } + case *CompilerReferenceError: + err = &Exception{ + val: r.newError(r.global.ReferenceError, x1.Message), + } // TODO proper message + } + } + return +} + +// RunString executes the given string in the global context. +func (r *Runtime) RunString(str string) (Value, error) { + return r.RunScript("", str) +} + +// RunScript executes the given string in the global context. +func (r *Runtime) RunScript(name, src string) (Value, error) { + p, err := r.compile(name, src, false, false, true) + + if err != nil { + return nil, err + } + + return r.RunProgram(p) +} + +// RunProgram executes a pre-compiled (see Compile()) code in the global context. 
func (r *Runtime) RunProgram(p *Program) (result Value, err error) {
	defer func() {
		if x := recover(); x != nil {
			// uncatchableException carries interrupts and stack overflows out
			// of the VM; anything else is a genuine panic and is re-raised.
			if ex, ok := x.(*uncatchableException); ok {
				err = ex.err
			} else {
				panic(x)
			}
		}
	}()
	vm := r.vm
	recursive := false
	if len(vm.callStack) > 0 {
		// Re-entrant invocation (a Go function called from JS is running a
		// program): save the current VM context and run in the global scope.
		recursive = true
		vm.pushCtx()
		vm.stash = &r.global.stash
		vm.sb = vm.sp - 1
	}
	vm.prg = p
	vm.pc = 0
	vm.result = _undefined
	ex := vm.runTry()
	if ex == nil {
		result = r.vm.result
	} else {
		err = ex
	}
	if recursive {
		// Restore the interrupted context.
		vm.popCtx()
		vm.halt = false
		vm.clearStack()
	} else {
		// Top-level run finished: release the stack and global job state.
		vm.stack = nil
		vm.prg = nil
		r.leave()
	}
	return
}

// CaptureCallStack appends the current call stack frames to the stack slice (which may be nil) up to the specified depth.
// The most recent frame will be the first one.
// If depth <= 0 or more than the number of available frames, returns the entire stack.
// This method is not safe for concurrent use and should only be called by a Go function that is
// called from a running script.
func (r *Runtime) CaptureCallStack(depth int, stack []StackFrame) []StackFrame {
	l := len(r.vm.callStack)
	var offset int
	if depth > 0 {
		// +1 accounts for the current (not yet pushed) frame.
		offset = l - depth + 1
		if offset < 0 {
			offset = 0
		}
	}
	if stack == nil {
		stack = make([]StackFrame, 0, l-offset+1)
	}
	return r.vm.captureStack(stack, offset)
}

// Interrupt a running JavaScript. The corresponding Go call will return an *InterruptedError containing v.
// Note, it only works while in JavaScript code, it does not interrupt native Go functions (which includes all built-ins).
// If the runtime is currently not running, it will be immediately interrupted on the next Run*() call.
// To avoid that use ClearInterrupt()
func (r *Runtime) Interrupt(v interface{}) {
	r.vm.Interrupt(v)
}

// ClearInterrupt resets the interrupt flag. Typically this needs to be called before the runtime
// is made available for re-use if there is a chance it could have been interrupted with Interrupt().
// Otherwise if Interrupt() was called when runtime was not running (e.g. if it had already finished)
// so that Interrupt() didn't actually trigger, an attempt to use the runtime will immediately cause
// an interruption. It is up to the user to ensure proper synchronisation so that ClearInterrupt() is
// only called when the runtime has finished and there is no chance of a concurrent Interrupt() call.
func (r *Runtime) ClearInterrupt() {
	r.vm.ClearInterrupt()
}

/*
ToValue converts a Go value into a JavaScript value of a most appropriate type. Structural types (such as structs, maps
and slices) are wrapped so that changes are reflected on the original value which can be retrieved using Value.Export().

WARNING! There are two very important caveats to bear in mind when modifying wrapped Go structs, maps and
slices.

1. If a slice is passed by value (not as a pointer), resizing the slice does not reflect on the original
value. Moreover, extending the slice may result in the underlying array being re-allocated and copied.
For example:

	a := []interface{}{1}
	vm.Set("a", a)
	vm.RunString(`a.push(2); a[0] = 0;`)
	fmt.Println(a[0]) // prints "1"

2. If a regular JavaScript Object is assigned as an element of a wrapped Go struct, map or array, it is
Export()'ed and therefore copied. This may result in an unexpected behaviour in JavaScript:

	m := map[string]interface{}{}
	vm.Set("m", m)
	vm.RunString(`
	var obj = {test: false};
	m.obj = obj; // obj gets Export()'ed, i.e.
copied to a new map[string]interface{} and then this map is set as m["obj"] + obj.test = true; // note, m.obj.test is still false + `) + fmt.Println(m["obj"].(map[string]interface{})["test"]) // prints "false" + +Notes on individual types: + +Primitive types + +Primitive types (numbers, string, bool) are converted to the corresponding JavaScript primitives. + +Strings + +Because of the difference in internal string representation between ECMAScript (which uses UTF-16) and Go (which uses +UTF-8) conversion from JS to Go may be lossy. In particular, code points that can be part of UTF-16 surrogate pairs +(0xD800-0xDFFF) cannot be represented in UTF-8 unless they form a valid surrogate pair and are replaced with +utf8.RuneError. + +Nil + +Nil is converted to null. + +Functions + +func(FunctionCall) Value is treated as a native JavaScript function. This increases performance because there are no +automatic argument and return value type conversions (which involves reflect). Attempting to use +the function as a constructor will result in a TypeError. + +func(FunctionCall, *Runtime) Value is treated as above, except the *Runtime is also passed as a parameter. + +func(ConstructorCall) *Object is treated as a native constructor, allowing to use it with the new +operator: + + func MyObject(call goja.ConstructorCall) *goja.Object { + // call.This contains the newly created object as per http://www.ecma-international.org/ecma-262/5.1/index.html#sec-13.2.2 + // call.Arguments contain arguments passed to the function + + call.This.Set("method", method) + + //... + + // If return value is a non-nil *Object, it will be used instead of call.This + // This way it is possible to return a Go struct or a map converted + // into goja.Value using ToValue(), however in this case + // instanceof will not work as expected. 
		return nil
	}

	runtime.Set("MyObject", MyObject)

Then it can be used in JS as follows:

	var o = new MyObject(arg);
	var o1 = MyObject(arg); // same thing
	o instanceof MyObject && o1 instanceof MyObject; // true

When a native constructor is called directly (without the new operator) its behavior depends on
this value: if it's an Object, it is passed through, otherwise a new one is created exactly as
if it was called with the new operator. In either case call.NewTarget will be nil.

func(ConstructorCall, *Runtime) *Object is treated as above, except the *Runtime is also passed as a parameter.

Any other Go function is wrapped so that the arguments are automatically converted into the required Go types and the
return value is converted to a JavaScript value (using this method). If conversion is not possible, a TypeError is
thrown.

Functions with multiple return values return an Array. If the last return value is an `error` it is not returned but
converted into a JS exception. If the error is *Exception, it is thrown as is, otherwise it's wrapped in a GoError.
Note that if there are exactly two return values and the last is an `error`, the function returns the first value as is,
not an Array.

Structs

Structs are converted to Object-like values. Fields and methods are available as properties, their values are
results of this method (ToValue()) applied to the corresponding Go value.

Field properties are writable (if the struct is addressable) and non-configurable.
Method properties are non-writable and non-configurable.

Attempt to define a new property or delete an existing property will fail (throw in strict mode) unless it's a Symbol
property. Symbol properties only exist in the wrapper and do not affect the underlying Go value.
+Note that because a wrapper is created every time a property is accessed it may lead to unexpected results such as this: + + type Field struct{ + } + type S struct { + Field *Field + } + var s = S{ + Field: &Field{}, + } + vm := New() + vm.Set("s", &s) + res, err := vm.RunString(` + var sym = Symbol(66); + var field1 = s.Field; + field1[sym] = true; + var field2 = s.Field; + field1 === field2; // true, because the equality operation compares the wrapped values, not the wrappers + field1[sym] === true; // true + field2[sym] === undefined; // also true + `) + +The same applies to values from maps and slices as well. + +Handling of time.Time + +time.Time does not get special treatment and therefore is converted just like any other `struct` providing access to +all its methods. This is done deliberately instead of converting it to a `Date` because these two types are not fully +compatible: `time.Time` includes zone, whereas JS `Date` doesn't. Doing the conversion implicitly therefore would +result in a loss of information. + +If you need to convert it to a `Date`, it can be done either in JS: + + var d = new Date(goval.UnixNano()/1e6); + +... or in Go: + + now := time.Now() + vm := New() + val, err := vm.New(vm.Get("Date").ToObject(vm), vm.ToValue(now.UnixNano()/1e6)) + if err != nil { + ... + } + vm.Set("d", val) + +Note that Value.Export() for a `Date` value returns time.Time in local timezone. + +Maps + +Maps with string or integer key type are converted into host objects that largely behave like a JavaScript Object. + +Maps with methods + +If a map type has at least one method defined, the properties of the resulting Object represent methods, not map keys. +This is because in JavaScript there is no distinction between 'object.key` and `object[key]`, unlike Go. +If access to the map values is required, it can be achieved by defining another method or, if it's not possible, by +defining an external getter function. 

Slices

Slices are converted into host objects that behave largely like JavaScript Array. It has the appropriate
prototype and all the usual methods should work. There is, however, a caveat: converted Arrays may not contain holes
(because Go slices cannot). This means that hasOwnProperty(n) always returns `true` if n < length. Deleting an item with
an index < length will set it to a zero value (but the property will remain). Nil slice elements are converted to
`null`. Accessing an element beyond `length` returns `undefined`. Also see the warning above about passing slices as
values (as opposed to pointers).

Any other type is converted to a generic reflect based host object. Depending on the underlying type it behaves similar
to a Number, String, Boolean or Object.

Note that the underlying type is not lost, calling Export() returns the original Go value. This applies to all
reflect based types.
*/
func (r *Runtime) ToValue(i interface{}) Value {
	// Fast paths for the common concrete types; anything else falls through
	// to the reflect-based wrapping below.
	switch i := i.(type) {
	case nil:
		return _null
	case *Object:
		if i == nil || i.runtime == nil {
			return _null
		}
		if i.runtime != r {
			panic(r.NewTypeError("Illegal runtime transition of an Object"))
		}
		return i
	case valueContainer:
		return i.toValue(r)
	case Value:
		return i
	case string:
		return newStringValue(i)
	case bool:
		if i {
			return valueTrue
		} else {
			return valueFalse
		}
	case func(FunctionCall) Value:
		// Native function: the Go function's name is used as the JS name.
		name := unistring.NewFromString(runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name())
		return r.newNativeFunc(i, nil, name, nil, 0)
	case func(FunctionCall, *Runtime) Value:
		name := unistring.NewFromString(runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name())
		return r.newNativeFunc(func(call FunctionCall) Value {
			return i(call, r)
		}, nil, name, nil, 0)
	case func(ConstructorCall) *Object:
		name := unistring.NewFromString(runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name())
		return r.newNativeConstructor(i, name, 0)
	case func(ConstructorCall, *Runtime) *Object:
		name := unistring.NewFromString(runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name())
		return r.newNativeConstructor(func(call ConstructorCall) *Object {
			return i(call, r)
		}, name, 0)
	case int:
		return intToValue(int64(i))
	case int8:
		return intToValue(int64(i))
	case int16:
		return intToValue(int64(i))
	case int32:
		return intToValue(int64(i))
	case int64:
		return intToValue(i)
	case uint:
		// Values above MaxInt64 lose precision and become floats.
		if uint64(i) <= math.MaxInt64 {
			return intToValue(int64(i))
		} else {
			return floatToValue(float64(i))
		}
	case uint8:
		return intToValue(int64(i))
	case uint16:
		return intToValue(int64(i))
	case uint32:
		return intToValue(int64(i))
	case uint64:
		if i <= math.MaxInt64 {
			return intToValue(int64(i))
		}
		return floatToValue(float64(i))
	case float32:
		return floatToValue(float64(i))
	case float64:
		return floatToValue(i)
	case map[string]interface{}:
		if i == nil {
			return _null
		}
		obj := &Object{runtime: r}
		m := &objectGoMapSimple{
			baseObject: baseObject{
				val:        obj,
				extensible: true,
			},
			data: i,
		}
		obj.self = m
		m.init()
		return obj
	case []interface{}:
		if i == nil {
			return _null
		}
		obj := &Object{runtime: r}
		a := &objectGoSlice{
			baseObject: baseObject{
				val: obj,
			},
			data: &i,
		}
		obj.self = a
		a.init()
		return obj
	case *[]interface{}:
		if i == nil {
			return _null
		}
		obj := &Object{runtime: r}
		a := &objectGoSlice{
			baseObject: baseObject{
				val: obj,
			},
			data: i,
		}
		obj.self = a
		a.init()
		return obj
	}

	// Generic path: unwrap pointers and dispatch on the reflect Kind.
	origValue := reflect.ValueOf(i)
	value := origValue
	for value.Kind() == reflect.Ptr {
		value = reflect.Indirect(value)
	}

	if !value.IsValid() {
		return _null
	}

	switch value.Kind() {
	case reflect.Map:
		// Only maps without methods and with string/numeric keys get the
		// map-like wrapper; other maps fall through to the generic object.
		if value.Type().NumMethod() == 0 {
			switch value.Type().Key().Kind() {
			case reflect.String, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
				reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
				reflect.Float64, reflect.Float32:

				obj := &Object{runtime: r}
				m := &objectGoMapReflect{
					objectGoReflect: objectGoReflect{
						baseObject: baseObject{
							val:        obj,
							extensible: true,
						},
						origValue: origValue,
						value:     value,
					},
				}
				m.init()
				obj.self = m
				return obj
			}
		}
	case reflect.Slice:
		obj := &Object{runtime: r}
		a := &objectGoSliceReflect{
			objectGoReflect: objectGoReflect{
				baseObject: baseObject{
					val: obj,
				},
				origValue: origValue,
				value:     value,
			},
		}
		a.init()
		obj.self = a
		return obj
	case reflect.Func:
		name := unistring.NewFromString(runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name())
		return r.newNativeFunc(r.wrapReflectFunc(value), nil, name, nil, value.Type().NumIn())
	}

	// Everything else becomes a generic reflect-backed host object.
	obj := &Object{runtime: r}
	o := &objectGoReflect{
		baseObject: baseObject{
			val: obj,
		},
		origValue: origValue,
		value:     value,
	}
	obj.self = o
	o.init()
	return obj
}

// wrapReflectFunc adapts an arbitrary Go function (as a reflect.Value) into a
// native JS function: JS arguments are converted to the Go parameter types,
// missing arguments are zero-filled, variadic functions are supported, and a
// trailing error return value is converted into a thrown JS exception.
func (r *Runtime) wrapReflectFunc(value reflect.Value) func(FunctionCall) Value {
	return func(call FunctionCall) Value {
		typ := value.Type()
		nargs := typ.NumIn()
		var in []reflect.Value

		if l := len(call.Arguments); l < nargs {
			// fill missing arguments with zero values
			n := nargs
			if typ.IsVariadic() {
				n--
			}
			in = make([]reflect.Value, n)
			for i := l; i < n; i++ {
				in[i] = reflect.Zero(typ.In(i))
			}
		} else {
			if l > nargs && !typ.IsVariadic() {
				l = nargs
			}
			in = make([]reflect.Value, l)
		}

		callSlice := false
		for i, a := range call.Arguments {
			var t reflect.Type

			n := i
			if n >= nargs-1 && typ.IsVariadic() {
				if n > nargs-1 {
					n = nargs - 1
				}

				t = typ.In(n).Elem()
			} else if n > nargs-1 { // ignore extra arguments
				break
			} else {
				t = typ.In(n)
			}

			// if this is a variadic Go function, and the caller has supplied
			// exactly the number of JavaScript arguments required, and this
			// is the last JavaScript argument, try treating it as the
			// actual set of
			// variadic Go arguments. if that succeeds, break
			// out of the loop.
			if typ.IsVariadic() && len(call.Arguments) == nargs && i == nargs-1 {
				v := reflect.New(typ.In(n)).Elem()
				if err := r.toReflectValue(a, v, &objectExportCtx{}); err == nil {
					in[i] = v
					callSlice = true
					break
				}
			}
			v := reflect.New(t).Elem()
			err := r.toReflectValue(a, v, &objectExportCtx{})
			if err != nil {
				panic(r.newError(r.global.TypeError, "could not convert function call parameter %v to %v", a, t))
			}
			in[i] = v
		}

		var out []reflect.Value
		if callSlice {
			out = value.CallSlice(in)
		} else {
			out = value.Call(in)
		}

		if len(out) == 0 {
			return _undefined
		}

		// NOTE(review): this matches by type name only, so any type named
		// "error" (not just the built-in interface) takes this path — confirm
		// this is intended.
		if last := out[len(out)-1]; last.Type().Name() == "error" {
			if !last.IsNil() {
				err := last.Interface()
				if _, ok := err.(*Exception); ok {
					panic(err)
				}
				panic(r.NewGoError(last.Interface().(error)))
			}
			out = out[:len(out)-1]
		}

		switch len(out) {
		case 0:
			return _undefined
		case 1:
			return r.ToValue(out[0].Interface())
		default:
			// Multiple (non-error) return values become a JS Array.
			s := make([]interface{}, len(out))
			for i, v := range out {
				s[i] = v.Interface()
			}

			return r.ToValue(s)
		}
	}
}

// toReflectValue converts a JS value into the Go value pointed at by dst.
// ctx tracks objects already being converted so that cyclic / shared
// references map to the same Go value instead of recursing forever.
func (r *Runtime) toReflectValue(v Value, dst reflect.Value, ctx *objectExportCtx) error {
	typ := dst.Type()
	// Numeric and primitive kinds use the ECMAScript conversion operations.
	switch typ.Kind() {
	case reflect.String:
		dst.Set(reflect.ValueOf(v.String()).Convert(typ))
		return nil
	case reflect.Bool:
		dst.Set(reflect.ValueOf(v.ToBoolean()).Convert(typ))
		return nil
	case reflect.Int:
		dst.Set(reflect.ValueOf(toInt(v)).Convert(typ))
		return nil
	case reflect.Int64:
		dst.Set(reflect.ValueOf(toInt64(v)).Convert(typ))
		return nil
	case reflect.Int32:
		dst.Set(reflect.ValueOf(toInt32(v)).Convert(typ))
		return nil
	case reflect.Int16:
		dst.Set(reflect.ValueOf(toInt16(v)).Convert(typ))
		return nil
	case reflect.Int8:
		dst.Set(reflect.ValueOf(toInt8(v)).Convert(typ))
		return nil
	case reflect.Uint:
		dst.Set(reflect.ValueOf(toUint(v)).Convert(typ))
		return nil
	case reflect.Uint64:
		dst.Set(reflect.ValueOf(toUint64(v)).Convert(typ))
		return nil
	case reflect.Uint32:
		dst.Set(reflect.ValueOf(toUint32(v)).Convert(typ))
		return nil
	case reflect.Uint16:
		dst.Set(reflect.ValueOf(toUint16(v)).Convert(typ))
		return nil
	case reflect.Uint8:
		dst.Set(reflect.ValueOf(toUint8(v)).Convert(typ))
		return nil
	case reflect.Float64:
		dst.Set(reflect.ValueOf(v.ToFloat()).Convert(typ))
		return nil
	case reflect.Float32:
		dst.Set(reflect.ValueOf(toFloat32(v)).Convert(typ))
		return nil
	}

	// Special destination types recognised by the runtime.
	if typ == typeCallable {
		if fn, ok := AssertFunction(v); ok {
			dst.Set(reflect.ValueOf(fn))
			return nil
		}
	}

	if typ == typeValue {
		dst.Set(reflect.ValueOf(v))
		return nil
	}

	if typ == typeObject {
		if obj, ok := v.(*Object); ok {
			dst.Set(reflect.ValueOf(obj))
			return nil
		}
	}

	{
		et := v.ExportType()
		if et == nil || et == reflectTypeNil {
			dst.Set(reflect.Zero(typ))
			return nil
		}

		// Try direct conversion of the exported value, dereferencing pointers
		// level by level (i counts how many Elem()s to apply).
		for i := 0; ; i++ {
			if et.ConvertibleTo(typ) {
				ev := reflect.ValueOf(exportValue(v, ctx))
				for ; i > 0; i-- {
					ev = ev.Elem()
				}
				dst.Set(ev.Convert(typ))
				return nil
			}
			if et.Kind() == reflect.Ptr {
				et = et.Elem()
			} else {
				break
			}
		}

		if typ == typeTime {
			// JS Date objects and date strings convert to time.Time.
			if obj, ok := v.(*Object); ok {
				if d, ok := obj.self.(*dateObject); ok {
					dst.Set(reflect.ValueOf(d.time()))
					return nil
				}
			}
			if et.Kind() == reflect.String {
				tme, ok := dateParse(v.String())
				if !ok {
					return fmt.Errorf("could not convert string %v to %v", v, typ)
				}
				dst.Set(reflect.ValueOf(tme))
				return nil
			}
		}
	}

	// Structured destinations: convert element-wise, registering the
	// destination in ctx first so self-references resolve.
	switch typ.Kind() {
	case reflect.Slice:
		if o, ok := v.(*Object); ok {
			if o.self.className() == classArray {
				if v, exists := ctx.getTyped(o.self, typ); exists {
					dst.Set(reflect.ValueOf(v))
					return nil
				}
				l := int(toLength(o.self.getStr("length", nil)))
				if dst.IsNil() || dst.Len() != l {
					dst.Set(reflect.MakeSlice(typ, l, l))
				}
				s := dst
				ctx.putTyped(o.self, typ, s.Interface())
				for i := 0; i < l; i++ {
					item := o.self.getIdx(valueInt(int64(i)), nil)
					err := r.toReflectValue(item, s.Index(i), ctx)
					if err != nil {
						return fmt.Errorf("could not convert array element %v to %v at %d: %w", v, typ, i, err)
					}
				}
				return nil
			}
		}
	case reflect.Map:
		if o, ok := v.(*Object); ok {
			if v, exists := ctx.getTyped(o.self, typ); exists {
				dst.Set(reflect.ValueOf(v))
				return nil
			}
			if dst.IsNil() {
				dst.Set(reflect.MakeMap(typ))
			}
			m := dst
			ctx.putTyped(o.self, typ, m.Interface())
			keyTyp := typ.Key()
			elemTyp := typ.Elem()
			needConvertKeys := !reflect.ValueOf("").Type().AssignableTo(keyTyp)
			iter := &enumerableIter{
				wrapped: o.self.enumerateOwnKeys(),
			}
			for item, next := iter.next(); next != nil; item, next = next() {
				var kv reflect.Value
				var err error
				if needConvertKeys {
					kv = reflect.New(keyTyp).Elem()
					err = r.toReflectValue(stringValueFromRaw(item.name), kv, ctx)
					if err != nil {
						return fmt.Errorf("could not convert map key %s to %v", item.name.String(), typ)
					}
				} else {
					kv = reflect.ValueOf(item.name.String())
				}

				ival := o.self.getStr(item.name, nil)
				if ival != nil {
					vv := reflect.New(elemTyp).Elem()
					err := r.toReflectValue(ival, vv, ctx)
					if err != nil {
						return fmt.Errorf("could not convert map value %v to %v at key %s", ival, typ, item.name.String())
					}
					m.SetMapIndex(kv, vv)
				} else {
					m.SetMapIndex(kv, reflect.Zero(elemTyp))
				}
			}

			return nil
		}
	case reflect.Struct:
		if o, ok := v.(*Object); ok {
			t := reflect.PtrTo(typ)
			if v, exists := ctx.getTyped(o.self, t); exists {
				dst.Set(reflect.ValueOf(v).Elem())
				return nil
			}
			s := dst
			ctx.putTyped(o.self, t, s.Addr().Interface())
			for i := 0; i < typ.NumField(); i++ {
				field := typ.Field(i)
				if ast.IsExported(field.Name) {
					name := field.Name
					if r.fieldNameMapper != nil {
						name = r.fieldNameMapper.FieldName(typ, field)
					}
					var v Value
					if field.Anonymous {
						// Embedded fields are filled from the object itself.
						v = o
					} else {
						v = o.self.getStr(unistring.NewFromString(name), nil)
					}

					if v != nil {
						err := r.toReflectValue(v, s.Field(i), ctx)
						if err != nil {
							return fmt.Errorf("could not convert struct value %v to %v for field %s: %w", v, field.Type, field.Name, err)
						}
					}
				}
			}
			return nil
		}
	case reflect.Func:
		if fn, ok := AssertFunction(v); ok {
			dst.Set(reflect.MakeFunc(typ, r.wrapJSFunc(fn, typ)))
			return nil
		}
	case reflect.Ptr:
		if o, ok := v.(*Object); ok {
			if v, exists := ctx.getTyped(o.self, typ); exists {
				dst.Set(reflect.ValueOf(v))
				return nil
			}
		}
		if dst.IsNil() {
			dst.Set(reflect.New(typ.Elem()))
		}
		return r.toReflectValue(v, dst.Elem(), ctx)
	}

	return fmt.Errorf("could not convert %v to %v", v, typ)
}

// wrapJSFunc adapts a JS Callable into a Go function of type typ (for
// reflect.MakeFunc): Go arguments are converted to JS values, the single JS
// result fills the first Go return value, and a JS exception either fills a
// trailing error return value or is re-raised as a panic.
func (r *Runtime) wrapJSFunc(fn Callable, typ reflect.Type) func(args []reflect.Value) (results []reflect.Value) {
	return func(args []reflect.Value) (results []reflect.Value) {
		jsArgs := make([]Value, len(args))
		for i, arg := range args {
			jsArgs[i] = r.ToValue(arg.Interface())
		}

		results = make([]reflect.Value, typ.NumOut())
		res, err := fn(_undefined, jsArgs...)
		if err == nil {
			if typ.NumOut() > 0 {
				v := reflect.New(typ.Out(0)).Elem()
				err = r.toReflectValue(res, v, &objectExportCtx{})
				if err == nil {
					results[0] = v
				}
			}
		}

		if err != nil {
			if typ.NumOut() == 2 && typ.Out(1).Name() == "error" {
				results[1] = reflect.ValueOf(err).Convert(typ.Out(1))
			} else {
				panic(err)
			}
		}

		// Any result slots left unset become zero values.
		for i, v := range results {
			if !v.IsValid() {
				results[i] = reflect.Zero(typ.Out(i))
			}
		}

		return
	}
}

// ExportTo converts a JavaScript value into the specified Go value. The second parameter must be a non-nil pointer.
// Exporting to an interface{} results in a value of the same type as Export() would produce.
// Exporting to numeric types uses the standard ECMAScript conversion operations, same as used when assigning
// values to non-clamped typed array items, e.g.
// https://262.ecma-international.org/#sec-toint32
// Returns error if conversion is not possible.
func (r *Runtime) ExportTo(v Value, target interface{}) error {
	tval := reflect.ValueOf(target)
	if tval.Kind() != reflect.Ptr || tval.IsNil() {
		return errors.New("target must be a non-nil pointer")
	}
	return r.toReflectValue(v, tval.Elem(), &objectExportCtx{})
}

// GlobalObject returns the global object.
func (r *Runtime) GlobalObject() *Object {
	return r.globalObject
}

// Set the specified variable in the global context.
// Equivalent to running "name = value" in non-strict mode.
// The value is first converted using ToValue().
// Note, this is not the same as GlobalObject().Set(name, value),
// because if a global lexical binding (let or const) exists, it is set instead.
func (r *Runtime) Set(name string, value interface{}) error {
	return r.try(func() {
		name := unistring.NewFromString(name)
		v := r.ToValue(value)
		// Prefer an existing global lexical binding over the global object.
		if ref := r.global.stash.getRefByName(name, false); ref != nil {
			ref.set(v)
		} else {
			r.globalObject.self.setOwnStr(name, v, true)
		}
	})
}

// Get the specified variable in the global context.
// Equivalent to dereferencing a variable by name in non-strict mode. If variable is not defined returns nil.
// Note, this is not the same as GlobalObject().Get(name),
// because if a global lexical binding (let or const) exists, it is used instead.
// This method will panic with an *Exception if a JavaScript exception is thrown in the process.
func (r *Runtime) Get(name string) (ret Value) {
	r.tryPanic(func() {
		n := unistring.NewFromString(name)
		// Global lexical bindings shadow properties of the global object.
		if v, exists := r.global.stash.getByName(n); exists {
			ret = v
		} else {
			ret = r.globalObject.self.getStr(n, nil)
		}
	})
	return
}

// SetRandSource sets random source for this Runtime. If not called, the default math/rand is used.
func (r *Runtime) SetRandSource(source RandSource) {
	r.rand = source
}

// SetTimeSource sets the current time source for this Runtime.
// If not called, the default time.Now() is used.
func (r *Runtime) SetTimeSource(now Now) {
	r.now = now
}

// SetParserOptions sets parser options to be used by RunString, RunScript and eval() within the code.
func (r *Runtime) SetParserOptions(opts ...parser.Option) {
	r.parserOptions = opts
}

// SetMaxCallStackSize sets the maximum function call depth. When exceeded, a *StackOverflowError is thrown and
// returned by RunProgram or by a Callable call. This is useful to prevent memory exhaustion caused by an
// infinite recursion. The default value is math.MaxInt32.
// This method (as the rest of the Set* methods) is not safe for concurrent use and may only be called
// from the vm goroutine or when the vm is not running.
func (r *Runtime) SetMaxCallStackSize(size int) {
	r.vm.maxCallStackSize = size
}

// New is an equivalent of the 'new' operator allowing to call it directly from Go.
func (r *Runtime) New(construct Value, args ...Value) (o *Object, err error) {
	err = r.try(func() {
		o = r.builtin_new(r.toObject(construct), args)
	})
	return
}

// Callable represents a JavaScript function that can be called from Go.
type Callable func(this Value, args ...Value) (Value, error)

// AssertFunction checks if the Value is a function and returns a Callable.
+func AssertFunction(v Value) (Callable, bool) { + if obj, ok := v.(*Object); ok { + if f, ok := obj.self.assertCallable(); ok { + return func(this Value, args ...Value) (ret Value, err error) { + defer func() { + if x := recover(); x != nil { + if ex, ok := x.(*uncatchableException); ok { + err = ex.err + } else { + panic(x) + } + } + }() + ex := obj.runtime.vm.try(func() { + ret = f(FunctionCall{ + This: this, + Arguments: args, + }) + }) + if ex != nil { + err = ex + } + vm := obj.runtime.vm + vm.clearStack() + if len(vm.callStack) == 0 { + obj.runtime.leave() + } + return + }, true + } + } + return nil, false +} + +// IsUndefined returns true if the supplied Value is undefined. Note, it checks against the real undefined, not +// against the global object's 'undefined' property. +func IsUndefined(v Value) bool { + return v == _undefined +} + +// IsNull returns true if the supplied Value is null. +func IsNull(v Value) bool { + return v == _null +} + +// IsNaN returns true if the supplied value is NaN. +func IsNaN(v Value) bool { + f, ok := v.(valueFloat) + return ok && math.IsNaN(float64(f)) +} + +// IsInfinity returns true if the supplied is (+/-)Infinity +func IsInfinity(v Value) bool { + return v == _positiveInf || v == _negativeInf +} + +// Undefined returns JS undefined value. Note if global 'undefined' property is changed this still returns the original value. +func Undefined() Value { + return _undefined +} + +// Null returns JS null value. +func Null() Value { + return _null +} + +// NaN returns a JS NaN value. +func NaN() Value { + return _NaN +} + +// PositiveInf returns a JS +Inf value. +func PositiveInf() Value { + return _positiveInf +} + +// NegativeInf returns a JS -Inf value. 
+func NegativeInf() Value { + return _negativeInf +} + +func tryFunc(f func()) (ret interface{}) { + defer func() { + ret = recover() + }() + + f() + return +} + +func (r *Runtime) try(f func()) error { + if ex := r.vm.try(f); ex != nil { + return ex + } + return nil +} + +func (r *Runtime) tryPanic(f func()) { + if ex := r.vm.try(f); ex != nil { + panic(ex) + } +} + +func (r *Runtime) toObject(v Value, args ...interface{}) *Object { + if obj, ok := v.(*Object); ok { + return obj + } + if len(args) > 0 { + panic(r.NewTypeError(args...)) + } else { + var s string + if v == nil { + s = "undefined" + } else { + s = v.String() + } + panic(r.NewTypeError("Value is not an object: %s", s)) + } +} + +func (r *Runtime) toNumber(v Value) Value { + switch o := v.(type) { + case valueInt, valueFloat: + return v + case *Object: + if pvo, ok := o.self.(*primitiveValueObject); ok { + return r.toNumber(pvo.pValue) + } + } + panic(r.NewTypeError("Value is not a number: %s", v)) +} + +func (r *Runtime) speciesConstructor(o, defaultConstructor *Object) func(args []Value, newTarget *Object) *Object { + c := o.self.getStr("constructor", nil) + if c != nil && c != _undefined { + c = r.toObject(c).self.getSym(SymSpecies, nil) + } + if c == nil || c == _undefined || c == _null { + c = defaultConstructor + } + return r.toConstructor(c) +} + +func (r *Runtime) speciesConstructorObj(o, defaultConstructor *Object) *Object { + c := o.self.getStr("constructor", nil) + if c != nil && c != _undefined { + c = r.toObject(c).self.getSym(SymSpecies, nil) + } + if c == nil || c == _undefined || c == _null { + return defaultConstructor + } + return r.toObject(c) +} + +func (r *Runtime) returnThis(call FunctionCall) Value { + return call.This +} + +func createDataPropertyOrThrow(o *Object, p Value, v Value) { + o.defineOwnProperty(p, PropertyDescriptor{ + Writable: FLAG_TRUE, + Enumerable: FLAG_TRUE, + Configurable: FLAG_TRUE, + Value: v, + }, true) +} + +func toPropertyKey(key Value) Value { + return 
key.ToString() +} + +func (r *Runtime) getVStr(v Value, p unistring.String) Value { + o := v.ToObject(r) + return o.self.getStr(p, v) +} + +func (r *Runtime) getV(v Value, p Value) Value { + o := v.ToObject(r) + return o.get(p, v) +} + +func (r *Runtime) getIterator(obj Value, method func(FunctionCall) Value) *Object { + if method == nil { + method = toMethod(r.getV(obj, SymIterator)) + if method == nil { + panic(r.NewTypeError("object is not iterable")) + } + } + + return r.toObject(method(FunctionCall{ + This: obj, + })) +} + +func returnIter(iter *Object) { + retMethod := toMethod(iter.self.getStr("return", nil)) + if retMethod != nil { + iter.runtime.toObject(retMethod(FunctionCall{This: iter})) + } +} + +func (r *Runtime) iterate(iter *Object, step func(Value)) { + for { + res := r.toObject(toMethod(iter.self.getStr("next", nil))(FunctionCall{This: iter})) + if nilSafe(res.self.getStr("done", nil)).ToBoolean() { + break + } + value := nilSafe(res.self.getStr("value", nil)) + ret := tryFunc(func() { + step(value) + }) + if ret != nil { + _ = tryFunc(func() { + returnIter(iter) + }) + panic(ret) + } + } +} + +func (r *Runtime) createIterResultObject(value Value, done bool) Value { + o := r.NewObject() + o.self.setOwnStr("value", value, false) + o.self.setOwnStr("done", r.toBoolean(done), false) + return o +} + +func (r *Runtime) newLazyObject(create func(*Object) objectImpl) *Object { + val := &Object{runtime: r} + o := &lazyObject{ + val: val, + create: create, + } + val.self = o + return val +} + +func (r *Runtime) getHash() *maphash.Hash { + if r.hash == nil { + r.hash = &maphash.Hash{} + } + return r.hash +} + +// called when the top level function returns (i.e. control is passed outside the Runtime). +func (r *Runtime) leave() { + // run jobs, etc... 
+} + +func nilSafe(v Value) Value { + if v != nil { + return v + } + return _undefined +} + +func isArray(object *Object) bool { + self := object.self + if proxy, ok := self.(*proxyObject); ok { + if proxy.target == nil { + panic(typeError("Cannot perform 'IsArray' on a proxy that has been revoked")) + } + return isArray(proxy.target) + } + switch self.className() { + case classArray: + return true + default: + return false + } +} + +func isRegexp(v Value) bool { + if o, ok := v.(*Object); ok { + matcher := o.self.getSym(SymMatch, nil) + if matcher != nil && matcher != _undefined { + return matcher.ToBoolean() + } + _, reg := o.self.(*regexpObject) + return reg + } + return false +} + +func limitCallArgs(call FunctionCall, n int) FunctionCall { + if len(call.Arguments) > n { + return FunctionCall{This: call.This, Arguments: call.Arguments[:n]} + } else { + return call + } +} + +func shrinkCap(newSize, oldCap int) int { + if oldCap > 8 { + if cap := oldCap / 2; cap >= newSize { + return cap + } + } + return oldCap +} + +func growCap(newSize, oldSize, oldCap int) int { + // Use the same algorithm as in runtime.growSlice + doublecap := oldCap + oldCap + if newSize > doublecap { + return newSize + } else { + if oldSize < 1024 { + return doublecap + } else { + cap := oldCap + // Check 0 < cap to detect overflow + // and prevent an infinite loop. + for 0 < cap && cap < newSize { + cap += cap / 4 + } + // Return the requested cap when + // the calculation overflowed. 
+ if cap <= 0 { + return newSize + } + return cap + } + } +} + +func (r *Runtime) genId() (ret uint64) { + if r.hash == nil { + h := r.getHash() + r.idSeq = h.Sum64() + } + if r.idSeq == 0 { + r.idSeq = 1 + } + ret = r.idSeq + r.idSeq++ + return +} + +func (r *Runtime) setGlobal(name unistring.String, v Value, strict bool) { + if ref := r.global.stash.getRefByName(name, strict); ref != nil { + ref.set(v) + } else { + o := r.globalObject.self + if strict { + if o.hasOwnPropertyStr(name) { + o.setOwnStr(name, v, true) + } else { + r.throwReferenceError(name) + } + } else { + o.setOwnStr(name, v, false) + } + } +} + +func strToArrayIdx(s unistring.String) uint32 { + if s == "" { + return math.MaxUint32 + } + l := len(s) + if s[0] == '0' { + if l == 1 { + return 0 + } + return math.MaxUint32 + } + var n uint32 + if l < 10 { + // guaranteed not to overflow + for i := 0; i < len(s); i++ { + c := s[i] + if c < '0' || c > '9' { + return math.MaxUint32 + } + n = n*10 + uint32(c-'0') + } + return n + } + if l > 10 { + // guaranteed to overflow + return math.MaxUint32 + } + c9 := s[9] + if c9 < '0' || c9 > '9' { + return math.MaxUint32 + } + for i := 0; i < 9; i++ { + c := s[i] + if c < '0' || c > '9' { + return math.MaxUint32 + } + n = n*10 + uint32(c-'0') + } + if n >= math.MaxUint32/10+1 { + return math.MaxUint32 + } + n *= 10 + n1 := n + uint32(c9-'0') + if n1 < n { + return math.MaxUint32 + } + + return n1 +} + +func strToInt32(s unistring.String) (int32, bool) { + if s == "" { + return -1, false + } + neg := s[0] == '-' + if neg { + s = s[1:] + } + l := len(s) + if s[0] == '0' { + if l == 1 { + return 0, !neg + } + return -1, false + } + var n uint32 + if l < 10 { + // guaranteed not to overflow + for i := 0; i < len(s); i++ { + c := s[i] + if c < '0' || c > '9' { + return -1, false + } + n = n*10 + uint32(c-'0') + } + } else if l > 10 { + // guaranteed to overflow + return -1, false + } else { + c9 := s[9] + if c9 >= '0' { + if !neg && c9 > '7' || c9 > '8' { + // 
guaranteed to overflow + return -1, false + } + for i := 0; i < 9; i++ { + c := s[i] + if c < '0' || c > '9' { + return -1, false + } + n = n*10 + uint32(c-'0') + } + if n >= math.MaxInt32/10+1 { + // valid number, but it overflows integer + return 0, false + } + n = n*10 + uint32(c9-'0') + } else { + return -1, false + } + } + if neg { + return int32(-n), true + } + return int32(n), true +} + +func strToInt64(s unistring.String) (int64, bool) { + if s == "" { + return -1, false + } + neg := s[0] == '-' + if neg { + s = s[1:] + } + l := len(s) + if s[0] == '0' { + if l == 1 { + return 0, !neg + } + return -1, false + } + var n uint64 + if l < 19 { + // guaranteed not to overflow + for i := 0; i < len(s); i++ { + c := s[i] + if c < '0' || c > '9' { + return -1, false + } + n = n*10 + uint64(c-'0') + } + } else if l > 19 { + // guaranteed to overflow + return -1, false + } else { + c18 := s[18] + if c18 >= '0' { + if !neg && c18 > '7' || c18 > '8' { + // guaranteed to overflow + return -1, false + } + for i := 0; i < 18; i++ { + c := s[i] + if c < '0' || c > '9' { + return -1, false + } + n = n*10 + uint64(c-'0') + } + if n >= math.MaxInt64/10+1 { + // valid number, but it overflows integer + return 0, false + } + n = n*10 + uint64(c18-'0') + } else { + return -1, false + } + } + if neg { + return int64(-n), true + } + return int64(n), true +} + +func strToInt(s unistring.String) (int, bool) { + if bits.UintSize == 32 { + n, ok := strToInt32(s) + return int(n), ok + } + n, ok := strToInt64(s) + return int(n), ok +} + +// Attempts to convert a string into a canonical integer. +// On success returns (number, true). +// If it was a canonical number, but not an integer returns (0, false). This includes -0 and overflows. +// In all other cases returns (-1, false). 
+// See https://262.ecma-international.org/#sec-canonicalnumericindexstring +func strToIntNum(s unistring.String) (int, bool) { + n, ok := strToInt64(s) + if n == 0 { + return 0, ok + } + if ok && n >= -maxInt && n <= maxInt { + if bits.UintSize == 32 { + if n > math.MaxInt32 || n < math.MinInt32 { + return 0, false + } + } + return int(n), true + } + str := stringValueFromRaw(s) + if str.ToNumber().toString().SameAs(str) { + return 0, false + } + return -1, false +} + +func strToGoIdx(s unistring.String) int { + if n, ok := strToInt(s); ok { + return n + } + return -1 +} + +func strToIdx64(s unistring.String) int64 { + if n, ok := strToInt64(s); ok { + return n + } + return -1 +} diff --git a/vendor/github.com/dop251/goja/string.go b/vendor/github.com/dop251/goja/string.go new file mode 100644 index 0000000000..8df5e53f5c --- /dev/null +++ b/vendor/github.com/dop251/goja/string.go @@ -0,0 +1,344 @@ +package goja + +import ( + "io" + "strconv" + "strings" + "unicode/utf16" + "unicode/utf8" + + "github.com/dop251/goja/unistring" +) + +const ( + __proto__ = "__proto__" +) + +var ( + stringTrue valueString = asciiString("true") + stringFalse valueString = asciiString("false") + stringNull valueString = asciiString("null") + stringUndefined valueString = asciiString("undefined") + stringObjectC valueString = asciiString("object") + stringFunction valueString = asciiString("function") + stringBoolean valueString = asciiString("boolean") + stringString valueString = asciiString("string") + stringSymbol valueString = asciiString("symbol") + stringNumber valueString = asciiString("number") + stringNaN valueString = asciiString("NaN") + stringInfinity = asciiString("Infinity") + stringPlusInfinity = asciiString("+Infinity") + stringNegInfinity = asciiString("-Infinity") + stringBound_ valueString = asciiString("bound ") + stringEmpty valueString = asciiString("") + + stringError valueString = asciiString("Error") + stringTypeError valueString = asciiString("TypeError") + 
stringReferenceError valueString = asciiString("ReferenceError") + stringSyntaxError valueString = asciiString("SyntaxError") + stringRangeError valueString = asciiString("RangeError") + stringEvalError valueString = asciiString("EvalError") + stringURIError valueString = asciiString("URIError") + stringGoError valueString = asciiString("GoError") + + stringObjectNull valueString = asciiString("[object Null]") + stringObjectObject valueString = asciiString("[object Object]") + stringObjectUndefined valueString = asciiString("[object Undefined]") + stringInvalidDate valueString = asciiString("Invalid Date") +) + +type valueString interface { + Value + charAt(int) rune + length() int + concat(valueString) valueString + substring(start, end int) valueString + compareTo(valueString) int + reader(start int) io.RuneReader + utf16Reader(start int) io.RuneReader + utf16Runes() []rune + index(valueString, int) int + lastIndex(valueString, int) int + toLower() valueString + toUpper() valueString + toTrimmedUTF8() string +} + +type stringIterObject struct { + baseObject + reader io.RuneReader +} + +func isUTF16FirstSurrogate(r rune) bool { + return r >= 0xD800 && r <= 0xDBFF +} + +func isUTF16SecondSurrogate(r rune) bool { + return r >= 0xDC00 && r <= 0xDFFF +} + +func (si *stringIterObject) next() Value { + if si.reader == nil { + return si.val.runtime.createIterResultObject(_undefined, true) + } + r, _, err := si.reader.ReadRune() + if err == io.EOF { + si.reader = nil + return si.val.runtime.createIterResultObject(_undefined, true) + } + return si.val.runtime.createIterResultObject(stringFromRune(r), false) +} + +func stringFromRune(r rune) valueString { + if r < utf8.RuneSelf { + var sb strings.Builder + sb.Grow(1) + sb.WriteByte(byte(r)) + return asciiString(sb.String()) + } + var sb unicodeStringBuilder + if r <= 0xFFFF { + sb.Grow(1) + } else { + sb.Grow(2) + } + sb.WriteRune(r) + return sb.String() +} + +func (r *Runtime) createStringIterator(s valueString) Value { + 
o := &Object{runtime: r} + + si := &stringIterObject{ + reader: &lenientUtf16Decoder{utf16Reader: s.utf16Reader(0)}, + } + si.class = classStringIterator + si.val = o + si.extensible = true + o.self = si + si.prototype = r.global.StringIteratorPrototype + si.init() + + return o +} + +type stringObject struct { + baseObject + value valueString + length int + lengthProp valueProperty +} + +func newStringValue(s string) valueString { + utf16Size := 0 + ascii := true + for _, chr := range s { + utf16Size++ + if chr >= utf8.RuneSelf { + ascii = false + if chr > 0xFFFF { + utf16Size++ + } + } + } + if ascii { + return asciiString(s) + } + buf := make([]uint16, utf16Size+1) + buf[0] = unistring.BOM + c := 1 + for _, chr := range s { + if chr <= 0xFFFF { + buf[c] = uint16(chr) + } else { + first, second := utf16.EncodeRune(chr) + buf[c] = uint16(first) + c++ + buf[c] = uint16(second) + } + c++ + } + return unicodeString(buf) +} + +func stringValueFromRaw(raw unistring.String) valueString { + if b := raw.AsUtf16(); b != nil { + return unicodeString(b) + } + return asciiString(raw) +} + +func (s *stringObject) init() { + s.baseObject.init() + s.setLength() +} + +func (s *stringObject) setLength() { + if s.value != nil { + s.length = s.value.length() + } + s.lengthProp.value = intToValue(int64(s.length)) + s._put("length", &s.lengthProp) +} + +func (s *stringObject) getStr(name unistring.String, receiver Value) Value { + if i := strToGoIdx(name); i >= 0 && i < s.length { + return s._getIdx(i) + } + return s.baseObject.getStr(name, receiver) +} + +func (s *stringObject) getIdx(idx valueInt, receiver Value) Value { + i := int64(idx) + if i >= 0 { + if i < int64(s.length) { + return s._getIdx(int(i)) + } + return nil + } + return s.baseObject.getStr(idx.string(), receiver) +} + +func (s *stringObject) getOwnPropStr(name unistring.String) Value { + if i := strToGoIdx(name); i >= 0 && i < s.length { + val := s._getIdx(i) + return &valueProperty{ + value: val, + enumerable: true, + 
} + } + + return s.baseObject.getOwnPropStr(name) +} + +func (s *stringObject) getOwnPropIdx(idx valueInt) Value { + i := int64(idx) + if i >= 0 { + if i < int64(s.length) { + val := s._getIdx(int(i)) + return &valueProperty{ + value: val, + enumerable: true, + } + } + return nil + } + + return s.baseObject.getOwnPropStr(idx.string()) +} + +func (s *stringObject) _getIdx(idx int) Value { + return s.value.substring(idx, idx+1) +} + +func (s *stringObject) setOwnStr(name unistring.String, val Value, throw bool) bool { + if i := strToGoIdx(name); i >= 0 && i < s.length { + s.val.runtime.typeErrorResult(throw, "Cannot assign to read only property '%d' of a String", i) + return false + } + + return s.baseObject.setOwnStr(name, val, throw) +} + +func (s *stringObject) setOwnIdx(idx valueInt, val Value, throw bool) bool { + i := int64(idx) + if i >= 0 && i < int64(s.length) { + s.val.runtime.typeErrorResult(throw, "Cannot assign to read only property '%d' of a String", i) + return false + } + + return s.baseObject.setOwnStr(idx.string(), val, throw) +} + +func (s *stringObject) setForeignStr(name unistring.String, val, receiver Value, throw bool) (bool, bool) { + return s._setForeignStr(name, s.getOwnPropStr(name), val, receiver, throw) +} + +func (s *stringObject) setForeignIdx(idx valueInt, val, receiver Value, throw bool) (bool, bool) { + return s._setForeignIdx(idx, s.getOwnPropIdx(idx), val, receiver, throw) +} + +func (s *stringObject) defineOwnPropertyStr(name unistring.String, descr PropertyDescriptor, throw bool) bool { + if i := strToGoIdx(name); i >= 0 && i < s.length { + s.val.runtime.typeErrorResult(throw, "Cannot redefine property: %d", i) + return false + } + + return s.baseObject.defineOwnPropertyStr(name, descr, throw) +} + +func (s *stringObject) defineOwnPropertyIdx(idx valueInt, descr PropertyDescriptor, throw bool) bool { + i := int64(idx) + if i >= 0 && i < int64(s.length) { + s.val.runtime.typeErrorResult(throw, "Cannot redefine property: %d", i) + 
return false + } + + return s.baseObject.defineOwnPropertyStr(idx.string(), descr, throw) +} + +type stringPropIter struct { + str valueString // separate, because obj can be the singleton + obj *stringObject + idx, length int +} + +func (i *stringPropIter) next() (propIterItem, iterNextFunc) { + if i.idx < i.length { + name := strconv.Itoa(i.idx) + i.idx++ + return propIterItem{name: unistring.String(name), enumerable: _ENUM_TRUE}, i.next + } + + return i.obj.baseObject.enumerateOwnKeys()() +} + +func (s *stringObject) enumerateOwnKeys() iterNextFunc { + return (&stringPropIter{ + str: s.value, + obj: s, + length: s.length, + }).next +} + +func (s *stringObject) ownKeys(all bool, accum []Value) []Value { + for i := 0; i < s.length; i++ { + accum = append(accum, asciiString(strconv.Itoa(i))) + } + + return s.baseObject.ownKeys(all, accum) +} + +func (s *stringObject) deleteStr(name unistring.String, throw bool) bool { + if i := strToGoIdx(name); i >= 0 && i < s.length { + s.val.runtime.typeErrorResult(throw, "Cannot delete property '%d' of a String", i) + return false + } + + return s.baseObject.deleteStr(name, throw) +} + +func (s *stringObject) deleteIdx(idx valueInt, throw bool) bool { + i := int64(idx) + if i >= 0 && i < int64(s.length) { + s.val.runtime.typeErrorResult(throw, "Cannot delete property '%d' of a String", i) + return false + } + + return s.baseObject.deleteStr(idx.string(), throw) +} + +func (s *stringObject) hasOwnPropertyStr(name unistring.String) bool { + if i := strToGoIdx(name); i >= 0 && i < s.length { + return true + } + return s.baseObject.hasOwnPropertyStr(name) +} + +func (s *stringObject) hasOwnPropertyIdx(idx valueInt) bool { + i := int64(idx) + if i >= 0 && i < int64(s.length) { + return true + } + return s.baseObject.hasOwnPropertyStr(idx.string()) +} diff --git a/vendor/github.com/dop251/goja/string_ascii.go b/vendor/github.com/dop251/goja/string_ascii.go new file mode 100644 index 0000000000..26231f5796 --- /dev/null +++ 
b/vendor/github.com/dop251/goja/string_ascii.go @@ -0,0 +1,328 @@ +package goja + +import ( + "fmt" + "hash/maphash" + "io" + "math" + "reflect" + "strconv" + "strings" + + "github.com/dop251/goja/unistring" +) + +type asciiString string + +type asciiRuneReader struct { + s asciiString + pos int +} + +func (rr *asciiRuneReader) ReadRune() (r rune, size int, err error) { + if rr.pos < len(rr.s) { + r = rune(rr.s[rr.pos]) + size = 1 + rr.pos++ + } else { + err = io.EOF + } + return +} + +func (s asciiString) reader(start int) io.RuneReader { + return &asciiRuneReader{ + s: s[start:], + } +} + +func (s asciiString) utf16Reader(start int) io.RuneReader { + return s.reader(start) +} + +func (s asciiString) utf16Runes() []rune { + runes := make([]rune, len(s)) + for i := 0; i < len(s); i++ { + runes[i] = rune(s[i]) + } + return runes +} + +// ss must be trimmed +func stringToInt(ss string) (int64, error) { + if ss == "" { + return 0, nil + } + if ss == "-0" { + return 0, strconv.ErrSyntax + } + if len(ss) > 2 { + switch ss[:2] { + case "0x", "0X": + return strconv.ParseInt(ss[2:], 16, 64) + case "0b", "0B": + return strconv.ParseInt(ss[2:], 2, 64) + case "0o", "0O": + return strconv.ParseInt(ss[2:], 8, 64) + } + } + return strconv.ParseInt(ss, 10, 64) +} + +func (s asciiString) _toInt() (int64, error) { + return stringToInt(strings.TrimSpace(string(s))) +} + +func isRangeErr(err error) bool { + if err, ok := err.(*strconv.NumError); ok { + return err.Err == strconv.ErrRange + } + return false +} + +func (s asciiString) _toFloat() (float64, error) { + ss := strings.TrimSpace(string(s)) + if ss == "" { + return 0, nil + } + if ss == "-0" { + var f float64 + return -f, nil + } + f, err := strconv.ParseFloat(ss, 64) + if isRangeErr(err) { + err = nil + } + return f, err +} + +func (s asciiString) ToInteger() int64 { + if s == "" { + return 0 + } + if s == "Infinity" || s == "+Infinity" { + return math.MaxInt64 + } + if s == "-Infinity" { + return math.MinInt64 + } + i, err 
:= s._toInt() + if err != nil { + f, err := s._toFloat() + if err == nil { + return int64(f) + } + } + return i +} + +func (s asciiString) toString() valueString { + return s +} + +func (s asciiString) ToString() Value { + return s +} + +func (s asciiString) String() string { + return string(s) +} + +func (s asciiString) ToFloat() float64 { + if s == "" { + return 0 + } + if s == "Infinity" || s == "+Infinity" { + return math.Inf(1) + } + if s == "-Infinity" { + return math.Inf(-1) + } + f, err := s._toFloat() + if err != nil { + i, err := s._toInt() + if err == nil { + return float64(i) + } + f = math.NaN() + } + return f +} + +func (s asciiString) ToBoolean() bool { + return s != "" +} + +func (s asciiString) ToNumber() Value { + if s == "" { + return intToValue(0) + } + if s == "Infinity" || s == "+Infinity" { + return _positiveInf + } + if s == "-Infinity" { + return _negativeInf + } + + if i, err := s._toInt(); err == nil { + return intToValue(i) + } + + if f, err := s._toFloat(); err == nil { + return floatToValue(f) + } + + return _NaN +} + +func (s asciiString) ToObject(r *Runtime) *Object { + return r._newString(s, r.global.StringPrototype) +} + +func (s asciiString) SameAs(other Value) bool { + if otherStr, ok := other.(asciiString); ok { + return s == otherStr + } + return false +} + +func (s asciiString) Equals(other Value) bool { + if o, ok := other.(asciiString); ok { + return s == o + } + + if o, ok := other.(valueInt); ok { + if o1, e := s._toInt(); e == nil { + return o1 == int64(o) + } + return false + } + + if o, ok := other.(valueFloat); ok { + return s.ToFloat() == float64(o) + } + + if o, ok := other.(valueBool); ok { + if o1, e := s._toFloat(); e == nil { + return o1 == o.ToFloat() + } + return false + } + + if o, ok := other.(*Object); ok { + return s.Equals(o.toPrimitive()) + } + return false +} + +func (s asciiString) StrictEquals(other Value) bool { + if otherStr, ok := other.(asciiString); ok { + return s == otherStr + } + return false 
+} + +func (s asciiString) baseObject(r *Runtime) *Object { + ss := r.stringSingleton + ss.value = s + ss.setLength() + return ss.val +} + +func (s asciiString) hash(hash *maphash.Hash) uint64 { + _, _ = hash.WriteString(string(s)) + h := hash.Sum64() + hash.Reset() + return h +} + +func (s asciiString) charAt(idx int) rune { + return rune(s[idx]) +} + +func (s asciiString) length() int { + return len(s) +} + +func (s asciiString) concat(other valueString) valueString { + switch other := other.(type) { + case asciiString: + b := make([]byte, len(s)+len(other)) + copy(b, s) + copy(b[len(s):], other) + return asciiString(b) + case unicodeString: + b := make([]uint16, len(s)+len(other)) + b[0] = unistring.BOM + for i := 0; i < len(s); i++ { + b[i+1] = uint16(s[i]) + } + copy(b[len(s)+1:], other[1:]) + return unicodeString(b) + default: + panic(fmt.Errorf("unknown string type: %T", other)) + } +} + +func (s asciiString) substring(start, end int) valueString { + return s[start:end] +} + +func (s asciiString) compareTo(other valueString) int { + switch other := other.(type) { + case asciiString: + return strings.Compare(string(s), string(other)) + case unicodeString: + return strings.Compare(string(s), other.String()) + default: + panic(fmt.Errorf("unknown string type: %T", other)) + } +} + +func (s asciiString) index(substr valueString, start int) int { + if substr, ok := substr.(asciiString); ok { + p := strings.Index(string(s[start:]), string(substr)) + if p >= 0 { + return p + start + } + } + return -1 +} + +func (s asciiString) lastIndex(substr valueString, pos int) int { + if substr, ok := substr.(asciiString); ok { + end := pos + len(substr) + var ss string + if end > len(s) { + ss = string(s) + } else { + ss = string(s[:end]) + } + return strings.LastIndex(ss, string(substr)) + } + return -1 +} + +func (s asciiString) toLower() valueString { + return asciiString(strings.ToLower(string(s))) +} + +func (s asciiString) toUpper() valueString { + return 
asciiString(strings.ToUpper(string(s))) +} + +func (s asciiString) toTrimmedUTF8() string { + return strings.TrimSpace(string(s)) +} + +func (s asciiString) string() unistring.String { + return unistring.String(s) +} + +func (s asciiString) Export() interface{} { + return string(s) +} + +func (s asciiString) ExportType() reflect.Type { + return reflectTypeString +} diff --git a/vendor/github.com/dop251/goja/string_unicode.go b/vendor/github.com/dop251/goja/string_unicode.go new file mode 100644 index 0000000000..71e387bc2c --- /dev/null +++ b/vendor/github.com/dop251/goja/string_unicode.go @@ -0,0 +1,546 @@ +package goja + +import ( + "errors" + "fmt" + "hash/maphash" + "io" + "math" + "reflect" + "strings" + "unicode/utf16" + "unicode/utf8" + + "github.com/dop251/goja/parser" + "github.com/dop251/goja/unistring" + "golang.org/x/text/cases" + "golang.org/x/text/language" +) + +type unicodeString []uint16 + +type unicodeRuneReader struct { + s unicodeString + pos int +} + +type utf16RuneReader struct { + s unicodeString + pos int +} + +// passes through invalid surrogate pairs +type lenientUtf16Decoder struct { + utf16Reader io.RuneReader + prev rune + prevSet bool +} + +type valueStringBuilder struct { + asciiBuilder strings.Builder + unicodeBuilder unicodeStringBuilder +} + +type unicodeStringBuilder struct { + buf []uint16 + unicode bool +} + +var ( + InvalidRuneError = errors.New("invalid rune") +) + +func (rr *utf16RuneReader) ReadRune() (r rune, size int, err error) { + if rr.pos < len(rr.s) { + r = rune(rr.s[rr.pos]) + size++ + rr.pos++ + return + } + err = io.EOF + return +} + +func (rr *lenientUtf16Decoder) ReadRune() (r rune, size int, err error) { + if rr.prevSet { + r = rr.prev + size = 1 + rr.prevSet = false + } else { + r, size, err = rr.utf16Reader.ReadRune() + if err != nil { + return + } + } + if isUTF16FirstSurrogate(r) { + second, _, err1 := rr.utf16Reader.ReadRune() + if err1 != nil { + if err1 != io.EOF { + err = err1 + } + return + } + if 
isUTF16SecondSurrogate(second) { + r = utf16.DecodeRune(r, second) + size++ + } else { + rr.prev = second + rr.prevSet = true + } + } + + return +} + +func (rr *unicodeRuneReader) ReadRune() (r rune, size int, err error) { + if rr.pos < len(rr.s) { + r = rune(rr.s[rr.pos]) + size++ + rr.pos++ + if isUTF16FirstSurrogate(r) { + if rr.pos < len(rr.s) { + second := rune(rr.s[rr.pos]) + if isUTF16SecondSurrogate(second) { + r = utf16.DecodeRune(r, second) + size++ + rr.pos++ + } else { + err = InvalidRuneError + } + } else { + err = InvalidRuneError + } + } else if isUTF16SecondSurrogate(r) { + err = InvalidRuneError + } + } else { + err = io.EOF + } + return +} + +func (b *unicodeStringBuilder) grow(n int) { + if cap(b.buf)-len(b.buf) < n { + buf := make([]uint16, len(b.buf), 2*cap(b.buf)+n) + copy(buf, b.buf) + b.buf = buf + } +} + +func (b *unicodeStringBuilder) Grow(n int) { + b.grow(n + 1) +} + +func (b *unicodeStringBuilder) ensureStarted(initialSize int) { + b.grow(len(b.buf) + initialSize + 1) + if len(b.buf) == 0 { + b.buf = append(b.buf, unistring.BOM) + } +} + +func (b *unicodeStringBuilder) WriteString(s valueString) { + b.ensureStarted(s.length()) + switch s := s.(type) { + case unicodeString: + b.buf = append(b.buf, s[1:]...) 
+ b.unicode = true + case asciiString: + for i := 0; i < len(s); i++ { + b.buf = append(b.buf, uint16(s[i])) + } + default: + panic(fmt.Errorf("unsupported string type: %T", s)) + } +} + +func (b *unicodeStringBuilder) String() valueString { + if b.unicode { + return unicodeString(b.buf) + } + if len(b.buf) == 0 { + return stringEmpty + } + buf := make([]byte, 0, len(b.buf)-1) + for _, c := range b.buf[1:] { + buf = append(buf, byte(c)) + } + return asciiString(buf) +} + +func (b *unicodeStringBuilder) WriteRune(r rune) { + if r <= 0xFFFF { + b.ensureStarted(1) + b.buf = append(b.buf, uint16(r)) + if !b.unicode && r >= utf8.RuneSelf { + b.unicode = true + } + } else { + b.ensureStarted(2) + first, second := utf16.EncodeRune(r) + b.buf = append(b.buf, uint16(first), uint16(second)) + b.unicode = true + } +} + +func (b *unicodeStringBuilder) writeASCIIString(bytes string) { + b.ensureStarted(len(bytes)) + for _, c := range bytes { + b.buf = append(b.buf, uint16(c)) + } +} + +func (b *valueStringBuilder) ascii() bool { + return len(b.unicodeBuilder.buf) == 0 +} + +func (b *valueStringBuilder) WriteString(s valueString) { + if ascii, ok := s.(asciiString); ok { + if b.ascii() { + b.asciiBuilder.WriteString(string(ascii)) + } else { + b.unicodeBuilder.writeASCIIString(string(ascii)) + } + } else { + b.switchToUnicode(s.length()) + b.unicodeBuilder.WriteString(s) + } +} + +func (b *valueStringBuilder) WriteRune(r rune) { + if r < utf8.RuneSelf { + if b.ascii() { + b.asciiBuilder.WriteByte(byte(r)) + } else { + b.unicodeBuilder.WriteRune(r) + } + } else { + var extraLen int + if r <= 0xFFFF { + extraLen = 1 + } else { + extraLen = 2 + } + b.switchToUnicode(extraLen) + b.unicodeBuilder.WriteRune(r) + } +} + +func (b *valueStringBuilder) String() valueString { + if b.ascii() { + return asciiString(b.asciiBuilder.String()) + } + return b.unicodeBuilder.String() +} + +func (b *valueStringBuilder) Grow(n int) { + if b.ascii() { + b.asciiBuilder.Grow(n) + } else { + 
b.unicodeBuilder.Grow(n) + } +} + +func (b *valueStringBuilder) switchToUnicode(extraLen int) { + if b.ascii() { + b.unicodeBuilder.ensureStarted(b.asciiBuilder.Len() + extraLen) + b.unicodeBuilder.writeASCIIString(b.asciiBuilder.String()) + b.asciiBuilder.Reset() + } +} + +func (b *valueStringBuilder) WriteSubstring(source valueString, start int, end int) { + if ascii, ok := source.(asciiString); ok { + if b.ascii() { + b.asciiBuilder.WriteString(string(ascii[start:end])) + } else { + b.unicodeBuilder.writeASCIIString(string(ascii[start:end])) + } + return + } + us := source.(unicodeString) + if b.ascii() { + uc := false + for i := start; i < end; i++ { + if us.charAt(i) >= utf8.RuneSelf { + uc = true + break + } + } + if uc { + b.switchToUnicode(end - start + 1) + } else { + b.asciiBuilder.Grow(end - start + 1) + for i := start; i < end; i++ { + b.asciiBuilder.WriteByte(byte(us.charAt(i))) + } + return + } + } + b.unicodeBuilder.buf = append(b.unicodeBuilder.buf, us[start+1:end+1]...) + b.unicodeBuilder.unicode = true +} + +func (s unicodeString) reader(start int) io.RuneReader { + return &unicodeRuneReader{ + s: s[start+1:], + } +} + +func (s unicodeString) utf16Reader(start int) io.RuneReader { + return &utf16RuneReader{ + s: s[start+1:], + } +} + +func (s unicodeString) utf16Runes() []rune { + runes := make([]rune, len(s)-1) + for i, ch := range s[1:] { + runes[i] = rune(ch) + } + return runes +} + +func (s unicodeString) ToInteger() int64 { + return 0 +} + +func (s unicodeString) toString() valueString { + return s +} + +func (s unicodeString) ToString() Value { + return s +} + +func (s unicodeString) ToFloat() float64 { + return math.NaN() +} + +func (s unicodeString) ToBoolean() bool { + return len(s) > 0 +} + +func (s unicodeString) toTrimmedUTF8() string { + if len(s) == 0 { + return "" + } + return strings.Trim(s.String(), parser.WhitespaceChars) +} + +func (s unicodeString) ToNumber() Value { + return asciiString(s.toTrimmedUTF8()).ToNumber() +} + +func 
(s unicodeString) ToObject(r *Runtime) *Object { + return r._newString(s, r.global.StringPrototype) +} + +func (s unicodeString) equals(other unicodeString) bool { + if len(s) != len(other) { + return false + } + for i, r := range s { + if r != other[i] { + return false + } + } + return true +} + +func (s unicodeString) SameAs(other Value) bool { + if otherStr, ok := other.(unicodeString); ok { + return s.equals(otherStr) + } + + return false +} + +func (s unicodeString) Equals(other Value) bool { + if s.SameAs(other) { + return true + } + + if o, ok := other.(*Object); ok { + return s.Equals(o.toPrimitive()) + } + return false +} + +func (s unicodeString) StrictEquals(other Value) bool { + return s.SameAs(other) +} + +func (s unicodeString) baseObject(r *Runtime) *Object { + ss := r.stringSingleton + ss.value = s + ss.setLength() + return ss.val +} + +func (s unicodeString) charAt(idx int) rune { + return rune(s[idx+1]) +} + +func (s unicodeString) length() int { + return len(s) - 1 +} + +func (s unicodeString) concat(other valueString) valueString { + switch other := other.(type) { + case unicodeString: + b := make(unicodeString, len(s)+len(other)-1) + copy(b, s) + copy(b[len(s):], other[1:]) + return b + case asciiString: + b := make([]uint16, len(s)+len(other)) + copy(b, s) + b1 := b[len(s):] + for i := 0; i < len(other); i++ { + b1[i] = uint16(other[i]) + } + return unicodeString(b) + default: + panic(fmt.Errorf("Unknown string type: %T", other)) + } +} + +func (s unicodeString) substring(start, end int) valueString { + ss := s[start+1 : end+1] + for _, c := range ss { + if c >= utf8.RuneSelf { + b := make(unicodeString, end-start+1) + b[0] = unistring.BOM + copy(b[1:], ss) + return b + } + } + as := make([]byte, end-start) + for i, c := range ss { + as[i] = byte(c) + } + return asciiString(as) +} + +func (s unicodeString) String() string { + return string(utf16.Decode(s[1:])) +} + +func (s unicodeString) compareTo(other valueString) int { + // TODO handle 
invalid UTF-16 + return strings.Compare(s.String(), other.String()) +} + +func (s unicodeString) index(substr valueString, start int) int { + var ss []uint16 + switch substr := substr.(type) { + case unicodeString: + ss = substr[1:] + case asciiString: + ss = make([]uint16, len(substr)) + for i := 0; i < len(substr); i++ { + ss[i] = uint16(substr[i]) + } + default: + panic(fmt.Errorf("unknown string type: %T", substr)) + } + s1 := s[1:] + // TODO: optimise + end := len(s1) - len(ss) + for start <= end { + for i := 0; i < len(ss); i++ { + if s1[start+i] != ss[i] { + goto nomatch + } + } + + return start + nomatch: + start++ + } + return -1 +} + +func (s unicodeString) lastIndex(substr valueString, start int) int { + var ss []uint16 + switch substr := substr.(type) { + case unicodeString: + ss = substr[1:] + case asciiString: + ss = make([]uint16, len(substr)) + for i := 0; i < len(substr); i++ { + ss[i] = uint16(substr[i]) + } + default: + panic(fmt.Errorf("Unknown string type: %T", substr)) + } + + s1 := s[1:] + if maxStart := len(s1) - len(ss); start > maxStart { + start = maxStart + } + // TODO: optimise + for start >= 0 { + for i := 0; i < len(ss); i++ { + if s1[start+i] != ss[i] { + goto nomatch + } + } + + return start + nomatch: + start-- + } + return -1 +} + +func unicodeStringFromRunes(r []rune) unicodeString { + return unistring.NewFromRunes(r).AsUtf16() +} + +func (s unicodeString) toLower() valueString { + caser := cases.Lower(language.Und) + r := []rune(caser.String(s.String())) + // Workaround + ascii := true + for i := 0; i < len(r)-1; i++ { + if (i == 0 || r[i-1] != 0x3b1) && r[i] == 0x345 && r[i+1] == 0x3c2 { + i++ + r[i] = 0x3c3 + } + if r[i] >= utf8.RuneSelf { + ascii = false + } + } + if ascii { + ascii = r[len(r)-1] < utf8.RuneSelf + } + if ascii { + return asciiString(r) + } + return unicodeStringFromRunes(r) +} + +func (s unicodeString) toUpper() valueString { + caser := cases.Upper(language.Und) + return 
newStringValue(caser.String(s.String())) +} + +func (s unicodeString) Export() interface{} { + return s.String() +} + +func (s unicodeString) ExportType() reflect.Type { + return reflectTypeString +} + +func (s unicodeString) hash(hash *maphash.Hash) uint64 { + _, _ = hash.WriteString(string(unistring.FromUtf16(s))) + h := hash.Sum64() + hash.Reset() + return h +} + +func (s unicodeString) string() unistring.String { + return unistring.FromUtf16(s) +} diff --git a/vendor/github.com/dop251/goja/token/Makefile b/vendor/github.com/dop251/goja/token/Makefile new file mode 100644 index 0000000000..1e85c73488 --- /dev/null +++ b/vendor/github.com/dop251/goja/token/Makefile @@ -0,0 +1,2 @@ +token_const.go: tokenfmt + ./$^ | gofmt > $@ diff --git a/vendor/github.com/dop251/goja/token/README.markdown b/vendor/github.com/dop251/goja/token/README.markdown new file mode 100644 index 0000000000..66dd2abde7 --- /dev/null +++ b/vendor/github.com/dop251/goja/token/README.markdown @@ -0,0 +1,171 @@ +# token +-- + import "github.com/dop251/goja/token" + +Package token defines constants representing the lexical tokens of JavaScript +(ECMA5). + +## Usage + +```go +const ( + ILLEGAL + EOF + COMMENT + KEYWORD + + STRING + BOOLEAN + NULL + NUMBER + IDENTIFIER + + PLUS // + + MINUS // - + MULTIPLY // * + SLASH // / + REMAINDER // % + + AND // & + OR // | + EXCLUSIVE_OR // ^ + SHIFT_LEFT // << + SHIFT_RIGHT // >> + UNSIGNED_SHIFT_RIGHT // >>> + AND_NOT // &^ + + ADD_ASSIGN // += + SUBTRACT_ASSIGN // -= + MULTIPLY_ASSIGN // *= + QUOTIENT_ASSIGN // /= + REMAINDER_ASSIGN // %= + + AND_ASSIGN // &= + OR_ASSIGN // |= + EXCLUSIVE_OR_ASSIGN // ^= + SHIFT_LEFT_ASSIGN // <<= + SHIFT_RIGHT_ASSIGN // >>= + UNSIGNED_SHIFT_RIGHT_ASSIGN // >>>= + AND_NOT_ASSIGN // &^= + + LOGICAL_AND // && + LOGICAL_OR // || + INCREMENT // ++ + DECREMENT // -- + + EQUAL // == + STRICT_EQUAL // === + LESS // < + GREATER // > + ASSIGN // = + NOT // ! 
+ + BITWISE_NOT // ~ + + NOT_EQUAL // != + STRICT_NOT_EQUAL // !== + LESS_OR_EQUAL // <= + GREATER_OR_EQUAL // >= + + LEFT_PARENTHESIS // ( + LEFT_BRACKET // [ + LEFT_BRACE // { + COMMA // , + PERIOD // . + + RIGHT_PARENTHESIS // ) + RIGHT_BRACKET // ] + RIGHT_BRACE // } + SEMICOLON // ; + COLON // : + QUESTION_MARK // ? + + IF + IN + DO + + VAR + FOR + NEW + TRY + + THIS + ELSE + CASE + VOID + WITH + + WHILE + BREAK + CATCH + THROW + + RETURN + TYPEOF + DELETE + SWITCH + + DEFAULT + FINALLY + + FUNCTION + CONTINUE + DEBUGGER + + INSTANCEOF +) +``` + +#### type Token + +```go +type Token int +``` + +Token is the set of lexical tokens in JavaScript (ECMA5). + +#### func IsKeyword + +```go +func IsKeyword(literal string) (Token, bool) +``` +IsKeyword returns the keyword token if literal is a keyword, a KEYWORD token if +the literal is a future keyword (const, let, class, super, ...), or 0 if the +literal is not a keyword. + +If the literal is a keyword, IsKeyword returns a second value indicating if the +literal is considered a future keyword in strict-mode only. + +7.6.1.2 Future Reserved Words: + + const + class + enum + export + extends + import + super + +7.6.1.2 Future Reserved Words (strict): + + implements + interface + let + package + private + protected + public + static + +#### func (Token) String + +```go +func (tkn Token) String() string +``` +String returns the string corresponding to the token. For operators, delimiters, +and keywords the string is the actual token string (e.g., for the token PLUS, +the String() is "+"). For all other tokens the string corresponds to the token +name (e.g. for the token IDENTIFIER, the string is "IDENTIFIER"). 
+ +-- +**godocdown** http://github.com/robertkrimen/godocdown diff --git a/vendor/github.com/dop251/goja/token/token.go b/vendor/github.com/dop251/goja/token/token.go new file mode 100644 index 0000000000..c49794934a --- /dev/null +++ b/vendor/github.com/dop251/goja/token/token.go @@ -0,0 +1,116 @@ +// Package token defines constants representing the lexical tokens of JavaScript (ECMA5). +package token + +import ( + "strconv" +) + +// Token is the set of lexical tokens in JavaScript (ECMA5). +type Token int + +// String returns the string corresponding to the token. +// For operators, delimiters, and keywords the string is the actual +// token string (e.g., for the token PLUS, the String() is +// "+"). For all other tokens the string corresponds to the token +// name (e.g. for the token IDENTIFIER, the string is "IDENTIFIER"). +// +func (tkn Token) String() string { + if tkn == 0 { + return "UNKNOWN" + } + if tkn < Token(len(token2string)) { + return token2string[tkn] + } + return "token(" + strconv.Itoa(int(tkn)) + ")" +} + +// This is not used for anything +func (tkn Token) precedence(in bool) int { + + switch tkn { + case LOGICAL_OR: + return 1 + + case LOGICAL_AND: + return 2 + + case OR, OR_ASSIGN: + return 3 + + case EXCLUSIVE_OR: + return 4 + + case AND, AND_ASSIGN: + return 5 + + case EQUAL, + NOT_EQUAL, + STRICT_EQUAL, + STRICT_NOT_EQUAL: + return 6 + + case LESS, GREATER, LESS_OR_EQUAL, GREATER_OR_EQUAL, INSTANCEOF: + return 7 + + case IN: + if in { + return 7 + } + return 0 + + case SHIFT_LEFT, SHIFT_RIGHT, UNSIGNED_SHIFT_RIGHT: + fallthrough + case SHIFT_LEFT_ASSIGN, SHIFT_RIGHT_ASSIGN, UNSIGNED_SHIFT_RIGHT_ASSIGN: + return 8 + + case PLUS, MINUS, ADD_ASSIGN, SUBTRACT_ASSIGN: + return 9 + + case MULTIPLY, SLASH, REMAINDER, MULTIPLY_ASSIGN, QUOTIENT_ASSIGN, REMAINDER_ASSIGN: + return 11 + } + return 0 +} + +type _keyword struct { + token Token + futureKeyword bool + strict bool +} + +// IsKeyword returns the keyword token if literal is a keyword, a 
KEYWORD token +// if the literal is a future keyword (const, let, class, super, ...), or 0 if the literal is not a keyword. +// +// If the literal is a keyword, IsKeyword returns a second value indicating if the literal +// is considered a future keyword in strict-mode only. +// +// 7.6.1.2 Future Reserved Words: +// +// const +// class +// enum +// export +// extends +// import +// super +// +// 7.6.1.2 Future Reserved Words (strict): +// +// implements +// interface +// let +// package +// private +// protected +// public +// static +// +func IsKeyword(literal string) (Token, bool) { + if keyword, exists := keywordTable[literal]; exists { + if keyword.futureKeyword { + return KEYWORD, keyword.strict + } + return keyword.token, false + } + return 0, false +} diff --git a/vendor/github.com/dop251/goja/token/token_const.go b/vendor/github.com/dop251/goja/token/token_const.go new file mode 100644 index 0000000000..e90636aa4e --- /dev/null +++ b/vendor/github.com/dop251/goja/token/token_const.go @@ -0,0 +1,353 @@ +package token + +const ( + _ Token = iota + + ILLEGAL + EOF + COMMENT + KEYWORD + + STRING + BOOLEAN + NULL + NUMBER + IDENTIFIER + + PLUS // + + MINUS // - + MULTIPLY // * + SLASH // / + REMAINDER // % + + AND // & + OR // | + EXCLUSIVE_OR // ^ + SHIFT_LEFT // << + SHIFT_RIGHT // >> + UNSIGNED_SHIFT_RIGHT // >>> + + ADD_ASSIGN // += + SUBTRACT_ASSIGN // -= + MULTIPLY_ASSIGN // *= + QUOTIENT_ASSIGN // /= + REMAINDER_ASSIGN // %= + + AND_ASSIGN // &= + OR_ASSIGN // |= + EXCLUSIVE_OR_ASSIGN // ^= + SHIFT_LEFT_ASSIGN // <<= + SHIFT_RIGHT_ASSIGN // >>= + UNSIGNED_SHIFT_RIGHT_ASSIGN // >>>= + + LOGICAL_AND // && + LOGICAL_OR // || + INCREMENT // ++ + DECREMENT // -- + + EQUAL // == + STRICT_EQUAL // === + LESS // < + GREATER // > + ASSIGN // = + NOT // ! + + BITWISE_NOT // ~ + + NOT_EQUAL // != + STRICT_NOT_EQUAL // !== + LESS_OR_EQUAL // <= + GREATER_OR_EQUAL // >= + + LEFT_PARENTHESIS // ( + LEFT_BRACKET // [ + LEFT_BRACE // { + COMMA // , + PERIOD // . 
+ + RIGHT_PARENTHESIS // ) + RIGHT_BRACKET // ] + RIGHT_BRACE // } + SEMICOLON // ; + COLON // : + QUESTION_MARK // ? + ARROW // => + ELLIPSIS // ... + + firstKeyword + IF + IN + OF + DO + + VAR + LET + FOR + NEW + TRY + + THIS + ELSE + CASE + VOID + WITH + + CONST + WHILE + BREAK + CATCH + THROW + + RETURN + TYPEOF + DELETE + SWITCH + + DEFAULT + FINALLY + + FUNCTION + CONTINUE + DEBUGGER + + INSTANCEOF + lastKeyword +) + +var token2string = [...]string{ + ILLEGAL: "ILLEGAL", + EOF: "EOF", + COMMENT: "COMMENT", + KEYWORD: "KEYWORD", + STRING: "STRING", + BOOLEAN: "BOOLEAN", + NULL: "NULL", + NUMBER: "NUMBER", + IDENTIFIER: "IDENTIFIER", + PLUS: "+", + MINUS: "-", + MULTIPLY: "*", + SLASH: "/", + REMAINDER: "%", + AND: "&", + OR: "|", + EXCLUSIVE_OR: "^", + SHIFT_LEFT: "<<", + SHIFT_RIGHT: ">>", + UNSIGNED_SHIFT_RIGHT: ">>>", + ADD_ASSIGN: "+=", + SUBTRACT_ASSIGN: "-=", + MULTIPLY_ASSIGN: "*=", + QUOTIENT_ASSIGN: "/=", + REMAINDER_ASSIGN: "%=", + AND_ASSIGN: "&=", + OR_ASSIGN: "|=", + EXCLUSIVE_OR_ASSIGN: "^=", + SHIFT_LEFT_ASSIGN: "<<=", + SHIFT_RIGHT_ASSIGN: ">>=", + UNSIGNED_SHIFT_RIGHT_ASSIGN: ">>>=", + LOGICAL_AND: "&&", + LOGICAL_OR: "||", + INCREMENT: "++", + DECREMENT: "--", + EQUAL: "==", + STRICT_EQUAL: "===", + LESS: "<", + GREATER: ">", + ASSIGN: "=", + NOT: "!", + BITWISE_NOT: "~", + NOT_EQUAL: "!=", + STRICT_NOT_EQUAL: "!==", + LESS_OR_EQUAL: "<=", + GREATER_OR_EQUAL: ">=", + LEFT_PARENTHESIS: "(", + LEFT_BRACKET: "[", + LEFT_BRACE: "{", + COMMA: ",", + PERIOD: ".", + RIGHT_PARENTHESIS: ")", + RIGHT_BRACKET: "]", + RIGHT_BRACE: "}", + SEMICOLON: ";", + COLON: ":", + QUESTION_MARK: "?", + ARROW: "=>", + ELLIPSIS: "...", + IF: "if", + IN: "in", + OF: "of", + DO: "do", + VAR: "var", + LET: "let", + FOR: "for", + NEW: "new", + TRY: "try", + THIS: "this", + ELSE: "else", + CASE: "case", + VOID: "void", + WITH: "with", + CONST: "const", + WHILE: "while", + BREAK: "break", + CATCH: "catch", + THROW: "throw", + RETURN: "return", + TYPEOF: "typeof", + DELETE: 
"delete", + SWITCH: "switch", + DEFAULT: "default", + FINALLY: "finally", + FUNCTION: "function", + CONTINUE: "continue", + DEBUGGER: "debugger", + INSTANCEOF: "instanceof", +} + +var keywordTable = map[string]_keyword{ + "if": { + token: IF, + }, + "in": { + token: IN, + }, + "do": { + token: DO, + }, + "var": { + token: VAR, + }, + "for": { + token: FOR, + }, + "new": { + token: NEW, + }, + "try": { + token: TRY, + }, + "this": { + token: THIS, + }, + "else": { + token: ELSE, + }, + "case": { + token: CASE, + }, + "void": { + token: VOID, + }, + "with": { + token: WITH, + }, + "while": { + token: WHILE, + }, + "break": { + token: BREAK, + }, + "catch": { + token: CATCH, + }, + "throw": { + token: THROW, + }, + "return": { + token: RETURN, + }, + "typeof": { + token: TYPEOF, + }, + "delete": { + token: DELETE, + }, + "switch": { + token: SWITCH, + }, + "default": { + token: DEFAULT, + }, + "finally": { + token: FINALLY, + }, + "function": { + token: FUNCTION, + }, + "continue": { + token: CONTINUE, + }, + "debugger": { + token: DEBUGGER, + }, + "instanceof": { + token: INSTANCEOF, + }, + "const": { + token: CONST, + }, + "class": { + token: KEYWORD, + futureKeyword: true, + }, + "enum": { + token: KEYWORD, + futureKeyword: true, + }, + "export": { + token: KEYWORD, + futureKeyword: true, + }, + "extends": { + token: KEYWORD, + futureKeyword: true, + }, + "import": { + token: KEYWORD, + futureKeyword: true, + }, + "super": { + token: KEYWORD, + futureKeyword: true, + }, + "implements": { + token: KEYWORD, + futureKeyword: true, + strict: true, + }, + "interface": { + token: KEYWORD, + futureKeyword: true, + strict: true, + }, + "let": { + token: LET, + strict: true, + }, + "package": { + token: KEYWORD, + futureKeyword: true, + strict: true, + }, + "private": { + token: KEYWORD, + futureKeyword: true, + strict: true, + }, + "protected": { + token: KEYWORD, + futureKeyword: true, + strict: true, + }, + "public": { + token: KEYWORD, + futureKeyword: true, + strict: 
true, + }, + "static": { + token: KEYWORD, + futureKeyword: true, + strict: true, + }, +} diff --git a/vendor/github.com/dop251/goja/token/tokenfmt b/vendor/github.com/dop251/goja/token/tokenfmt new file mode 100644 index 0000000000..63dd5d9e6d --- /dev/null +++ b/vendor/github.com/dop251/goja/token/tokenfmt @@ -0,0 +1,222 @@ +#!/usr/bin/env perl + +use strict; +use warnings; + +my (%token, @order, @keywords); + +{ + my $keywords; + my @const; + push @const, <<_END_; +package token + +const( + _ Token = iota +_END_ + + for (split m/\n/, <<_END_) { +ILLEGAL +EOF +COMMENT +KEYWORD + +STRING +BOOLEAN +NULL +NUMBER +IDENTIFIER + +PLUS + +MINUS - +MULTIPLY * +SLASH / +REMAINDER % + +AND & +OR | +EXCLUSIVE_OR ^ +SHIFT_LEFT << +SHIFT_RIGHT >> +UNSIGNED_SHIFT_RIGHT >>> +AND_NOT &^ + +ADD_ASSIGN += +SUBTRACT_ASSIGN -= +MULTIPLY_ASSIGN *= +QUOTIENT_ASSIGN /= +REMAINDER_ASSIGN %= + +AND_ASSIGN &= +OR_ASSIGN |= +EXCLUSIVE_OR_ASSIGN ^= +SHIFT_LEFT_ASSIGN <<= +SHIFT_RIGHT_ASSIGN >>= +UNSIGNED_SHIFT_RIGHT_ASSIGN >>>= +AND_NOT_ASSIGN &^= + +LOGICAL_AND && +LOGICAL_OR || +INCREMENT ++ +DECREMENT -- + +EQUAL == +STRICT_EQUAL === +LESS < +GREATER > +ASSIGN = +NOT ! + +BITWISE_NOT ~ + +NOT_EQUAL != +STRICT_NOT_EQUAL !== +LESS_OR_EQUAL <= +GREATER_OR_EQUAL <= + +LEFT_PARENTHESIS ( +LEFT_BRACKET [ +LEFT_BRACE { +COMMA , +PERIOD . + +RIGHT_PARENTHESIS ) +RIGHT_BRACKET ] +RIGHT_BRACE } +SEMICOLON ; +COLON : +QUESTION_MARK ? 
+ +firstKeyword +IF +IN +DO + +VAR +FOR +NEW +TRY + +THIS +ELSE +CASE +VOID +WITH + +WHILE +BREAK +CATCH +THROW + +RETURN +TYPEOF +DELETE +SWITCH + +DEFAULT +FINALLY + +FUNCTION +CONTINUE +DEBUGGER + +INSTANCEOF +lastKeyword +_END_ + chomp; + + next if m/^\s*#/; + + my ($name, $symbol) = m/(\w+)\s*(\S+)?/; + + if (defined $symbol) { + push @order, $name; + push @const, "$name // $symbol"; + $token{$name} = $symbol; + } elsif (defined $name) { + $keywords ||= $name eq 'firstKeyword'; + + push @const, $name; + #$const[-1] .= " Token = iota" if 2 == @const; + if ($name =~ m/^([A-Z]+)/) { + push @keywords, $name if $keywords; + push @order, $name; + if ($token{SEMICOLON}) { + $token{$name} = lc $1; + } else { + $token{$name} = $name; + } + } + } else { + push @const, ""; + } + + } + push @const, ")"; + print join "\n", @const, ""; +} + +{ + print <<_END_; + +var token2string = [...]string{ +_END_ + for my $name (@order) { + print "$name: \"$token{$name}\",\n"; + } + print <<_END_; +} +_END_ + + print <<_END_; + +var keywordTable = map[string]_keyword{ +_END_ + for my $name (@keywords) { + print <<_END_ + "@{[ lc $name ]}": _keyword{ + token: $name, + }, +_END_ + } + + for my $name (qw/ + const + class + enum + export + extends + import + super + /) { + print <<_END_ + "$name": _keyword{ + token: KEYWORD, + futureKeyword: true, + }, +_END_ + } + + for my $name (qw/ + implements + interface + let + package + private + protected + public + static + /) { + print <<_END_ + "$name": _keyword{ + token: KEYWORD, + futureKeyword: true, + strict: true, + }, +_END_ + } + + print <<_END_; +} +_END_ +} diff --git a/vendor/github.com/dop251/goja/typedarrays.go b/vendor/github.com/dop251/goja/typedarrays.go new file mode 100644 index 0000000000..6326072681 --- /dev/null +++ b/vendor/github.com/dop251/goja/typedarrays.go @@ -0,0 +1,903 @@ +package goja + +import ( + "math" + "reflect" + "strconv" + "unsafe" + + "github.com/dop251/goja/unistring" +) + +type byteOrder bool + +const ( + 
bigEndian byteOrder = false + littleEndian byteOrder = true +) + +var ( + nativeEndian byteOrder + + arrayBufferType = reflect.TypeOf(ArrayBuffer{}) +) + +type typedArrayObjectCtor func(buf *arrayBufferObject, offset, length int, proto *Object) *typedArrayObject + +type arrayBufferObject struct { + baseObject + detached bool + data []byte +} + +// ArrayBuffer is a Go wrapper around ECMAScript ArrayBuffer. Calling Runtime.ToValue() on it +// returns the underlying ArrayBuffer. Calling Export() on an ECMAScript ArrayBuffer returns a wrapper. +// Use Runtime.NewArrayBuffer([]byte) to create one. +type ArrayBuffer struct { + buf *arrayBufferObject +} + +type dataViewObject struct { + baseObject + viewedArrayBuf *arrayBufferObject + byteLen, byteOffset int +} + +type typedArray interface { + toRaw(Value) uint64 + get(idx int) Value + set(idx int, value Value) + getRaw(idx int) uint64 + setRaw(idx int, raw uint64) + less(i, j int) bool + swap(i, j int) + typeMatch(v Value) bool +} + +type uint8Array []uint8 +type uint8ClampedArray []uint8 +type int8Array []int8 +type uint16Array []uint16 +type int16Array []int16 +type uint32Array []uint32 +type int32Array []int32 +type float32Array []float32 +type float64Array []float64 + +type typedArrayObject struct { + baseObject + viewedArrayBuf *arrayBufferObject + defaultCtor *Object + length, offset int + elemSize int + typedArray typedArray +} + +func (a ArrayBuffer) toValue(r *Runtime) Value { + if a.buf == nil { + return _null + } + v := a.buf.val + if v.runtime != r { + panic(r.NewTypeError("Illegal runtime transition of an ArrayBuffer")) + } + return v +} + +// Bytes returns the underlying []byte for this ArrayBuffer. +// For detached ArrayBuffers returns nil. +func (a ArrayBuffer) Bytes() []byte { + return a.buf.data +} + +// Detach the ArrayBuffer. After this, the underlying []byte becomes unreferenced and any attempt +// to use this ArrayBuffer results in a TypeError. 
+// Returns false if it was already detached, true otherwise. +// Note, this method may only be called from the goroutine that 'owns' the Runtime, it may not +// be called concurrently. +func (a ArrayBuffer) Detach() bool { + if a.buf.detached { + return false + } + a.buf.detach() + return true +} + +// Detached returns true if the ArrayBuffer is detached. +func (a ArrayBuffer) Detached() bool { + return a.buf.detached +} + +func (r *Runtime) NewArrayBuffer(data []byte) ArrayBuffer { + buf := r._newArrayBuffer(r.global.ArrayBufferPrototype, nil) + buf.data = data + return ArrayBuffer{ + buf: buf, + } +} + +func (a *uint8Array) get(idx int) Value { + return intToValue(int64((*a)[idx])) +} + +func (a *uint8Array) getRaw(idx int) uint64 { + return uint64((*a)[idx]) +} + +func (a *uint8Array) set(idx int, value Value) { + (*a)[idx] = toUint8(value) +} + +func (a *uint8Array) toRaw(v Value) uint64 { + return uint64(toUint8(v)) +} + +func (a *uint8Array) setRaw(idx int, v uint64) { + (*a)[idx] = uint8(v) +} + +func (a *uint8Array) less(i, j int) bool { + return (*a)[i] < (*a)[j] +} + +func (a *uint8Array) swap(i, j int) { + (*a)[i], (*a)[j] = (*a)[j], (*a)[i] +} + +func (a *uint8Array) typeMatch(v Value) bool { + if i, ok := v.(valueInt); ok { + return i >= 0 && i <= 255 + } + return false +} + +func (a *uint8ClampedArray) get(idx int) Value { + return intToValue(int64((*a)[idx])) +} + +func (a *uint8ClampedArray) getRaw(idx int) uint64 { + return uint64((*a)[idx]) +} + +func (a *uint8ClampedArray) set(idx int, value Value) { + (*a)[idx] = toUint8Clamp(value) +} + +func (a *uint8ClampedArray) toRaw(v Value) uint64 { + return uint64(toUint8Clamp(v)) +} + +func (a *uint8ClampedArray) setRaw(idx int, v uint64) { + (*a)[idx] = uint8(v) +} + +func (a *uint8ClampedArray) less(i, j int) bool { + return (*a)[i] < (*a)[j] +} + +func (a *uint8ClampedArray) swap(i, j int) { + (*a)[i], (*a)[j] = (*a)[j], (*a)[i] +} + +func (a *uint8ClampedArray) typeMatch(v Value) bool { + if i, ok 
:= v.(valueInt); ok { + return i >= 0 && i <= 255 + } + return false +} + +func (a *int8Array) get(idx int) Value { + return intToValue(int64((*a)[idx])) +} + +func (a *int8Array) getRaw(idx int) uint64 { + return uint64((*a)[idx]) +} + +func (a *int8Array) set(idx int, value Value) { + (*a)[idx] = toInt8(value) +} + +func (a *int8Array) toRaw(v Value) uint64 { + return uint64(toInt8(v)) +} + +func (a *int8Array) setRaw(idx int, v uint64) { + (*a)[idx] = int8(v) +} + +func (a *int8Array) less(i, j int) bool { + return (*a)[i] < (*a)[j] +} + +func (a *int8Array) swap(i, j int) { + (*a)[i], (*a)[j] = (*a)[j], (*a)[i] +} + +func (a *int8Array) typeMatch(v Value) bool { + if i, ok := v.(valueInt); ok { + return i >= math.MinInt8 && i <= math.MaxInt8 + } + return false +} + +func (a *uint16Array) get(idx int) Value { + return intToValue(int64((*a)[idx])) +} + +func (a *uint16Array) getRaw(idx int) uint64 { + return uint64((*a)[idx]) +} + +func (a *uint16Array) set(idx int, value Value) { + (*a)[idx] = toUint16(value) +} + +func (a *uint16Array) toRaw(v Value) uint64 { + return uint64(toUint16(v)) +} + +func (a *uint16Array) setRaw(idx int, v uint64) { + (*a)[idx] = uint16(v) +} + +func (a *uint16Array) less(i, j int) bool { + return (*a)[i] < (*a)[j] +} + +func (a *uint16Array) swap(i, j int) { + (*a)[i], (*a)[j] = (*a)[j], (*a)[i] +} + +func (a *uint16Array) typeMatch(v Value) bool { + if i, ok := v.(valueInt); ok { + return i >= 0 && i <= math.MaxUint16 + } + return false +} + +func (a *int16Array) get(idx int) Value { + return intToValue(int64((*a)[idx])) +} + +func (a *int16Array) getRaw(idx int) uint64 { + return uint64((*a)[idx]) +} + +func (a *int16Array) set(idx int, value Value) { + (*a)[idx] = toInt16(value) +} + +func (a *int16Array) toRaw(v Value) uint64 { + return uint64(toInt16(v)) +} + +func (a *int16Array) setRaw(idx int, v uint64) { + (*a)[idx] = int16(v) +} + +func (a *int16Array) less(i, j int) bool { + return (*a)[i] < (*a)[j] +} + +func (a 
*int16Array) swap(i, j int) { + (*a)[i], (*a)[j] = (*a)[j], (*a)[i] +} + +func (a *int16Array) typeMatch(v Value) bool { + if i, ok := v.(valueInt); ok { + return i >= math.MinInt16 && i <= math.MaxInt16 + } + return false +} + +func (a *uint32Array) get(idx int) Value { + return intToValue(int64((*a)[idx])) +} + +func (a *uint32Array) getRaw(idx int) uint64 { + return uint64((*a)[idx]) +} + +func (a *uint32Array) set(idx int, value Value) { + (*a)[idx] = toUint32(value) +} + +func (a *uint32Array) toRaw(v Value) uint64 { + return uint64(toUint32(v)) +} + +func (a *uint32Array) setRaw(idx int, v uint64) { + (*a)[idx] = uint32(v) +} + +func (a *uint32Array) less(i, j int) bool { + return (*a)[i] < (*a)[j] +} + +func (a *uint32Array) swap(i, j int) { + (*a)[i], (*a)[j] = (*a)[j], (*a)[i] +} + +func (a *uint32Array) typeMatch(v Value) bool { + if i, ok := v.(valueInt); ok { + return i >= 0 && i <= math.MaxUint32 + } + return false +} + +func (a *int32Array) get(idx int) Value { + return intToValue(int64((*a)[idx])) +} + +func (a *int32Array) getRaw(idx int) uint64 { + return uint64((*a)[idx]) +} + +func (a *int32Array) set(idx int, value Value) { + (*a)[idx] = toInt32(value) +} + +func (a *int32Array) toRaw(v Value) uint64 { + return uint64(toInt32(v)) +} + +func (a *int32Array) setRaw(idx int, v uint64) { + (*a)[idx] = int32(v) +} + +func (a *int32Array) less(i, j int) bool { + return (*a)[i] < (*a)[j] +} + +func (a *int32Array) swap(i, j int) { + (*a)[i], (*a)[j] = (*a)[j], (*a)[i] +} + +func (a *int32Array) typeMatch(v Value) bool { + if i, ok := v.(valueInt); ok { + return i >= math.MinInt32 && i <= math.MaxInt32 + } + return false +} + +func (a *float32Array) get(idx int) Value { + return floatToValue(float64((*a)[idx])) +} + +func (a *float32Array) getRaw(idx int) uint64 { + return uint64(math.Float32bits((*a)[idx])) +} + +func (a *float32Array) set(idx int, value Value) { + (*a)[idx] = toFloat32(value) +} + +func (a *float32Array) toRaw(v Value) uint64 { + 
return uint64(math.Float32bits(toFloat32(v))) +} + +func (a *float32Array) setRaw(idx int, v uint64) { + (*a)[idx] = math.Float32frombits(uint32(v)) +} + +func typedFloatLess(x, y float64) bool { + xNan := math.IsNaN(x) + yNan := math.IsNaN(y) + if yNan { + return !xNan + } else if xNan { + return false + } + if x == 0 && y == 0 { // handle neg zero + return math.Signbit(x) + } + return x < y +} + +func (a *float32Array) less(i, j int) bool { + return typedFloatLess(float64((*a)[i]), float64((*a)[j])) +} + +func (a *float32Array) swap(i, j int) { + (*a)[i], (*a)[j] = (*a)[j], (*a)[i] +} + +func (a *float32Array) typeMatch(v Value) bool { + switch v.(type) { + case valueInt, valueFloat: + return true + } + return false +} + +func (a *float64Array) get(idx int) Value { + return floatToValue((*a)[idx]) +} + +func (a *float64Array) getRaw(idx int) uint64 { + return math.Float64bits((*a)[idx]) +} + +func (a *float64Array) set(idx int, value Value) { + (*a)[idx] = value.ToFloat() +} + +func (a *float64Array) toRaw(v Value) uint64 { + return math.Float64bits(v.ToFloat()) +} + +func (a *float64Array) setRaw(idx int, v uint64) { + (*a)[idx] = math.Float64frombits(v) +} + +func (a *float64Array) less(i, j int) bool { + return typedFloatLess((*a)[i], (*a)[j]) +} + +func (a *float64Array) swap(i, j int) { + (*a)[i], (*a)[j] = (*a)[j], (*a)[i] +} + +func (a *float64Array) typeMatch(v Value) bool { + switch v.(type) { + case valueInt, valueFloat: + return true + } + return false +} + +func (a *typedArrayObject) _getIdx(idx int) Value { + if 0 <= idx && idx < a.length { + if !a.viewedArrayBuf.ensureNotDetached(false) { + return nil + } + return a.typedArray.get(idx + a.offset) + } + return nil +} + +func (a *typedArrayObject) getOwnPropStr(name unistring.String) Value { + idx, ok := strToIntNum(name) + if ok { + v := a._getIdx(idx) + if v != nil { + return &valueProperty{ + value: v, + writable: true, + enumerable: true, + configurable: true, + } + } + return nil + } + if idx == 
0 { + return nil + } + return a.baseObject.getOwnPropStr(name) +} + +func (a *typedArrayObject) getOwnPropIdx(idx valueInt) Value { + v := a._getIdx(toIntClamp(int64(idx))) + if v != nil { + return &valueProperty{ + value: v, + writable: true, + enumerable: true, + configurable: true, + } + } + return nil +} + +func (a *typedArrayObject) getStr(name unistring.String, receiver Value) Value { + idx, ok := strToIntNum(name) + if ok { + return a._getIdx(idx) + } + if idx == 0 { + return nil + } + return a.baseObject.getStr(name, receiver) +} + +func (a *typedArrayObject) getIdx(idx valueInt, receiver Value) Value { + return a._getIdx(toIntClamp(int64(idx))) +} + +func (a *typedArrayObject) isValidIntegerIndex(idx int, throw bool) bool { + if a.viewedArrayBuf.ensureNotDetached(throw) { + if idx >= 0 && idx < a.length { + return true + } + a.val.runtime.typeErrorResult(throw, "Invalid typed array index") + } + return false +} + +func (a *typedArrayObject) _putIdx(idx int, v Value) { + v = v.ToNumber() + if a.isValidIntegerIndex(idx, false) { + a.typedArray.set(idx+a.offset, v) + } +} + +func (a *typedArrayObject) _hasIdx(idx int) bool { + return a.isValidIntegerIndex(idx, false) +} + +func (a *typedArrayObject) setOwnStr(p unistring.String, v Value, throw bool) bool { + idx, ok := strToIntNum(p) + if ok { + a._putIdx(idx, v) + return true + } + if idx == 0 { + v.ToNumber() // make sure it throws + return false + } + return a.baseObject.setOwnStr(p, v, throw) +} + +func (a *typedArrayObject) setOwnIdx(p valueInt, v Value, throw bool) bool { + a._putIdx(toIntClamp(int64(p)), v) + return true +} + +func (a *typedArrayObject) setForeignStr(p unistring.String, v, receiver Value, throw bool) (res bool, handled bool) { + return a._setForeignStr(p, a.getOwnPropStr(p), v, receiver, throw) +} + +func (a *typedArrayObject) setForeignIdx(p valueInt, v, receiver Value, throw bool) (res bool, handled bool) { + return a._setForeignIdx(p, trueValIfPresent(a.hasOwnPropertyIdx(p)), v, 
receiver, throw) +} + +func (a *typedArrayObject) hasOwnPropertyStr(name unistring.String) bool { + idx, ok := strToIntNum(name) + if ok { + return a._hasIdx(idx) + } + if idx == 0 { + return false + } + return a.baseObject.hasOwnPropertyStr(name) +} + +func (a *typedArrayObject) hasOwnPropertyIdx(idx valueInt) bool { + return a._hasIdx(toIntClamp(int64(idx))) +} + +func (a *typedArrayObject) _defineIdxProperty(idx int, desc PropertyDescriptor, throw bool) bool { + if desc.Configurable == FLAG_FALSE || desc.Enumerable == FLAG_FALSE || desc.IsAccessor() || desc.Writable == FLAG_FALSE { + a.val.runtime.typeErrorResult(throw, "Cannot redefine property: %d", idx) + return false + } + _, ok := a._defineOwnProperty(unistring.String(strconv.Itoa(idx)), a.getOwnPropIdx(valueInt(idx)), desc, throw) + if ok { + if !a.isValidIntegerIndex(idx, throw) { + return false + } + a._putIdx(idx, desc.Value) + return true + } + return ok +} + +func (a *typedArrayObject) defineOwnPropertyStr(name unistring.String, desc PropertyDescriptor, throw bool) bool { + idx, ok := strToIntNum(name) + if ok { + return a._defineIdxProperty(idx, desc, throw) + } + if idx == 0 { + a.viewedArrayBuf.ensureNotDetached(throw) + a.val.runtime.typeErrorResult(throw, "Invalid typed array index") + return false + } + return a.baseObject.defineOwnPropertyStr(name, desc, throw) +} + +func (a *typedArrayObject) defineOwnPropertyIdx(name valueInt, desc PropertyDescriptor, throw bool) bool { + return a._defineIdxProperty(toIntClamp(int64(name)), desc, throw) +} + +func (a *typedArrayObject) deleteStr(name unistring.String, throw bool) bool { + idx, ok := strToIntNum(name) + if ok { + if !a.isValidIntegerIndex(idx, false) { + a.val.runtime.typeErrorResult(throw, "Cannot delete property '%d' of %s", idx, a.val.String()) + return false + } + return true + } + if idx == 0 { + return true + } + return a.baseObject.deleteStr(name, throw) +} + +func (a *typedArrayObject) deleteIdx(idx valueInt, throw bool) bool { + if 
a.viewedArrayBuf.ensureNotDetached(throw) && idx >= 0 && int64(idx) < int64(a.length) { + a.val.runtime.typeErrorResult(throw, "Cannot delete property '%d' of %s", idx, a.val.String()) + return false + } + + return true +} + +func (a *typedArrayObject) ownKeys(all bool, accum []Value) []Value { + if accum == nil { + accum = make([]Value, 0, a.length) + } + for i := 0; i < a.length; i++ { + accum = append(accum, asciiString(strconv.Itoa(i))) + } + return a.baseObject.ownKeys(all, accum) +} + +type typedArrayPropIter struct { + a *typedArrayObject + idx int +} + +func (i *typedArrayPropIter) next() (propIterItem, iterNextFunc) { + if i.idx < i.a.length { + name := strconv.Itoa(i.idx) + prop := i.a._getIdx(i.idx) + i.idx++ + return propIterItem{name: unistring.String(name), value: prop}, i.next + } + + return i.a.baseObject.enumerateOwnKeys()() +} + +func (a *typedArrayObject) enumerateOwnKeys() iterNextFunc { + return (&typedArrayPropIter{ + a: a, + }).next +} + +func (r *Runtime) _newTypedArrayObject(buf *arrayBufferObject, offset, length, elemSize int, defCtor *Object, arr typedArray, proto *Object) *typedArrayObject { + o := &Object{runtime: r} + a := &typedArrayObject{ + baseObject: baseObject{ + val: o, + class: classObject, + prototype: proto, + extensible: true, + }, + viewedArrayBuf: buf, + offset: offset, + length: length, + elemSize: elemSize, + defaultCtor: defCtor, + typedArray: arr, + } + o.self = a + a.init() + return a + +} + +func (r *Runtime) newUint8ArrayObject(buf *arrayBufferObject, offset, length int, proto *Object) *typedArrayObject { + return r._newTypedArrayObject(buf, offset, length, 1, r.global.Uint8Array, (*uint8Array)(&buf.data), proto) +} + +func (r *Runtime) newUint8ClampedArrayObject(buf *arrayBufferObject, offset, length int, proto *Object) *typedArrayObject { + return r._newTypedArrayObject(buf, offset, length, 1, r.global.Uint8ClampedArray, (*uint8ClampedArray)(&buf.data), proto) +} + +func (r *Runtime) newInt8ArrayObject(buf 
*arrayBufferObject, offset, length int, proto *Object) *typedArrayObject { + return r._newTypedArrayObject(buf, offset, length, 1, r.global.Int8Array, (*int8Array)(unsafe.Pointer(&buf.data)), proto) +} + +func (r *Runtime) newUint16ArrayObject(buf *arrayBufferObject, offset, length int, proto *Object) *typedArrayObject { + return r._newTypedArrayObject(buf, offset, length, 2, r.global.Uint16Array, (*uint16Array)(unsafe.Pointer(&buf.data)), proto) +} + +func (r *Runtime) newInt16ArrayObject(buf *arrayBufferObject, offset, length int, proto *Object) *typedArrayObject { + return r._newTypedArrayObject(buf, offset, length, 2, r.global.Int16Array, (*int16Array)(unsafe.Pointer(&buf.data)), proto) +} + +func (r *Runtime) newUint32ArrayObject(buf *arrayBufferObject, offset, length int, proto *Object) *typedArrayObject { + return r._newTypedArrayObject(buf, offset, length, 4, r.global.Uint32Array, (*uint32Array)(unsafe.Pointer(&buf.data)), proto) +} + +func (r *Runtime) newInt32ArrayObject(buf *arrayBufferObject, offset, length int, proto *Object) *typedArrayObject { + return r._newTypedArrayObject(buf, offset, length, 4, r.global.Int32Array, (*int32Array)(unsafe.Pointer(&buf.data)), proto) +} + +func (r *Runtime) newFloat32ArrayObject(buf *arrayBufferObject, offset, length int, proto *Object) *typedArrayObject { + return r._newTypedArrayObject(buf, offset, length, 4, r.global.Float32Array, (*float32Array)(unsafe.Pointer(&buf.data)), proto) +} + +func (r *Runtime) newFloat64ArrayObject(buf *arrayBufferObject, offset, length int, proto *Object) *typedArrayObject { + return r._newTypedArrayObject(buf, offset, length, 8, r.global.Float64Array, (*float64Array)(unsafe.Pointer(&buf.data)), proto) +} + +func (o *dataViewObject) getIdxAndByteOrder(idxVal, littleEndianVal Value, size int) (int, byteOrder) { + getIdx := o.val.runtime.toIndex(idxVal) + o.viewedArrayBuf.ensureNotDetached(true) + if getIdx+size > o.byteLen { + 
panic(o.val.runtime.newError(o.val.runtime.global.RangeError, "Index %d is out of bounds", getIdx)) + } + getIdx += o.byteOffset + var bo byteOrder + if littleEndianVal != nil { + if littleEndianVal.ToBoolean() { + bo = littleEndian + } else { + bo = bigEndian + } + } else { + bo = nativeEndian + } + return getIdx, bo +} + +func (o *arrayBufferObject) ensureNotDetached(throw bool) bool { + if o.detached { + o.val.runtime.typeErrorResult(throw, "ArrayBuffer is detached") + return false + } + return true +} + +func (o *arrayBufferObject) getFloat32(idx int, byteOrder byteOrder) float32 { + return math.Float32frombits(o.getUint32(idx, byteOrder)) +} + +func (o *arrayBufferObject) setFloat32(idx int, val float32, byteOrder byteOrder) { + o.setUint32(idx, math.Float32bits(val), byteOrder) +} + +func (o *arrayBufferObject) getFloat64(idx int, byteOrder byteOrder) float64 { + return math.Float64frombits(o.getUint64(idx, byteOrder)) +} + +func (o *arrayBufferObject) setFloat64(idx int, val float64, byteOrder byteOrder) { + o.setUint64(idx, math.Float64bits(val), byteOrder) +} + +func (o *arrayBufferObject) getUint64(idx int, byteOrder byteOrder) uint64 { + var b []byte + if byteOrder == nativeEndian { + b = o.data[idx : idx+8] + } else { + b = make([]byte, 8) + d := o.data[idx : idx+8] + b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7] = d[7], d[6], d[5], d[4], d[3], d[2], d[1], d[0] + } + return *((*uint64)(unsafe.Pointer(&b[0]))) +} + +func (o *arrayBufferObject) setUint64(idx int, val uint64, byteOrder byteOrder) { + if byteOrder == nativeEndian { + *(*uint64)(unsafe.Pointer(&o.data[idx])) = val + } else { + b := (*[8]byte)(unsafe.Pointer(&val)) + d := o.data[idx : idx+8] + d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7] = b[7], b[6], b[5], b[4], b[3], b[2], b[1], b[0] + } +} + +func (o *arrayBufferObject) getUint32(idx int, byteOrder byteOrder) uint32 { + var b []byte + if byteOrder == nativeEndian { + b = o.data[idx : idx+4] + } else { + b = make([]byte, 4) + d := 
o.data[idx : idx+4] + b[0], b[1], b[2], b[3] = d[3], d[2], d[1], d[0] + } + return *((*uint32)(unsafe.Pointer(&b[0]))) +} + +func (o *arrayBufferObject) setUint32(idx int, val uint32, byteOrder byteOrder) { + if byteOrder == nativeEndian { + *(*uint32)(unsafe.Pointer(&o.data[idx])) = val + } else { + b := (*[4]byte)(unsafe.Pointer(&val)) + d := o.data[idx : idx+4] + d[0], d[1], d[2], d[3] = b[3], b[2], b[1], b[0] + } +} + +func (o *arrayBufferObject) getUint16(idx int, byteOrder byteOrder) uint16 { + var b []byte + if byteOrder == nativeEndian { + b = o.data[idx : idx+2] + } else { + b = make([]byte, 2) + d := o.data[idx : idx+2] + b[0], b[1] = d[1], d[0] + } + return *((*uint16)(unsafe.Pointer(&b[0]))) +} + +func (o *arrayBufferObject) setUint16(idx int, val uint16, byteOrder byteOrder) { + if byteOrder == nativeEndian { + *(*uint16)(unsafe.Pointer(&o.data[idx])) = val + } else { + b := (*[2]byte)(unsafe.Pointer(&val)) + d := o.data[idx : idx+2] + d[0], d[1] = b[1], b[0] + } +} + +func (o *arrayBufferObject) getUint8(idx int) uint8 { + return o.data[idx] +} + +func (o *arrayBufferObject) setUint8(idx int, val uint8) { + o.data[idx] = val +} + +func (o *arrayBufferObject) getInt32(idx int, byteOrder byteOrder) int32 { + return int32(o.getUint32(idx, byteOrder)) +} + +func (o *arrayBufferObject) setInt32(idx int, val int32, byteOrder byteOrder) { + o.setUint32(idx, uint32(val), byteOrder) +} + +func (o *arrayBufferObject) getInt16(idx int, byteOrder byteOrder) int16 { + return int16(o.getUint16(idx, byteOrder)) +} + +func (o *arrayBufferObject) setInt16(idx int, val int16, byteOrder byteOrder) { + o.setUint16(idx, uint16(val), byteOrder) +} + +func (o *arrayBufferObject) getInt8(idx int) int8 { + return int8(o.data[idx]) +} + +func (o *arrayBufferObject) setInt8(idx int, val int8) { + o.setUint8(idx, uint8(val)) +} + +func (o *arrayBufferObject) detach() { + o.data = nil + o.detached = true +} + +func (o *arrayBufferObject) exportType() reflect.Type { + return 
arrayBufferType +} + +func (o *arrayBufferObject) export(*objectExportCtx) interface{} { + return ArrayBuffer{ + buf: o, + } +} + +func (r *Runtime) _newArrayBuffer(proto *Object, o *Object) *arrayBufferObject { + if o == nil { + o = &Object{runtime: r} + } + b := &arrayBufferObject{ + baseObject: baseObject{ + class: classObject, + val: o, + prototype: proto, + extensible: true, + }, + } + o.self = b + b.init() + return b +} + +func init() { + buf := [2]byte{} + *(*uint16)(unsafe.Pointer(&buf[0])) = uint16(0xCAFE) + + switch buf { + case [2]byte{0xFE, 0xCA}: + nativeEndian = littleEndian + case [2]byte{0xCA, 0xFE}: + nativeEndian = bigEndian + default: + panic("Could not determine native endianness.") + } +} diff --git a/vendor/github.com/dop251/goja/unistring/string.go b/vendor/github.com/dop251/goja/unistring/string.go new file mode 100644 index 0000000000..481f06cf68 --- /dev/null +++ b/vendor/github.com/dop251/goja/unistring/string.go @@ -0,0 +1,122 @@ +// Package unistring contains an implementation of a hybrid ASCII/UTF-16 string. +// For ASCII strings the underlying representation is equivalent to a normal Go string. +// For unicode strings the underlying representation is UTF-16 as []uint16 with 0th element set to 0xFEFF. +// unicode.String allows representing malformed UTF-16 values (e.g. stand-alone parts of surrogate pairs) +// which cannot be represented in UTF-8. +// At the same time it is possible to use unicode.String as property keys just as efficiently as simple strings, +// (the leading 0xFEFF ensures there is no clash with ASCII string), and it is possible to convert it +// to valueString without extra allocations. 
+package unistring + +import ( + "reflect" + "unicode/utf16" + "unicode/utf8" + "unsafe" +) + +const ( + BOM = 0xFEFF +) + +type String string + +func NewFromString(s string) String { + ascii := true + size := 0 + for _, c := range s { + if c >= utf8.RuneSelf { + ascii = false + if c > 0xFFFF { + size++ + } + } + size++ + } + if ascii { + return String(s) + } + b := make([]uint16, size+1) + b[0] = BOM + i := 1 + for _, c := range s { + if c <= 0xFFFF { + b[i] = uint16(c) + } else { + first, second := utf16.EncodeRune(c) + b[i] = uint16(first) + i++ + b[i] = uint16(second) + } + i++ + } + return FromUtf16(b) +} + +func NewFromRunes(s []rune) String { + ascii := true + size := 0 + for _, c := range s { + if c >= utf8.RuneSelf { + ascii = false + if c > 0xFFFF { + size++ + } + } + size++ + } + if ascii { + return String(s) + } + b := make([]uint16, size+1) + b[0] = BOM + i := 1 + for _, c := range s { + if c <= 0xFFFF { + b[i] = uint16(c) + } else { + first, second := utf16.EncodeRune(c) + b[i] = uint16(first) + i++ + b[i] = uint16(second) + } + i++ + } + return FromUtf16(b) +} + +func FromUtf16(b []uint16) String { + var str string + hdr := (*reflect.StringHeader)(unsafe.Pointer(&str)) + hdr.Data = uintptr(unsafe.Pointer(&b[0])) + hdr.Len = len(b) * 2 + + return String(str) +} + +func (s String) String() string { + if b := s.AsUtf16(); b != nil { + return string(utf16.Decode(b[1:])) + } + + return string(s) +} + +func (s String) AsUtf16() []uint16 { + if len(s) < 4 || len(s)&1 != 0 { + return nil + } + l := len(s) / 2 + raw := string(s) + hdr := (*reflect.StringHeader)(unsafe.Pointer(&raw)) + a := *(*[]uint16)(unsafe.Pointer(&reflect.SliceHeader{ + Data: hdr.Data, + Len: l, + Cap: l, + })) + if a[0] == BOM { + return a + } + + return nil +} diff --git a/vendor/github.com/dop251/goja/value.go b/vendor/github.com/dop251/goja/value.go new file mode 100644 index 0000000000..dd492148da --- /dev/null +++ b/vendor/github.com/dop251/goja/value.go @@ -0,0 +1,1092 @@ +package 
goja + +import ( + "hash/maphash" + "math" + "reflect" + "strconv" + "unsafe" + + "github.com/dop251/goja/ftoa" + "github.com/dop251/goja/unistring" +) + +var ( + // Not goroutine-safe, do not use for anything other than package level init + pkgHasher maphash.Hash + + hashFalse = randomHash() + hashTrue = randomHash() + hashNull = randomHash() + hashUndef = randomHash() +) + +// Not goroutine-safe, do not use for anything other than package level init +func randomHash() uint64 { + pkgHasher.WriteByte(0) + return pkgHasher.Sum64() +} + +var ( + valueFalse Value = valueBool(false) + valueTrue Value = valueBool(true) + _null Value = valueNull{} + _NaN Value = valueFloat(math.NaN()) + _positiveInf Value = valueFloat(math.Inf(+1)) + _negativeInf Value = valueFloat(math.Inf(-1)) + _positiveZero Value = valueInt(0) + negativeZero = math.Float64frombits(0 | (1 << 63)) + _negativeZero Value = valueFloat(negativeZero) + _epsilon = valueFloat(2.2204460492503130808472633361816e-16) + _undefined Value = valueUndefined{} +) + +var ( + reflectTypeInt = reflect.TypeOf(int64(0)) + reflectTypeBool = reflect.TypeOf(false) + reflectTypeNil = reflect.TypeOf(nil) + reflectTypeFloat = reflect.TypeOf(float64(0)) + reflectTypeMap = reflect.TypeOf(map[string]interface{}{}) + reflectTypeArray = reflect.TypeOf([]interface{}{}) + reflectTypeString = reflect.TypeOf("") +) + +var intCache [256]Value + +type Value interface { + ToInteger() int64 + toString() valueString + string() unistring.String + ToString() Value + String() string + ToFloat() float64 + ToNumber() Value + ToBoolean() bool + ToObject(*Runtime) *Object + SameAs(Value) bool + Equals(Value) bool + StrictEquals(Value) bool + Export() interface{} + ExportType() reflect.Type + + baseObject(r *Runtime) *Object + + hash(hasher *maphash.Hash) uint64 +} + +type valueContainer interface { + toValue(*Runtime) Value +} + +type typeError string +type rangeError string +type referenceError string + +type valueInt int64 +type valueFloat float64 
+type valueBool bool +type valueNull struct{} +type valueUndefined struct { + valueNull +} + +// *Symbol is a Value containing ECMAScript Symbol primitive. Symbols must only be created +// using NewSymbol(). Zero values and copying of values (i.e. *s1 = *s2) are not permitted. +// Well-known Symbols can be accessed using Sym* package variables (SymIterator, etc...) +// Symbols can be shared by multiple Runtimes. +type Symbol struct { + h uintptr + desc valueString +} + +type valueUnresolved struct { + r *Runtime + ref unistring.String +} + +type memberUnresolved struct { + valueUnresolved +} + +type valueProperty struct { + value Value + writable bool + configurable bool + enumerable bool + accessor bool + getterFunc *Object + setterFunc *Object +} + +var ( + errAccessBeforeInit = referenceError("Cannot access a variable before initialization") + errAssignToConst = typeError("Assignment to constant variable.") +) + +func propGetter(o Value, v Value, r *Runtime) *Object { + if v == _undefined { + return nil + } + if obj, ok := v.(*Object); ok { + if _, ok := obj.self.assertCallable(); ok { + return obj + } + } + r.typeErrorResult(true, "Getter must be a function: %s", v.toString()) + return nil +} + +func propSetter(o Value, v Value, r *Runtime) *Object { + if v == _undefined { + return nil + } + if obj, ok := v.(*Object); ok { + if _, ok := obj.self.assertCallable(); ok { + return obj + } + } + r.typeErrorResult(true, "Setter must be a function: %s", v.toString()) + return nil +} + +func fToStr(num float64, mode ftoa.FToStrMode, prec int) string { + var buf1 [128]byte + return string(ftoa.FToStr(num, mode, prec, buf1[:0])) +} + +func (i valueInt) ToInteger() int64 { + return int64(i) +} + +func (i valueInt) toString() valueString { + return asciiString(i.String()) +} + +func (i valueInt) string() unistring.String { + return unistring.String(i.String()) +} + +func (i valueInt) ToString() Value { + return i +} + +func (i valueInt) String() string { + return 
strconv.FormatInt(int64(i), 10) +} + +func (i valueInt) ToFloat() float64 { + return float64(i) +} + +func (i valueInt) ToBoolean() bool { + return i != 0 +} + +func (i valueInt) ToObject(r *Runtime) *Object { + return r.newPrimitiveObject(i, r.global.NumberPrototype, classNumber) +} + +func (i valueInt) ToNumber() Value { + return i +} + +func (i valueInt) SameAs(other Value) bool { + return i == other +} + +func (i valueInt) Equals(other Value) bool { + switch o := other.(type) { + case valueInt: + return i == o + case valueFloat: + return float64(i) == float64(o) + case valueString: + return o.ToNumber().Equals(i) + case valueBool: + return int64(i) == o.ToInteger() + case *Object: + return i.Equals(o.toPrimitive()) + } + + return false +} + +func (i valueInt) StrictEquals(other Value) bool { + switch o := other.(type) { + case valueInt: + return i == o + case valueFloat: + return float64(i) == float64(o) + } + + return false +} + +func (i valueInt) baseObject(r *Runtime) *Object { + return r.global.NumberPrototype +} + +func (i valueInt) Export() interface{} { + return int64(i) +} + +func (i valueInt) ExportType() reflect.Type { + return reflectTypeInt +} + +func (i valueInt) hash(*maphash.Hash) uint64 { + return uint64(i) +} + +func (b valueBool) ToInteger() int64 { + if b { + return 1 + } + return 0 +} + +func (b valueBool) toString() valueString { + if b { + return stringTrue + } + return stringFalse +} + +func (b valueBool) ToString() Value { + return b +} + +func (b valueBool) String() string { + if b { + return "true" + } + return "false" +} + +func (b valueBool) string() unistring.String { + return unistring.String(b.String()) +} + +func (b valueBool) ToFloat() float64 { + if b { + return 1.0 + } + return 0 +} + +func (b valueBool) ToBoolean() bool { + return bool(b) +} + +func (b valueBool) ToObject(r *Runtime) *Object { + return r.newPrimitiveObject(b, r.global.BooleanPrototype, "Boolean") +} + +func (b valueBool) ToNumber() Value { + if b { + return 
valueInt(1) + } + return valueInt(0) +} + +func (b valueBool) SameAs(other Value) bool { + if other, ok := other.(valueBool); ok { + return b == other + } + return false +} + +func (b valueBool) Equals(other Value) bool { + if o, ok := other.(valueBool); ok { + return b == o + } + + if b { + return other.Equals(intToValue(1)) + } else { + return other.Equals(intToValue(0)) + } + +} + +func (b valueBool) StrictEquals(other Value) bool { + if other, ok := other.(valueBool); ok { + return b == other + } + return false +} + +func (b valueBool) baseObject(r *Runtime) *Object { + return r.global.BooleanPrototype +} + +func (b valueBool) Export() interface{} { + return bool(b) +} + +func (b valueBool) ExportType() reflect.Type { + return reflectTypeBool +} + +func (b valueBool) hash(*maphash.Hash) uint64 { + if b { + return hashTrue + } + + return hashFalse +} + +func (n valueNull) ToInteger() int64 { + return 0 +} + +func (n valueNull) toString() valueString { + return stringNull +} + +func (n valueNull) string() unistring.String { + return stringNull.string() +} + +func (n valueNull) ToString() Value { + return n +} + +func (n valueNull) String() string { + return "null" +} + +func (u valueUndefined) toString() valueString { + return stringUndefined +} + +func (u valueUndefined) ToString() Value { + return u +} + +func (u valueUndefined) String() string { + return "undefined" +} + +func (u valueUndefined) string() unistring.String { + return "undefined" +} + +func (u valueUndefined) ToNumber() Value { + return _NaN +} + +func (u valueUndefined) SameAs(other Value) bool { + _, same := other.(valueUndefined) + return same +} + +func (u valueUndefined) StrictEquals(other Value) bool { + _, same := other.(valueUndefined) + return same +} + +func (u valueUndefined) ToFloat() float64 { + return math.NaN() +} + +func (u valueUndefined) hash(*maphash.Hash) uint64 { + return hashUndef +} + +func (n valueNull) ToFloat() float64 { + return 0 +} + +func (n valueNull) ToBoolean() 
bool { + return false +} + +func (n valueNull) ToObject(r *Runtime) *Object { + r.typeErrorResult(true, "Cannot convert undefined or null to object") + return nil + //return r.newObject() +} + +func (n valueNull) ToNumber() Value { + return intToValue(0) +} + +func (n valueNull) SameAs(other Value) bool { + _, same := other.(valueNull) + return same +} + +func (n valueNull) Equals(other Value) bool { + switch other.(type) { + case valueUndefined, valueNull: + return true + } + return false +} + +func (n valueNull) StrictEquals(other Value) bool { + _, same := other.(valueNull) + return same +} + +func (n valueNull) baseObject(*Runtime) *Object { + return nil +} + +func (n valueNull) Export() interface{} { + return nil +} + +func (n valueNull) ExportType() reflect.Type { + return reflectTypeNil +} + +func (n valueNull) hash(*maphash.Hash) uint64 { + return hashNull +} + +func (p *valueProperty) ToInteger() int64 { + return 0 +} + +func (p *valueProperty) toString() valueString { + return stringEmpty +} + +func (p *valueProperty) string() unistring.String { + return "" +} + +func (p *valueProperty) ToString() Value { + return _undefined +} + +func (p *valueProperty) String() string { + return "" +} + +func (p *valueProperty) ToFloat() float64 { + return math.NaN() +} + +func (p *valueProperty) ToBoolean() bool { + return false +} + +func (p *valueProperty) ToObject(*Runtime) *Object { + return nil +} + +func (p *valueProperty) ToNumber() Value { + return nil +} + +func (p *valueProperty) isWritable() bool { + return p.writable || p.setterFunc != nil +} + +func (p *valueProperty) get(this Value) Value { + if p.getterFunc == nil { + if p.value != nil { + return p.value + } + return _undefined + } + call, _ := p.getterFunc.self.assertCallable() + return call(FunctionCall{ + This: this, + }) +} + +func (p *valueProperty) set(this, v Value) { + if p.setterFunc == nil { + p.value = v + return + } + call, _ := p.setterFunc.self.assertCallable() + call(FunctionCall{ + This: 
this, + Arguments: []Value{v}, + }) +} + +func (p *valueProperty) SameAs(other Value) bool { + if otherProp, ok := other.(*valueProperty); ok { + return p == otherProp + } + return false +} + +func (p *valueProperty) Equals(Value) bool { + return false +} + +func (p *valueProperty) StrictEquals(Value) bool { + return false +} + +func (p *valueProperty) baseObject(r *Runtime) *Object { + r.typeErrorResult(true, "BUG: baseObject() is called on valueProperty") // TODO error message + return nil +} + +func (p *valueProperty) Export() interface{} { + panic("Cannot export valueProperty") +} + +func (p *valueProperty) ExportType() reflect.Type { + panic("Cannot export valueProperty") +} + +func (p *valueProperty) hash(*maphash.Hash) uint64 { + panic("valueProperty should never be used in maps or sets") +} + +func floatToIntClip(n float64) int64 { + switch { + case math.IsNaN(n): + return 0 + case n >= math.MaxInt64: + return math.MaxInt64 + case n <= math.MinInt64: + return math.MinInt64 + } + return int64(n) +} + +func (f valueFloat) ToInteger() int64 { + return floatToIntClip(float64(f)) +} + +func (f valueFloat) toString() valueString { + return asciiString(f.String()) +} + +func (f valueFloat) string() unistring.String { + return unistring.String(f.String()) +} + +func (f valueFloat) ToString() Value { + return f +} + +func (f valueFloat) String() string { + return fToStr(float64(f), ftoa.ModeStandard, 0) +} + +func (f valueFloat) ToFloat() float64 { + return float64(f) +} + +func (f valueFloat) ToBoolean() bool { + return float64(f) != 0.0 && !math.IsNaN(float64(f)) +} + +func (f valueFloat) ToObject(r *Runtime) *Object { + return r.newPrimitiveObject(f, r.global.NumberPrototype, "Number") +} + +func (f valueFloat) ToNumber() Value { + return f +} + +func (f valueFloat) SameAs(other Value) bool { + switch o := other.(type) { + case valueFloat: + this := float64(f) + o1 := float64(o) + if math.IsNaN(this) && math.IsNaN(o1) { + return true + } else { + ret := this == 
o1 + if ret && this == 0 { + ret = math.Signbit(this) == math.Signbit(o1) + } + return ret + } + case valueInt: + this := float64(f) + ret := this == float64(o) + if ret && this == 0 { + ret = !math.Signbit(this) + } + return ret + } + + return false +} + +func (f valueFloat) Equals(other Value) bool { + switch o := other.(type) { + case valueFloat: + return f == o + case valueInt: + return float64(f) == float64(o) + case valueString, valueBool: + return float64(f) == o.ToFloat() + case *Object: + return f.Equals(o.toPrimitive()) + } + + return false +} + +func (f valueFloat) StrictEquals(other Value) bool { + switch o := other.(type) { + case valueFloat: + return f == o + case valueInt: + return float64(f) == float64(o) + } + + return false +} + +func (f valueFloat) baseObject(r *Runtime) *Object { + return r.global.NumberPrototype +} + +func (f valueFloat) Export() interface{} { + return float64(f) +} + +func (f valueFloat) ExportType() reflect.Type { + return reflectTypeFloat +} + +func (f valueFloat) hash(*maphash.Hash) uint64 { + if f == _negativeZero { + return 0 + } + return math.Float64bits(float64(f)) +} + +func (o *Object) ToInteger() int64 { + return o.toPrimitiveNumber().ToNumber().ToInteger() +} + +func (o *Object) toString() valueString { + return o.toPrimitiveString().toString() +} + +func (o *Object) string() unistring.String { + return o.toPrimitiveString().string() +} + +func (o *Object) ToString() Value { + return o.toPrimitiveString().ToString() +} + +func (o *Object) String() string { + return o.toPrimitiveString().String() +} + +func (o *Object) ToFloat() float64 { + return o.toPrimitiveNumber().ToFloat() +} + +func (o *Object) ToBoolean() bool { + return true +} + +func (o *Object) ToObject(*Runtime) *Object { + return o +} + +func (o *Object) ToNumber() Value { + return o.toPrimitiveNumber().ToNumber() +} + +func (o *Object) SameAs(other Value) bool { + if other, ok := other.(*Object); ok { + return o == other + } + return false +} + +func 
(o *Object) Equals(other Value) bool { + if other, ok := other.(*Object); ok { + return o == other || o.self.equal(other.self) + } + + switch o1 := other.(type) { + case valueInt, valueFloat, valueString, *Symbol: + return o.toPrimitive().Equals(other) + case valueBool: + return o.Equals(o1.ToNumber()) + } + + return false +} + +func (o *Object) StrictEquals(other Value) bool { + if other, ok := other.(*Object); ok { + return o == other || o.self.equal(other.self) + } + return false +} + +func (o *Object) baseObject(*Runtime) *Object { + return o +} + +// Export the Object to a plain Go type. The returned value will be map[string]interface{} unless +// the Object is a wrapped Go value (created using ToValue()). +// This method will panic with an *Exception if a JavaScript exception is thrown in the process. +func (o *Object) Export() (ret interface{}) { + o.runtime.tryPanic(func() { + ret = o.self.export(&objectExportCtx{}) + }) + + return +} + +func (o *Object) ExportType() reflect.Type { + return o.self.exportType() +} + +func (o *Object) hash(*maphash.Hash) uint64 { + return o.getId() +} + +// Get an object's property by name. +// This method will panic with an *Exception if a JavaScript exception is thrown in the process. +func (o *Object) Get(name string) Value { + return o.self.getStr(unistring.NewFromString(name), nil) +} + +// GetSymbol returns the value of a symbol property. Use one of the Sym* values for well-known +// symbols (such as SymIterator, SymToStringTag, etc...). +// This method will panic with an *Exception if a JavaScript exception is thrown in the process. +func (o *Object) GetSymbol(sym *Symbol) Value { + return o.self.getSym(sym, nil) +} + +// Keys returns a list of Object's enumerable keys. +// This method will panic with an *Exception if a JavaScript exception is thrown in the process. 
+func (o *Object) Keys() (keys []string) { + iter := &enumerableIter{ + wrapped: o.self.enumerateOwnKeys(), + } + for item, next := iter.next(); next != nil; item, next = next() { + keys = append(keys, item.name.String()) + } + + return +} + +// Symbols returns a list of Object's enumerable symbol properties. +// This method will panic with an *Exception if a JavaScript exception is thrown in the process. +func (o *Object) Symbols() []*Symbol { + symbols := o.self.ownSymbols(false, nil) + ret := make([]*Symbol, len(symbols)) + for i, sym := range symbols { + ret[i], _ = sym.(*Symbol) + } + return ret +} + +// DefineDataProperty is a Go equivalent of Object.defineProperty(o, name, {value: value, writable: writable, +// configurable: configurable, enumerable: enumerable}) +func (o *Object) DefineDataProperty(name string, value Value, writable, configurable, enumerable Flag) error { + return o.runtime.try(func() { + o.self.defineOwnPropertyStr(unistring.NewFromString(name), PropertyDescriptor{ + Value: value, + Writable: writable, + Configurable: configurable, + Enumerable: enumerable, + }, true) + }) +} + +// DefineAccessorProperty is a Go equivalent of Object.defineProperty(o, name, {get: getter, set: setter, +// configurable: configurable, enumerable: enumerable}) +func (o *Object) DefineAccessorProperty(name string, getter, setter Value, configurable, enumerable Flag) error { + return o.runtime.try(func() { + o.self.defineOwnPropertyStr(unistring.NewFromString(name), PropertyDescriptor{ + Getter: getter, + Setter: setter, + Configurable: configurable, + Enumerable: enumerable, + }, true) + }) +} + +// DefineDataPropertySymbol is a Go equivalent of Object.defineProperty(o, name, {value: value, writable: writable, +// configurable: configurable, enumerable: enumerable}) +func (o *Object) DefineDataPropertySymbol(name *Symbol, value Value, writable, configurable, enumerable Flag) error { + return o.runtime.try(func() { + o.self.defineOwnPropertySym(name, 
PropertyDescriptor{ + Value: value, + Writable: writable, + Configurable: configurable, + Enumerable: enumerable, + }, true) + }) +} + +// DefineAccessorPropertySymbol is a Go equivalent of Object.defineProperty(o, name, {get: getter, set: setter, +// configurable: configurable, enumerable: enumerable}) +func (o *Object) DefineAccessorPropertySymbol(name *Symbol, getter, setter Value, configurable, enumerable Flag) error { + return o.runtime.try(func() { + o.self.defineOwnPropertySym(name, PropertyDescriptor{ + Getter: getter, + Setter: setter, + Configurable: configurable, + Enumerable: enumerable, + }, true) + }) +} + +func (o *Object) Set(name string, value interface{}) error { + return o.runtime.try(func() { + o.self.setOwnStr(unistring.NewFromString(name), o.runtime.ToValue(value), true) + }) +} + +func (o *Object) SetSymbol(name *Symbol, value interface{}) error { + return o.runtime.try(func() { + o.self.setOwnSym(name, o.runtime.ToValue(value), true) + }) +} + +func (o *Object) Delete(name string) error { + return o.runtime.try(func() { + o.self.deleteStr(unistring.NewFromString(name), true) + }) +} + +func (o *Object) DeleteSymbol(name *Symbol) error { + return o.runtime.try(func() { + o.self.deleteSym(name, true) + }) +} + +// Prototype returns the Object's prototype, same as Object.getPrototypeOf(). If the prototype is null +// returns nil. +func (o *Object) Prototype() *Object { + return o.self.proto() +} + +// SetPrototype sets the Object's prototype, same as Object.setPrototypeOf(). Setting proto to nil +// is an equivalent of Object.setPrototypeOf(null). +func (o *Object) SetPrototype(proto *Object) error { + return o.runtime.try(func() { + o.self.setProto(proto, true) + }) +} + +// MarshalJSON returns JSON representation of the Object. It is equivalent to JSON.stringify(o). +// Note, this implements json.Marshaler so that json.Marshal() can be used without the need to Export(). 
+func (o *Object) MarshalJSON() ([]byte, error) { + ctx := _builtinJSON_stringifyContext{ + r: o.runtime, + } + ex := o.runtime.vm.try(func() { + if !ctx.do(o) { + ctx.buf.WriteString("null") + } + }) + if ex != nil { + return nil, ex + } + return ctx.buf.Bytes(), nil +} + +// ClassName returns the class name +func (o *Object) ClassName() string { + return o.self.className() +} + +func (o valueUnresolved) throw() { + o.r.throwReferenceError(o.ref) +} + +func (o valueUnresolved) ToInteger() int64 { + o.throw() + return 0 +} + +func (o valueUnresolved) toString() valueString { + o.throw() + return nil +} + +func (o valueUnresolved) string() unistring.String { + o.throw() + return "" +} + +func (o valueUnresolved) ToString() Value { + o.throw() + return nil +} + +func (o valueUnresolved) String() string { + o.throw() + return "" +} + +func (o valueUnresolved) ToFloat() float64 { + o.throw() + return 0 +} + +func (o valueUnresolved) ToBoolean() bool { + o.throw() + return false +} + +func (o valueUnresolved) ToObject(*Runtime) *Object { + o.throw() + return nil +} + +func (o valueUnresolved) ToNumber() Value { + o.throw() + return nil +} + +func (o valueUnresolved) SameAs(Value) bool { + o.throw() + return false +} + +func (o valueUnresolved) Equals(Value) bool { + o.throw() + return false +} + +func (o valueUnresolved) StrictEquals(Value) bool { + o.throw() + return false +} + +func (o valueUnresolved) baseObject(*Runtime) *Object { + o.throw() + return nil +} + +func (o valueUnresolved) Export() interface{} { + o.throw() + return nil +} + +func (o valueUnresolved) ExportType() reflect.Type { + o.throw() + return nil +} + +func (o valueUnresolved) hash(*maphash.Hash) uint64 { + o.throw() + return 0 +} + +func (s *Symbol) ToInteger() int64 { + panic(typeError("Cannot convert a Symbol value to a number")) +} + +func (s *Symbol) toString() valueString { + panic(typeError("Cannot convert a Symbol value to a string")) +} + +func (s *Symbol) ToString() Value { + return s +} 
+ +func (s *Symbol) String() string { + return s.desc.String() +} + +func (s *Symbol) string() unistring.String { + return s.desc.string() +} + +func (s *Symbol) ToFloat() float64 { + panic(typeError("Cannot convert a Symbol value to a number")) +} + +func (s *Symbol) ToNumber() Value { + panic(typeError("Cannot convert a Symbol value to a number")) +} + +func (s *Symbol) ToBoolean() bool { + return true +} + +func (s *Symbol) ToObject(r *Runtime) *Object { + return s.baseObject(r) +} + +func (s *Symbol) SameAs(other Value) bool { + if s1, ok := other.(*Symbol); ok { + return s == s1 + } + return false +} + +func (s *Symbol) Equals(o Value) bool { + switch o := o.(type) { + case *Object: + return s.Equals(o.toPrimitive()) + } + return s.SameAs(o) +} + +func (s *Symbol) StrictEquals(o Value) bool { + return s.SameAs(o) +} + +func (s *Symbol) Export() interface{} { + return s.String() +} + +func (s *Symbol) ExportType() reflect.Type { + return reflectTypeString +} + +func (s *Symbol) baseObject(r *Runtime) *Object { + return r.newPrimitiveObject(s, r.global.SymbolPrototype, "Symbol") +} + +func (s *Symbol) hash(*maphash.Hash) uint64 { + return uint64(s.h) +} + +func exportValue(v Value, ctx *objectExportCtx) interface{} { + if obj, ok := v.(*Object); ok { + return obj.self.export(ctx) + } + return v.Export() +} + +func newSymbol(s valueString) *Symbol { + r := &Symbol{ + desc: s, + } + // This may need to be reconsidered in the future. + // Depending on changes in Go's allocation policy and/or introduction of a compacting GC + // this may no longer provide sufficient dispersion. The alternative, however, is a globally + // synchronised random generator/hasher/sequencer and I don't want to go down that route just yet. 
+ r.h = uintptr(unsafe.Pointer(r)) + return r +} + +func NewSymbol(s string) *Symbol { + return newSymbol(newStringValue(s)) +} + +func (s *Symbol) descriptiveString() valueString { + if s.desc == nil { + return stringEmpty + } + return asciiString("Symbol(").concat(s.desc).concat(asciiString(")")) +} + +func init() { + for i := 0; i < 256; i++ { + intCache[i] = valueInt(i - 128) + } + _positiveZero = intToValue(0) +} diff --git a/vendor/github.com/dop251/goja/vm.go b/vendor/github.com/dop251/goja/vm.go new file mode 100644 index 0000000000..172adc537a --- /dev/null +++ b/vendor/github.com/dop251/goja/vm.go @@ -0,0 +1,3885 @@ +package goja + +import ( + "fmt" + "math" + "runtime" + "strconv" + "sync" + "sync/atomic" + + "github.com/dop251/goja/unistring" +) + +const ( + maxInt = 1 << 53 +) + +type valueStack []Value + +type stash struct { + values []Value + extraArgs []Value + names map[unistring.String]uint32 + obj *Object + + outer *stash + + // true if this stash is a VariableEnvironment, i.e. dynamic var declarations created + // by direct eval go here. 
+ variable bool +} + +type context struct { + prg *Program + funcName unistring.String + stash *stash + newTarget Value + result Value + pc, sb int + args int +} + +type iterStackItem struct { + val Value + f iterNextFunc + iter *Object +} + +type ref interface { + get() Value + set(Value) + refname() unistring.String +} + +type stashRef struct { + n unistring.String + v *[]Value + idx int +} + +func (r *stashRef) get() Value { + return nilSafe((*r.v)[r.idx]) +} + +func (r *stashRef) set(v Value) { + (*r.v)[r.idx] = v +} + +func (r *stashRef) refname() unistring.String { + return r.n +} + +type stashRefLex struct { + stashRef +} + +func (r *stashRefLex) get() Value { + v := (*r.v)[r.idx] + if v == nil { + panic(errAccessBeforeInit) + } + return v +} + +func (r *stashRefLex) set(v Value) { + p := &(*r.v)[r.idx] + if *p == nil { + panic(errAccessBeforeInit) + } + *p = v +} + +type stashRefConst struct { + stashRefLex + strictConst bool +} + +func (r *stashRefConst) set(v Value) { + if r.strictConst { + panic(errAssignToConst) + } +} + +type objRef struct { + base objectImpl + name unistring.String + strict bool +} + +func (r *objRef) get() Value { + return r.base.getStr(r.name, nil) +} + +func (r *objRef) set(v Value) { + r.base.setOwnStr(r.name, v, r.strict) +} + +func (r *objRef) refname() unistring.String { + return r.name +} + +type unresolvedRef struct { + runtime *Runtime + name unistring.String +} + +func (r *unresolvedRef) get() Value { + r.runtime.throwReferenceError(r.name) + panic("Unreachable") +} + +func (r *unresolvedRef) set(Value) { + r.get() +} + +func (r *unresolvedRef) refname() unistring.String { + return r.name +} + +type vm struct { + r *Runtime + prg *Program + funcName unistring.String + pc int + stack valueStack + sp, sb, args int + + stash *stash + callStack []context + iterStack []iterStackItem + refStack []ref + newTarget Value + result Value + + maxCallStackSize int + + stashAllocs int + halt bool + + interrupted uint32 + interruptVal 
interface{} + interruptLock sync.Mutex +} + +type instruction interface { + exec(*vm) +} + +func intToValue(i int64) Value { + if i >= -maxInt && i <= maxInt { + if i >= -128 && i <= 127 { + return intCache[i+128] + } + return valueInt(i) + } + return valueFloat(i) +} + +func floatToInt(f float64) (result int64, ok bool) { + if (f != 0 || !math.Signbit(f)) && !math.IsInf(f, 0) && f == math.Trunc(f) && f >= -maxInt && f <= maxInt { + return int64(f), true + } + return 0, false +} + +func floatToValue(f float64) (result Value) { + if i, ok := floatToInt(f); ok { + return intToValue(i) + } + switch { + case f == 0: + return _negativeZero + case math.IsNaN(f): + return _NaN + case math.IsInf(f, 1): + return _positiveInf + case math.IsInf(f, -1): + return _negativeInf + } + return valueFloat(f) +} + +func assertInt64(v Value) (int64, bool) { + num := v.ToNumber() + if i, ok := num.(valueInt); ok { + return int64(i), true + } + if f, ok := num.(valueFloat); ok { + if i, ok := floatToInt(float64(f)); ok { + return i, true + } + } + return 0, false +} + +func toIntIgnoreNegZero(v Value) (int64, bool) { + num := v.ToNumber() + if i, ok := num.(valueInt); ok { + return int64(i), true + } + if f, ok := num.(valueFloat); ok { + if v == _negativeZero { + return 0, true + } + if i, ok := floatToInt(float64(f)); ok { + return i, true + } + } + return 0, false +} + +func (s *valueStack) expand(idx int) { + if idx < len(*s) { + return + } + idx++ + if idx < cap(*s) { + *s = (*s)[:idx] + } else { + var newCap int + if idx < 1024 { + newCap = idx * 2 + } else { + newCap = (idx + 1025) &^ 1023 + } + n := make([]Value, idx, newCap) + copy(n, *s) + *s = n + } +} + +func stashObjHas(obj *Object, name unistring.String) bool { + if obj.self.hasPropertyStr(name) { + if unscopables, ok := obj.self.getSym(SymUnscopables, nil).(*Object); ok { + if b := unscopables.self.getStr(name, nil); b != nil { + return !b.ToBoolean() + } + } + return true + } + return false +} + +func (s *stash) 
initByIdx(idx uint32, v Value) { + if s.obj != nil { + panic("Attempt to init by idx into an object scope") + } + s.values[idx] = v +} + +func (s *stash) initByName(name unistring.String, v Value) { + if idx, exists := s.names[name]; exists { + s.values[idx&^maskTyp] = v + } else { + panic(referenceError(fmt.Sprintf("%s is not defined", name))) + } +} + +func (s *stash) getByIdx(idx uint32) Value { + return s.values[idx] +} + +func (s *stash) getByName(name unistring.String) (v Value, exists bool) { + if s.obj != nil { + if stashObjHas(s.obj, name) { + return nilSafe(s.obj.self.getStr(name, nil)), true + } + return nil, false + } + if idx, exists := s.names[name]; exists { + v := s.values[idx&^maskTyp] + if v == nil { + if idx&maskVar == 0 { + panic(errAccessBeforeInit) + } else { + v = _undefined + } + } + return v, true + } + return nil, false +} + +func (s *stash) getRefByName(name unistring.String, strict bool) ref { + if obj := s.obj; obj != nil { + if stashObjHas(obj, name) { + return &objRef{ + base: obj.self, + name: name, + strict: strict, + } + } + } else { + if idx, exists := s.names[name]; exists { + if idx&maskVar == 0 { + if idx&maskConst == 0 { + return &stashRefLex{ + stashRef: stashRef{ + n: name, + v: &s.values, + idx: int(idx &^ maskTyp), + }, + } + } else { + return &stashRefConst{ + stashRefLex: stashRefLex{ + stashRef: stashRef{ + n: name, + v: &s.values, + idx: int(idx &^ maskTyp), + }, + }, + strictConst: strict || (idx&maskStrict != 0), + } + } + } else { + return &stashRef{ + n: name, + v: &s.values, + idx: int(idx &^ maskTyp), + } + } + } + } + return nil +} + +func (s *stash) createBinding(name unistring.String, deletable bool) { + if s.names == nil { + s.names = make(map[unistring.String]uint32) + } + if _, exists := s.names[name]; !exists { + idx := uint32(len(s.names)) | maskVar + if deletable { + idx |= maskDeletable + } + s.names[name] = idx + s.values = append(s.values, _undefined) + } +} + +func (s *stash) createLexBinding(name 
unistring.String, isConst bool) { + if s.names == nil { + s.names = make(map[unistring.String]uint32) + } + if _, exists := s.names[name]; !exists { + idx := uint32(len(s.names)) + if isConst { + idx |= maskConst | maskStrict + } + s.names[name] = idx + s.values = append(s.values, nil) + } +} + +func (s *stash) deleteBinding(name unistring.String) { + delete(s.names, name) +} + +func (vm *vm) newStash() { + vm.stash = &stash{ + outer: vm.stash, + } + vm.stashAllocs++ +} + +func (vm *vm) init() { + vm.sb = -1 + vm.stash = &vm.r.global.stash + vm.maxCallStackSize = math.MaxInt32 +} + +func (vm *vm) run() { + vm.halt = false + interrupted := false + ticks := 0 + for !vm.halt { + if interrupted = atomic.LoadUint32(&vm.interrupted) != 0; interrupted { + break + } + vm.prg.code[vm.pc].exec(vm) + ticks++ + if ticks > 10000 { + runtime.Gosched() + ticks = 0 + } + } + + if interrupted { + vm.interruptLock.Lock() + v := &InterruptedError{ + iface: vm.interruptVal, + } + atomic.StoreUint32(&vm.interrupted, 0) + vm.interruptVal = nil + vm.interruptLock.Unlock() + panic(&uncatchableException{ + stack: &v.stack, + err: v, + }) + } +} + +func (vm *vm) Interrupt(v interface{}) { + vm.interruptLock.Lock() + vm.interruptVal = v + atomic.StoreUint32(&vm.interrupted, 1) + vm.interruptLock.Unlock() +} + +func (vm *vm) ClearInterrupt() { + atomic.StoreUint32(&vm.interrupted, 0) +} + +func (vm *vm) captureStack(stack []StackFrame, ctxOffset int) []StackFrame { + // Unroll the context stack + if vm.pc != -1 { + stack = append(stack, StackFrame{prg: vm.prg, pc: vm.pc, funcName: vm.funcName}) + } + for i := len(vm.callStack) - 1; i > ctxOffset-1; i-- { + if vm.callStack[i].pc != -1 { + stack = append(stack, StackFrame{prg: vm.callStack[i].prg, pc: vm.callStack[i].pc - 1, funcName: vm.callStack[i].funcName}) + } + } + return stack +} + +func (vm *vm) try(f func()) (ex *Exception) { + var ctx context + vm.saveCtx(&ctx) + + ctxOffset := len(vm.callStack) + sp := vm.sp + iterLen := 
len(vm.iterStack) + refLen := len(vm.refStack) + + defer func() { + if x := recover(); x != nil { + defer func() { + vm.callStack = vm.callStack[:ctxOffset] + vm.restoreCtx(&ctx) + vm.sp = sp + + // Restore other stacks + iterTail := vm.iterStack[iterLen:] + for i := range iterTail { + if iter := iterTail[i].iter; iter != nil { + vm.try(func() { + returnIter(iter) + }) + } + iterTail[i] = iterStackItem{} + } + vm.iterStack = vm.iterStack[:iterLen] + refTail := vm.refStack[refLen:] + for i := range refTail { + refTail[i] = nil + } + vm.refStack = vm.refStack[:refLen] + }() + switch x1 := x.(type) { + case Value: + ex = &Exception{ + val: x1, + } + case *Exception: + ex = x1 + case *uncatchableException: + *x1.stack = vm.captureStack(*x1.stack, ctxOffset) + panic(x1) + case typeError: + ex = &Exception{ + val: vm.r.NewTypeError(string(x1)), + } + case referenceError: + ex = &Exception{ + val: vm.r.newError(vm.r.global.ReferenceError, string(x1)), + } + case rangeError: + ex = &Exception{ + val: vm.r.newError(vm.r.global.RangeError, string(x1)), + } + default: + /* + if vm.prg != nil { + vm.prg.dumpCode(log.Printf) + } + log.Print("Stack: ", string(debug.Stack())) + panic(fmt.Errorf("Panic at %d: %v", vm.pc, x)) + */ + panic(x) + } + ex.stack = vm.captureStack(ex.stack, ctxOffset) + } + }() + + f() + return +} + +func (vm *vm) runTry() (ex *Exception) { + return vm.try(vm.run) +} + +func (vm *vm) push(v Value) { + vm.stack.expand(vm.sp) + vm.stack[vm.sp] = v + vm.sp++ +} + +func (vm *vm) pop() Value { + vm.sp-- + return vm.stack[vm.sp] +} + +func (vm *vm) peek() Value { + return vm.stack[vm.sp-1] +} + +func (vm *vm) saveCtx(ctx *context) { + ctx.prg, ctx.stash, ctx.newTarget, ctx.result, ctx.pc, ctx.sb, ctx.args = + vm.prg, vm.stash, vm.newTarget, vm.result, vm.pc, vm.sb, vm.args + if vm.funcName != "" { + ctx.funcName = vm.funcName + } else if ctx.prg != nil && ctx.prg.funcName != "" { + ctx.funcName = ctx.prg.funcName + } +} + +func (vm *vm) pushCtx() { + if 
len(vm.callStack) > vm.maxCallStackSize { + ex := &StackOverflowError{} + panic(&uncatchableException{ + stack: &ex.stack, + err: ex, + }) + } + vm.callStack = append(vm.callStack, context{}) + ctx := &vm.callStack[len(vm.callStack)-1] + vm.saveCtx(ctx) +} + +func (vm *vm) restoreCtx(ctx *context) { + vm.prg, vm.funcName, vm.stash, vm.newTarget, vm.result, vm.pc, vm.sb, vm.args = + ctx.prg, ctx.funcName, ctx.stash, ctx.newTarget, ctx.result, ctx.pc, ctx.sb, ctx.args +} + +func (vm *vm) popCtx() { + l := len(vm.callStack) - 1 + ctx := &vm.callStack[l] + vm.restoreCtx(ctx) + + ctx.prg = nil + ctx.stash = nil + ctx.result = nil + ctx.newTarget = nil + + vm.callStack = vm.callStack[:l] +} + +func (vm *vm) toCallee(v Value) *Object { + if obj, ok := v.(*Object); ok { + return obj + } + switch unresolved := v.(type) { + case valueUnresolved: + unresolved.throw() + panic("Unreachable") + case memberUnresolved: + panic(vm.r.NewTypeError("Object has no member '%s'", unresolved.ref)) + } + panic(vm.r.NewTypeError("Value is not an object: %s", v.toString())) +} + +type loadVal uint32 + +func (l loadVal) exec(vm *vm) { + vm.push(vm.prg.values[l]) + vm.pc++ +} + +type _loadUndef struct{} + +var loadUndef _loadUndef + +func (_loadUndef) exec(vm *vm) { + vm.push(_undefined) + vm.pc++ +} + +type _loadNil struct{} + +var loadNil _loadNil + +func (_loadNil) exec(vm *vm) { + vm.push(nil) + vm.pc++ +} + +type _saveResult struct{} + +var saveResult _saveResult + +func (_saveResult) exec(vm *vm) { + vm.sp-- + vm.result = vm.stack[vm.sp] + vm.pc++ +} + +type _clearResult struct{} + +var clearResult _clearResult + +func (_clearResult) exec(vm *vm) { + vm.result = _undefined + vm.pc++ +} + +type _loadGlobalObject struct{} + +var loadGlobalObject _loadGlobalObject + +func (_loadGlobalObject) exec(vm *vm) { + vm.push(vm.r.globalObject) + vm.pc++ +} + +type loadStack int + +func (l loadStack) exec(vm *vm) { + // l > 0 -- var + // l == 0 -- this + + if l > 0 { + 
vm.push(nilSafe(vm.stack[vm.sb+vm.args+int(l)])) + } else { + vm.push(vm.stack[vm.sb]) + } + vm.pc++ +} + +type loadStack1 int + +func (l loadStack1) exec(vm *vm) { + // args are in stash + // l > 0 -- var + // l == 0 -- this + + if l > 0 { + vm.push(nilSafe(vm.stack[vm.sb+int(l)])) + } else { + vm.push(vm.stack[vm.sb]) + } + vm.pc++ +} + +type loadStackLex int + +func (l loadStackLex) exec(vm *vm) { + // l < 0 -- arg<-l-1> + // l > 0 -- var + var p *Value + if l < 0 { + arg := int(-l) + if arg > vm.args { + vm.push(_undefined) + vm.pc++ + return + } else { + p = &vm.stack[vm.sb+arg] + } + } else { + p = &vm.stack[vm.sb+vm.args+int(l)] + } + if *p == nil { + panic(errAccessBeforeInit) + } + vm.push(*p) + vm.pc++ +} + +type loadStack1Lex int + +func (l loadStack1Lex) exec(vm *vm) { + p := &vm.stack[vm.sb+int(l)] + if *p == nil { + panic(errAccessBeforeInit) + } + vm.push(*p) + vm.pc++ +} + +type _loadCallee struct{} + +var loadCallee _loadCallee + +func (_loadCallee) exec(vm *vm) { + vm.push(vm.stack[vm.sb-1]) + vm.pc++ +} + +func (vm *vm) storeStack(s int) { + // l > 0 -- var + + if s > 0 { + vm.stack[vm.sb+vm.args+s] = vm.stack[vm.sp-1] + } else { + panic("Illegal stack var index") + } + vm.pc++ +} + +func (vm *vm) storeStack1(s int) { + // args are in stash + // l > 0 -- var + + if s > 0 { + vm.stack[vm.sb+s] = vm.stack[vm.sp-1] + } else { + panic("Illegal stack var index") + } + vm.pc++ +} + +func (vm *vm) storeStackLex(s int) { + // l < 0 -- arg<-l-1> + // l > 0 -- var + var p *Value + if s < 0 { + p = &vm.stack[vm.sb-s] + } else { + p = &vm.stack[vm.sb+vm.args+s] + } + + if *p != nil { + *p = vm.stack[vm.sp-1] + } else { + panic(errAccessBeforeInit) + } + vm.pc++ +} + +func (vm *vm) storeStack1Lex(s int) { + // args are in stash + // s > 0 -- var + if s <= 0 { + panic("Illegal stack var index") + } + p := &vm.stack[vm.sb+s] + if *p != nil { + *p = vm.stack[vm.sp-1] + } else { + panic(errAccessBeforeInit) + } + vm.pc++ +} + +func (vm *vm) initStack(s int) { + 
if s <= 0 { + vm.stack[vm.sb-s] = vm.stack[vm.sp-1] + } else { + vm.stack[vm.sb+vm.args+s] = vm.stack[vm.sp-1] + } + vm.pc++ +} + +func (vm *vm) initStack1(s int) { + if s <= 0 { + panic("Illegal stack var index") + } + vm.stack[vm.sb+s] = vm.stack[vm.sp-1] + vm.pc++ +} + +type storeStack int + +func (s storeStack) exec(vm *vm) { + vm.storeStack(int(s)) +} + +type storeStack1 int + +func (s storeStack1) exec(vm *vm) { + vm.storeStack1(int(s)) +} + +type storeStackLex int + +func (s storeStackLex) exec(vm *vm) { + vm.storeStackLex(int(s)) +} + +type storeStack1Lex int + +func (s storeStack1Lex) exec(vm *vm) { + vm.storeStack1Lex(int(s)) +} + +type initStack int + +func (s initStack) exec(vm *vm) { + vm.initStack(int(s)) + vm.sp-- +} + +type initStack1 int + +func (s initStack1) exec(vm *vm) { + vm.initStack1(int(s)) + vm.sp-- +} + +type storeStackP int + +func (s storeStackP) exec(vm *vm) { + vm.storeStack(int(s)) + vm.sp-- +} + +type storeStack1P int + +func (s storeStack1P) exec(vm *vm) { + vm.storeStack1(int(s)) + vm.sp-- +} + +type storeStackLexP int + +func (s storeStackLexP) exec(vm *vm) { + vm.storeStackLex(int(s)) + vm.sp-- +} + +type storeStack1LexP int + +func (s storeStack1LexP) exec(vm *vm) { + vm.storeStack1Lex(int(s)) + vm.sp-- +} + +type _toNumber struct{} + +var toNumber _toNumber + +func (_toNumber) exec(vm *vm) { + vm.stack[vm.sp-1] = vm.stack[vm.sp-1].ToNumber() + vm.pc++ +} + +type _add struct{} + +var add _add + +func (_add) exec(vm *vm) { + right := vm.stack[vm.sp-1] + left := vm.stack[vm.sp-2] + + if o, ok := left.(*Object); ok { + left = o.toPrimitive() + } + + if o, ok := right.(*Object); ok { + right = o.toPrimitive() + } + + var ret Value + + leftString, isLeftString := left.(valueString) + rightString, isRightString := right.(valueString) + + if isLeftString || isRightString { + if !isLeftString { + leftString = left.toString() + } + if !isRightString { + rightString = right.toString() + } + ret = leftString.concat(rightString) + } else { 
+ if leftInt, ok := left.(valueInt); ok { + if rightInt, ok := right.(valueInt); ok { + ret = intToValue(int64(leftInt) + int64(rightInt)) + } else { + ret = floatToValue(float64(leftInt) + right.ToFloat()) + } + } else { + ret = floatToValue(left.ToFloat() + right.ToFloat()) + } + } + + vm.stack[vm.sp-2] = ret + vm.sp-- + vm.pc++ +} + +type _sub struct{} + +var sub _sub + +func (_sub) exec(vm *vm) { + right := vm.stack[vm.sp-1] + left := vm.stack[vm.sp-2] + + var result Value + + if left, ok := left.(valueInt); ok { + if right, ok := right.(valueInt); ok { + result = intToValue(int64(left) - int64(right)) + goto end + } + } + + result = floatToValue(left.ToFloat() - right.ToFloat()) +end: + vm.sp-- + vm.stack[vm.sp-1] = result + vm.pc++ +} + +type _mul struct{} + +var mul _mul + +func (_mul) exec(vm *vm) { + left := vm.stack[vm.sp-2] + right := vm.stack[vm.sp-1] + + var result Value + + if left, ok := assertInt64(left); ok { + if right, ok := assertInt64(right); ok { + if left == 0 && right == -1 || left == -1 && right == 0 { + result = _negativeZero + goto end + } + res := left * right + // check for overflow + if left == 0 || right == 0 || res/left == right { + result = intToValue(res) + goto end + } + + } + } + + result = floatToValue(left.ToFloat() * right.ToFloat()) + +end: + vm.sp-- + vm.stack[vm.sp-1] = result + vm.pc++ +} + +type _div struct{} + +var div _div + +func (_div) exec(vm *vm) { + left := vm.stack[vm.sp-2].ToFloat() + right := vm.stack[vm.sp-1].ToFloat() + + var result Value + + if math.IsNaN(left) || math.IsNaN(right) { + result = _NaN + goto end + } + if math.IsInf(left, 0) && math.IsInf(right, 0) { + result = _NaN + goto end + } + if left == 0 && right == 0 { + result = _NaN + goto end + } + + if math.IsInf(left, 0) { + if math.Signbit(left) == math.Signbit(right) { + result = _positiveInf + goto end + } else { + result = _negativeInf + goto end + } + } + if math.IsInf(right, 0) { + if math.Signbit(left) == math.Signbit(right) { + result = 
_positiveZero + goto end + } else { + result = _negativeZero + goto end + } + } + if right == 0 { + if math.Signbit(left) == math.Signbit(right) { + result = _positiveInf + goto end + } else { + result = _negativeInf + goto end + } + } + + result = floatToValue(left / right) + +end: + vm.sp-- + vm.stack[vm.sp-1] = result + vm.pc++ +} + +type _mod struct{} + +var mod _mod + +func (_mod) exec(vm *vm) { + left := vm.stack[vm.sp-2] + right := vm.stack[vm.sp-1] + + var result Value + + if leftInt, ok := assertInt64(left); ok { + if rightInt, ok := assertInt64(right); ok { + if rightInt == 0 { + result = _NaN + goto end + } + r := leftInt % rightInt + if r == 0 && leftInt < 0 { + result = _negativeZero + } else { + result = intToValue(leftInt % rightInt) + } + goto end + } + } + + result = floatToValue(math.Mod(left.ToFloat(), right.ToFloat())) +end: + vm.sp-- + vm.stack[vm.sp-1] = result + vm.pc++ +} + +type _neg struct{} + +var neg _neg + +func (_neg) exec(vm *vm) { + operand := vm.stack[vm.sp-1] + + var result Value + + if i, ok := assertInt64(operand); ok { + if i == 0 { + result = _negativeZero + } else { + result = valueInt(-i) + } + } else { + f := operand.ToFloat() + if !math.IsNaN(f) { + f = -f + } + result = valueFloat(f) + } + + vm.stack[vm.sp-1] = result + vm.pc++ +} + +type _plus struct{} + +var plus _plus + +func (_plus) exec(vm *vm) { + vm.stack[vm.sp-1] = vm.stack[vm.sp-1].ToNumber() + vm.pc++ +} + +type _inc struct{} + +var inc _inc + +func (_inc) exec(vm *vm) { + v := vm.stack[vm.sp-1] + + if i, ok := assertInt64(v); ok { + v = intToValue(i + 1) + goto end + } + + v = valueFloat(v.ToFloat() + 1) + +end: + vm.stack[vm.sp-1] = v + vm.pc++ +} + +type _dec struct{} + +var dec _dec + +func (_dec) exec(vm *vm) { + v := vm.stack[vm.sp-1] + + if i, ok := assertInt64(v); ok { + v = intToValue(i - 1) + goto end + } + + v = valueFloat(v.ToFloat() - 1) + +end: + vm.stack[vm.sp-1] = v + vm.pc++ +} + +type _and struct{} + +var and _and + +func (_and) exec(vm *vm) { + 
left := toInt32(vm.stack[vm.sp-2]) + right := toInt32(vm.stack[vm.sp-1]) + vm.stack[vm.sp-2] = intToValue(int64(left & right)) + vm.sp-- + vm.pc++ +} + +type _or struct{} + +var or _or + +func (_or) exec(vm *vm) { + left := toInt32(vm.stack[vm.sp-2]) + right := toInt32(vm.stack[vm.sp-1]) + vm.stack[vm.sp-2] = intToValue(int64(left | right)) + vm.sp-- + vm.pc++ +} + +type _xor struct{} + +var xor _xor + +func (_xor) exec(vm *vm) { + left := toInt32(vm.stack[vm.sp-2]) + right := toInt32(vm.stack[vm.sp-1]) + vm.stack[vm.sp-2] = intToValue(int64(left ^ right)) + vm.sp-- + vm.pc++ +} + +type _bnot struct{} + +var bnot _bnot + +func (_bnot) exec(vm *vm) { + op := toInt32(vm.stack[vm.sp-1]) + vm.stack[vm.sp-1] = intToValue(int64(^op)) + vm.pc++ +} + +type _sal struct{} + +var sal _sal + +func (_sal) exec(vm *vm) { + left := toInt32(vm.stack[vm.sp-2]) + right := toUint32(vm.stack[vm.sp-1]) + vm.stack[vm.sp-2] = intToValue(int64(left << (right & 0x1F))) + vm.sp-- + vm.pc++ +} + +type _sar struct{} + +var sar _sar + +func (_sar) exec(vm *vm) { + left := toInt32(vm.stack[vm.sp-2]) + right := toUint32(vm.stack[vm.sp-1]) + vm.stack[vm.sp-2] = intToValue(int64(left >> (right & 0x1F))) + vm.sp-- + vm.pc++ +} + +type _shr struct{} + +var shr _shr + +func (_shr) exec(vm *vm) { + left := toUint32(vm.stack[vm.sp-2]) + right := toUint32(vm.stack[vm.sp-1]) + vm.stack[vm.sp-2] = intToValue(int64(left >> (right & 0x1F))) + vm.sp-- + vm.pc++ +} + +type _halt struct{} + +var halt _halt + +func (_halt) exec(vm *vm) { + vm.halt = true + vm.pc++ +} + +type jump int32 + +func (j jump) exec(vm *vm) { + vm.pc += int(j) +} + +type _toPropertyKey struct{} + +func (_toPropertyKey) exec(vm *vm) { + p := vm.sp - 1 + vm.stack[p] = toPropertyKey(vm.stack[p]) + vm.pc++ +} + +type _getElemRef struct{} + +var getElemRef _getElemRef + +func (_getElemRef) exec(vm *vm) { + obj := vm.stack[vm.sp-2].ToObject(vm.r) + propName := toPropertyKey(vm.stack[vm.sp-1]) + vm.refStack = append(vm.refStack, &objRef{ + 
base: obj.self, + name: propName.string(), + }) + vm.sp -= 2 + vm.pc++ +} + +type _getElemRefStrict struct{} + +var getElemRefStrict _getElemRefStrict + +func (_getElemRefStrict) exec(vm *vm) { + obj := vm.stack[vm.sp-2].ToObject(vm.r) + propName := toPropertyKey(vm.stack[vm.sp-1]) + vm.refStack = append(vm.refStack, &objRef{ + base: obj.self, + name: propName.string(), + strict: true, + }) + vm.sp -= 2 + vm.pc++ +} + +type _setElem struct{} + +var setElem _setElem + +func (_setElem) exec(vm *vm) { + obj := vm.stack[vm.sp-3].ToObject(vm.r) + propName := toPropertyKey(vm.stack[vm.sp-2]) + val := vm.stack[vm.sp-1] + + obj.setOwn(propName, val, false) + + vm.sp -= 2 + vm.stack[vm.sp-1] = val + vm.pc++ +} + +type _setElem1 struct{} + +var setElem1 _setElem1 + +func (_setElem1) exec(vm *vm) { + obj := vm.stack[vm.sp-3].ToObject(vm.r) + propName := vm.stack[vm.sp-2] + val := vm.stack[vm.sp-1] + + obj.setOwn(propName, val, true) + + vm.sp -= 2 + vm.pc++ +} + +type _setElem1Named struct{} + +var setElem1Named _setElem1Named + +func (_setElem1Named) exec(vm *vm) { + obj := vm.stack[vm.sp-3].ToObject(vm.r) + propName := vm.stack[vm.sp-2] + val := vm.stack[vm.sp-1] + vm.r.toObject(val).self.defineOwnPropertyStr("name", PropertyDescriptor{ + Value: propName, + Configurable: FLAG_TRUE, + }, true) + obj.setOwn(propName, val, true) + + vm.sp -= 2 + vm.pc++ +} + +type _setElemP struct{} + +var setElemP _setElemP + +func (_setElemP) exec(vm *vm) { + obj := vm.stack[vm.sp-3].ToObject(vm.r) + propName := toPropertyKey(vm.stack[vm.sp-2]) + val := vm.stack[vm.sp-1] + + obj.setOwn(propName, val, false) + + vm.sp -= 3 + vm.pc++ +} + +type _setElemStrict struct{} + +var setElemStrict _setElemStrict + +func (_setElemStrict) exec(vm *vm) { + obj := vm.r.toObject(vm.stack[vm.sp-3]) + propName := toPropertyKey(vm.stack[vm.sp-2]) + val := vm.stack[vm.sp-1] + + obj.setOwn(propName, val, true) + + vm.sp -= 2 + vm.stack[vm.sp-1] = val + vm.pc++ +} + +type _setElemStrictP struct{} + +var 
setElemStrictP _setElemStrictP + +func (_setElemStrictP) exec(vm *vm) { + obj := vm.r.toObject(vm.stack[vm.sp-3]) + propName := toPropertyKey(vm.stack[vm.sp-2]) + val := vm.stack[vm.sp-1] + + obj.setOwn(propName, val, true) + + vm.sp -= 3 + vm.pc++ +} + +type _deleteElem struct{} + +var deleteElem _deleteElem + +func (_deleteElem) exec(vm *vm) { + obj := vm.r.toObject(vm.stack[vm.sp-2]) + propName := toPropertyKey(vm.stack[vm.sp-1]) + if obj.delete(propName, false) { + vm.stack[vm.sp-2] = valueTrue + } else { + vm.stack[vm.sp-2] = valueFalse + } + vm.sp-- + vm.pc++ +} + +type _deleteElemStrict struct{} + +var deleteElemStrict _deleteElemStrict + +func (_deleteElemStrict) exec(vm *vm) { + obj := vm.r.toObject(vm.stack[vm.sp-2]) + propName := toPropertyKey(vm.stack[vm.sp-1]) + obj.delete(propName, true) + vm.stack[vm.sp-2] = valueTrue + vm.sp-- + vm.pc++ +} + +type deleteProp unistring.String + +func (d deleteProp) exec(vm *vm) { + obj := vm.r.toObject(vm.stack[vm.sp-1]) + if obj.self.deleteStr(unistring.String(d), false) { + vm.stack[vm.sp-1] = valueTrue + } else { + vm.stack[vm.sp-1] = valueFalse + } + vm.pc++ +} + +type deletePropStrict unistring.String + +func (d deletePropStrict) exec(vm *vm) { + obj := vm.r.toObject(vm.stack[vm.sp-1]) + obj.self.deleteStr(unistring.String(d), true) + vm.stack[vm.sp-1] = valueTrue + vm.pc++ +} + +type getPropRef unistring.String + +func (p getPropRef) exec(vm *vm) { + vm.refStack = append(vm.refStack, &objRef{ + base: vm.stack[vm.sp-1].ToObject(vm.r).self, + name: unistring.String(p), + }) + vm.sp-- + vm.pc++ +} + +type getPropRefStrict unistring.String + +func (p getPropRefStrict) exec(vm *vm) { + vm.refStack = append(vm.refStack, &objRef{ + base: vm.stack[vm.sp-1].ToObject(vm.r).self, + name: unistring.String(p), + strict: true, + }) + vm.sp-- + vm.pc++ +} + +type setProp unistring.String + +func (p setProp) exec(vm *vm) { + val := vm.stack[vm.sp-1] + vm.stack[vm.sp-2].ToObject(vm.r).self.setOwnStr(unistring.String(p), val, 
false) + vm.stack[vm.sp-2] = val + vm.sp-- + vm.pc++ +} + +type setPropP unistring.String + +func (p setPropP) exec(vm *vm) { + val := vm.stack[vm.sp-1] + vm.stack[vm.sp-2].ToObject(vm.r).self.setOwnStr(unistring.String(p), val, false) + vm.sp -= 2 + vm.pc++ +} + +type setPropStrict unistring.String + +func (p setPropStrict) exec(vm *vm) { + obj := vm.stack[vm.sp-2] + val := vm.stack[vm.sp-1] + + obj1 := vm.r.toObject(obj) + obj1.self.setOwnStr(unistring.String(p), val, true) + vm.stack[vm.sp-2] = val + vm.sp-- + vm.pc++ +} + +type setPropStrictP unistring.String + +func (p setPropStrictP) exec(vm *vm) { + obj := vm.r.toObject(vm.stack[vm.sp-2]) + val := vm.stack[vm.sp-1] + + obj.self.setOwnStr(unistring.String(p), val, true) + vm.sp -= 2 + vm.pc++ +} + +type setProp1 unistring.String + +func (p setProp1) exec(vm *vm) { + vm.r.toObject(vm.stack[vm.sp-2]).self._putProp(unistring.String(p), vm.stack[vm.sp-1], true, true, true) + + vm.sp-- + vm.pc++ +} + +type _setProto struct{} + +var setProto _setProto + +func (_setProto) exec(vm *vm) { + vm.r.toObject(vm.stack[vm.sp-2]).self.setProto(vm.r.toProto(vm.stack[vm.sp-1]), true) + + vm.sp-- + vm.pc++ +} + +type setPropGetter unistring.String + +func (s setPropGetter) exec(vm *vm) { + obj := vm.r.toObject(vm.stack[vm.sp-2]) + val := vm.stack[vm.sp-1] + vm.r.toObject(val).self.defineOwnPropertyStr("name", PropertyDescriptor{ + Value: asciiString("get ").concat(stringValueFromRaw(val.string())), + Configurable: FLAG_TRUE, + }, true) + + descr := PropertyDescriptor{ + Getter: val, + Configurable: FLAG_TRUE, + Enumerable: FLAG_TRUE, + } + + obj.self.defineOwnPropertyStr(unistring.String(s), descr, false) + + vm.sp-- + vm.pc++ +} + +type setPropSetter unistring.String + +func (s setPropSetter) exec(vm *vm) { + obj := vm.r.toObject(vm.stack[vm.sp-2]) + val := vm.stack[vm.sp-1] + + vm.r.toObject(val).self.defineOwnPropertyStr("name", PropertyDescriptor{ + Value: asciiString("set ").concat(stringValueFromRaw(val.string())), + 
Configurable: FLAG_TRUE, + }, true) + + descr := PropertyDescriptor{ + Setter: val, + Configurable: FLAG_TRUE, + Enumerable: FLAG_TRUE, + } + + obj.self.defineOwnPropertyStr(unistring.String(s), descr, false) + + vm.sp-- + vm.pc++ +} + +type _setPropGetter1 struct{} + +var setPropGetter1 _setPropGetter1 + +func (s _setPropGetter1) exec(vm *vm) { + obj := vm.r.toObject(vm.stack[vm.sp-3]) + propName := vm.stack[vm.sp-2] + val := vm.stack[vm.sp-1] + vm.r.toObject(val).self.defineOwnPropertyStr("name", PropertyDescriptor{ + Value: asciiString("get ").concat(stringValueFromRaw(val.string())), + Configurable: FLAG_TRUE, + }, true) + + descr := PropertyDescriptor{ + Getter: val, + Configurable: FLAG_TRUE, + Enumerable: FLAG_TRUE, + } + + obj.defineOwnProperty(propName, descr, false) + + vm.sp -= 2 + vm.pc++ +} + +type _setPropSetter1 struct{} + +var setPropSetter1 _setPropSetter1 + +func (s _setPropSetter1) exec(vm *vm) { + obj := vm.r.toObject(vm.stack[vm.sp-3]) + propName := vm.stack[vm.sp-2] + val := vm.stack[vm.sp-1] + + vm.r.toObject(val).self.defineOwnPropertyStr("name", PropertyDescriptor{ + Value: asciiString("set ").concat(stringValueFromRaw(val.string())), + Configurable: FLAG_TRUE, + }, true) + + descr := PropertyDescriptor{ + Setter: val, + Configurable: FLAG_TRUE, + Enumerable: FLAG_TRUE, + } + + obj.defineOwnProperty(propName, descr, false) + + vm.sp -= 2 + vm.pc++ +} + +type getProp unistring.String + +func (g getProp) exec(vm *vm) { + v := vm.stack[vm.sp-1] + obj := v.baseObject(vm.r) + if obj == nil { + panic(vm.r.NewTypeError("Cannot read property '%s' of undefined", g)) + } + vm.stack[vm.sp-1] = nilSafe(obj.self.getStr(unistring.String(g), v)) + + vm.pc++ +} + +type getPropCallee unistring.String + +func (g getPropCallee) exec(vm *vm) { + v := vm.stack[vm.sp-1] + obj := v.baseObject(vm.r) + n := unistring.String(g) + if obj == nil { + panic(vm.r.NewTypeError("Cannot read property '%s' of undefined or null", n)) + } + prop := obj.self.getStr(n, v) + if 
prop == nil { + prop = memberUnresolved{valueUnresolved{r: vm.r, ref: n}} + } + vm.stack[vm.sp-1] = prop + + vm.pc++ +} + +type _getElem struct{} + +var getElem _getElem + +func (_getElem) exec(vm *vm) { + v := vm.stack[vm.sp-2] + obj := v.baseObject(vm.r) + propName := toPropertyKey(vm.stack[vm.sp-1]) + if obj == nil { + panic(vm.r.NewTypeError("Cannot read property '%s' of undefined", propName.String())) + } + + vm.stack[vm.sp-2] = nilSafe(obj.get(propName, v)) + + vm.sp-- + vm.pc++ +} + +type _getKey struct{} + +var getKey _getKey + +func (_getKey) exec(vm *vm) { + v := vm.stack[vm.sp-2] + obj := v.baseObject(vm.r) + propName := vm.stack[vm.sp-1] + if obj == nil { + panic(vm.r.NewTypeError("Cannot read property '%s' of undefined", propName.String())) + } + + vm.stack[vm.sp-2] = nilSafe(obj.get(propName, v)) + + vm.sp-- + vm.pc++ +} + +type _getElemCallee struct{} + +var getElemCallee _getElemCallee + +func (_getElemCallee) exec(vm *vm) { + v := vm.stack[vm.sp-2] + obj := v.baseObject(vm.r) + propName := toPropertyKey(vm.stack[vm.sp-1]) + if obj == nil { + panic(vm.r.NewTypeError("Cannot read property '%s' of undefined", propName.String())) + } + + prop := obj.get(propName, v) + if prop == nil { + prop = memberUnresolved{valueUnresolved{r: vm.r, ref: propName.string()}} + } + vm.stack[vm.sp-2] = prop + + vm.sp-- + vm.pc++ +} + +type _dup struct{} + +var dup _dup + +func (_dup) exec(vm *vm) { + vm.push(vm.stack[vm.sp-1]) + vm.pc++ +} + +type dupN uint32 + +func (d dupN) exec(vm *vm) { + vm.push(vm.stack[vm.sp-1-int(d)]) + vm.pc++ +} + +type rdupN uint32 + +func (d rdupN) exec(vm *vm) { + vm.stack[vm.sp-1-int(d)] = vm.stack[vm.sp-1] + vm.pc++ +} + +type _newObject struct{} + +var newObject _newObject + +func (_newObject) exec(vm *vm) { + vm.push(vm.r.NewObject()) + vm.pc++ +} + +type newArray uint32 + +func (l newArray) exec(vm *vm) { + values := make([]Value, 0, l) + vm.push(vm.r.newArrayValues(values)) + vm.pc++ +} + +type _pushArrayItem struct{} + +var 
pushArrayItem _pushArrayItem + +func (_pushArrayItem) exec(vm *vm) { + arr := vm.stack[vm.sp-2].(*Object).self.(*arrayObject) + if arr.length < math.MaxUint32 { + arr.length++ + } else { + panic(vm.r.newError(vm.r.global.RangeError, "Invalid array length")) + } + val := vm.stack[vm.sp-1] + arr.values = append(arr.values, val) + if val != nil { + arr.objCount++ + } + vm.sp-- + vm.pc++ +} + +type _pushArraySpread struct{} + +var pushArraySpread _pushArraySpread + +func (_pushArraySpread) exec(vm *vm) { + arr := vm.stack[vm.sp-2].(*Object).self.(*arrayObject) + vm.r.iterate(vm.r.getIterator(vm.stack[vm.sp-1], nil), func(val Value) { + if arr.length < math.MaxUint32 { + arr.length++ + } else { + panic(vm.r.newError(vm.r.global.RangeError, "Invalid array length")) + } + arr.values = append(arr.values, val) + arr.objCount++ + }) + vm.sp-- + vm.pc++ +} + +type _pushSpread struct{} + +var pushSpread _pushSpread + +func (_pushSpread) exec(vm *vm) { + vm.sp-- + obj := vm.stack[vm.sp] + vm.r.iterate(vm.r.getIterator(obj, nil), func(val Value) { + vm.push(val) + }) + vm.pc++ +} + +type _newArrayFromIter struct{} + +var newArrayFromIter _newArrayFromIter + +func (_newArrayFromIter) exec(vm *vm) { + var values []Value + l := len(vm.iterStack) - 1 + iter := vm.iterStack[l].iter + vm.iterStack[l] = iterStackItem{} + vm.iterStack = vm.iterStack[:l] + vm.r.iterate(iter, func(val Value) { + values = append(values, val) + }) + vm.push(vm.r.newArrayValues(values)) + vm.pc++ +} + +type newRegexp struct { + pattern *regexpPattern + src valueString +} + +func (n *newRegexp) exec(vm *vm) { + vm.push(vm.r.newRegExpp(n.pattern.clone(), n.src, vm.r.global.RegExpPrototype).val) + vm.pc++ +} + +func (vm *vm) setLocalLex(s int) { + v := vm.stack[vm.sp-1] + level := s >> 24 + idx := uint32(s & 0x00FFFFFF) + stash := vm.stash + for i := 0; i < level; i++ { + stash = stash.outer + } + p := &stash.values[idx] + if *p == nil { + panic(errAccessBeforeInit) + } + *p = v + vm.pc++ +} + +func (vm *vm) 
initLocal(s int) { + v := vm.stack[vm.sp-1] + level := s >> 24 + idx := uint32(s & 0x00FFFFFF) + stash := vm.stash + for i := 0; i < level; i++ { + stash = stash.outer + } + stash.initByIdx(idx, v) + vm.pc++ +} + +type storeStash uint32 + +func (s storeStash) exec(vm *vm) { + vm.initLocal(int(s)) +} + +type storeStashP uint32 + +func (s storeStashP) exec(vm *vm) { + vm.initLocal(int(s)) + vm.sp-- +} + +type storeStashLex uint32 + +func (s storeStashLex) exec(vm *vm) { + vm.setLocalLex(int(s)) +} + +type storeStashLexP uint32 + +func (s storeStashLexP) exec(vm *vm) { + vm.setLocalLex(int(s)) + vm.sp-- +} + +type initStash uint32 + +func (s initStash) exec(vm *vm) { + vm.initLocal(int(s)) + vm.sp-- +} + +type initGlobal unistring.String + +func (s initGlobal) exec(vm *vm) { + vm.sp-- + vm.r.global.stash.initByName(unistring.String(s), vm.stack[vm.sp]) + vm.pc++ +} + +type resolveVar1 unistring.String + +func (s resolveVar1) exec(vm *vm) { + name := unistring.String(s) + var ref ref + for stash := vm.stash; stash != nil; stash = stash.outer { + ref = stash.getRefByName(name, false) + if ref != nil { + goto end + } + } + + ref = &objRef{ + base: vm.r.globalObject.self, + name: name, + } + +end: + vm.refStack = append(vm.refStack, ref) + vm.pc++ +} + +type deleteVar unistring.String + +func (d deleteVar) exec(vm *vm) { + name := unistring.String(d) + ret := true + for stash := vm.stash; stash != nil; stash = stash.outer { + if stash.obj != nil { + if stashObjHas(stash.obj, name) { + ret = stash.obj.self.deleteStr(name, false) + goto end + } + } else { + if idx, exists := stash.names[name]; exists { + if idx&(maskVar|maskDeletable) == maskVar|maskDeletable { + stash.deleteBinding(name) + } else { + ret = false + } + goto end + } + } + } + + if vm.r.globalObject.self.hasPropertyStr(name) { + ret = vm.r.globalObject.self.deleteStr(name, false) + } + +end: + if ret { + vm.push(valueTrue) + } else { + vm.push(valueFalse) + } + vm.pc++ +} + +type deleteGlobal unistring.String 
+ +func (d deleteGlobal) exec(vm *vm) { + name := unistring.String(d) + var ret bool + if vm.r.globalObject.self.hasPropertyStr(name) { + ret = vm.r.globalObject.self.deleteStr(name, false) + if ret { + delete(vm.r.global.varNames, name) + } + } else { + ret = true + } + if ret { + vm.push(valueTrue) + } else { + vm.push(valueFalse) + } + vm.pc++ +} + +type resolveVar1Strict unistring.String + +func (s resolveVar1Strict) exec(vm *vm) { + name := unistring.String(s) + var ref ref + for stash := vm.stash; stash != nil; stash = stash.outer { + ref = stash.getRefByName(name, true) + if ref != nil { + goto end + } + } + + if vm.r.globalObject.self.hasPropertyStr(name) { + ref = &objRef{ + base: vm.r.globalObject.self, + name: name, + strict: true, + } + goto end + } + + ref = &unresolvedRef{ + runtime: vm.r, + name: name, + } + +end: + vm.refStack = append(vm.refStack, ref) + vm.pc++ +} + +type setGlobal unistring.String + +func (s setGlobal) exec(vm *vm) { + vm.r.setGlobal(unistring.String(s), vm.peek(), false) + vm.pc++ +} + +type setGlobalStrict unistring.String + +func (s setGlobalStrict) exec(vm *vm) { + vm.r.setGlobal(unistring.String(s), vm.peek(), true) + vm.pc++ +} + +// Load a var from stash +type loadStash uint32 + +func (g loadStash) exec(vm *vm) { + level := int(g >> 24) + idx := uint32(g & 0x00FFFFFF) + stash := vm.stash + for i := 0; i < level; i++ { + stash = stash.outer + } + + vm.push(nilSafe(stash.getByIdx(idx))) + vm.pc++ +} + +// Load a lexical binding from stash +type loadStashLex uint32 + +func (g loadStashLex) exec(vm *vm) { + level := int(g >> 24) + idx := uint32(g & 0x00FFFFFF) + stash := vm.stash + for i := 0; i < level; i++ { + stash = stash.outer + } + + v := stash.getByIdx(idx) + if v == nil { + panic(errAccessBeforeInit) + } + vm.push(v) + vm.pc++ +} + +// scan dynamic stashes up to the given level (encoded as 8 most significant bits of idx), if not found +// return the indexed var binding value from stash +type loadMixed struct { + name 
unistring.String + idx uint32 + callee bool +} + +func (g *loadMixed) exec(vm *vm) { + level := int(g.idx >> 24) + idx := g.idx & 0x00FFFFFF + stash := vm.stash + name := g.name + for i := 0; i < level; i++ { + if v, found := stash.getByName(name); found { + if g.callee { + if stash.obj != nil { + vm.push(stash.obj) + } else { + vm.push(_undefined) + } + } + vm.push(v) + goto end + } + stash = stash.outer + } + if g.callee { + vm.push(_undefined) + } + if stash != nil { + vm.push(nilSafe(stash.getByIdx(idx))) + } +end: + vm.pc++ +} + +// scan dynamic stashes up to the given level (encoded as 8 most significant bits of idx), if not found +// return the indexed lexical binding value from stash +type loadMixedLex loadMixed + +func (g *loadMixedLex) exec(vm *vm) { + level := int(g.idx >> 24) + idx := g.idx & 0x00FFFFFF + stash := vm.stash + name := g.name + for i := 0; i < level; i++ { + if v, found := stash.getByName(name); found { + if g.callee { + if stash.obj != nil { + vm.push(stash.obj) + } else { + vm.push(_undefined) + } + } + vm.push(v) + goto end + } + stash = stash.outer + } + if g.callee { + vm.push(_undefined) + } + if stash != nil { + v := stash.getByIdx(idx) + if v == nil { + panic(errAccessBeforeInit) + } + vm.push(v) + } +end: + vm.pc++ +} + +// scan dynamic stashes up to the given level (encoded as 8 most significant bits of idx), if not found +// return the indexed var binding value from stack +type loadMixedStack struct { + name unistring.String + idx int + level uint8 + callee bool +} + +// same as loadMixedStack, but the args have been moved to stash (therefore stack layout is different) +type loadMixedStack1 loadMixedStack + +func (g *loadMixedStack) exec(vm *vm) { + stash := vm.stash + name := g.name + level := int(g.level) + for i := 0; i < level; i++ { + if v, found := stash.getByName(name); found { + if g.callee { + if stash.obj != nil { + vm.push(stash.obj) + } else { + vm.push(_undefined) + } + } + vm.push(v) + goto end + } + stash = 
stash.outer + } + if g.callee { + vm.push(_undefined) + } + loadStack(g.idx).exec(vm) + return +end: + vm.pc++ +} + +func (g *loadMixedStack1) exec(vm *vm) { + stash := vm.stash + name := g.name + level := int(g.level) + for i := 0; i < level; i++ { + if v, found := stash.getByName(name); found { + if g.callee { + if stash.obj != nil { + vm.push(stash.obj) + } else { + vm.push(_undefined) + } + } + vm.push(v) + goto end + } + stash = stash.outer + } + if g.callee { + vm.push(_undefined) + } + loadStack1(g.idx).exec(vm) + return +end: + vm.pc++ +} + +type loadMixedStackLex loadMixedStack + +// same as loadMixedStackLex but when the arguments have been moved into stash +type loadMixedStack1Lex loadMixedStack + +func (g *loadMixedStackLex) exec(vm *vm) { + stash := vm.stash + name := g.name + level := int(g.level) + for i := 0; i < level; i++ { + if v, found := stash.getByName(name); found { + if g.callee { + if stash.obj != nil { + vm.push(stash.obj) + } else { + vm.push(_undefined) + } + } + vm.push(v) + goto end + } + stash = stash.outer + } + if g.callee { + vm.push(_undefined) + } + loadStackLex(g.idx).exec(vm) + return +end: + vm.pc++ +} + +func (g *loadMixedStack1Lex) exec(vm *vm) { + stash := vm.stash + name := g.name + level := int(g.level) + for i := 0; i < level; i++ { + if v, found := stash.getByName(name); found { + if g.callee { + if stash.obj != nil { + vm.push(stash.obj) + } else { + vm.push(_undefined) + } + } + vm.push(v) + goto end + } + stash = stash.outer + } + if g.callee { + vm.push(_undefined) + } + loadStack1Lex(g.idx).exec(vm) + return +end: + vm.pc++ +} + +type resolveMixed struct { + name unistring.String + idx uint32 + typ varType + strict bool +} + +func newStashRef(typ varType, name unistring.String, v *[]Value, idx int) ref { + switch typ { + case varTypeVar: + return &stashRef{ + n: name, + v: v, + idx: idx, + } + case varTypeLet: + return &stashRefLex{ + stashRef: stashRef{ + n: name, + v: v, + idx: idx, + }, + } + case varTypeConst, 
varTypeStrictConst: + return &stashRefConst{ + stashRefLex: stashRefLex{ + stashRef: stashRef{ + n: name, + v: v, + idx: idx, + }, + }, + strictConst: typ == varTypeStrictConst, + } + } + panic("unsupported var type") +} + +func (r *resolveMixed) exec(vm *vm) { + level := int(r.idx >> 24) + idx := r.idx & 0x00FFFFFF + stash := vm.stash + var ref ref + for i := 0; i < level; i++ { + ref = stash.getRefByName(r.name, r.strict) + if ref != nil { + goto end + } + stash = stash.outer + } + + if stash != nil { + ref = newStashRef(r.typ, r.name, &stash.values, int(idx)) + goto end + } + + ref = &unresolvedRef{ + runtime: vm.r, + name: r.name, + } + +end: + vm.refStack = append(vm.refStack, ref) + vm.pc++ +} + +type resolveMixedStack struct { + name unistring.String + idx int + typ varType + level uint8 + strict bool +} + +type resolveMixedStack1 resolveMixedStack + +func (r *resolveMixedStack) exec(vm *vm) { + level := int(r.level) + stash := vm.stash + var ref ref + var idx int + for i := 0; i < level; i++ { + ref = stash.getRefByName(r.name, r.strict) + if ref != nil { + goto end + } + stash = stash.outer + } + + if r.idx > 0 { + idx = vm.sb + vm.args + r.idx + } else { + idx = vm.sb + r.idx + } + + ref = newStashRef(r.typ, r.name, (*[]Value)(&vm.stack), idx) + +end: + vm.refStack = append(vm.refStack, ref) + vm.pc++ +} + +func (r *resolveMixedStack1) exec(vm *vm) { + level := int(r.level) + stash := vm.stash + var ref ref + for i := 0; i < level; i++ { + ref = stash.getRefByName(r.name, r.strict) + if ref != nil { + goto end + } + stash = stash.outer + } + + ref = newStashRef(r.typ, r.name, (*[]Value)(&vm.stack), vm.sb+r.idx) + +end: + vm.refStack = append(vm.refStack, ref) + vm.pc++ +} + +type _getValue struct{} + +var getValue _getValue + +func (_getValue) exec(vm *vm) { + ref := vm.refStack[len(vm.refStack)-1] + if v := ref.get(); v != nil { + vm.push(v) + } else { + vm.r.throwReferenceError(ref.refname()) + panic("Unreachable") + } + vm.pc++ +} + +type _putValue 
struct{} + +var putValue _putValue + +func (_putValue) exec(vm *vm) { + l := len(vm.refStack) - 1 + ref := vm.refStack[l] + vm.refStack[l] = nil + vm.refStack = vm.refStack[:l] + ref.set(vm.stack[vm.sp-1]) + vm.pc++ +} + +type _putValueP struct{} + +var putValueP _putValueP + +func (_putValueP) exec(vm *vm) { + l := len(vm.refStack) - 1 + ref := vm.refStack[l] + vm.refStack[l] = nil + vm.refStack = vm.refStack[:l] + ref.set(vm.stack[vm.sp-1]) + vm.sp-- + vm.pc++ +} + +type loadDynamic unistring.String + +func (n loadDynamic) exec(vm *vm) { + name := unistring.String(n) + var val Value + for stash := vm.stash; stash != nil; stash = stash.outer { + if v, exists := stash.getByName(name); exists { + val = v + break + } + } + if val == nil { + val = vm.r.globalObject.self.getStr(name, nil) + if val == nil { + vm.r.throwReferenceError(name) + } + } + vm.push(val) + vm.pc++ +} + +type loadDynamicRef unistring.String + +func (n loadDynamicRef) exec(vm *vm) { + name := unistring.String(n) + var val Value + for stash := vm.stash; stash != nil; stash = stash.outer { + if v, exists := stash.getByName(name); exists { + val = v + break + } + } + if val == nil { + val = vm.r.globalObject.self.getStr(name, nil) + if val == nil { + val = valueUnresolved{r: vm.r, ref: name} + } + } + vm.push(val) + vm.pc++ +} + +type loadDynamicCallee unistring.String + +func (n loadDynamicCallee) exec(vm *vm) { + name := unistring.String(n) + var val Value + var callee *Object + for stash := vm.stash; stash != nil; stash = stash.outer { + if v, exists := stash.getByName(name); exists { + callee = stash.obj + val = v + break + } + } + if val == nil { + val = vm.r.globalObject.self.getStr(name, nil) + if val == nil { + val = valueUnresolved{r: vm.r, ref: name} + } + } + if callee != nil { + vm.push(callee) + } else { + vm.push(_undefined) + } + vm.push(val) + vm.pc++ +} + +type _pop struct{} + +var pop _pop + +func (_pop) exec(vm *vm) { + vm.sp-- + vm.pc++ +} + +func (vm *vm) callEval(n int, strict 
bool) { + if vm.r.toObject(vm.stack[vm.sp-n-1]) == vm.r.global.Eval { + if n > 0 { + srcVal := vm.stack[vm.sp-n] + if src, ok := srcVal.(valueString); ok { + var this Value + if vm.sb >= 0 { + this = vm.stack[vm.sb] + } else { + this = vm.r.globalObject + } + ret := vm.r.eval(src, true, strict, this) + vm.stack[vm.sp-n-2] = ret + } else { + vm.stack[vm.sp-n-2] = srcVal + } + } else { + vm.stack[vm.sp-n-2] = _undefined + } + + vm.sp -= n + 1 + vm.pc++ + } else { + call(n).exec(vm) + } +} + +type callEval uint32 + +func (numargs callEval) exec(vm *vm) { + vm.callEval(int(numargs), false) +} + +type callEvalStrict uint32 + +func (numargs callEvalStrict) exec(vm *vm) { + vm.callEval(int(numargs), true) +} + +type _callEvalVariadic struct{} + +var callEvalVariadic _callEvalVariadic + +func (_callEvalVariadic) exec(vm *vm) { + vm.callEval(vm.countVariadicArgs()-2, false) +} + +type _callEvalVariadicStrict struct{} + +var callEvalVariadicStrict _callEvalVariadicStrict + +func (_callEvalVariadicStrict) exec(vm *vm) { + vm.callEval(vm.countVariadicArgs()-2, true) +} + +type _boxThis struct{} + +var boxThis _boxThis + +func (_boxThis) exec(vm *vm) { + v := vm.stack[vm.sb] + if v == _undefined || v == _null { + vm.stack[vm.sb] = vm.r.globalObject + } else { + vm.stack[vm.sb] = v.ToObject(vm.r) + } + vm.pc++ +} + +var variadicMarker Value = newSymbol(asciiString("[variadic marker]")) + +type _startVariadic struct{} + +var startVariadic _startVariadic + +func (_startVariadic) exec(vm *vm) { + vm.push(variadicMarker) + vm.pc++ +} + +type _callVariadic struct{} + +var callVariadic _callVariadic + +func (vm *vm) countVariadicArgs() int { + count := 0 + for i := vm.sp - 1; i >= 0; i-- { + if vm.stack[i] == variadicMarker { + return count + } + count++ + } + panic("Variadic marker was not found. 
Compiler bug.") +} + +func (_callVariadic) exec(vm *vm) { + call(vm.countVariadicArgs() - 2).exec(vm) +} + +type _endVariadic struct{} + +var endVariadic _endVariadic + +func (_endVariadic) exec(vm *vm) { + vm.sp-- + vm.stack[vm.sp-1] = vm.stack[vm.sp] + vm.pc++ +} + +type call uint32 + +func (numargs call) exec(vm *vm) { + // this + // callee + // arg0 + // ... + // arg + n := int(numargs) + v := vm.stack[vm.sp-n-1] // callee + obj := vm.toCallee(v) +repeat: + switch f := obj.self.(type) { + case *funcObject: + vm.pc++ + vm.pushCtx() + vm.args = n + vm.prg = f.prg + vm.stash = f.stash + vm.pc = 0 + vm.stack[vm.sp-n-1], vm.stack[vm.sp-n-2] = vm.stack[vm.sp-n-2], vm.stack[vm.sp-n-1] + return + case *nativeFuncObject: + vm._nativeCall(f, n) + case *boundFuncObject: + vm._nativeCall(&f.nativeFuncObject, n) + case *proxyObject: + vm.pushCtx() + vm.prg = nil + vm.funcName = "proxy" + ret := f.apply(FunctionCall{This: vm.stack[vm.sp-n-2], Arguments: vm.stack[vm.sp-n : vm.sp]}) + if ret == nil { + ret = _undefined + } + vm.stack[vm.sp-n-2] = ret + vm.popCtx() + vm.sp -= n + 1 + vm.pc++ + case *lazyObject: + obj.self = f.create(obj) + goto repeat + default: + vm.r.typeErrorResult(true, "Not a function: %s", obj.toString()) + } +} + +func (vm *vm) _nativeCall(f *nativeFuncObject, n int) { + if f.f != nil { + vm.pushCtx() + vm.prg = nil + vm.funcName = nilSafe(f.getStr("name", nil)).string() + ret := f.f(FunctionCall{ + Arguments: vm.stack[vm.sp-n : vm.sp], + This: vm.stack[vm.sp-n-2], + }) + if ret == nil { + ret = _undefined + } + vm.stack[vm.sp-n-2] = ret + vm.popCtx() + } else { + vm.stack[vm.sp-n-2] = _undefined + } + vm.sp -= n + 1 + vm.pc++ +} + +func (vm *vm) clearStack() { + sp := vm.sp + stackTail := vm.stack[sp:] + for i := range stackTail { + stackTail[i] = nil + } + vm.stack = vm.stack[:sp] +} + +type enterBlock struct { + names map[unistring.String]uint32 + stashSize uint32 + stackSize uint32 +} + +func (e *enterBlock) exec(vm *vm) { + if e.stashSize > 0 { + 
vm.newStash() + vm.stash.values = make([]Value, e.stashSize) + if len(e.names) > 0 { + vm.stash.names = e.names + } + } + ss := int(e.stackSize) + vm.stack.expand(vm.sp + ss - 1) + vv := vm.stack[vm.sp : vm.sp+ss] + for i := range vv { + vv[i] = nil + } + vm.sp += ss + vm.pc++ +} + +type enterCatchBlock struct { + names map[unistring.String]uint32 + stashSize uint32 + stackSize uint32 +} + +func (e *enterCatchBlock) exec(vm *vm) { + vm.newStash() + vm.stash.values = make([]Value, e.stashSize) + if len(e.names) > 0 { + vm.stash.names = e.names + } + vm.sp-- + vm.stash.values[0] = vm.stack[vm.sp] + ss := int(e.stackSize) + vm.stack.expand(vm.sp + ss - 1) + vv := vm.stack[vm.sp : vm.sp+ss] + for i := range vv { + vv[i] = nil + } + vm.sp += ss + vm.pc++ +} + +type leaveBlock struct { + stackSize uint32 + popStash bool +} + +func (l *leaveBlock) exec(vm *vm) { + if l.popStash { + vm.stash = vm.stash.outer + } + if ss := l.stackSize; ss > 0 { + vm.sp -= int(ss) + } + vm.pc++ +} + +type enterFunc struct { + names map[unistring.String]uint32 + stashSize uint32 + stackSize uint32 + numArgs uint32 + argsToStash bool + extensible bool +} + +func (e *enterFunc) exec(vm *vm) { + // Input stack: + // + // callee + // this + // arg0 + // ... 
+ // argN + // <- sp + + // Output stack: + // + // this <- sb + // + // <- sp + sp := vm.sp + vm.sb = sp - vm.args - 1 + vm.newStash() + stash := vm.stash + stash.variable = true + stash.values = make([]Value, e.stashSize) + if len(e.names) > 0 { + if e.extensible { + m := make(map[unistring.String]uint32, len(e.names)) + for name, idx := range e.names { + m[name] = idx + } + stash.names = m + } else { + stash.names = e.names + } + } + + ss := int(e.stackSize) + ea := 0 + if e.argsToStash { + offset := vm.args - int(e.numArgs) + copy(stash.values, vm.stack[sp-vm.args:sp]) + if offset > 0 { + vm.stash.extraArgs = make([]Value, offset) + copy(stash.extraArgs, vm.stack[sp-offset:]) + } else { + vv := stash.values[vm.args:e.numArgs] + for i := range vv { + vv[i] = _undefined + } + } + sp -= vm.args + } else { + d := int(e.numArgs) - vm.args + if d > 0 { + ss += d + ea = d + vm.args = int(e.numArgs) + } + } + vm.stack.expand(sp + ss - 1) + if ea > 0 { + vv := vm.stack[sp : vm.sp+ea] + for i := range vv { + vv[i] = _undefined + } + } + vv := vm.stack[sp+ea : sp+ss] + for i := range vv { + vv[i] = nil + } + vm.sp = sp + ss + vm.pc++ +} + +// Similar to enterFunc, but for when arguments may be accessed before they are initialised, +// e.g. by an eval() code or from a closure, or from an earlier initialiser code. +// In this case the arguments remain on stack, first argsToCopy of them are copied to the stash. 
+type enterFunc1 struct { + names map[unistring.String]uint32 + stashSize uint32 + numArgs uint32 + argsToCopy uint32 + extensible bool +} + +func (e *enterFunc1) exec(vm *vm) { + sp := vm.sp + vm.sb = sp - vm.args - 1 + vm.newStash() + stash := vm.stash + stash.variable = true + stash.values = make([]Value, e.stashSize) + if len(e.names) > 0 { + if e.extensible { + m := make(map[unistring.String]uint32, len(e.names)) + for name, idx := range e.names { + m[name] = idx + } + stash.names = m + } else { + stash.names = e.names + } + } + offset := vm.args - int(e.argsToCopy) + if offset > 0 { + copy(stash.values, vm.stack[sp-vm.args:sp-offset]) + if offset := vm.args - int(e.numArgs); offset > 0 { + vm.stash.extraArgs = make([]Value, offset) + copy(stash.extraArgs, vm.stack[sp-offset:]) + } + } else { + copy(stash.values, vm.stack[sp-vm.args:sp]) + if int(e.argsToCopy) > vm.args { + vv := stash.values[vm.args:e.argsToCopy] + for i := range vv { + vv[i] = _undefined + } + } + } + + vm.pc++ +} + +// Finalises the initialisers section and starts the function body which has its own +// scope. When used in conjunction with enterFunc1 adjustStack is set to true which +// causes the arguments to be removed from the stack. 
+type enterFuncBody struct { + enterBlock + extensible bool + adjustStack bool +} + +func (e *enterFuncBody) exec(vm *vm) { + if e.stashSize > 0 || e.extensible { + vm.newStash() + stash := vm.stash + stash.variable = true + stash.values = make([]Value, e.stashSize) + if len(e.names) > 0 { + if e.extensible { + m := make(map[unistring.String]uint32, len(e.names)) + for name, idx := range e.names { + m[name] = idx + } + stash.names = m + } else { + stash.names = e.names + } + } + } + sp := vm.sp + if e.adjustStack { + sp -= vm.args + } + nsp := sp + int(e.stackSize) + if e.stackSize > 0 { + vm.stack.expand(nsp - 1) + vv := vm.stack[sp:nsp] + for i := range vv { + vv[i] = nil + } + } + vm.sp = nsp + vm.pc++ +} + +type _ret struct{} + +var ret _ret + +func (_ret) exec(vm *vm) { + // callee -3 + // this -2 <- sb + // retval -1 + + vm.stack[vm.sb-1] = vm.stack[vm.sp-1] + vm.sp = vm.sb + vm.popCtx() + if vm.pc < 0 { + vm.halt = true + } +} + +type enterFuncStashless struct { + stackSize uint32 + args uint32 +} + +func (e *enterFuncStashless) exec(vm *vm) { + sp := vm.sp + vm.sb = sp - vm.args - 1 + d := int(e.args) - vm.args + if d > 0 { + ss := sp + int(e.stackSize) + d + vm.stack.expand(ss) + vv := vm.stack[sp : sp+d] + for i := range vv { + vv[i] = _undefined + } + vv = vm.stack[sp+d : ss] + for i := range vv { + vv[i] = nil + } + vm.args = int(e.args) + vm.sp = ss + } else { + if e.stackSize > 0 { + ss := sp + int(e.stackSize) + vm.stack.expand(ss) + vv := vm.stack[sp:ss] + for i := range vv { + vv[i] = nil + } + vm.sp = ss + } + } + vm.pc++ +} + +type newFunc struct { + prg *Program + name unistring.String + length uint32 + strict bool + + srcStart, srcEnd uint32 +} + +func (n *newFunc) exec(vm *vm) { + obj := vm.r.newFunc(n.name, int(n.length), n.strict) + obj.prg = n.prg + obj.stash = vm.stash + obj.src = n.prg.src.Source()[n.srcStart:n.srcEnd] + vm.push(obj.val) + vm.pc++ +} + +func (vm *vm) alreadyDeclared(name unistring.String) Value { + return 
vm.r.newError(vm.r.global.SyntaxError, "Identifier '%s' has already been declared", name) +} + +func (vm *vm) checkBindVarsGlobal(names []unistring.String) { + o := vm.r.globalObject.self + sn := vm.r.global.stash.names + if o, ok := o.(*baseObject); ok { + // shortcut + for _, name := range names { + if !o.hasOwnPropertyStr(name) && !o.extensible { + panic(vm.r.NewTypeError("Cannot define global variable '%s', global object is not extensible", name)) + } + if _, exists := sn[name]; exists { + panic(vm.alreadyDeclared(name)) + } + } + } else { + for _, name := range names { + if !o.hasOwnPropertyStr(name) && !o.isExtensible() { + panic(vm.r.NewTypeError("Cannot define global variable '%s', global object is not extensible", name)) + } + if _, exists := sn[name]; exists { + panic(vm.alreadyDeclared(name)) + } + } + } +} + +func (vm *vm) createGlobalVarBindings(names []unistring.String, d bool) { + globalVarNames := vm.r.global.varNames + if globalVarNames == nil { + globalVarNames = make(map[unistring.String]struct{}) + vm.r.global.varNames = globalVarNames + } + o := vm.r.globalObject.self + if o, ok := o.(*baseObject); ok { + for _, name := range names { + if !o.hasOwnPropertyStr(name) && o.extensible { + o._putProp(name, _undefined, true, true, d) + } + globalVarNames[name] = struct{}{} + } + } else { + var cf Flag + if d { + cf = FLAG_TRUE + } else { + cf = FLAG_FALSE + } + for _, name := range names { + if !o.hasOwnPropertyStr(name) && o.isExtensible() { + o.defineOwnPropertyStr(name, PropertyDescriptor{ + Value: _undefined, + Writable: FLAG_TRUE, + Enumerable: FLAG_TRUE, + Configurable: cf, + }, true) + o.setOwnStr(name, _undefined, false) + } + globalVarNames[name] = struct{}{} + } + } +} + +func (vm *vm) createGlobalFuncBindings(names []unistring.String, d bool) { + globalVarNames := vm.r.global.varNames + if globalVarNames == nil { + globalVarNames = make(map[unistring.String]struct{}) + vm.r.global.varNames = globalVarNames + } + o := vm.r.globalObject.self 
+ b := vm.sp - len(names) + var shortcutObj *baseObject + if o, ok := o.(*baseObject); ok { + shortcutObj = o + } + for i, name := range names { + var desc PropertyDescriptor + prop := o.getOwnPropStr(name) + desc.Value = vm.stack[b+i] + if shortcutObj != nil && prop == nil && shortcutObj.extensible { + shortcutObj._putProp(name, desc.Value, true, true, d) + } else { + if prop, ok := prop.(*valueProperty); ok && !prop.configurable { + // no-op + } else { + desc.Writable = FLAG_TRUE + desc.Enumerable = FLAG_TRUE + if d { + desc.Configurable = FLAG_TRUE + } else { + desc.Configurable = FLAG_FALSE + } + } + if shortcutObj != nil { + shortcutObj.defineOwnPropertyStr(name, desc, true) + } else { + o.defineOwnPropertyStr(name, desc, true) + o.setOwnStr(name, desc.Value, false) // not a bug, see https://262.ecma-international.org/#sec-createglobalfunctionbinding + } + } + globalVarNames[name] = struct{}{} + } + vm.sp = b +} + +func (vm *vm) checkBindFuncsGlobal(names []unistring.String) { + o := vm.r.globalObject.self + sn := vm.r.global.stash.names + for _, name := range names { + if _, exists := sn[name]; exists { + panic(vm.alreadyDeclared(name)) + } + prop := o.getOwnPropStr(name) + allowed := true + switch prop := prop.(type) { + case nil: + allowed = o.isExtensible() + case *valueProperty: + allowed = prop.configurable || prop.getterFunc == nil && prop.setterFunc == nil && prop.writable && prop.enumerable + } + if !allowed { + panic(vm.r.NewTypeError("Cannot redefine global function '%s'", name)) + } + } +} + +func (vm *vm) checkBindLexGlobal(names []unistring.String) { + o := vm.r.globalObject.self + s := &vm.r.global.stash + for _, name := range names { + if _, exists := vm.r.global.varNames[name]; exists { + goto fail + } + if _, exists := s.names[name]; exists { + goto fail + } + if prop, ok := o.getOwnPropStr(name).(*valueProperty); ok && !prop.configurable { + goto fail + } + continue + fail: + panic(vm.alreadyDeclared(name)) + } +} + +type bindVars struct { + 
names []unistring.String + deletable bool +} + +func (d *bindVars) exec(vm *vm) { + var target *stash + for _, name := range d.names { + for s := vm.stash; s != nil; s = s.outer { + if idx, exists := s.names[name]; exists && idx&maskVar == 0 { + panic(vm.alreadyDeclared(name)) + } + if s.variable { + target = s + break + } + } + } + if target == nil { + target = vm.stash + } + deletable := d.deletable + for _, name := range d.names { + target.createBinding(name, deletable) + } + vm.pc++ +} + +type bindGlobal struct { + vars, funcs, lets, consts []unistring.String + + deletable bool +} + +func (b *bindGlobal) exec(vm *vm) { + vm.checkBindFuncsGlobal(b.funcs) + vm.checkBindLexGlobal(b.lets) + vm.checkBindLexGlobal(b.consts) + vm.checkBindVarsGlobal(b.vars) + + s := &vm.r.global.stash + for _, name := range b.lets { + s.createLexBinding(name, false) + } + for _, name := range b.consts { + s.createLexBinding(name, true) + } + vm.createGlobalFuncBindings(b.funcs, b.deletable) + vm.createGlobalVarBindings(b.vars, b.deletable) + vm.pc++ +} + +type jne int32 + +func (j jne) exec(vm *vm) { + vm.sp-- + if !vm.stack[vm.sp].ToBoolean() { + vm.pc += int(j) + } else { + vm.pc++ + } +} + +type jeq int32 + +func (j jeq) exec(vm *vm) { + vm.sp-- + if vm.stack[vm.sp].ToBoolean() { + vm.pc += int(j) + } else { + vm.pc++ + } +} + +type jeq1 int32 + +func (j jeq1) exec(vm *vm) { + if vm.stack[vm.sp-1].ToBoolean() { + vm.pc += int(j) + } else { + vm.pc++ + } +} + +type jneq1 int32 + +func (j jneq1) exec(vm *vm) { + if !vm.stack[vm.sp-1].ToBoolean() { + vm.pc += int(j) + } else { + vm.pc++ + } +} + +type jdef int32 + +func (j jdef) exec(vm *vm) { + if vm.stack[vm.sp-1] != _undefined { + vm.pc += int(j) + } else { + vm.sp-- + vm.pc++ + } +} + +type jdefP int32 + +func (j jdefP) exec(vm *vm) { + if vm.stack[vm.sp-1] != _undefined { + vm.pc += int(j) + } else { + vm.pc++ + } + vm.sp-- +} + +type _not struct{} + +var not _not + +func (_not) exec(vm *vm) { + if vm.stack[vm.sp-1].ToBoolean() { 
+ vm.stack[vm.sp-1] = valueFalse + } else { + vm.stack[vm.sp-1] = valueTrue + } + vm.pc++ +} + +func toPrimitiveNumber(v Value) Value { + if o, ok := v.(*Object); ok { + return o.toPrimitiveNumber() + } + return v +} + +func toPrimitive(v Value) Value { + if o, ok := v.(*Object); ok { + return o.toPrimitive() + } + return v +} + +func cmp(px, py Value) Value { + var ret bool + var nx, ny float64 + + if xs, ok := px.(valueString); ok { + if ys, ok := py.(valueString); ok { + ret = xs.compareTo(ys) < 0 + goto end + } + } + + if xi, ok := px.(valueInt); ok { + if yi, ok := py.(valueInt); ok { + ret = xi < yi + goto end + } + } + + nx = px.ToFloat() + ny = py.ToFloat() + + if math.IsNaN(nx) || math.IsNaN(ny) { + return _undefined + } + + ret = nx < ny + +end: + if ret { + return valueTrue + } + return valueFalse + +} + +type _op_lt struct{} + +var op_lt _op_lt + +func (_op_lt) exec(vm *vm) { + left := toPrimitiveNumber(vm.stack[vm.sp-2]) + right := toPrimitiveNumber(vm.stack[vm.sp-1]) + + r := cmp(left, right) + if r == _undefined { + vm.stack[vm.sp-2] = valueFalse + } else { + vm.stack[vm.sp-2] = r + } + vm.sp-- + vm.pc++ +} + +type _op_lte struct{} + +var op_lte _op_lte + +func (_op_lte) exec(vm *vm) { + left := toPrimitiveNumber(vm.stack[vm.sp-2]) + right := toPrimitiveNumber(vm.stack[vm.sp-1]) + + r := cmp(right, left) + if r == _undefined || r == valueTrue { + vm.stack[vm.sp-2] = valueFalse + } else { + vm.stack[vm.sp-2] = valueTrue + } + + vm.sp-- + vm.pc++ +} + +type _op_gt struct{} + +var op_gt _op_gt + +func (_op_gt) exec(vm *vm) { + left := toPrimitiveNumber(vm.stack[vm.sp-2]) + right := toPrimitiveNumber(vm.stack[vm.sp-1]) + + r := cmp(right, left) + if r == _undefined { + vm.stack[vm.sp-2] = valueFalse + } else { + vm.stack[vm.sp-2] = r + } + vm.sp-- + vm.pc++ +} + +type _op_gte struct{} + +var op_gte _op_gte + +func (_op_gte) exec(vm *vm) { + left := toPrimitiveNumber(vm.stack[vm.sp-2]) + right := toPrimitiveNumber(vm.stack[vm.sp-1]) + + r := cmp(left, 
right) + if r == _undefined || r == valueTrue { + vm.stack[vm.sp-2] = valueFalse + } else { + vm.stack[vm.sp-2] = valueTrue + } + + vm.sp-- + vm.pc++ +} + +type _op_eq struct{} + +var op_eq _op_eq + +func (_op_eq) exec(vm *vm) { + if vm.stack[vm.sp-2].Equals(vm.stack[vm.sp-1]) { + vm.stack[vm.sp-2] = valueTrue + } else { + vm.stack[vm.sp-2] = valueFalse + } + vm.sp-- + vm.pc++ +} + +type _op_neq struct{} + +var op_neq _op_neq + +func (_op_neq) exec(vm *vm) { + if vm.stack[vm.sp-2].Equals(vm.stack[vm.sp-1]) { + vm.stack[vm.sp-2] = valueFalse + } else { + vm.stack[vm.sp-2] = valueTrue + } + vm.sp-- + vm.pc++ +} + +type _op_strict_eq struct{} + +var op_strict_eq _op_strict_eq + +func (_op_strict_eq) exec(vm *vm) { + if vm.stack[vm.sp-2].StrictEquals(vm.stack[vm.sp-1]) { + vm.stack[vm.sp-2] = valueTrue + } else { + vm.stack[vm.sp-2] = valueFalse + } + vm.sp-- + vm.pc++ +} + +type _op_strict_neq struct{} + +var op_strict_neq _op_strict_neq + +func (_op_strict_neq) exec(vm *vm) { + if vm.stack[vm.sp-2].StrictEquals(vm.stack[vm.sp-1]) { + vm.stack[vm.sp-2] = valueFalse + } else { + vm.stack[vm.sp-2] = valueTrue + } + vm.sp-- + vm.pc++ +} + +type _op_instanceof struct{} + +var op_instanceof _op_instanceof + +func (_op_instanceof) exec(vm *vm) { + left := vm.stack[vm.sp-2] + right := vm.r.toObject(vm.stack[vm.sp-1]) + + if instanceOfOperator(left, right) { + vm.stack[vm.sp-2] = valueTrue + } else { + vm.stack[vm.sp-2] = valueFalse + } + + vm.sp-- + vm.pc++ +} + +type _op_in struct{} + +var op_in _op_in + +func (_op_in) exec(vm *vm) { + left := vm.stack[vm.sp-2] + right := vm.r.toObject(vm.stack[vm.sp-1]) + + if right.hasProperty(left) { + vm.stack[vm.sp-2] = valueTrue + } else { + vm.stack[vm.sp-2] = valueFalse + } + + vm.sp-- + vm.pc++ +} + +type try struct { + catchOffset int32 + finallyOffset int32 +} + +func (t try) exec(vm *vm) { + o := vm.pc + vm.pc++ + ex := vm.runTry() + if ex != nil && t.catchOffset > 0 { + // run the catch block (in try) + vm.pc = o + 
int(t.catchOffset) + // TODO: if ex.val is an Error, set the stack property + vm.push(ex.val) + ex = vm.runTry() + } + + if t.finallyOffset > 0 { + pc := vm.pc + // Run finally + vm.pc = o + int(t.finallyOffset) + vm.run() + if vm.prg.code[vm.pc] == retFinally { + vm.pc = pc + } else { + // break or continue out of finally, dropping exception + ex = nil + } + } + + vm.halt = false + + if ex != nil { + vm.pc = -1 // to prevent the current position from being captured in the stacktrace + panic(ex) + } +} + +type _retFinally struct{} + +var retFinally _retFinally + +func (_retFinally) exec(vm *vm) { + vm.pc++ +} + +type _throw struct{} + +var throw _throw + +func (_throw) exec(vm *vm) { + panic(vm.stack[vm.sp-1]) +} + +type _newVariadic struct{} + +var newVariadic _newVariadic + +func (_newVariadic) exec(vm *vm) { + _new(vm.countVariadicArgs() - 1).exec(vm) +} + +type _new uint32 + +func (n _new) exec(vm *vm) { + sp := vm.sp - int(n) + obj := vm.stack[sp-1] + ctor := vm.r.toConstructor(obj) + vm.stack[sp-1] = ctor(vm.stack[sp:vm.sp], nil) + vm.sp = sp + vm.pc++ +} + +type _loadNewTarget struct{} + +var loadNewTarget _loadNewTarget + +func (_loadNewTarget) exec(vm *vm) { + if t := vm.newTarget; t != nil { + vm.push(t) + } else { + vm.push(_undefined) + } + vm.pc++ +} + +type _typeof struct{} + +var typeof _typeof + +func (_typeof) exec(vm *vm) { + var r Value + switch v := vm.stack[vm.sp-1].(type) { + case valueUndefined, valueUnresolved: + r = stringUndefined + case valueNull: + r = stringObjectC + case *Object: + repeat: + switch s := v.self.(type) { + case *funcObject, *nativeFuncObject, *boundFuncObject: + r = stringFunction + case *lazyObject: + v.self = s.create(v) + goto repeat + default: + r = stringObjectC + } + case valueBool: + r = stringBoolean + case valueString: + r = stringString + case valueInt, valueFloat: + r = stringNumber + case *Symbol: + r = stringSymbol + default: + panic(fmt.Errorf("Unknown type: %T", v)) + } + vm.stack[vm.sp-1] = r + vm.pc++ +} 
+ +type createArgsMapped uint32 + +func (formalArgs createArgsMapped) exec(vm *vm) { + v := &Object{runtime: vm.r} + args := &argumentsObject{} + args.extensible = true + args.prototype = vm.r.global.ObjectPrototype + args.class = "Arguments" + v.self = args + args.val = v + args.length = vm.args + args.init() + i := 0 + c := int(formalArgs) + if vm.args < c { + c = vm.args + } + for ; i < c; i++ { + args._put(unistring.String(strconv.Itoa(i)), &mappedProperty{ + valueProperty: valueProperty{ + writable: true, + configurable: true, + enumerable: true, + }, + v: &vm.stash.values[i], + }) + } + + for _, v := range vm.stash.extraArgs { + args._put(unistring.String(strconv.Itoa(i)), v) + i++ + } + + args._putProp("callee", vm.stack[vm.sb-1], true, false, true) + args._putSym(SymIterator, valueProp(vm.r.global.arrayValues, true, false, true)) + vm.push(v) + vm.pc++ +} + +type createArgsUnmapped uint32 + +func (formalArgs createArgsUnmapped) exec(vm *vm) { + args := vm.r.newBaseObject(vm.r.global.ObjectPrototype, "Arguments") + i := 0 + c := int(formalArgs) + if vm.args < c { + c = vm.args + } + for _, v := range vm.stash.values[:c] { + args._put(unistring.String(strconv.Itoa(i)), v) + i++ + } + + for _, v := range vm.stash.extraArgs { + args._put(unistring.String(strconv.Itoa(i)), v) + i++ + } + + args._putProp("length", intToValue(int64(vm.args)), true, false, true) + args._put("callee", vm.r.global.throwerProperty) + args._put("caller", vm.r.global.throwerProperty) + args._putSym(SymIterator, valueProp(vm.r.global.arrayValues, true, false, true)) + vm.push(args.val) + vm.pc++ +} + +type _enterWith struct{} + +var enterWith _enterWith + +func (_enterWith) exec(vm *vm) { + vm.newStash() + vm.stash.obj = vm.stack[vm.sp-1].ToObject(vm.r) + vm.sp-- + vm.pc++ +} + +type _leaveWith struct{} + +var leaveWith _leaveWith + +func (_leaveWith) exec(vm *vm) { + vm.stash = vm.stash.outer + vm.pc++ +} + +func emptyIter() (propIterItem, iterNextFunc) { + return propIterItem{}, nil +} 
+ +type _enumerate struct{} + +var enumerate _enumerate + +func (_enumerate) exec(vm *vm) { + v := vm.stack[vm.sp-1] + if v == _undefined || v == _null { + vm.iterStack = append(vm.iterStack, iterStackItem{f: emptyIter}) + } else { + vm.iterStack = append(vm.iterStack, iterStackItem{f: enumerateRecursive(v.ToObject(vm.r))}) + } + vm.sp-- + vm.pc++ +} + +type enumNext int32 + +func (jmp enumNext) exec(vm *vm) { + l := len(vm.iterStack) - 1 + item, n := vm.iterStack[l].f() + if n != nil { + vm.iterStack[l].val = stringValueFromRaw(item.name) + vm.iterStack[l].f = n + vm.pc++ + } else { + vm.pc += int(jmp) + } +} + +type _enumGet struct{} + +var enumGet _enumGet + +func (_enumGet) exec(vm *vm) { + l := len(vm.iterStack) - 1 + vm.push(vm.iterStack[l].val) + vm.pc++ +} + +type _enumPop struct{} + +var enumPop _enumPop + +func (_enumPop) exec(vm *vm) { + l := len(vm.iterStack) - 1 + vm.iterStack[l] = iterStackItem{} + vm.iterStack = vm.iterStack[:l] + vm.pc++ +} + +type _enumPopClose struct{} + +var enumPopClose _enumPopClose + +func (_enumPopClose) exec(vm *vm) { + l := len(vm.iterStack) - 1 + item := vm.iterStack[l] + vm.iterStack[l] = iterStackItem{} + vm.iterStack = vm.iterStack[:l] + if iter := item.iter; iter != nil { + returnIter(iter) + } + vm.pc++ +} + +type _iterateP struct{} + +var iterateP _iterateP + +func (_iterateP) exec(vm *vm) { + iter := vm.r.getIterator(vm.stack[vm.sp-1], nil) + vm.iterStack = append(vm.iterStack, iterStackItem{iter: iter}) + vm.sp-- + vm.pc++ +} + +type _iterate struct{} + +var iterate _iterate + +func (_iterate) exec(vm *vm) { + iter := vm.r.getIterator(vm.stack[vm.sp-1], nil) + vm.iterStack = append(vm.iterStack, iterStackItem{iter: iter}) + vm.pc++ +} + +type iterNext int32 + +func (jmp iterNext) exec(vm *vm) { + l := len(vm.iterStack) - 1 + iter := vm.iterStack[l].iter + var res *Object + var done bool + var value Value + ex := vm.try(func() { + res = vm.r.toObject(toMethod(iter.self.getStr("next", nil))(FunctionCall{This: iter})) 
+ done = nilSafe(res.self.getStr("done", nil)).ToBoolean() + if !done { + value = nilSafe(res.self.getStr("value", nil)) + vm.iterStack[l].val = value + } + }) + if ex == nil { + if done { + vm.pc += int(jmp) + } else { + vm.iterStack[l].val = value + vm.pc++ + } + } else { + l := len(vm.iterStack) - 1 + vm.iterStack[l] = iterStackItem{} + vm.iterStack = vm.iterStack[:l] + panic(ex.val) + } +} + +type copyStash struct{} + +func (copyStash) exec(vm *vm) { + oldStash := vm.stash + newStash := &stash{ + outer: oldStash.outer, + } + vm.stashAllocs++ + newStash.values = append([]Value(nil), oldStash.values...) + vm.stash = newStash + vm.pc++ +} + +type _throwAssignToConst struct{} + +var throwAssignToConst _throwAssignToConst + +func (_throwAssignToConst) exec(vm *vm) { + panic(errAssignToConst) +} + +func (r *Runtime) copyDataProperties(target, source Value) { + targetObj := r.toObject(target) + if source == _null || source == _undefined { + return + } + sourceObj := source.ToObject(r) + for _, key := range sourceObj.self.ownPropertyKeys(false, nil) { + v := nilSafe(sourceObj.get(key, nil)) + createDataPropertyOrThrow(targetObj, key, v) + } +} + +type _copySpread struct{} + +var copySpread _copySpread + +func (_copySpread) exec(vm *vm) { + vm.r.copyDataProperties(vm.stack[vm.sp-2], vm.stack[vm.sp-1]) + vm.sp-- + vm.pc++ +} + +type _copyRest struct{} + +var copyRest _copyRest + +func (_copyRest) exec(vm *vm) { + vm.push(vm.r.NewObject()) + vm.r.copyDataProperties(vm.stack[vm.sp-1], vm.stack[vm.sp-2]) + vm.pc++ +} + +type _createDestructSrc struct{} + +var createDestructSrc _createDestructSrc + +func (_createDestructSrc) exec(vm *vm) { + v := vm.stack[vm.sp-1] + vm.r.checkObjectCoercible(v) + vm.push(vm.r.newDestructKeyedSource(v)) + vm.pc++ +} + +type _checkObjectCoercible struct{} + +var checkObjectCoercible _checkObjectCoercible + +func (_checkObjectCoercible) exec(vm *vm) { + vm.r.checkObjectCoercible(vm.stack[vm.sp-1]) + vm.pc++ +} + +type createArgsRestStack int + 
+func (n createArgsRestStack) exec(vm *vm) { + var values []Value + delta := vm.args - int(n) + if delta > 0 { + values = make([]Value, delta) + copy(values, vm.stack[vm.sb+int(n)+1:]) + } + vm.push(vm.r.newArrayValues(values)) + vm.pc++ +} + +type _createArgsRestStash struct{} + +var createArgsRestStash _createArgsRestStash + +func (_createArgsRestStash) exec(vm *vm) { + vm.push(vm.r.newArrayValues(vm.stash.extraArgs)) + vm.stash.extraArgs = nil + vm.pc++ +} diff --git a/vendor/github.com/evanw/esbuild/LICENSE.md b/vendor/github.com/evanw/esbuild/LICENSE.md new file mode 100644 index 0000000000..2027e8dcf3 --- /dev/null +++ b/vendor/github.com/evanw/esbuild/LICENSE.md @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Evan Wallace + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/evanw/esbuild/internal/api_helpers/use_timer.go b/vendor/github.com/evanw/esbuild/internal/api_helpers/use_timer.go new file mode 100644 index 0000000000..3b36fe25a3 --- /dev/null +++ b/vendor/github.com/evanw/esbuild/internal/api_helpers/use_timer.go @@ -0,0 +1,7 @@ +package api_helpers + +// This flag is set by the CLI to activate the timer. It's put here instead of +// by the timer to discourage code from checking this flag. Only the code that +// creates the root timer should check this flag. Other code should check that +// the timer is not null to detect if the timer is being used or not. +var UseTimer bool diff --git a/vendor/github.com/evanw/esbuild/internal/ast/ast.go b/vendor/github.com/evanw/esbuild/internal/ast/ast.go new file mode 100644 index 0000000000..77776cf6c8 --- /dev/null +++ b/vendor/github.com/evanw/esbuild/internal/ast/ast.go @@ -0,0 +1,138 @@ +package ast + +import "github.com/evanw/esbuild/internal/logger" + +// This file contains data structures that are used with the AST packages for +// both JavaScript and CSS. This helps the bundler treat both AST formats in +// a somewhat format-agnostic manner. 
+ +type ImportKind uint8 + +const ( + // An entry point provided by the user + ImportEntryPoint ImportKind = iota + + // An ES6 import or re-export statement + ImportStmt + + // A call to "require()" + ImportRequire + + // An "import()" expression with a string argument + ImportDynamic + + // A call to "require.resolve()" + ImportRequireResolve + + // A CSS "@import" rule + ImportAt + + // A CSS "@import" rule with import conditions + ImportAtConditional + + // A CSS "url(...)" token + ImportURL +) + +func (kind ImportKind) StringForMetafile() string { + switch kind { + case ImportStmt: + return "import-statement" + case ImportRequire: + return "require-call" + case ImportDynamic: + return "dynamic-import" + case ImportRequireResolve: + return "require-resolve" + case ImportAt, ImportAtConditional: + return "import-rule" + case ImportURL: + return "url-token" + case ImportEntryPoint: + return "entry-point" + default: + panic("Internal error") + } +} + +func (kind ImportKind) IsFromCSS() bool { + return kind == ImportAt || kind == ImportURL +} + +type ImportRecord struct { + Range logger.Range + Path logger.Path + Assertions *[]AssertEntry + + // The resolved source index for an internal import (within the bundle) or + // nil for an external import (not included in the bundle) + SourceIndex Index32 + + // Sometimes the parser creates an import record and decides it isn't needed. + // For example, TypeScript code may have import statements that later turn + // out to be type-only imports after analyzing the whole file. + IsUnused bool + + // If this is true, the import contains syntax like "* as ns". This is used + // to determine whether modules that have no exports need to be wrapped in a + // CommonJS wrapper or not. + ContainsImportStar bool + + // If this is true, the import contains an import for the alias "default", + // either via the "import x from" or "import {default as x} from" syntax. 
+ ContainsDefaultAlias bool + + // If true, this "export * from 'path'" statement is evaluated at run-time by + // calling the "__reExport()" helper function + CallsRunTimeReExportFn bool + + // Tell the printer to wrap this call to "require()" in "__toModule(...)" + WrapWithToModule bool + + // Tell the printer to use the runtime "__require()" instead of "require()" + CallRuntimeRequire bool + + // True for the following cases: + // + // try { require('x') } catch { handle } + // try { await import('x') } catch { handle } + // try { require.resolve('x') } catch { handle } + // import('x').catch(handle) + // import('x').then(_, handle) + // + // In these cases we shouldn't generate an error if the path could not be + // resolved. + HandlesImportErrors bool + + // If true, this was originally written as a bare "import 'file'" statement + WasOriginallyBareImport bool + + Kind ImportKind +} + +type AssertEntry struct { + Key []uint16 // An identifier or a string + Value []uint16 // Always a string + KeyLoc logger.Loc + ValueLoc logger.Loc + PreferQuotedKey bool +} + +// This stores a 32-bit index where the zero value is an invalid index. This is +// a better alternative to storing the index as a pointer since that has the +// same properties but takes up more space and costs an extra pointer traversal. 
+type Index32 struct { + flippedBits uint32 +} + +func MakeIndex32(index uint32) Index32 { + return Index32{flippedBits: ^index} +} + +func (i Index32) IsValid() bool { + return i.flippedBits != 0 +} + +func (i Index32) GetIndex() uint32 { + return ^i.flippedBits +} diff --git a/vendor/github.com/evanw/esbuild/internal/bundler/bundler.go b/vendor/github.com/evanw/esbuild/internal/bundler/bundler.go new file mode 100644 index 0000000000..c8f70ff76b --- /dev/null +++ b/vendor/github.com/evanw/esbuild/internal/bundler/bundler.go @@ -0,0 +1,2304 @@ +package bundler + +import ( + "bytes" + "encoding/base32" + "encoding/base64" + "fmt" + "math/rand" + "net/http" + "sort" + "strings" + "sync" + "syscall" + "time" + "unicode" + "unicode/utf8" + + "github.com/evanw/esbuild/internal/ast" + "github.com/evanw/esbuild/internal/cache" + "github.com/evanw/esbuild/internal/compat" + "github.com/evanw/esbuild/internal/config" + "github.com/evanw/esbuild/internal/css_parser" + "github.com/evanw/esbuild/internal/fs" + "github.com/evanw/esbuild/internal/graph" + "github.com/evanw/esbuild/internal/helpers" + "github.com/evanw/esbuild/internal/js_ast" + "github.com/evanw/esbuild/internal/js_lexer" + "github.com/evanw/esbuild/internal/js_parser" + "github.com/evanw/esbuild/internal/js_printer" + "github.com/evanw/esbuild/internal/logger" + "github.com/evanw/esbuild/internal/resolver" + "github.com/evanw/esbuild/internal/runtime" + "github.com/evanw/esbuild/internal/xxhash" +) + +type scannerFile struct { + inputFile graph.InputFile + pluginData interface{} + + // If "AbsMetadataFile" is present, this will be filled out with information + // about this file in JSON format. This is a partial JSON file that will be + // fully assembled later. + jsonMetadataChunk string +} + +// This is data related to source maps. It's computed in parallel with linking +// and must be ready by the time printing happens. This is beneficial because +// it is somewhat expensive to produce. 
+type dataForSourceMap struct { + // This data is for the printer. It maps from byte offsets in the file (which + // are stored at every AST node) to UTF-16 column offsets (required by source + // maps). + lineOffsetTables []js_printer.LineOffsetTable + + // This contains the quoted contents of the original source file. It's what + // needs to be embedded in the "sourcesContent" array in the final source + // map. Quoting is precomputed because it's somewhat expensive. + quotedContents [][]byte +} + +type Bundle struct { + fs fs.FS + res resolver.Resolver + files []scannerFile + entryPoints []graph.EntryPoint + + // The unique key prefix is a random string that is unique to every bundling + // operation. It is used as a prefix for the unique keys assigned to every + // chunk during linking. These unique keys are used to identify each chunk + // before the final output paths have been computed. + uniqueKeyPrefix string +} + +type parseArgs struct { + fs fs.FS + log logger.Log + res resolver.Resolver + caches *cache.CacheSet + keyPath logger.Path + prettyPath string + sourceIndex uint32 + importSource *logger.Source + sideEffects graph.SideEffects + importPathRange logger.Range + pluginData interface{} + options config.Options + results chan parseResult + inject chan config.InjectedFile + skipResolve bool + uniqueKeyPrefix string +} + +type parseResult struct { + file scannerFile + resolveResults []*resolver.ResolveResult + tlaCheck tlaCheck + ok bool +} + +type tlaCheck struct { + parent ast.Index32 + depth uint32 + importRecordIndex uint32 +} + +func parseFile(args parseArgs) { + source := logger.Source{ + Index: args.sourceIndex, + KeyPath: args.keyPath, + PrettyPath: args.prettyPath, + IdentifierName: js_ast.GenerateNonUniqueNameFromPath(args.keyPath.Text), + } + + var loader config.Loader + var absResolveDir string + var pluginName string + var pluginData interface{} + + if stdin := args.options.Stdin; stdin != nil { + // Special-case stdin + source.Contents = 
stdin.Contents + loader = stdin.Loader + if loader == config.LoaderNone { + loader = config.LoaderJS + } + absResolveDir = args.options.Stdin.AbsResolveDir + } else { + result, ok := runOnLoadPlugins( + args.options.Plugins, + args.res, + args.fs, + &args.caches.FSCache, + args.log, + &source, + args.importSource, + args.importPathRange, + args.pluginData, + args.options.WatchMode, + ) + if !ok { + if args.inject != nil { + args.inject <- config.InjectedFile{ + Source: source, + } + } + args.results <- parseResult{} + return + } + loader = result.loader + absResolveDir = result.absResolveDir + pluginName = result.pluginName + pluginData = result.pluginData + } + + _, base, ext := logger.PlatformIndependentPathDirBaseExt(source.KeyPath.Text) + + // The special "default" loader determines the loader from the file path + if loader == config.LoaderDefault { + loader = loaderFromFileExtension(args.options.ExtensionToLoader, base+ext) + } + + result := parseResult{ + file: scannerFile{ + inputFile: graph.InputFile{ + Source: source, + Loader: loader, + SideEffects: args.sideEffects, + }, + pluginData: pluginData, + }, + } + + switch loader { + case config.LoaderJS: + ast, ok := args.caches.JSCache.Parse(args.log, source, js_parser.OptionsFromConfig(&args.options)) + result.file.inputFile.Repr = &graph.JSRepr{AST: ast} + result.ok = ok + + case config.LoaderJSX: + args.options.JSX.Parse = true + ast, ok := args.caches.JSCache.Parse(args.log, source, js_parser.OptionsFromConfig(&args.options)) + result.file.inputFile.Repr = &graph.JSRepr{AST: ast} + result.ok = ok + + case config.LoaderTS: + args.options.TS.Parse = true + ast, ok := args.caches.JSCache.Parse(args.log, source, js_parser.OptionsFromConfig(&args.options)) + result.file.inputFile.Repr = &graph.JSRepr{AST: ast} + result.ok = ok + + case config.LoaderTSX: + args.options.TS.Parse = true + args.options.JSX.Parse = true + ast, ok := args.caches.JSCache.Parse(args.log, source, 
js_parser.OptionsFromConfig(&args.options)) + result.file.inputFile.Repr = &graph.JSRepr{AST: ast} + result.ok = ok + + case config.LoaderCSS: + ast := args.caches.CSSCache.Parse(args.log, source, css_parser.Options{ + MangleSyntax: args.options.MangleSyntax, + RemoveWhitespace: args.options.RemoveWhitespace, + UnsupportedCSSFeatures: args.options.UnsupportedCSSFeatures, + }) + result.file.inputFile.Repr = &graph.CSSRepr{AST: ast} + result.ok = true + + case config.LoaderJSON: + expr, ok := args.caches.JSONCache.Parse(args.log, source, js_parser.JSONOptions{}) + ast := js_parser.LazyExportAST(args.log, source, js_parser.OptionsFromConfig(&args.options), expr, "") + if pluginName != "" { + result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_PureData_FromPlugin + } else { + result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_PureData + } + result.file.inputFile.Repr = &graph.JSRepr{AST: ast} + result.ok = ok + + case config.LoaderText: + encoded := base64.StdEncoding.EncodeToString([]byte(source.Contents)) + expr := js_ast.Expr{Data: &js_ast.EString{Value: js_lexer.StringToUTF16(source.Contents)}} + ast := js_parser.LazyExportAST(args.log, source, js_parser.OptionsFromConfig(&args.options), expr, "") + ast.URLForCSS = "data:text/plain;base64," + encoded + if pluginName != "" { + result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_PureData_FromPlugin + } else { + result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_PureData + } + result.file.inputFile.Repr = &graph.JSRepr{AST: ast} + result.ok = true + + case config.LoaderBase64: + mimeType := guessMimeType(ext, source.Contents) + encoded := base64.StdEncoding.EncodeToString([]byte(source.Contents)) + expr := js_ast.Expr{Data: &js_ast.EString{Value: js_lexer.StringToUTF16(encoded)}} + ast := js_parser.LazyExportAST(args.log, source, js_parser.OptionsFromConfig(&args.options), expr, "") + ast.URLForCSS = "data:" + mimeType + ";base64," + encoded + if pluginName != "" { + 
result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_PureData_FromPlugin + } else { + result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_PureData + } + result.file.inputFile.Repr = &graph.JSRepr{AST: ast} + result.ok = true + + case config.LoaderBinary: + encoded := base64.StdEncoding.EncodeToString([]byte(source.Contents)) + expr := js_ast.Expr{Data: &js_ast.EString{Value: js_lexer.StringToUTF16(encoded)}} + helper := "__toBinary" + if args.options.Platform == config.PlatformNode { + helper = "__toBinaryNode" + } + ast := js_parser.LazyExportAST(args.log, source, js_parser.OptionsFromConfig(&args.options), expr, helper) + ast.URLForCSS = "data:application/octet-stream;base64," + encoded + if pluginName != "" { + result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_PureData_FromPlugin + } else { + result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_PureData + } + result.file.inputFile.Repr = &graph.JSRepr{AST: ast} + result.ok = true + + case config.LoaderDataURL: + mimeType := guessMimeType(ext, source.Contents) + encoded := base64.StdEncoding.EncodeToString([]byte(source.Contents)) + url := fmt.Sprintf("data:%s;base64,%s", mimeType, encoded) + expr := js_ast.Expr{Data: &js_ast.EString{Value: js_lexer.StringToUTF16(url)}} + ast := js_parser.LazyExportAST(args.log, source, js_parser.OptionsFromConfig(&args.options), expr, "") + ast.URLForCSS = url + if pluginName != "" { + result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_PureData_FromPlugin + } else { + result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_PureData + } + result.file.inputFile.Repr = &graph.JSRepr{AST: ast} + result.ok = true + + case config.LoaderFile: + uniqueKey := fmt.Sprintf("%sA%08d", args.uniqueKeyPrefix, args.sourceIndex) + uniqueKeyPath := uniqueKey + source.KeyPath.IgnoredSuffix + expr := js_ast.Expr{Data: &js_ast.EString{Value: js_lexer.StringToUTF16(uniqueKeyPath)}} + ast := js_parser.LazyExportAST(args.log, source, 
js_parser.OptionsFromConfig(&args.options), expr, "") + ast.URLForCSS = uniqueKeyPath + if pluginName != "" { + result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_PureData_FromPlugin + } else { + result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_PureData + } + result.file.inputFile.Repr = &graph.JSRepr{AST: ast} + result.ok = true + + // Mark that this file is from the "file" loader + result.file.inputFile.UniqueKeyForFileLoader = uniqueKey + + default: + var message string + if source.KeyPath.Namespace == "file" && ext != "" { + message = fmt.Sprintf("No loader is configured for %q files: %s", ext, source.PrettyPath) + } else { + message = fmt.Sprintf("Do not know how to load path: %s", source.PrettyPath) + } + tracker := logger.MakeLineColumnTracker(args.importSource) + args.log.AddRangeError(&tracker, args.importPathRange, message) + } + + // This must come before we send on the "results" channel to avoid deadlock + if args.inject != nil { + var exports []config.InjectableExport + if repr, ok := result.file.inputFile.Repr.(*graph.JSRepr); ok { + aliases := make([]string, 0, len(repr.AST.NamedExports)) + for alias := range repr.AST.NamedExports { + aliases = append(aliases, alias) + } + sort.Strings(aliases) // Sort for determinism + exports = make([]config.InjectableExport, len(aliases)) + for i, alias := range aliases { + exports[i] = config.InjectableExport{ + Alias: alias, + Loc: repr.AST.NamedExports[alias].AliasLoc, + } + } + } + args.inject <- config.InjectedFile{ + Source: source, + Exports: exports, + } + } + + // Stop now if parsing failed + if !result.ok { + args.results <- result + return + } + + // Run the resolver on the parse thread so it's not run on the main thread. + // That way the main thread isn't blocked if the resolver takes a while. 
+ if args.options.Mode == config.ModeBundle && !args.skipResolve { + // Clone the import records because they will be mutated later + recordsPtr := result.file.inputFile.Repr.ImportRecords() + records := append([]ast.ImportRecord{}, *recordsPtr...) + *recordsPtr = records + result.resolveResults = make([]*resolver.ResolveResult, len(records)) + + if len(records) > 0 { + resolverCache := make(map[ast.ImportKind]map[string]*resolver.ResolveResult) + tracker := logger.MakeLineColumnTracker(&source) + + for importRecordIndex := range records { + // Don't try to resolve imports that are already resolved + record := &records[importRecordIndex] + if record.SourceIndex.IsValid() { + continue + } + + // Ignore records that the parser has discarded. This is used to remove + // type-only imports in TypeScript files. + if record.IsUnused { + continue + } + + // Cache the path in case it's imported multiple times in this file + cache, ok := resolverCache[record.Kind] + if !ok { + cache = make(map[string]*resolver.ResolveResult) + resolverCache[record.Kind] = cache + } + if resolveResult, ok := cache[record.Path.Text]; ok { + result.resolveResults[importRecordIndex] = resolveResult + continue + } + + // Run the resolver and log an error if the path couldn't be resolved + resolveResult, didLogError, debug := runOnResolvePlugins( + args.options.Plugins, + args.res, + args.log, + args.fs, + &args.caches.FSCache, + &source, + record.Range, + source.KeyPath.Namespace, + record.Path.Text, + record.Kind, + absResolveDir, + pluginData, + ) + cache[record.Path.Text] = resolveResult + + // All "require.resolve()" imports should be external because we don't + // want to waste effort traversing into them + if record.Kind == ast.ImportRequireResolve { + if !record.HandlesImportErrors && (resolveResult == nil || !resolveResult.IsExternal) { + args.log.AddRangeWarning(&tracker, record.Range, + fmt.Sprintf("%q should be marked as external for use with \"require.resolve\"", record.Path.Text)) + 
} + continue + } + + if resolveResult == nil { + // Failed imports inside a try/catch are silently turned into + // external imports instead of causing errors. This matches a common + // code pattern for conditionally importing a module with a graceful + // fallback. + if !didLogError && !record.HandlesImportErrors { + hint := "" + if resolver.IsPackagePath(record.Path.Text) { + if record.Kind == ast.ImportRequire { + hint = ", or surround it with try/catch to handle the failure at run-time" + } else if record.Kind == ast.ImportDynamic { + hint = ", or add \".catch()\" to handle the failure at run-time" + } + hint = fmt.Sprintf(" (mark it as external to exclude it from the bundle%s)", hint) + if pluginName == "" && !args.fs.IsAbs(record.Path.Text) { + if query := args.res.ProbeResolvePackageAsRelative(absResolveDir, record.Path.Text, record.Kind); query != nil { + hint = fmt.Sprintf(" (use %q to reference the file %q)", "./"+record.Path.Text, args.res.PrettyPath(query.PathPair.Primary)) + } + } + } + if args.options.Platform != config.PlatformNode { + if _, ok := resolver.BuiltInNodeModules[record.Path.Text]; ok { + switch logger.API { + case logger.CLIAPI: + hint = " (use \"--platform=node\" when building for node)" + case logger.JSAPI: + hint = " (use \"platform: 'node'\" when building for node)" + case logger.GoAPI: + hint = " (use \"Platform: api.PlatformNode\" when building for node)" + } + } + } + if absResolveDir == "" && pluginName != "" { + hint = fmt.Sprintf(" (the plugin %q didn't set a resolve directory)", pluginName) + } + debug.LogErrorMsg(args.log, &source, record.Range, fmt.Sprintf("Could not resolve %q%s", record.Path.Text, hint)) + } else if args.log.Level <= logger.LevelDebug && !didLogError && record.HandlesImportErrors { + args.log.AddRangeDebug(&tracker, record.Range, + fmt.Sprintf("Importing %q was allowed even though it could not be resolved because dynamic import failures appear to be handled here", + record.Path.Text)) + } + continue + } + 
+ result.resolveResults[importRecordIndex] = resolveResult + } + } + } + + // Attempt to parse the source map if present + if loader.CanHaveSourceMap() && args.options.SourceMap != config.SourceMapNone { + if repr, ok := result.file.inputFile.Repr.(*graph.JSRepr); ok && repr.AST.SourceMapComment.Text != "" { + if path, contents := extractSourceMapFromComment(args.log, args.fs, &args.caches.FSCache, + args.res, &source, repr.AST.SourceMapComment, absResolveDir); contents != nil { + result.file.inputFile.InputSourceMap = js_parser.ParseSourceMap(args.log, logger.Source{ + KeyPath: path, + PrettyPath: args.res.PrettyPath(path), + Contents: *contents, + }) + } + } + } + + args.results <- result +} + +func joinWithPublicPath(publicPath string, relPath string) string { + if strings.HasPrefix(relPath, "./") { + relPath = relPath[2:] + + // Strip any amount of further no-op slashes (i.e. ".///././/x/y" => "x/y") + for { + if strings.HasPrefix(relPath, "/") { + relPath = relPath[1:] + } else if strings.HasPrefix(relPath, "./") { + relPath = relPath[2:] + } else { + break + } + } + } + + // Use a relative path if there is no public path + if publicPath == "" { + publicPath = "." 
+ } + + // Join with a slash + slash := "/" + if strings.HasSuffix(publicPath, "/") { + slash = "" + } + return fmt.Sprintf("%s%s%s", publicPath, slash, relPath) +} + +func isASCIIOnly(text string) bool { + for _, c := range text { + if c < 0x20 || c > 0x7E { + return false + } + } + return true +} + +func guessMimeType(extension string, contents string) string { + mimeType := helpers.MimeTypeByExtension(extension) + if mimeType == "" { + mimeType = http.DetectContentType([]byte(contents)) + } + + // Turn "text/plain; charset=utf-8" into "text/plain;charset=utf-8" + return strings.ReplaceAll(mimeType, "; ", ";") +} + +func extractSourceMapFromComment( + log logger.Log, + fs fs.FS, + fsCache *cache.FSCache, + res resolver.Resolver, + source *logger.Source, + comment js_ast.Span, + absResolveDir string, +) (logger.Path, *string) { + tracker := logger.MakeLineColumnTracker(source) + + // Support data URLs + if parsed, ok := resolver.ParseDataURL(comment.Text); ok { + if contents, err := parsed.DecodeData(); err == nil { + return logger.Path{Text: source.PrettyPath, IgnoredSuffix: "#sourceMappingURL"}, &contents + } else { + log.AddRangeWarning(&tracker, comment.Range, fmt.Sprintf("Unsupported source map comment: %s", err.Error())) + return logger.Path{}, nil + } + } + + // Relative path in a file with an absolute path + if absResolveDir != "" { + absPath := fs.Join(absResolveDir, comment.Text) + path := logger.Path{Text: absPath, Namespace: "file"} + contents, err, originalError := fsCache.ReadFile(fs, absPath) + if log.Level <= logger.LevelDebug && originalError != nil { + log.AddRangeDebug(&tracker, comment.Range, fmt.Sprintf("Failed to read file %q: %s", res.PrettyPath(path), originalError.Error())) + } + if err != nil { + if err == syscall.ENOENT { + // Don't report a warning because this is likely unactionable + return logger.Path{}, nil + } + log.AddRangeWarning(&tracker, comment.Range, fmt.Sprintf("Cannot read file %q: %s", res.PrettyPath(path), err.Error())) + 
return logger.Path{}, nil + } + return path, &contents + } + + // Anything else is unsupported + return logger.Path{}, nil +} + +func sanetizeLocation(res resolver.Resolver, loc *logger.MsgLocation) { + if loc != nil { + if loc.Namespace == "" { + loc.Namespace = "file" + } + if loc.File != "" { + loc.File = res.PrettyPath(logger.Path{Text: loc.File, Namespace: loc.Namespace}) + } + } +} + +func logPluginMessages( + res resolver.Resolver, + log logger.Log, + name string, + msgs []logger.Msg, + thrown error, + importSource *logger.Source, + importPathRange logger.Range, +) bool { + didLogError := false + tracker := logger.MakeLineColumnTracker(importSource) + + // Report errors and warnings generated by the plugin + for _, msg := range msgs { + if msg.PluginName == "" { + msg.PluginName = name + } + if msg.Kind == logger.Error { + didLogError = true + } + + // Sanitize the locations + for _, note := range msg.Notes { + sanetizeLocation(res, note.Location) + } + if msg.Data.Location == nil { + msg.Data.Location = logger.LocationOrNil(&tracker, importPathRange) + } else { + sanetizeLocation(res, msg.Data.Location) + if msg.Data.Location.File == "" && importSource != nil { + msg.Data.Location.File = importSource.PrettyPath + } + if importSource != nil { + msg.Notes = append(msg.Notes, logger.RangeData(&tracker, importPathRange, + fmt.Sprintf("The plugin %q was triggered by this import", name))) + } + } + + log.AddMsg(msg) + } + + // Report errors thrown by the plugin itself + if thrown != nil { + didLogError = true + text := thrown.Error() + log.AddMsg(logger.Msg{ + PluginName: name, + Kind: logger.Error, + Data: logger.MsgData{ + Text: text, + Location: logger.LocationOrNil(&tracker, importPathRange), + UserDetail: thrown, + }, + }) + } + + return didLogError +} + +func runOnResolvePlugins( + plugins []config.Plugin, + res resolver.Resolver, + log logger.Log, + fs fs.FS, + fsCache *cache.FSCache, + importSource *logger.Source, + importPathRange logger.Range, + 
importNamespace string, + path string, + kind ast.ImportKind, + absResolveDir string, + pluginData interface{}, +) (*resolver.ResolveResult, bool, resolver.DebugMeta) { + resolverArgs := config.OnResolveArgs{ + Path: path, + ResolveDir: absResolveDir, + Kind: kind, + PluginData: pluginData, + } + applyPath := logger.Path{ + Text: path, + Namespace: importNamespace, + } + if importSource != nil { + resolverArgs.Importer = importSource.KeyPath + } else { + resolverArgs.Importer.Namespace = importNamespace + } + tracker := logger.MakeLineColumnTracker(importSource) + + // Apply resolver plugins in order until one succeeds + for _, plugin := range plugins { + for _, onResolve := range plugin.OnResolve { + if !config.PluginAppliesToPath(applyPath, onResolve.Filter, onResolve.Namespace) { + continue + } + + result := onResolve.Callback(resolverArgs) + pluginName := result.PluginName + if pluginName == "" { + pluginName = plugin.Name + } + didLogError := logPluginMessages(res, log, pluginName, result.Msgs, result.ThrownError, importSource, importPathRange) + + // Plugins can also provide additional file system paths to watch + for _, file := range result.AbsWatchFiles { + fsCache.ReadFile(fs, file) + } + for _, dir := range result.AbsWatchDirs { + fs.ReadDirectory(dir) + } + + // Stop now if there was an error + if didLogError { + return nil, true, resolver.DebugMeta{} + } + + // The "file" namespace is the default for non-external paths, but not + // for external paths. External paths must explicitly specify the "file" + // namespace. 
+ nsFromPlugin := result.Path.Namespace + if result.Path.Namespace == "" && !result.External { + result.Path.Namespace = "file" + } + + // Otherwise, continue on to the next resolver if this loader didn't succeed + if result.Path.Text == "" { + if result.External { + result.Path = logger.Path{Text: path} + } else { + continue + } + } + + // Paths in the file namespace must be absolute paths + if result.Path.Namespace == "file" && !fs.IsAbs(result.Path.Text) { + if nsFromPlugin == "file" { + log.AddRangeError(&tracker, importPathRange, + fmt.Sprintf("Plugin %q returned a path in the \"file\" namespace that is not an absolute path: %s", pluginName, result.Path.Text)) + } else { + log.AddRangeError(&tracker, importPathRange, + fmt.Sprintf("Plugin %q returned a non-absolute path: %s (set a namespace if this is not a file path)", pluginName, result.Path.Text)) + } + return nil, true, resolver.DebugMeta{} + } + + var sideEffectsData *resolver.SideEffectsData + if result.IsSideEffectFree { + sideEffectsData = &resolver.SideEffectsData{ + PluginName: pluginName, + } + } + + return &resolver.ResolveResult{ + PathPair: resolver.PathPair{Primary: result.Path}, + IsExternal: result.External, + PluginData: result.PluginData, + PrimarySideEffectsData: sideEffectsData, + }, false, resolver.DebugMeta{} + } + } + + // Resolve relative to the resolve directory by default. All paths in the + // "file" namespace automatically have a resolve directory. Loader plugins + // can also configure a custom resolve directory for files in other namespaces. 
+ result, debug := res.Resolve(absResolveDir, path, kind) + + // Warn when the case used for importing differs from the actual file name + if result != nil && result.DifferentCase != nil && !helpers.IsInsideNodeModules(absResolveDir) { + diffCase := *result.DifferentCase + log.AddRangeWarning(&tracker, importPathRange, fmt.Sprintf( + "Use %q instead of %q to avoid issues with case-sensitive file systems", + res.PrettyPath(logger.Path{Text: fs.Join(diffCase.Dir, diffCase.Actual), Namespace: "file"}), + res.PrettyPath(logger.Path{Text: fs.Join(diffCase.Dir, diffCase.Query), Namespace: "file"}), + )) + } + + return result, false, debug +} + +type loaderPluginResult struct { + loader config.Loader + absResolveDir string + pluginName string + pluginData interface{} +} + +func runOnLoadPlugins( + plugins []config.Plugin, + res resolver.Resolver, + fs fs.FS, + fsCache *cache.FSCache, + log logger.Log, + source *logger.Source, + importSource *logger.Source, + importPathRange logger.Range, + pluginData interface{}, + isWatchMode bool, +) (loaderPluginResult, bool) { + loaderArgs := config.OnLoadArgs{ + Path: source.KeyPath, + PluginData: pluginData, + } + tracker := logger.MakeLineColumnTracker(importSource) + + // Apply loader plugins in order until one succeeds + for _, plugin := range plugins { + for _, onLoad := range plugin.OnLoad { + if !config.PluginAppliesToPath(source.KeyPath, onLoad.Filter, onLoad.Namespace) { + continue + } + + result := onLoad.Callback(loaderArgs) + pluginName := result.PluginName + if pluginName == "" { + pluginName = plugin.Name + } + didLogError := logPluginMessages(res, log, pluginName, result.Msgs, result.ThrownError, importSource, importPathRange) + + // Plugins can also provide additional file system paths to watch + for _, file := range result.AbsWatchFiles { + fsCache.ReadFile(fs, file) + } + for _, dir := range result.AbsWatchDirs { + fs.ReadDirectory(dir) + } + + // Stop now if there was an error + if didLogError { + if isWatchMode && 
source.KeyPath.Namespace == "file" { + fsCache.ReadFile(fs, source.KeyPath.Text) // Read the file for watch mode tracking + } + return loaderPluginResult{}, false + } + + // Otherwise, continue on to the next loader if this loader didn't succeed + if result.Contents == nil { + continue + } + + source.Contents = *result.Contents + loader := result.Loader + if loader == config.LoaderNone { + loader = config.LoaderJS + } + if result.AbsResolveDir == "" && source.KeyPath.Namespace == "file" { + result.AbsResolveDir = fs.Dir(source.KeyPath.Text) + } + if isWatchMode && source.KeyPath.Namespace == "file" { + fsCache.ReadFile(fs, source.KeyPath.Text) // Read the file for watch mode tracking + } + return loaderPluginResult{ + loader: loader, + absResolveDir: result.AbsResolveDir, + pluginName: pluginName, + pluginData: result.PluginData, + }, true + } + } + + // Force disabled modules to be empty + if source.KeyPath.IsDisabled() { + return loaderPluginResult{loader: config.LoaderJS}, true + } + + // Read normal modules from disk + if source.KeyPath.Namespace == "file" { + if contents, err, originalError := fsCache.ReadFile(fs, source.KeyPath.Text); err == nil { + source.Contents = contents + return loaderPluginResult{ + loader: config.LoaderDefault, + absResolveDir: fs.Dir(source.KeyPath.Text), + }, true + } else { + if log.Level <= logger.LevelDebug && originalError != nil { + log.AddDebug(nil, logger.Loc{}, fmt.Sprintf("Failed to read file %q: %s", source.KeyPath.Text, originalError.Error())) + } + if err == syscall.ENOENT { + log.AddRangeError(&tracker, importPathRange, + fmt.Sprintf("Could not read from file: %s", source.KeyPath.Text)) + return loaderPluginResult{}, false + } else { + log.AddRangeError(&tracker, importPathRange, + fmt.Sprintf("Cannot read file %q: %s", res.PrettyPath(source.KeyPath), err.Error())) + return loaderPluginResult{}, false + } + } + } + + // Native support for data URLs. 
This is supported natively by node: + // https://nodejs.org/docs/latest/api/esm.html#esm_data_imports + if source.KeyPath.Namespace == "dataurl" { + if parsed, ok := resolver.ParseDataURL(source.KeyPath.Text); ok { + if mimeType := parsed.DecodeMIMEType(); mimeType != resolver.MIMETypeUnsupported { + if contents, err := parsed.DecodeData(); err != nil { + log.AddRangeError(&tracker, importPathRange, + fmt.Sprintf("Could not load data URL: %s", err.Error())) + return loaderPluginResult{loader: config.LoaderNone}, true + } else { + source.Contents = contents + switch mimeType { + case resolver.MIMETypeTextCSS: + return loaderPluginResult{loader: config.LoaderCSS}, true + case resolver.MIMETypeTextJavaScript: + return loaderPluginResult{loader: config.LoaderJS}, true + case resolver.MIMETypeApplicationJSON: + return loaderPluginResult{loader: config.LoaderJSON}, true + } + } + } + } + } + + // Otherwise, fail to load the path + return loaderPluginResult{loader: config.LoaderNone}, true +} + +func loaderFromFileExtension(extensionToLoader map[string]config.Loader, base string) config.Loader { + // Pick the loader with the longest matching extension. So if there's an + // extension for ".css" and for ".module.css", we want to match the one for + // ".module.css" before the one for ".css". + for { + i := strings.IndexByte(base, '.') + if i == -1 { + break + } + if loader, ok := extensionToLoader[base[i:]]; ok { + return loader + } + base = base[i+1:] + } + return config.LoaderNone +} + +// Identify the path by its lowercase absolute path name with Windows-specific +// slashes substituted for standard slashes. This should hopefully avoid path +// issues on Windows where multiple different paths can refer to the same +// underlying file. 
+func canonicalFileSystemPathForWindows(absPath string) string { + return strings.ReplaceAll(strings.ToLower(absPath), "\\", "/") +} + +func hashForFileName(hashBytes []byte) string { + return base32.StdEncoding.EncodeToString(hashBytes)[:8] +} + +type scanner struct { + log logger.Log + fs fs.FS + res resolver.Resolver + caches *cache.CacheSet + options config.Options + timer *helpers.Timer + uniqueKeyPrefix string + + // This is not guarded by a mutex because it's only ever modified by a single + // thread. Note that not all results in the "results" array are necessarily + // valid. Make sure to check the "ok" flag before using them. + results []parseResult + visited map[logger.Path]uint32 + resultChannel chan parseResult + remaining int +} + +type EntryPoint struct { + InputPath string + OutputPath string + IsFile bool +} + +func generateUniqueKeyPrefix() (string, error) { + var data [12]byte + rand.Seed(time.Now().UnixNano()) + if _, err := rand.Read(data[:]); err != nil { + return "", err + } + + // This is 16 bytes and shouldn't generate escape characters when put into strings + return base64.URLEncoding.EncodeToString(data[:]), nil +} + +func ScanBundle( + log logger.Log, + fs fs.FS, + res resolver.Resolver, + caches *cache.CacheSet, + entryPoints []EntryPoint, + options config.Options, + timer *helpers.Timer, +) Bundle { + timer.Begin("Scan phase") + defer timer.End("Scan phase") + + applyOptionDefaults(&options) + + // Run "onStart" plugins in parallel + onStartWaitGroup := sync.WaitGroup{} + for _, plugin := range options.Plugins { + for _, onStart := range plugin.OnStart { + onStartWaitGroup.Add(1) + go func(plugin config.Plugin, onStart config.OnStart) { + result := onStart.Callback() + logPluginMessages(res, log, plugin.Name, result.Msgs, result.ThrownError, nil, logger.Range{}) + onStartWaitGroup.Done() + }(plugin, onStart) + } + } + + // Each bundling operation gets a separate unique key + uniqueKeyPrefix, err := generateUniqueKeyPrefix() + if err != 
nil { + log.AddError(nil, logger.Loc{}, fmt.Sprintf("Failed to read from randomness source: %s", err.Error())) + } + + s := scanner{ + log: log, + fs: fs, + res: res, + caches: caches, + options: options, + timer: timer, + results: make([]parseResult, 0, caches.SourceIndexCache.LenHint()), + visited: make(map[logger.Path]uint32), + resultChannel: make(chan parseResult), + uniqueKeyPrefix: uniqueKeyPrefix, + } + + // Always start by parsing the runtime file + s.results = append(s.results, parseResult{}) + s.remaining++ + go func() { + source, ast, ok := globalRuntimeCache.parseRuntime(&options) + s.resultChannel <- parseResult{ + file: scannerFile{ + inputFile: graph.InputFile{ + Source: source, + Repr: &graph.JSRepr{AST: ast}, + }, + }, + ok: ok, + } + }() + + s.preprocessInjectedFiles() + entryPointMeta := s.addEntryPoints(entryPoints) + s.scanAllDependencies() + files := s.processScannedFiles() + + onStartWaitGroup.Wait() + return Bundle{ + fs: fs, + res: res, + files: files, + entryPoints: entryPointMeta, + uniqueKeyPrefix: uniqueKeyPrefix, + } +} + +type inputKind uint8 + +const ( + inputKindNormal inputKind = iota + inputKindEntryPoint + inputKindStdin +) + +// This returns the source index of the resulting file +func (s *scanner) maybeParseFile( + resolveResult resolver.ResolveResult, + prettyPath string, + importSource *logger.Source, + importPathRange logger.Range, + pluginData interface{}, + kind inputKind, + inject chan config.InjectedFile, +) uint32 { + path := resolveResult.PathPair.Primary + visitedKey := path + if visitedKey.Namespace == "file" { + visitedKey.Text = canonicalFileSystemPathForWindows(visitedKey.Text) + } + + // Only parse a given file path once + sourceIndex, ok := s.visited[visitedKey] + if ok { + return sourceIndex + } + + sourceIndex = s.allocateSourceIndex(visitedKey, cache.SourceIndexNormal) + s.visited[visitedKey] = sourceIndex + s.remaining++ + optionsClone := s.options + if kind != inputKindStdin { + optionsClone.Stdin = nil + 
} + + // Allow certain properties to be overridden + if len(resolveResult.JSXFactory) > 0 { + optionsClone.JSX.Factory = config.JSXExpr{Parts: resolveResult.JSXFactory} + } + if len(resolveResult.JSXFragment) > 0 { + optionsClone.JSX.Fragment = config.JSXExpr{Parts: resolveResult.JSXFragment} + } + if resolveResult.UseDefineForClassFieldsTS != config.Unspecified { + optionsClone.UseDefineForClassFields = resolveResult.UseDefineForClassFieldsTS + } + if resolveResult.PreserveUnusedImportsTS { + optionsClone.PreserveUnusedImportsTS = true + } + optionsClone.TSTarget = resolveResult.TSTarget + + // Set the module type preference using node's module type rules + if strings.HasSuffix(path.Text, ".mjs") { + optionsClone.ModuleType = config.ModuleESM + } else if strings.HasSuffix(path.Text, ".cjs") { + optionsClone.ModuleType = config.ModuleCommonJS + } else { + optionsClone.ModuleType = resolveResult.ModuleType + } + + // Enable bundling for injected files so we always do tree shaking. We + // never want to include unnecessary code from injected files since they + // are essentially bundled. However, if we do this we should skip the + // resolving step when we're not bundling. It'd be strange to get + // resolution errors when the top-level bundling controls are disabled. + skipResolve := false + if inject != nil && optionsClone.Mode != config.ModeBundle { + optionsClone.Mode = config.ModeBundle + skipResolve = true + } + + // Special-case pretty-printed paths for data URLs + if path.Namespace == "dataurl" { + if _, ok := resolver.ParseDataURL(path.Text); ok { + prettyPath = path.Text + if len(prettyPath) > 64 { + prettyPath = prettyPath[:64] + "..." 
+ } + prettyPath = fmt.Sprintf("<%s>", prettyPath) + } + } + + var sideEffects graph.SideEffects + if resolveResult.PrimarySideEffectsData != nil { + sideEffects.Kind = graph.NoSideEffects_PackageJSON + sideEffects.Data = resolveResult.PrimarySideEffectsData + } + + go parseFile(parseArgs{ + fs: s.fs, + log: s.log, + res: s.res, + caches: s.caches, + keyPath: path, + prettyPath: prettyPath, + sourceIndex: sourceIndex, + importSource: importSource, + sideEffects: sideEffects, + importPathRange: importPathRange, + pluginData: pluginData, + options: optionsClone, + results: s.resultChannel, + inject: inject, + skipResolve: skipResolve, + uniqueKeyPrefix: s.uniqueKeyPrefix, + }) + + return sourceIndex +} + +func (s *scanner) allocateSourceIndex(path logger.Path, kind cache.SourceIndexKind) uint32 { + // Allocate a source index using the shared source index cache so that + // subsequent builds reuse the same source index and therefore use the + // cached parse results for increased speed. + sourceIndex := s.caches.SourceIndexCache.Get(path, kind) + + // Grow the results array to fit this source index + if newLen := int(sourceIndex) + 1; len(s.results) < newLen { + // Reallocate to a bigger array + if cap(s.results) < newLen { + s.results = append(make([]parseResult, 0, 2*newLen), s.results...) + } + + // Grow in place + s.results = s.results[:newLen] + } + + return sourceIndex +} + +func (s *scanner) preprocessInjectedFiles() { + s.timer.Begin("Preprocess injected files") + defer s.timer.End("Preprocess injected files") + + injectedFiles := make([]config.InjectedFile, 0, len(s.options.InjectedDefines)+len(s.options.InjectAbsPaths)) + duplicateInjectedFiles := make(map[string]bool) + injectWaitGroup := sync.WaitGroup{} + + // These are virtual paths that are generated for compound "--define" values. + // They are special-cased and are not available for plugins to intercept. 
+ for _, define := range s.options.InjectedDefines { + // These should be unique by construction so no need to check for collisions + visitedKey := logger.Path{Text: fmt.Sprintf("", define.Name)} + sourceIndex := s.allocateSourceIndex(visitedKey, cache.SourceIndexNormal) + s.visited[visitedKey] = sourceIndex + source := logger.Source{ + Index: sourceIndex, + KeyPath: visitedKey, + PrettyPath: s.res.PrettyPath(visitedKey), + IdentifierName: js_ast.EnsureValidIdentifier(visitedKey.Text), + } + + // The first "len(InjectedDefine)" injected files intentionally line up + // with the injected defines by index. The index will be used to import + // references to them in the parser. + injectedFiles = append(injectedFiles, config.InjectedFile{ + Source: source, + DefineName: define.Name, + }) + + // Generate the file inline here since it has already been parsed + expr := js_ast.Expr{Data: define.Data} + ast := js_parser.LazyExportAST(s.log, source, js_parser.OptionsFromConfig(&s.options), expr, "") + result := parseResult{ + ok: true, + file: scannerFile{ + inputFile: graph.InputFile{ + Source: source, + Repr: &graph.JSRepr{AST: ast}, + Loader: config.LoaderJSON, + SideEffects: graph.SideEffects{ + Kind: graph.NoSideEffects_PureData, + }, + }, + }, + } + + // Append to the channel on a goroutine in case it blocks due to capacity + s.remaining++ + go func() { s.resultChannel <- result }() + } + + results := make([]config.InjectedFile, len(s.options.InjectAbsPaths)) + j := 0 + for _, absPath := range s.options.InjectAbsPaths { + prettyPath := s.res.PrettyPath(logger.Path{Text: absPath, Namespace: "file"}) + absPathKey := canonicalFileSystemPathForWindows(absPath) + + if duplicateInjectedFiles[absPathKey] { + s.log.AddError(nil, logger.Loc{}, fmt.Sprintf("Duplicate injected file %q", prettyPath)) + continue + } + + duplicateInjectedFiles[absPathKey] = true + resolveResult := s.res.ResolveAbs(absPath) + + if resolveResult == nil { + s.log.AddError(nil, logger.Loc{}, 
fmt.Sprintf("Could not resolve %q", prettyPath)) + continue + } + + channel := make(chan config.InjectedFile) + s.maybeParseFile(*resolveResult, prettyPath, nil, logger.Range{}, nil, inputKindNormal, channel) + + // Wait for the results in parallel. The results slice is large enough so + // it is not reallocated during the computations. + injectWaitGroup.Add(1) + go func(i int) { + results[i] = <-channel + injectWaitGroup.Done() + }(j) + j++ + } + + injectWaitGroup.Wait() + injectedFiles = append(injectedFiles, results[:j]...) + + s.options.InjectedFiles = injectedFiles +} + +func (s *scanner) addEntryPoints(entryPoints []EntryPoint) []graph.EntryPoint { + s.timer.Begin("Add entry points") + defer s.timer.End("Add entry points") + + // Reserve a slot for each entry point + entryMetas := make([]graph.EntryPoint, 0, len(entryPoints)+1) + + // Treat stdin as an extra entry point + if stdin := s.options.Stdin; stdin != nil { + stdinPath := logger.Path{Text: ""} + if stdin.SourceFile != "" { + if stdin.AbsResolveDir == "" { + stdinPath = logger.Path{Text: stdin.SourceFile} + } else if s.fs.IsAbs(stdin.SourceFile) { + stdinPath = logger.Path{Text: stdin.SourceFile, Namespace: "file"} + } else { + stdinPath = logger.Path{Text: s.fs.Join(stdin.AbsResolveDir, stdin.SourceFile), Namespace: "file"} + } + } + resolveResult := resolver.ResolveResult{PathPair: resolver.PathPair{Primary: stdinPath}} + sourceIndex := s.maybeParseFile(resolveResult, s.res.PrettyPath(stdinPath), nil, logger.Range{}, nil, inputKindStdin, nil) + entryMetas = append(entryMetas, graph.EntryPoint{ + OutputPath: "stdin", + SourceIndex: sourceIndex, + }) + } + + // Check each entry point ahead of time to see if it's a real file + entryPointAbsResolveDir := s.fs.Cwd() + for i := range entryPoints { + entryPoint := &entryPoints[i] + absPath := entryPoint.InputPath + if !s.fs.IsAbs(absPath) { + absPath = s.fs.Join(entryPointAbsResolveDir, absPath) + } + dir := s.fs.Dir(absPath) + base := s.fs.Base(absPath) + 
if entries, err, originalError := s.fs.ReadDirectory(dir); err == nil { + if entry, _ := entries.Get(base); entry != nil && entry.Kind(s.fs) == fs.FileEntry { + entryPoint.IsFile = true + + // Entry point paths without a leading "./" are interpreted as package + // paths. This happens because they go through general path resolution + // like all other import paths so that plugins can run on them. Requiring + // a leading "./" for a relative path simplifies writing plugins because + // entry points aren't a special case. + // + // However, requiring a leading "./" also breaks backward compatibility + // and makes working with the CLI more difficult. So attempt to insert + // "./" automatically when needed. We don't want to unconditionally insert + // a leading "./" because the path may not be a file system path. For + // example, it may be a URL. So only insert a leading "./" when the path + // is an exact match for an existing file. + if !s.fs.IsAbs(entryPoint.InputPath) && resolver.IsPackagePath(entryPoint.InputPath) { + entryPoint.InputPath = "./" + entryPoint.InputPath + } + } + } else if s.log.Level <= logger.LevelDebug && originalError != nil { + s.log.AddDebug(nil, logger.Loc{}, fmt.Sprintf("Failed to read directory %q: %s", absPath, originalError.Error())) + } + } + + // Add any remaining entry points. Run resolver plugins on these entry points + // so plugins can alter where they resolve to. These are run in parallel in + // case any of these plugins block. 
+ entryPointResolveResults := make([]*resolver.ResolveResult, len(entryPoints)) + entryPointWaitGroup := sync.WaitGroup{} + entryPointWaitGroup.Add(len(entryPoints)) + for i, entryPoint := range entryPoints { + go func(i int, entryPoint EntryPoint) { + namespace := "" + if entryPoint.IsFile { + namespace = "file" + } + + // Run the resolver and log an error if the path couldn't be resolved + resolveResult, didLogError, debug := runOnResolvePlugins( + s.options.Plugins, + s.res, + s.log, + s.fs, + &s.caches.FSCache, + nil, + logger.Range{}, + namespace, + entryPoint.InputPath, + ast.ImportEntryPoint, + entryPointAbsResolveDir, + nil, + ) + if resolveResult != nil { + if resolveResult.IsExternal { + s.log.AddError(nil, logger.Loc{}, fmt.Sprintf("The entry point %q cannot be marked as external", entryPoint.InputPath)) + } else { + entryPointResolveResults[i] = resolveResult + } + } else if !didLogError { + hint := "" + if !s.fs.IsAbs(entryPoint.InputPath) { + if strings.ContainsRune(entryPoint.InputPath, '*') { + hint = " (glob syntax must be expanded first before passing the paths to esbuild)" + } else if query := s.res.ProbeResolvePackageAsRelative(entryPointAbsResolveDir, entryPoint.InputPath, ast.ImportEntryPoint); query != nil { + hint = fmt.Sprintf(" (use %q to reference the file %q)", "./"+entryPoint.InputPath, s.res.PrettyPath(query.PathPair.Primary)) + } + } + debug.LogErrorMsg(s.log, nil, logger.Range{}, fmt.Sprintf("Could not resolve %q%s", entryPoint.InputPath, hint)) + } + entryPointWaitGroup.Done() + }(i, entryPoint) + } + entryPointWaitGroup.Wait() + + // Parse all entry points that were resolved successfully + for i, resolveResult := range entryPointResolveResults { + if resolveResult != nil { + prettyPath := s.res.PrettyPath(resolveResult.PathPair.Primary) + sourceIndex := s.maybeParseFile(*resolveResult, prettyPath, nil, logger.Range{}, resolveResult.PluginData, inputKindEntryPoint, nil) + outputPath := entryPoints[i].OutputPath + 
outputPathWasAutoGenerated := false + + // If the output path is missing, automatically generate one from the input path + if outputPath == "" { + outputPath = entryPoints[i].InputPath + windowsVolumeLabel := "" + + // The ":" character is invalid in file paths on Windows except when + // it's used as a volume separator. Special-case that here so volume + // labels don't break on Windows. + if s.fs.IsAbs(outputPath) && len(outputPath) >= 3 && outputPath[1] == ':' { + if c := outputPath[0]; (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') { + if c := outputPath[2]; c == '/' || c == '\\' { + windowsVolumeLabel = outputPath[:3] + outputPath = outputPath[3:] + } + } + } + + // For cross-platform robustness, do not allow characters in the output + // path that are invalid on Windows. This is especially relevant when + // the input path is something other than a file path, such as a URL. + outputPath = sanitizeFilePathForVirtualModulePath(outputPath) + if windowsVolumeLabel != "" { + outputPath = windowsVolumeLabel + outputPath + } + outputPathWasAutoGenerated = true + + // Strip the file extension from the output path if there is one so the + // "out extension" setting is used instead + if last := strings.LastIndexAny(outputPath, "/.\\"); last != -1 && outputPath[last] == '.' 
{ + outputPath = outputPath[:last] + } + } + + entryMetas = append(entryMetas, graph.EntryPoint{ + OutputPath: outputPath, + SourceIndex: sourceIndex, + OutputPathWasAutoGenerated: outputPathWasAutoGenerated, + }) + } + } + + // Turn all automatically-generated output paths into absolute paths + for i := range entryMetas { + entryPoint := &entryMetas[i] + if entryPoint.OutputPathWasAutoGenerated && !s.fs.IsAbs(entryPoint.OutputPath) { + entryPoint.OutputPath = s.fs.Join(entryPointAbsResolveDir, entryPoint.OutputPath) + } + } + + // Automatically compute "outbase" if it wasn't provided + if s.options.AbsOutputBase == "" { + s.options.AbsOutputBase = lowestCommonAncestorDirectory(s.fs, entryMetas) + if s.options.AbsOutputBase == "" { + s.options.AbsOutputBase = entryPointAbsResolveDir + } + } + + // Turn all output paths back into relative paths, but this time relative to + // the "outbase" value we computed above + for i := range entryMetas { + entryPoint := &entryMetas[i] + if s.fs.IsAbs(entryPoint.OutputPath) { + if !entryPoint.OutputPathWasAutoGenerated { + // If an explicit absolute output path was specified, use the path + // relative to the "outdir" directory + if relPath, ok := s.fs.Rel(s.options.AbsOutputDir, entryPoint.OutputPath); ok { + entryPoint.OutputPath = relPath + } + } else { + // Otherwise if the absolute output path was derived from the input + // path, use the path relative to the "outbase" directory + if relPath, ok := s.fs.Rel(s.options.AbsOutputBase, entryPoint.OutputPath); ok { + entryPoint.OutputPath = relPath + } + } + } + } + + return entryMetas +} + +func lowestCommonAncestorDirectory(fs fs.FS, entryPoints []graph.EntryPoint) string { + // Ignore any explicitly-specified output paths + absPaths := make([]string, 0, len(entryPoints)) + for _, entryPoint := range entryPoints { + if entryPoint.OutputPathWasAutoGenerated { + absPaths = append(absPaths, entryPoint.OutputPath) + } + } + + if len(absPaths) == 0 { + return "" + } + + 
lowestAbsDir := fs.Dir(absPaths[0]) + + for _, absPath := range absPaths[1:] { + absDir := fs.Dir(absPath) + lastSlash := 0 + a := 0 + b := 0 + + for { + runeA, widthA := utf8.DecodeRuneInString(absDir[a:]) + runeB, widthB := utf8.DecodeRuneInString(lowestAbsDir[b:]) + boundaryA := widthA == 0 || runeA == '/' || runeA == '\\' + boundaryB := widthB == 0 || runeB == '/' || runeB == '\\' + + if boundaryA && boundaryB { + if widthA == 0 || widthB == 0 { + // Truncate to the smaller path if one path is a prefix of the other + lowestAbsDir = absDir[:a] + break + } else { + // Track the longest common directory so far + lastSlash = a + } + } else if boundaryA != boundaryB || unicode.ToLower(runeA) != unicode.ToLower(runeB) { + // If both paths are different at this point, stop and set the lowest so + // far to the common parent directory. Compare using a case-insensitive + // comparison to handle paths on Windows. + lowestAbsDir = absDir[:lastSlash] + break + } + + a += widthA + b += widthB + } + } + + return lowestAbsDir +} + +func (s *scanner) scanAllDependencies() { + s.timer.Begin("Scan all dependencies") + defer s.timer.End("Scan all dependencies") + + // Continue scanning until all dependencies have been discovered + for s.remaining > 0 { + result := <-s.resultChannel + s.remaining-- + if !result.ok { + continue + } + + // Don't try to resolve paths if we're not bundling + if s.options.Mode == config.ModeBundle { + records := *result.file.inputFile.Repr.ImportRecords() + for importRecordIndex := range records { + record := &records[importRecordIndex] + + // Skip this import record if the previous resolver call failed + resolveResult := result.resolveResults[importRecordIndex] + if resolveResult == nil { + continue + } + + path := resolveResult.PathPair.Primary + if !resolveResult.IsExternal { + // Handle a path within the bundle + sourceIndex := s.maybeParseFile(*resolveResult, s.res.PrettyPath(path), + &result.file.inputFile.Source, record.Range, 
resolveResult.PluginData, inputKindNormal, nil) + record.SourceIndex = ast.MakeIndex32(sourceIndex) + } else { + // If the path to the external module is relative to the source + // file, rewrite the path to be relative to the working directory + if path.Namespace == "file" { + if relPath, ok := s.fs.Rel(s.options.AbsOutputDir, path.Text); ok { + // Prevent issues with path separators being different on Windows + relPath = strings.ReplaceAll(relPath, "\\", "/") + if resolver.IsPackagePath(relPath) { + relPath = "./" + relPath + } + record.Path.Text = relPath + } else { + record.Path = path + } + } else { + record.Path = path + } + } + } + } + + s.results[result.file.inputFile.Source.Index] = result + } +} + +func (s *scanner) processScannedFiles() []scannerFile { + s.timer.Begin("Process scanned files") + defer s.timer.End("Process scanned files") + + // Now that all files have been scanned, process the final file import records + for i, result := range s.results { + if !result.ok { + continue + } + + sb := strings.Builder{} + isFirstImport := true + + // Begin the metadata chunk + if s.options.NeedsMetafile { + sb.Write(js_printer.QuoteForJSON(result.file.inputFile.Source.PrettyPath, s.options.ASCIIOnly)) + sb.WriteString(fmt.Sprintf(": {\n \"bytes\": %d,\n \"imports\": [", len(result.file.inputFile.Source.Contents))) + } + + // Don't try to resolve paths if we're not bundling + if s.options.Mode == config.ModeBundle { + records := *result.file.inputFile.Repr.ImportRecords() + tracker := logger.MakeLineColumnTracker(&result.file.inputFile.Source) + + for importRecordIndex := range records { + record := &records[importRecordIndex] + + // Skip this import record if the previous resolver call failed + resolveResult := result.resolveResults[importRecordIndex] + if resolveResult == nil || !record.SourceIndex.IsValid() { + continue + } + + // Now that all files have been scanned, look for packages that are imported + // both with "import" and "require". 
Rewrite any imports that reference the + // "module" package.json field to the "main" package.json field instead. + // + // This attempts to automatically avoid the "dual package hazard" where a + // package has both a CommonJS module version and an ECMAScript module + // version and exports a non-object in CommonJS (often a function). If we + // pick the "module" field and the package is imported with "require" then + // code expecting a function will crash. + if resolveResult.PathPair.HasSecondary() { + secondaryKey := resolveResult.PathPair.Secondary + if secondaryKey.Namespace == "file" { + secondaryKey.Text = canonicalFileSystemPathForWindows(secondaryKey.Text) + } + if secondarySourceIndex, ok := s.visited[secondaryKey]; ok { + record.SourceIndex = ast.MakeIndex32(secondarySourceIndex) + } + } + + // Generate metadata about each import + if s.options.NeedsMetafile { + if isFirstImport { + isFirstImport = false + sb.WriteString("\n ") + } else { + sb.WriteString(",\n ") + } + sb.WriteString(fmt.Sprintf("{\n \"path\": %s,\n \"kind\": %s\n }", + js_printer.QuoteForJSON(s.results[record.SourceIndex.GetIndex()].file.inputFile.Source.PrettyPath, s.options.ASCIIOnly), + js_printer.QuoteForJSON(record.Kind.StringForMetafile(), s.options.ASCIIOnly))) + } + + switch record.Kind { + case ast.ImportAt, ast.ImportAtConditional: + // Using a JavaScript file with CSS "@import" is not allowed + otherFile := &s.results[record.SourceIndex.GetIndex()].file + if _, ok := otherFile.inputFile.Repr.(*graph.JSRepr); ok { + s.log.AddRangeError(&tracker, record.Range, + fmt.Sprintf("Cannot import %q into a CSS file", otherFile.inputFile.Source.PrettyPath)) + } else if record.Kind == ast.ImportAtConditional { + s.log.AddRangeError(&tracker, record.Range, + "Bundling with conditional \"@import\" rules is not currently supported") + } + + case ast.ImportURL: + // Using a JavaScript or CSS file with CSS "url()" is not allowed + otherFile := &s.results[record.SourceIndex.GetIndex()].file + 
switch otherRepr := otherFile.inputFile.Repr.(type) { + case *graph.CSSRepr: + s.log.AddRangeError(&tracker, record.Range, + fmt.Sprintf("Cannot use %q as a URL", otherFile.inputFile.Source.PrettyPath)) + + case *graph.JSRepr: + if otherRepr.AST.URLForCSS == "" { + s.log.AddRangeError(&tracker, record.Range, + fmt.Sprintf("Cannot use %q as a URL", otherFile.inputFile.Source.PrettyPath)) + } + } + } + + // If an import from a JavaScript file targets a CSS file, generate a + // JavaScript stub to ensure that JavaScript files only ever import + // other JavaScript files. + if _, ok := result.file.inputFile.Repr.(*graph.JSRepr); ok { + otherFile := &s.results[record.SourceIndex.GetIndex()].file + if css, ok := otherFile.inputFile.Repr.(*graph.CSSRepr); ok { + if s.options.WriteToStdout { + s.log.AddRangeError(&tracker, record.Range, + fmt.Sprintf("Cannot import %q into a JavaScript file without an output path configured", otherFile.inputFile.Source.PrettyPath)) + } else if !css.JSSourceIndex.IsValid() { + stubKey := otherFile.inputFile.Source.KeyPath + if stubKey.Namespace == "file" { + stubKey.Text = canonicalFileSystemPathForWindows(stubKey.Text) + } + sourceIndex := s.allocateSourceIndex(stubKey, cache.SourceIndexJSStubForCSS) + source := logger.Source{ + Index: sourceIndex, + PrettyPath: otherFile.inputFile.Source.PrettyPath, + } + s.results[sourceIndex] = parseResult{ + file: scannerFile{ + inputFile: graph.InputFile{ + Source: source, + Repr: &graph.JSRepr{ + AST: js_parser.LazyExportAST(s.log, source, + js_parser.OptionsFromConfig(&s.options), js_ast.Expr{Data: &js_ast.EObject{}}, ""), + CSSSourceIndex: ast.MakeIndex32(record.SourceIndex.GetIndex()), + }, + }, + }, + ok: true, + } + css.JSSourceIndex = ast.MakeIndex32(sourceIndex) + } + record.SourceIndex = css.JSSourceIndex + if !css.JSSourceIndex.IsValid() { + continue + } + } + } + + // Warn about this import if it's a bare import statement without any + // imported names (i.e. 
a side-effect-only import) and the module has + // been marked as having no side effects. + // + // Except don't do this if this file is inside "node_modules" since + // it's a bug in the package and the user won't be able to do anything + // about it. Note that this can result in esbuild silently generating + // broken code. If this actually happens for people, it's probably worth + // re-enabling the warning about code inside "node_modules". + if record.WasOriginallyBareImport && !s.options.IgnoreDCEAnnotations && + !helpers.IsInsideNodeModules(result.file.inputFile.Source.KeyPath.Text) { + if otherModule := &s.results[record.SourceIndex.GetIndex()].file.inputFile; otherModule.SideEffects.Kind != graph.HasSideEffects && + // Do not warn if this is from a plugin, since removing the import + // would cause the plugin to not run, and running a plugin is a side + // effect. + otherModule.SideEffects.Kind != graph.NoSideEffects_PureData_FromPlugin { + var notes []logger.MsgData + var by string + if data := otherModule.SideEffects.Data; data != nil { + if data.PluginName != "" { + by = fmt.Sprintf(" by plugin %q", data.PluginName) + } else { + var text string + if data.IsSideEffectsArrayInJSON { + text = "It was excluded from the \"sideEffects\" array in the enclosing \"package.json\" file" + } else { + text = "\"sideEffects\" is false in the enclosing \"package.json\" file" + } + tracker := logger.MakeLineColumnTracker(data.Source) + notes = append(notes, logger.RangeData(&tracker, data.Range, text)) + } + } + s.log.AddRangeWarningWithNotes(&tracker, record.Range, + fmt.Sprintf("Ignoring this import because %q was marked as having no side effects%s", + otherModule.Source.PrettyPath, by), notes) + } + } + } + } + + // End the metadata chunk + if s.options.NeedsMetafile { + if !isFirstImport { + sb.WriteString("\n ") + } + sb.WriteString("]\n }") + } + + result.file.jsonMetadataChunk = sb.String() + + // If this file is from the "file" loader, generate an additional 
file + if result.file.inputFile.UniqueKeyForFileLoader != "" { + bytes := []byte(result.file.inputFile.Source.Contents) + + // Add a hash to the file name to prevent multiple files with the same name + // but different contents from colliding + var hash string + if config.HasPlaceholder(s.options.AssetPathTemplate, config.HashPlaceholder) { + h := xxhash.New() + h.Write(bytes) + hash = hashForFileName(h.Sum(nil)) + } + + // Generate the input for the template + _, _, originalExt := logger.PlatformIndependentPathDirBaseExt(result.file.inputFile.Source.KeyPath.Text) + dir, base, ext := pathRelativeToOutbase( + &result.file.inputFile, + &s.options, + s.fs, + originalExt, + /* avoidIndex */ false, + /* customFilePath */ "", + ) + + // Apply the asset path template + relPath := config.TemplateToString(config.SubstituteTemplate(s.options.AssetPathTemplate, config.PathPlaceholders{ + Dir: &dir, + Name: &base, + Hash: &hash, + })) + ext + + // Optionally add metadata about the file + var jsonMetadataChunk string + if s.options.NeedsMetafile { + inputs := fmt.Sprintf("{\n %s: {\n \"bytesInOutput\": %d\n }\n }", + js_printer.QuoteForJSON(result.file.inputFile.Source.PrettyPath, s.options.ASCIIOnly), + len(bytes), + ) + jsonMetadataChunk = fmt.Sprintf( + "{\n \"imports\": [],\n \"exports\": [],\n \"inputs\": %s,\n \"bytes\": %d\n }", + inputs, + len(bytes), + ) + } + + // Generate the additional file to copy into the output directory + result.file.inputFile.AdditionalFiles = []graph.OutputFile{{ + AbsPath: s.fs.Join(s.options.AbsOutputDir, relPath), + Contents: bytes, + JSONMetadataChunk: jsonMetadataChunk, + }} + } + + s.results[i] = result + } + + // The linker operates on an array of files, so construct that now. This + // can't be constructed earlier because we generate new parse results for + // JavaScript stub files for CSS imports above. 
+ files := make([]scannerFile, len(s.results)) + for sourceIndex := range s.results { + if result := &s.results[sourceIndex]; result.ok { + s.validateTLA(uint32(sourceIndex)) + files[sourceIndex] = result.file + } + } + return files +} + +func (s *scanner) validateTLA(sourceIndex uint32) tlaCheck { + result := &s.results[sourceIndex] + + if result.ok && result.tlaCheck.depth == 0 { + if repr, ok := result.file.inputFile.Repr.(*graph.JSRepr); ok { + result.tlaCheck.depth = 1 + if repr.AST.TopLevelAwaitKeyword.Len > 0 { + result.tlaCheck.parent = ast.MakeIndex32(sourceIndex) + } + + for importRecordIndex, record := range repr.AST.ImportRecords { + if record.SourceIndex.IsValid() && (record.Kind == ast.ImportRequire || record.Kind == ast.ImportStmt) { + parent := s.validateTLA(record.SourceIndex.GetIndex()) + if !parent.parent.IsValid() { + continue + } + + // Follow any import chains + if record.Kind == ast.ImportStmt && (!result.tlaCheck.parent.IsValid() || parent.depth < result.tlaCheck.depth) { + result.tlaCheck.depth = parent.depth + 1 + result.tlaCheck.parent = record.SourceIndex + result.tlaCheck.importRecordIndex = uint32(importRecordIndex) + continue + } + + // Require of a top-level await chain is forbidden + if record.Kind == ast.ImportRequire { + var notes []logger.MsgData + var tlaPrettyPath string + otherSourceIndex := record.SourceIndex.GetIndex() + + // Build up a chain of relevant notes for all of the imports + for { + parentResult := &s.results[otherSourceIndex] + parentRepr := parentResult.file.inputFile.Repr.(*graph.JSRepr) + + if parentRepr.AST.TopLevelAwaitKeyword.Len > 0 { + tlaPrettyPath = parentResult.file.inputFile.Source.PrettyPath + tracker := logger.MakeLineColumnTracker(&parentResult.file.inputFile.Source) + notes = append(notes, logger.RangeData(&tracker, parentRepr.AST.TopLevelAwaitKeyword, + fmt.Sprintf("The top-level await in %q is here", tlaPrettyPath))) + break + } + + if !parentResult.tlaCheck.parent.IsValid() { + notes = 
append(notes, logger.MsgData{Text: "unexpected invalid index"}) + break + } + + otherSourceIndex = parentResult.tlaCheck.parent.GetIndex() + + tracker := logger.MakeLineColumnTracker(&parentResult.file.inputFile.Source) + notes = append(notes, logger.RangeData(&tracker, + parentRepr.AST.ImportRecords[parent.importRecordIndex].Range, + fmt.Sprintf("The file %q imports the file %q here", + parentResult.file.inputFile.Source.PrettyPath, s.results[otherSourceIndex].file.inputFile.Source.PrettyPath))) + } + + var text string + importedPrettyPath := s.results[record.SourceIndex.GetIndex()].file.inputFile.Source.PrettyPath + + if importedPrettyPath == tlaPrettyPath { + text = fmt.Sprintf("This require call is not allowed because the imported file %q contains a top-level await", + importedPrettyPath) + } else { + text = fmt.Sprintf("This require call is not allowed because the transitive dependency %q contains a top-level await", + tlaPrettyPath) + } + + tracker := logger.MakeLineColumnTracker(&result.file.inputFile.Source) + s.log.AddRangeErrorWithNotes(&tracker, record.Range, text, notes) + } + } + } + + // Make sure that if we wrap this module in a closure, the closure is also + // async. This happens when you call "import()" on this module and code + // splitting is off. 
+ if result.tlaCheck.parent.IsValid() { + repr.Meta.IsAsyncOrHasAsyncDependency = true + } + } + } + + return result.tlaCheck +} + +func DefaultExtensionToLoaderMap() map[string]config.Loader { + return map[string]config.Loader{ + ".js": config.LoaderJS, + ".mjs": config.LoaderJS, + ".cjs": config.LoaderJS, + ".jsx": config.LoaderJSX, + ".ts": config.LoaderTS, + ".tsx": config.LoaderTSX, + ".css": config.LoaderCSS, + ".json": config.LoaderJSON, + ".txt": config.LoaderText, + } +} + +func applyOptionDefaults(options *config.Options) { + if options.ExtensionToLoader == nil { + options.ExtensionToLoader = DefaultExtensionToLoaderMap() + } + if options.OutputExtensionJS == "" { + options.OutputExtensionJS = ".js" + } + if options.OutputExtensionCSS == "" { + options.OutputExtensionCSS = ".css" + } + + // Configure default path templates + if len(options.EntryPathTemplate) == 0 { + options.EntryPathTemplate = []config.PathTemplate{ + {Data: "./", Placeholder: config.DirPlaceholder}, + {Data: "/", Placeholder: config.NamePlaceholder}, + } + } + if len(options.ChunkPathTemplate) == 0 { + options.ChunkPathTemplate = []config.PathTemplate{ + {Data: "./", Placeholder: config.NamePlaceholder}, + {Data: "-", Placeholder: config.HashPlaceholder}, + } + } + if len(options.AssetPathTemplate) == 0 { + options.AssetPathTemplate = []config.PathTemplate{ + {Data: "./", Placeholder: config.NamePlaceholder}, + {Data: "-", Placeholder: config.HashPlaceholder}, + } + } + + options.ProfilerNames = !options.MinifyIdentifiers +} + +func (b *Bundle) Compile(log logger.Log, options config.Options, timer *helpers.Timer) ([]graph.OutputFile, string) { + timer.Begin("Compile phase") + defer timer.End("Compile phase") + + applyOptionDefaults(&options) + + // The format can't be "preserve" while bundling + if options.Mode == config.ModeBundle && options.OutputFormat == config.FormatPreserve { + options.OutputFormat = config.FormatESModule + } + + files := make([]graph.InputFile, len(b.files)) + 
for i, file := range b.files { + files[i] = file.inputFile + } + + // Get the base path from the options or choose the lowest common ancestor of all entry points + allReachableFiles := findReachableFiles(files, b.entryPoints) + + // Compute source map data in parallel with linking + timer.Begin("Spawn source map tasks") + dataForSourceMaps := b.computeDataForSourceMapsInParallel(&options, allReachableFiles) + timer.End("Spawn source map tasks") + + var resultGroups [][]graph.OutputFile + if options.CodeSplitting || len(b.entryPoints) == 1 { + // If code splitting is enabled or if there's only one entry point, link all entry points together + resultGroups = [][]graph.OutputFile{link( + &options, timer, log, b.fs, b.res, files, b.entryPoints, b.uniqueKeyPrefix, allReachableFiles, dataForSourceMaps)} + } else { + // Otherwise, link each entry point with the runtime file separately + waitGroup := sync.WaitGroup{} + resultGroups = make([][]graph.OutputFile, len(b.entryPoints)) + for i, entryPoint := range b.entryPoints { + waitGroup.Add(1) + go func(i int, entryPoint graph.EntryPoint) { + entryPoints := []graph.EntryPoint{entryPoint} + forked := timer.Fork() + reachableFiles := findReachableFiles(files, entryPoints) + resultGroups[i] = link( + &options, forked, log, b.fs, b.res, files, entryPoints, b.uniqueKeyPrefix, reachableFiles, dataForSourceMaps) + timer.Join(forked) + waitGroup.Done() + }(i, entryPoint) + } + waitGroup.Wait() + } + + // Join the results in entry point order for determinism + var outputFiles []graph.OutputFile + for _, group := range resultGroups { + outputFiles = append(outputFiles, group...) 
+ } + + // Also generate the metadata file if necessary + var metafileJSON string + if options.NeedsMetafile { + timer.Begin("Generate metadata JSON") + metafileJSON = b.generateMetadataJSON(outputFiles, allReachableFiles, options.ASCIIOnly) + timer.End("Generate metadata JSON") + } + + if !options.WriteToStdout { + // Make sure an output file never overwrites an input file + if !options.AllowOverwrite { + sourceAbsPaths := make(map[string]uint32) + for _, sourceIndex := range allReachableFiles { + keyPath := b.files[sourceIndex].inputFile.Source.KeyPath + if keyPath.Namespace == "file" { + absPathKey := canonicalFileSystemPathForWindows(keyPath.Text) + sourceAbsPaths[absPathKey] = sourceIndex + } + } + for _, outputFile := range outputFiles { + absPathKey := canonicalFileSystemPathForWindows(outputFile.AbsPath) + if sourceIndex, ok := sourceAbsPaths[absPathKey]; ok { + hint := "" + switch logger.API { + case logger.CLIAPI: + hint = " (use \"--allow-overwrite\" to allow this)" + case logger.JSAPI: + hint = " (use \"allowOverwrite: true\" to allow this)" + case logger.GoAPI: + hint = " (use \"AllowOverwrite: true\" to allow this)" + } + log.AddError(nil, logger.Loc{}, + fmt.Sprintf("Refusing to overwrite input file %q%s", + b.files[sourceIndex].inputFile.Source.PrettyPath, hint)) + } + } + } + + // Make sure an output file never overwrites another output file. This + // is almost certainly unintentional and would otherwise happen silently. + // + // Make an exception for files that have identical contents. In that case + // the duplicate is just silently filtered out. This can happen with the + // "file" loader, for example. 
+ outputFileMap := make(map[string][]byte) + end := 0 + for _, outputFile := range outputFiles { + absPathKey := canonicalFileSystemPathForWindows(outputFile.AbsPath) + contents, ok := outputFileMap[absPathKey] + + // If this isn't a duplicate, keep the output file + if !ok { + outputFileMap[absPathKey] = outputFile.Contents + outputFiles[end] = outputFile + end++ + continue + } + + // If the names and contents are both the same, only keep the first one + if bytes.Equal(contents, outputFile.Contents) { + continue + } + + // Otherwise, generate an error + outputPath := outputFile.AbsPath + if relPath, ok := b.fs.Rel(b.fs.Cwd(), outputPath); ok { + outputPath = relPath + } + log.AddError(nil, logger.Loc{}, "Two output files share the same path but have different contents: "+outputPath) + } + outputFiles = outputFiles[:end] + } + + return outputFiles, metafileJSON +} + +// Find all files reachable from all entry points. This order should be +// deterministic given that the entry point order is deterministic, since the +// returned order is the postorder of the graph traversal and import record +// order within a given file is deterministic. 
+func findReachableFiles(files []graph.InputFile, entryPoints []graph.EntryPoint) []uint32 { + visited := make(map[uint32]bool) + var order []uint32 + var visit func(uint32) + + // Include this file and all files it imports + visit = func(sourceIndex uint32) { + if !visited[sourceIndex] { + visited[sourceIndex] = true + file := &files[sourceIndex] + if repr, ok := file.Repr.(*graph.JSRepr); ok && repr.CSSSourceIndex.IsValid() { + visit(repr.CSSSourceIndex.GetIndex()) + } + for _, record := range *file.Repr.ImportRecords() { + if record.SourceIndex.IsValid() { + visit(record.SourceIndex.GetIndex()) + } + } + + // Each file must come after its dependencies + order = append(order, sourceIndex) + } + } + + // The runtime is always included in case it's needed + visit(runtime.SourceIndex) + + // Include all files reachable from any entry point + for _, entryPoint := range entryPoints { + visit(entryPoint.SourceIndex) + } + + return order +} + +// This is done in parallel with linking because linking is a mostly serial +// phase and there are extra resources for parallelism. This could also be done +// during parsing but that would slow down parsing and delay the start of the +// linking phase, which then delays the whole bundling process. +// +// However, doing this during parsing would allow it to be cached along with +// the parsed ASTs which would then speed up incremental builds. In the future +// it could be good to optionally have this be computed during the parsing +// phase when incremental builds are active but otherwise still have it be +// computed during linking for optimal speed during non-incremental builds. 
+func (b *Bundle) computeDataForSourceMapsInParallel(options *config.Options, reachableFiles []uint32) func() []dataForSourceMap { + if options.SourceMap == config.SourceMapNone { + return func() []dataForSourceMap { + return nil + } + } + + var waitGroup sync.WaitGroup + results := make([]dataForSourceMap, len(b.files)) + + for _, sourceIndex := range reachableFiles { + if f := &b.files[sourceIndex]; f.inputFile.Loader.CanHaveSourceMap() { + if repr, ok := f.inputFile.Repr.(*graph.JSRepr); ok { + waitGroup.Add(1) + go func(sourceIndex uint32, f *scannerFile, repr *graph.JSRepr) { + result := &results[sourceIndex] + result.lineOffsetTables = js_printer.GenerateLineOffsetTables(f.inputFile.Source.Contents, repr.AST.ApproximateLineCount) + sm := f.inputFile.InputSourceMap + if !options.ExcludeSourcesContent { + if sm == nil { + // Simple case: no nested source map + result.quotedContents = [][]byte{js_printer.QuoteForJSON(f.inputFile.Source.Contents, options.ASCIIOnly)} + } else { + // Complex case: nested source map + result.quotedContents = make([][]byte, len(sm.Sources)) + nullContents := []byte("null") + for i := range sm.Sources { + // Missing contents become a "null" literal + quotedContents := nullContents + if i < len(sm.SourcesContent) { + if value := sm.SourcesContent[i]; value.Quoted != "" { + if options.ASCIIOnly && !isASCIIOnly(value.Quoted) { + // Re-quote non-ASCII values if output is ASCII-only + quotedContents = js_printer.QuoteForJSON(js_lexer.UTF16ToString(value.Value), options.ASCIIOnly) + } else { + // Otherwise just use the value directly from the input file + quotedContents = []byte(value.Quoted) + } + } + } + result.quotedContents[i] = quotedContents + } + } + } + waitGroup.Done() + }(sourceIndex, f, repr) + } + } + } + + return func() []dataForSourceMap { + waitGroup.Wait() + return results + } +} + +func (b *Bundle) generateMetadataJSON(results []graph.OutputFile, allReachableFiles []uint32, asciiOnly bool) string { + sb := strings.Builder{} 
+ sb.WriteString("{\n \"inputs\": {") + + // Write inputs + isFirst := true + for _, sourceIndex := range allReachableFiles { + if sourceIndex == runtime.SourceIndex { + continue + } + if file := &b.files[sourceIndex]; len(file.jsonMetadataChunk) > 0 { + if isFirst { + isFirst = false + sb.WriteString("\n ") + } else { + sb.WriteString(",\n ") + } + sb.WriteString(file.jsonMetadataChunk) + } + } + + sb.WriteString("\n },\n \"outputs\": {") + + // Write outputs + isFirst = true + paths := make(map[string]bool) + for _, result := range results { + if len(result.JSONMetadataChunk) > 0 { + path := b.res.PrettyPath(logger.Path{Text: result.AbsPath, Namespace: "file"}) + if paths[path] { + // Don't write out the same path twice (can happen with the "file" loader) + continue + } + if isFirst { + isFirst = false + sb.WriteString("\n ") + } else { + sb.WriteString(",\n ") + } + paths[path] = true + sb.WriteString(fmt.Sprintf("%s: ", js_printer.QuoteForJSON(path, asciiOnly))) + sb.WriteString(result.JSONMetadataChunk) + } + } + + sb.WriteString("\n }\n}\n") + return sb.String() +} + +type runtimeCacheKey struct { + MangleSyntax bool + MinifyIdentifiers bool + ES6 bool +} + +type runtimeCache struct { + astMutex sync.Mutex + astMap map[runtimeCacheKey]js_ast.AST +} + +var globalRuntimeCache runtimeCache + +func (cache *runtimeCache) parseRuntime(options *config.Options) (source logger.Source, runtimeAST js_ast.AST, ok bool) { + key := runtimeCacheKey{ + // All configuration options that the runtime code depends on must go here + MangleSyntax: options.MangleSyntax, + MinifyIdentifiers: options.MinifyIdentifiers, + ES6: runtime.CanUseES6(options.UnsupportedJSFeatures), + } + + // Determine which source to use + if key.ES6 { + source = runtime.ES6Source + } else { + source = runtime.ES5Source + } + + // Cache hit? 
+ (func() { + cache.astMutex.Lock() + defer cache.astMutex.Unlock() + if cache.astMap != nil { + runtimeAST, ok = cache.astMap[key] + } + })() + if ok { + return + } + + // Cache miss + var constraint int + if key.ES6 { + constraint = 2015 + } else { + constraint = 5 + } + log := logger.NewDeferLog(logger.DeferLogAll) + runtimeAST, ok = js_parser.Parse(log, source, js_parser.OptionsFromConfig(&config.Options{ + // These configuration options must only depend on the key + MangleSyntax: key.MangleSyntax, + MinifyIdentifiers: key.MinifyIdentifiers, + UnsupportedJSFeatures: compat.UnsupportedJSFeatures( + map[compat.Engine][]int{compat.ES: {constraint}}), + + // Always do tree shaking for the runtime because we never want to + // include unnecessary runtime code + Mode: config.ModeBundle, + })) + if log.HasErrors() { + msgs := "Internal error: failed to parse runtime:\n" + for _, msg := range log.Done() { + msgs += msg.String(logger.OutputOptions{}, logger.TerminalInfo{}) + } + panic(msgs[:len(msgs)-1]) + } + + // Cache for next time + if ok { + cache.astMutex.Lock() + defer cache.astMutex.Unlock() + if cache.astMap == nil { + cache.astMap = make(map[runtimeCacheKey]js_ast.AST) + } + cache.astMap[key] = runtimeAST + } + return +} diff --git a/vendor/github.com/evanw/esbuild/internal/bundler/debug.go b/vendor/github.com/evanw/esbuild/internal/bundler/debug.go new file mode 100644 index 0000000000..731a34fc44 --- /dev/null +++ b/vendor/github.com/evanw/esbuild/internal/bundler/debug.go @@ -0,0 +1,132 @@ +package bundler + +import ( + "fmt" + "strings" + + "github.com/evanw/esbuild/internal/ast" + "github.com/evanw/esbuild/internal/graph" + "github.com/evanw/esbuild/internal/js_ast" + "github.com/evanw/esbuild/internal/js_printer" +) + +// Set this to true and then load the resulting metafile in "graph-debugger.html" +// to debug graph information. +// +// This is deliberately not exposed in the final binary. 
It is *very* internal +// and only exists to help debug esbuild itself. Make sure this is always set +// back to false before committing. +const debugVerboseMetafile = false + +func (c *linkerContext) generateExtraDataForFileJS(sourceIndex uint32) string { + if !debugVerboseMetafile { + return "" + } + + file := &c.graph.Files[sourceIndex] + repr := file.InputFile.Repr.(*graph.JSRepr) + sb := strings.Builder{} + + quoteSym := func(ref js_ast.Ref) string { + name := fmt.Sprintf("%d:%d [%s]", ref.SourceIndex, ref.InnerIndex, c.graph.Symbols.Get(ref).OriginalName) + return string(js_printer.QuoteForJSON(name, c.options.ASCIIOnly)) + } + + sb.WriteString(`,"parts":[`) + for partIndex, part := range repr.AST.Parts { + if partIndex > 0 { + sb.WriteByte(',') + } + var isFirst bool + code := "" + + sb.WriteString(fmt.Sprintf(`{"isLive":%v`, part.IsLive)) + sb.WriteString(fmt.Sprintf(`,"canBeRemovedIfUnused":%v`, part.CanBeRemovedIfUnused)) + + if partIndex == int(js_ast.NSExportPartIndex) { + sb.WriteString(`,"nsExportPartIndex":true`) + } else if ast.MakeIndex32(uint32(partIndex)) == repr.Meta.WrapperPartIndex { + sb.WriteString(`,"wrapperPartIndex":true`) + } else if len(part.Stmts) > 0 { + start := part.Stmts[0].Loc.Start + end := len(file.InputFile.Source.Contents) + if partIndex+1 < len(repr.AST.Parts) { + if nextStmts := repr.AST.Parts[partIndex+1].Stmts; len(nextStmts) > 0 { + if nextStart := nextStmts[0].Loc.Start; nextStart >= start { + end = int(nextStart) + } + } + } + code = file.InputFile.Source.Contents[start:end] + } + + // importRecords + sb.WriteString(`,"importRecords":[`) + isFirst = true + for _, importRecordIndex := range part.ImportRecordIndices { + record := repr.AST.ImportRecords[importRecordIndex] + if !record.SourceIndex.IsValid() { + continue + } + if isFirst { + isFirst = false + } else { + sb.WriteByte(',') + } + path := c.graph.Files[record.SourceIndex.GetIndex()].InputFile.Source.PrettyPath + sb.WriteString(fmt.Sprintf(`{"source":%s}`, 
js_printer.QuoteForJSON(path, c.options.ASCIIOnly))) + } + sb.WriteByte(']') + + // declaredSymbols + sb.WriteString(`,"declaredSymbols":[`) + isFirst = true + for _, declSym := range part.DeclaredSymbols { + if !declSym.IsTopLevel { + continue + } + if isFirst { + isFirst = false + } else { + sb.WriteByte(',') + } + sb.WriteString(fmt.Sprintf(`{"name":%s}`, quoteSym(declSym.Ref))) + } + sb.WriteByte(']') + + // symbolUses + sb.WriteString(`,"symbolUses":[`) + isFirst = true + for ref, uses := range part.SymbolUses { + if isFirst { + isFirst = false + } else { + sb.WriteByte(',') + } + sb.WriteString(fmt.Sprintf(`{"name":%s,"countEstimate":%d}`, quoteSym(ref), uses.CountEstimate)) + } + sb.WriteByte(']') + + // dependencies + sb.WriteString(`,"dependencies":[`) + for i, dep := range part.Dependencies { + if i > 0 { + sb.WriteByte(',') + } + sb.WriteString(fmt.Sprintf(`{"source":%s,"partIndex":%d}`, + js_printer.QuoteForJSON(c.graph.Files[dep.SourceIndex].InputFile.Source.PrettyPath, c.options.ASCIIOnly), + dep.PartIndex, + )) + } + sb.WriteByte(']') + + // code + sb.WriteString(`,"code":`) + sb.Write(js_printer.QuoteForJSON(strings.TrimRight(code, "\n"), c.options.ASCIIOnly)) + + sb.WriteByte('}') + } + sb.WriteString(`]`) + + return sb.String() +} diff --git a/vendor/github.com/evanw/esbuild/internal/bundler/linker.go b/vendor/github.com/evanw/esbuild/internal/bundler/linker.go new file mode 100644 index 0000000000..8821a2f562 --- /dev/null +++ b/vendor/github.com/evanw/esbuild/internal/bundler/linker.go @@ -0,0 +1,5427 @@ +package bundler + +import ( + "bytes" + "encoding/base64" + "encoding/binary" + "fmt" + "hash" + "path" + "sort" + "strings" + "sync" + + "github.com/evanw/esbuild/internal/ast" + "github.com/evanw/esbuild/internal/compat" + "github.com/evanw/esbuild/internal/config" + "github.com/evanw/esbuild/internal/css_ast" + "github.com/evanw/esbuild/internal/css_printer" + "github.com/evanw/esbuild/internal/fs" + "github.com/evanw/esbuild/internal/graph" 
+ "github.com/evanw/esbuild/internal/helpers" + "github.com/evanw/esbuild/internal/js_ast" + "github.com/evanw/esbuild/internal/js_lexer" + "github.com/evanw/esbuild/internal/js_printer" + "github.com/evanw/esbuild/internal/logger" + "github.com/evanw/esbuild/internal/renamer" + "github.com/evanw/esbuild/internal/resolver" + "github.com/evanw/esbuild/internal/runtime" + "github.com/evanw/esbuild/internal/sourcemap" + "github.com/evanw/esbuild/internal/xxhash" +) + +type linkerContext struct { + options *config.Options + timer *helpers.Timer + log logger.Log + fs fs.FS + res resolver.Resolver + graph graph.LinkerGraph + + // This helps avoid an infinite loop when matching imports to exports + cycleDetector []importTracker + + // We may need to refer to the CommonJS "module" symbol for exports + unboundModuleRef js_ast.Ref + + // We may need to refer to the "__esm" and/or "__commonJS" runtime symbols + cjsRuntimeRef js_ast.Ref + esmRuntimeRef js_ast.Ref + + // This represents the parallel computation of source map related data. + // Calling this will block until the computation is done. The resulting value + // is shared between threads and must be treated as immutable. + dataForSourceMaps func() []dataForSourceMap + + // This is passed to us from the bundling phase + uniqueKeyPrefix string + uniqueKeyPrefixBytes []byte // This is just "uniqueKeyPrefix" in byte form +} + +type partRange struct { + sourceIndex uint32 + partIndexBegin uint32 + partIndexEnd uint32 +} + +type chunkInfo struct { + // This is a random string and is used to represent the output path of this + // chunk before the final output path has been computed. 
+ uniqueKey string + + filesWithPartsInChunk map[uint32]bool + entryBits helpers.BitSet + + // This information is only useful if "isEntryPoint" is true + isEntryPoint bool + sourceIndex uint32 // An index into "c.sources" + entryPointBit uint // An index into "c.graph.EntryPoints" + + // For code splitting + crossChunkImports []chunkImport + + // This is the representation-specific information + chunkRepr chunkRepr + + // This is the final path of this chunk relative to the output directory, but + // without the substitution of the final hash (since it hasn't been computed). + finalTemplate []config.PathTemplate + + // This is the final path of this chunk relative to the output directory. It + // is the substitution of the final hash into "finalTemplate". + finalRelPath string + + // If non-empty, this chunk needs to generate an external legal comments file. + externalLegalComments []byte + + // When this chunk is initially generated in isolation, the output pieces + // will contain slices of the output with the unique keys of other chunks + // omitted. + intermediateOutput intermediateOutput + + // This contains the hash for just this chunk without including information + // from the hashes of other chunks. Later on in the linking process, the + // final hash for this chunk will be constructed by merging the isolated + // hashes of all transitive dependencies of this chunk. This is separated + // into two phases like this to handle cycles in the chunk import graph. 
+ waitForIsolatedHash func() []byte + + // Other fields relating to the output file for this chunk + jsonMetadataChunkCallback func(finalOutputSize int) helpers.Joiner + outputSourceMap sourcemap.SourceMapPieces + isExecutable bool +} + +type chunkImport struct { + chunkIndex uint32 + importKind ast.ImportKind +} + +type outputPieceIndexKind uint8 + +const ( + outputPieceNone outputPieceIndexKind = iota + outputPieceAssetIndex + outputPieceChunkIndex +) + +// This is a chunk of source code followed by a reference to another chunk. For +// example, the file "@import 'CHUNK0001'; body { color: black; }" would be +// represented by two pieces, one with the data "@import '" and another with the +// data "'; body { color: black; }". The first would have the chunk index 1 and +// the second would have an invalid chunk index. +type outputPiece struct { + data []byte + + // Note: The "kind" may be "outputPieceNone" in which case there is one piece + // with data and no chunk index. For example, the chunk may not contain any + // imports. + index uint32 + kind outputPieceIndexKind +} + +type intermediateOutput struct { + // If the chunk doesn't have any references to other chunks, then "pieces" is + // nil and "joiner" contains the contents of the chunk. This is more efficient + // because it avoids doing a join operation twice. + joiner helpers.Joiner + + // Otherwise, "pieces" contains the contents of the chunk and "joiner" should + // not be used. Another joiner will have to be constructed later when merging + // the pieces together. 
+ pieces []outputPiece +} + +type chunkRepr interface{ isChunk() } + +func (*chunkReprJS) isChunk() {} +func (*chunkReprCSS) isChunk() {} + +type chunkReprJS struct { + filesInChunkInOrder []uint32 + partsInChunkInOrder []partRange + + // For code splitting + crossChunkPrefixStmts []js_ast.Stmt + crossChunkSuffixStmts []js_ast.Stmt + exportsToOtherChunks map[js_ast.Ref]string + importsFromOtherChunks map[uint32]crossChunkImportItemArray +} + +type chunkReprCSS struct { + externalImportsInOrder []externalImportCSS + filesInChunkInOrder []uint32 +} + +type externalImportCSS struct { + path logger.Path + conditions []css_ast.Token +} + +// Returns a log where "log.HasErrors()" only returns true if any errors have +// been logged since this call. This is useful when there have already been +// errors logged by other linkers that share the same log. +func wrappedLog(log logger.Log) logger.Log { + var mutex sync.Mutex + var hasErrors bool + addMsg := log.AddMsg + + log.AddMsg = func(msg logger.Msg) { + if msg.Kind == logger.Error { + mutex.Lock() + defer mutex.Unlock() + hasErrors = true + } + addMsg(msg) + } + + log.HasErrors = func() bool { + mutex.Lock() + defer mutex.Unlock() + return hasErrors + } + + return log +} + +func link( + options *config.Options, + timer *helpers.Timer, + log logger.Log, + fs fs.FS, + res resolver.Resolver, + inputFiles []graph.InputFile, + entryPoints []graph.EntryPoint, + uniqueKeyPrefix string, + reachableFiles []uint32, + dataForSourceMaps func() []dataForSourceMap, +) []graph.OutputFile { + timer.Begin("Link") + defer timer.End("Link") + + log = wrappedLog(log) + + timer.Begin("Clone linker graph") + c := linkerContext{ + options: options, + timer: timer, + log: log, + fs: fs, + res: res, + dataForSourceMaps: dataForSourceMaps, + uniqueKeyPrefix: uniqueKeyPrefix, + uniqueKeyPrefixBytes: []byte(uniqueKeyPrefix), + graph: graph.CloneLinkerGraph( + inputFiles, + reachableFiles, + entryPoints, + options.CodeSplitting, + ), + } + 
timer.End("Clone linker graph") + + // Use a smaller version of these functions if we don't need profiler names + runtimeRepr := c.graph.Files[runtime.SourceIndex].InputFile.Repr.(*graph.JSRepr) + if c.options.ProfilerNames { + c.cjsRuntimeRef = runtimeRepr.AST.NamedExports["__commonJS"].Ref + c.esmRuntimeRef = runtimeRepr.AST.NamedExports["__esm"].Ref + } else { + c.cjsRuntimeRef = runtimeRepr.AST.NamedExports["__commonJSMin"].Ref + c.esmRuntimeRef = runtimeRepr.AST.NamedExports["__esmMin"].Ref + } + + for _, entryPoint := range entryPoints { + if repr, ok := c.graph.Files[entryPoint.SourceIndex].InputFile.Repr.(*graph.JSRepr); ok { + // Loaders default to CommonJS when they are the entry point and the output + // format is not ESM-compatible since that avoids generating the ESM-to-CJS + // machinery. + if repr.AST.HasLazyExport && (c.options.Mode == config.ModePassThrough || + (c.options.Mode == config.ModeConvertFormat && !c.options.OutputFormat.KeepES6ImportExportSyntax())) { + repr.AST.ExportsKind = js_ast.ExportsCommonJS + } + + // Entry points with ES6 exports must generate an exports object when + // targeting non-ES6 formats. Note that the IIFE format only needs this + // when the global name is present, since that's the only way the exports + // can actually be observed externally. 
+ if repr.AST.ExportKeyword.Len > 0 && (options.OutputFormat == config.FormatCommonJS || + (options.OutputFormat == config.FormatIIFE && len(options.GlobalName) > 0)) { + repr.AST.UsesExportsRef = true + repr.Meta.ForceIncludeExportsForEntryPoint = true + } + } + } + + // Allocate a new unbound symbol called "module" in case we need it later + if c.options.OutputFormat == config.FormatCommonJS { + c.unboundModuleRef = c.graph.GenerateNewSymbol(runtime.SourceIndex, js_ast.SymbolUnbound, "module") + } else { + c.unboundModuleRef = js_ast.InvalidRef + } + + c.scanImportsAndExports() + + // Stop now if there were errors + if c.log.HasErrors() { + return []graph.OutputFile{} + } + + c.treeShakingAndCodeSplitting() + + if c.options.Mode == config.ModePassThrough { + for _, entryPoint := range c.graph.EntryPoints() { + c.preventExportsFromBeingRenamed(entryPoint.SourceIndex) + } + } + + chunks := c.computeChunks() + c.computeCrossChunkDependencies(chunks) + + // Make sure calls to "js_ast.FollowSymbols()" in parallel goroutines after this + // won't hit concurrent map mutation hazards + js_ast.FollowAllSymbols(c.graph.Symbols) + + return c.generateChunksInParallel(chunks) +} + +// Currently the automatic chunk generation algorithm should by construction +// never generate chunks that import each other since files are allocated to +// chunks based on which entry points they are reachable from. +// +// This will change in the future when we allow manual chunk labels. But before +// we allow manual chunk labels, we'll need to rework module initialization to +// allow code splitting chunks to be lazily-initialized. +// +// Since that work hasn't been finished yet, cycles in the chunk import graph +// can cause initialization bugs. So let's forbid these cycles for now to guard +// against code splitting bugs that could cause us to generate buggy chunks. 
+func (c *linkerContext) enforceNoCyclicChunkImports(chunks []chunkInfo) { + var validate func(int, []int) + validate = func(chunkIndex int, path []int) { + for _, otherChunkIndex := range path { + if chunkIndex == otherChunkIndex { + c.log.AddError(nil, logger.Loc{}, "Internal error: generated chunks contain a circular import") + return + } + } + path = append(path, chunkIndex) + for _, chunkImport := range chunks[chunkIndex].crossChunkImports { + // Ignore cycles caused by dynamic "import()" expressions. These are fine + // because they don't necessarily cause initialization order issues and + // they don't indicate a bug in our chunk generation algorithm. They arise + // normally in real code (e.g. two files that import each other). + if chunkImport.importKind != ast.ImportDynamic { + validate(int(chunkImport.chunkIndex), path) + } + } + } + path := make([]int, 0, len(chunks)) + for i := range chunks { + validate(i, path) + } +} + +func (c *linkerContext) generateChunksInParallel(chunks []chunkInfo) []graph.OutputFile { + c.timer.Begin("Generate chunks") + defer c.timer.End("Generate chunks") + + // Generate each chunk on a separate goroutine + generateWaitGroup := sync.WaitGroup{} + generateWaitGroup.Add(len(chunks)) + for chunkIndex := range chunks { + switch chunks[chunkIndex].chunkRepr.(type) { + case *chunkReprJS: + go c.generateChunkJS(chunks, chunkIndex, &generateWaitGroup) + case *chunkReprCSS: + go c.generateChunkCSS(chunks, chunkIndex, &generateWaitGroup) + } + } + c.enforceNoCyclicChunkImports(chunks) + generateWaitGroup.Wait() + + // Compute the final hashes of each chunk. This can technically be done in + // parallel but it probably doesn't matter so much because we're not hashing + // that much data. 
+ visited := make([]uint32, len(chunks)) + var finalBytes []byte + for chunkIndex := range chunks { + chunk := &chunks[chunkIndex] + var hashSubstitution *string + + // Only wait for the hash if necessary + if config.HasPlaceholder(chunk.finalTemplate, config.HashPlaceholder) { + // Compute the final hash using the isolated hashes of the dependencies + hash := xxhash.New() + appendIsolatedHashesForImportedChunks(hash, chunks, uint32(chunkIndex), visited, ^uint32(chunkIndex)) + finalBytes = hash.Sum(finalBytes[:0]) + finalString := hashForFileName(finalBytes) + hashSubstitution = &finalString + } + + // Render the last remaining placeholder in the template + chunk.finalRelPath = config.TemplateToString(config.SubstituteTemplate(chunk.finalTemplate, config.PathPlaceholders{ + Hash: hashSubstitution, + })) + } + + // Generate the final output files by joining file pieces together + c.timer.Begin("Generate final output files") + var resultsWaitGroup sync.WaitGroup + results := make([][]graph.OutputFile, len(chunks)) + resultsWaitGroup.Add(len(chunks)) + for chunkIndex, chunk := range chunks { + go func(chunkIndex int, chunk chunkInfo) { + var outputFiles []graph.OutputFile + + // Each file may optionally contain additional files to be copied to the + // output directory. This is used by the "file" loader. + switch chunkRepr := chunk.chunkRepr.(type) { + case *chunkReprJS: + for _, sourceIndex := range chunkRepr.filesInChunkInOrder { + outputFiles = append(outputFiles, c.graph.Files[sourceIndex].InputFile.AdditionalFiles...) + } + + case *chunkReprCSS: + for _, sourceIndex := range chunkRepr.filesInChunkInOrder { + outputFiles = append(outputFiles, c.graph.Files[sourceIndex].InputFile.AdditionalFiles...) 
+ } + } + + // Path substitution for the chunk itself + finalRelDir := c.fs.Dir(chunk.finalRelPath) + outputContentsJoiner, outputSourceMapShifts := c.substituteFinalPaths(chunks, chunk.intermediateOutput, + func(finalRelPathForImport string) string { + return c.pathBetweenChunks(finalRelDir, finalRelPathForImport) + }) + + // Generate the optional legal comments file for this chunk + if chunk.externalLegalComments != nil { + finalRelPathForLegalComments := chunk.finalRelPath + ".LEGAL.txt" + + // Link the file to the legal comments + if c.options.LegalComments == config.LegalCommentsLinkedWithComment { + importPath := c.pathBetweenChunks(finalRelDir, finalRelPathForLegalComments) + importPath = strings.TrimPrefix(importPath, "./") + outputContentsJoiner.EnsureNewlineAtEnd() + outputContentsJoiner.AddString("/*! For license information please see ") + outputContentsJoiner.AddString(importPath) + outputContentsJoiner.AddString(" */\n") + } + + // Write the external legal comments file + outputFiles = append(outputFiles, graph.OutputFile{ + AbsPath: c.fs.Join(c.options.AbsOutputDir, finalRelPathForLegalComments), + Contents: chunk.externalLegalComments, + JSONMetadataChunk: fmt.Sprintf( + "{\n \"imports\": [],\n \"exports\": [],\n \"inputs\": {},\n \"bytes\": %d\n }", len(chunk.externalLegalComments)), + }) + } + + // Generate the optional source map for this chunk + if c.options.SourceMap != config.SourceMapNone && chunk.outputSourceMap.HasContent() { + outputSourceMap := chunk.outputSourceMap.Finalize(outputSourceMapShifts) + finalRelPathForSourceMap := chunk.finalRelPath + ".map" + + // Potentially write a trailing source map comment + switch c.options.SourceMap { + case config.SourceMapLinkedWithComment: + importPath := c.pathBetweenChunks(finalRelDir, finalRelPathForSourceMap) + importPath = strings.TrimPrefix(importPath, "./") + outputContentsJoiner.EnsureNewlineAtEnd() + outputContentsJoiner.AddString("//# sourceMappingURL=") + 
outputContentsJoiner.AddString(importPath) + outputContentsJoiner.AddString("\n") + + case config.SourceMapInline, config.SourceMapInlineAndExternal: + outputContentsJoiner.EnsureNewlineAtEnd() + outputContentsJoiner.AddString("//# sourceMappingURL=data:application/json;base64,") + outputContentsJoiner.AddString(base64.StdEncoding.EncodeToString(outputSourceMap)) + outputContentsJoiner.AddString("\n") + } + + // Potentially write the external source map file + switch c.options.SourceMap { + case config.SourceMapLinkedWithComment, config.SourceMapInlineAndExternal, config.SourceMapExternalWithoutComment: + outputFiles = append(outputFiles, graph.OutputFile{ + AbsPath: c.fs.Join(c.options.AbsOutputDir, finalRelPathForSourceMap), + Contents: outputSourceMap, + JSONMetadataChunk: fmt.Sprintf( + "{\n \"imports\": [],\n \"exports\": [],\n \"inputs\": {},\n \"bytes\": %d\n }", len(outputSourceMap)), + }) + } + } + + // Finalize the output contents + outputContents := outputContentsJoiner.Done() + + // Path substitution for the JSON metadata + var jsonMetadataChunk string + if c.options.NeedsMetafile { + jsonMetadataChunkPieces := c.breakOutputIntoPieces(chunk.jsonMetadataChunkCallback(len(outputContents)), uint32(len(chunks))) + jsonMetadataChunkBytes, _ := c.substituteFinalPaths(chunks, jsonMetadataChunkPieces, func(finalRelPathForImport string) string { + return c.res.PrettyPath(logger.Path{Text: c.fs.Join(c.options.AbsOutputDir, finalRelPathForImport), Namespace: "file"}) + }) + jsonMetadataChunk = string(jsonMetadataChunkBytes.Done()) + } + + // Generate the output file for this chunk + outputFiles = append(outputFiles, graph.OutputFile{ + AbsPath: c.fs.Join(c.options.AbsOutputDir, chunk.finalRelPath), + Contents: outputContents, + JSONMetadataChunk: jsonMetadataChunk, + IsExecutable: chunk.isExecutable, + }) + + results[chunkIndex] = outputFiles + resultsWaitGroup.Done() + }(chunkIndex, chunk) + } + resultsWaitGroup.Wait() + c.timer.End("Generate final output files") 
+ + // Merge the output files from the different goroutines together in order + outputFilesLen := 0 + for _, result := range results { + outputFilesLen += len(result) + } + outputFiles := make([]graph.OutputFile, 0, outputFilesLen) + for _, result := range results { + outputFiles = append(outputFiles, result...) + } + return outputFiles +} + +// Given a set of output pieces (i.e. a buffer already divided into the spans +// between import paths), substitute the final import paths in and then join +// everything into a single byte buffer. +func (c *linkerContext) substituteFinalPaths( + chunks []chunkInfo, + intermediateOutput intermediateOutput, + modifyPath func(string) string, +) (j helpers.Joiner, shifts []sourcemap.SourceMapShift) { + // Optimization: If there can be no substitutions, just reuse the initial + // joiner that was used when generating the intermediate chunk output + // instead of creating another one and copying the whole file into it. + if intermediateOutput.pieces == nil { + return intermediateOutput.joiner, []sourcemap.SourceMapShift{{}} + } + + var shift sourcemap.SourceMapShift + shifts = make([]sourcemap.SourceMapShift, 0, len(intermediateOutput.pieces)) + shifts = append(shifts, shift) + + for _, piece := range intermediateOutput.pieces { + var dataOffset sourcemap.LineColumnOffset + j.AddBytes(piece.data) + dataOffset.AdvanceBytes(piece.data) + shift.Before.Add(dataOffset) + shift.After.Add(dataOffset) + + switch piece.kind { + case outputPieceAssetIndex: + file := c.graph.Files[piece.index] + if len(file.InputFile.AdditionalFiles) != 1 { + panic("Internal error") + } + relPath, _ := c.fs.Rel(c.options.AbsOutputDir, file.InputFile.AdditionalFiles[0].AbsPath) + + // Make sure to always use forward slashes, even on Windows + relPath = strings.ReplaceAll(relPath, "\\", "/") + + importPath := modifyPath(relPath) + j.AddString(importPath) + shift.Before.AdvanceString(file.InputFile.UniqueKeyForFileLoader) + shift.After.AdvanceString(importPath) 
+ shifts = append(shifts, shift) + + case outputPieceChunkIndex: + chunk := chunks[piece.index] + importPath := modifyPath(chunk.finalRelPath) + j.AddString(importPath) + shift.Before.AdvanceString(chunk.uniqueKey) + shift.After.AdvanceString(importPath) + shifts = append(shifts, shift) + } + } + + return +} + +func (c *linkerContext) pathBetweenChunks(fromRelDir string, toRelPath string) string { + // Join with the public path if it has been configured + if c.options.PublicPath != "" { + return joinWithPublicPath(c.options.PublicPath, toRelPath) + } + + // Otherwise, return a relative path + relPath, ok := c.fs.Rel(fromRelDir, toRelPath) + if !ok { + c.log.AddError(nil, logger.Loc{}, + fmt.Sprintf("Cannot traverse from directory %q to chunk %q", fromRelDir, toRelPath)) + return "" + } + + // Make sure to always use forward slashes, even on Windows + relPath = strings.ReplaceAll(relPath, "\\", "/") + + // Make sure the relative path doesn't start with a name, since that could + // be interpreted as a package path instead of a relative path + if !strings.HasPrefix(relPath, "./") && !strings.HasPrefix(relPath, "../") { + relPath = "./" + relPath + } + + return relPath +} + +// Returns the path of this file relative to "outbase", which is then ready to +// be joined with the absolute output directory path. The directory and name +// components are returned separately for convenience. +// +// This makes sure to have the directory end in a slash so that it can be +// substituted into a path template without necessarily having a "/" after it. +// Extra slashes should get cleaned up automatically when we join it with the +// output directory. 
+func pathRelativeToOutbase( + inputFile *graph.InputFile, + options *config.Options, + fs fs.FS, + stdExt string, + avoidIndex bool, + customFilePath string, +) (relDir string, baseName string, baseExt string) { + relDir = "/" + baseExt = stdExt + absPath := inputFile.Source.KeyPath.Text + + if customFilePath != "" { + // Use the configured output path if present + absPath = customFilePath + if !fs.IsAbs(absPath) { + absPath = fs.Join(options.AbsOutputBase, absPath) + } + } else if inputFile.Source.KeyPath.Namespace != "file" { + // Come up with a path for virtual paths (i.e. non-file-system paths) + dir, base, _ := logger.PlatformIndependentPathDirBaseExt(absPath) + if avoidIndex && base == "index" { + _, base, _ = logger.PlatformIndependentPathDirBaseExt(dir) + } + baseName = sanitizeFilePathForVirtualModulePath(base) + return + } else { + // Heuristic: If the file is named something like "index.js", then use + // the name of the parent directory instead. This helps avoid the + // situation where many chunks are named "index" because of people + // dynamically-importing npm packages that make use of node's implicit + // "index" file name feature. + if avoidIndex { + base := fs.Base(absPath) + base = base[:len(base)-len(fs.Ext(base))] + if base == "index" { + absPath = fs.Dir(absPath) + } + } + } + + // Try to get a relative path to the base directory + relPath, ok := fs.Rel(options.AbsOutputBase, absPath) + if !ok { + // This can fail in some situations such as on different drives on + // Windows. In that case we just use the file name. + baseName = fs.Base(absPath) + } else { + // Now we finally have a relative path + relDir = fs.Dir(relPath) + "/" + baseName = fs.Base(relPath) + + // Use platform-independent slashes + relDir = strings.ReplaceAll(relDir, "\\", "/") + + // Replace leading "../" so we don't try to write outside of the output + // directory. 
This normally can't happen because "AbsOutputBase" is + // automatically computed to contain all entry point files, but it can + // happen if someone sets it manually via the "outbase" API option. + // + // Note that we can't just strip any leading "../" because that could + // cause two separate entry point paths to collide. For example, there + // could be both "src/index.js" and "../src/index.js" as entry points. + dotDotCount := 0 + for strings.HasPrefix(relDir[dotDotCount*3:], "../") { + dotDotCount++ + } + if dotDotCount > 0 { + // The use of "_.._" here is somewhat arbitrary but it is unlikely to + // collide with a folder named by a human and it works on Windows + // (Windows doesn't like names that end with a "."). And not starting + // with a "." means that it will not be hidden on Unix. + relDir = strings.Repeat("_.._/", dotDotCount) + relDir[dotDotCount*3:] + } + relDir = "/" + relDir + } + + // Strip the file extension if the output path is an input file + if customFilePath == "" { + ext := fs.Ext(baseName) + baseName = baseName[:len(baseName)-len(ext)] + } + return +} + +func (c *linkerContext) computeCrossChunkDependencies(chunks []chunkInfo) { + c.timer.Begin("Compute cross-chunk dependencies") + defer c.timer.End("Compute cross-chunk dependencies") + + jsChunks := 0 + for _, chunk := range chunks { + if _, ok := chunk.chunkRepr.(*chunkReprJS); ok { + jsChunks++ + } + } + if jsChunks < 2 { + // No need to compute cross-chunk dependencies if there can't be any + return + } + + type chunkMeta struct { + imports map[js_ast.Ref]bool + exports map[js_ast.Ref]bool + dynamicImports map[int]bool + } + + chunkMetas := make([]chunkMeta, len(chunks)) + + // For each chunk, see what symbols it uses from other chunks. Do this in + // parallel because it's the most expensive part of this function. 
+ waitGroup := sync.WaitGroup{} + waitGroup.Add(len(chunks)) + for chunkIndex, chunk := range chunks { + go func(chunkIndex int, chunk chunkInfo) { + chunkMeta := &chunkMetas[chunkIndex] + imports := make(map[js_ast.Ref]bool) + chunkMeta.imports = imports + chunkMeta.exports = make(map[js_ast.Ref]bool) + + // Go over each file in this chunk + for sourceIndex := range chunk.filesWithPartsInChunk { + // Go over each part in this file that's marked for inclusion in this chunk + switch repr := c.graph.Files[sourceIndex].InputFile.Repr.(type) { + case *graph.JSRepr: + for partIndex, partMeta := range repr.AST.Parts { + if !partMeta.IsLive { + continue + } + part := &repr.AST.Parts[partIndex] + + // Rewrite external dynamic imports to point to the chunk for that entry point + for _, importRecordIndex := range part.ImportRecordIndices { + record := &repr.AST.ImportRecords[importRecordIndex] + if record.SourceIndex.IsValid() && c.isExternalDynamicImport(record, sourceIndex) { + otherChunkIndex := c.graph.Files[record.SourceIndex.GetIndex()].EntryPointChunkIndex + record.Path.Text = chunks[otherChunkIndex].uniqueKey + record.SourceIndex = ast.Index32{} + + // Track this cross-chunk dynamic import so we make sure to + // include its hash when we're calculating the hashes of all + // dependencies of this chunk. + if int(otherChunkIndex) != chunkIndex { + if chunkMeta.dynamicImports == nil { + chunkMeta.dynamicImports = make(map[int]bool) + } + chunkMeta.dynamicImports[int(otherChunkIndex)] = true + } + } + } + + // Remember what chunk each top-level symbol is declared in. Symbols + // with multiple declarations such as repeated "var" statements with + // the same name should already be marked as all being in a single + // chunk. In that case this will overwrite the same value below which + // is fine. 
+ for _, declared := range part.DeclaredSymbols { + if declared.IsTopLevel { + c.graph.Symbols.Get(declared.Ref).ChunkIndex = ast.MakeIndex32(uint32(chunkIndex)) + } + } + + // Record each symbol used in this part. This will later be matched up + // with our map of which chunk a given symbol is declared in to + // determine if the symbol needs to be imported from another chunk. + for ref := range part.SymbolUses { + symbol := c.graph.Symbols.Get(ref) + + // Ignore unbound symbols, which don't have declarations + if symbol.Kind == js_ast.SymbolUnbound { + continue + } + + // Ignore symbols that are going to be replaced by undefined + if symbol.ImportItemStatus == js_ast.ImportItemMissing { + continue + } + + // If this is imported from another file, follow the import + // reference and reference the symbol in that file instead + if importData, ok := repr.Meta.ImportsToBind[ref]; ok { + ref = importData.Ref + symbol = c.graph.Symbols.Get(ref) + } else if repr.Meta.Wrap == graph.WrapCJS && ref != repr.AST.WrapperRef { + // The only internal symbol that wrapped CommonJS files export + // is the wrapper itself. + continue + } + + // If this is an ES6 import from a CommonJS file, it will become a + // property access off the namespace symbol instead of a bare + // identifier. In that case we want to pull in the namespace symbol + // instead. The namespace symbol stores the result of "require()". + if symbol.NamespaceAlias != nil { + ref = symbol.NamespaceAlias.NamespaceRef + } + + // We must record this relationship even for symbols that are not + // imports. Due to code splitting, the definition of a symbol may + // be moved to a separate chunk than the use of a symbol even if + // the definition and use of that symbol are originally from the + // same source file. 
+ imports[ref] = true + } + } + } + } + + // Include the exports if this is an entry point chunk + if chunk.isEntryPoint { + if repr, ok := c.graph.Files[chunk.sourceIndex].InputFile.Repr.(*graph.JSRepr); ok { + if repr.Meta.Wrap != graph.WrapCJS { + for _, alias := range repr.Meta.SortedAndFilteredExportAliases { + export := repr.Meta.ResolvedExports[alias] + targetRef := export.Ref + + // If this is an import, then target what the import points to + if importData, ok := c.graph.Files[export.SourceIndex].InputFile.Repr.(*graph.JSRepr).Meta.ImportsToBind[targetRef]; ok { + targetRef = importData.Ref + } + + // If this is an ES6 import from a CommonJS file, it will become a + // property access off the namespace symbol instead of a bare + // identifier. In that case we want to pull in the namespace symbol + // instead. The namespace symbol stores the result of "require()". + if symbol := c.graph.Symbols.Get(targetRef); symbol.NamespaceAlias != nil { + targetRef = symbol.NamespaceAlias.NamespaceRef + } + + imports[targetRef] = true + } + } + + // Ensure "exports" is included if the current output format needs it + if repr.Meta.ForceIncludeExportsForEntryPoint { + imports[repr.AST.ExportsRef] = true + } + + // Include the wrapper if present + if repr.Meta.Wrap != graph.WrapNone { + imports[repr.AST.WrapperRef] = true + } + } + } + + waitGroup.Done() + }(chunkIndex, chunk) + } + waitGroup.Wait() + + // Mark imported symbols as exported in the chunk from which they are declared + for chunkIndex := range chunks { + chunk := &chunks[chunkIndex] + chunkRepr, ok := chunk.chunkRepr.(*chunkReprJS) + if !ok { + continue + } + chunkMeta := chunkMetas[chunkIndex] + + // Find all uses in this chunk of symbols from other chunks + chunkRepr.importsFromOtherChunks = make(map[uint32]crossChunkImportItemArray) + for importRef := range chunkMeta.imports { + // Ignore uses that aren't top-level symbols + if otherChunkIndex := c.graph.Symbols.Get(importRef).ChunkIndex; 
otherChunkIndex.IsValid() { + if otherChunkIndex := otherChunkIndex.GetIndex(); otherChunkIndex != uint32(chunkIndex) { + chunkRepr.importsFromOtherChunks[otherChunkIndex] = + append(chunkRepr.importsFromOtherChunks[otherChunkIndex], crossChunkImportItem{ref: importRef}) + chunkMetas[otherChunkIndex].exports[importRef] = true + } + } + } + + // If this is an entry point, make sure we import all chunks belonging to + // this entry point, even if there are no imports. We need to make sure + // these chunks are evaluated for their side effects too. + if chunk.isEntryPoint { + for otherChunkIndex, otherChunk := range chunks { + if _, ok := otherChunk.chunkRepr.(*chunkReprJS); ok && chunkIndex != otherChunkIndex && otherChunk.entryBits.HasBit(chunk.entryPointBit) { + imports := chunkRepr.importsFromOtherChunks[uint32(otherChunkIndex)] + chunkRepr.importsFromOtherChunks[uint32(otherChunkIndex)] = imports + } + } + } + + // Make sure we also track dynamic cross-chunk imports. These need to be + // tracked so we count them as dependencies of this chunk for the purpose + // of hash calculation. + if chunkMeta.dynamicImports != nil { + sortedDynamicImports := make([]int, 0, len(chunkMeta.dynamicImports)) + for chunkIndex := range chunkMeta.dynamicImports { + sortedDynamicImports = append(sortedDynamicImports, chunkIndex) + } + sort.Ints(sortedDynamicImports) + for _, chunkIndex := range sortedDynamicImports { + chunk.crossChunkImports = append(chunk.crossChunkImports, chunkImport{ + importKind: ast.ImportDynamic, + chunkIndex: uint32(chunkIndex), + }) + } + } + } + + // Generate cross-chunk exports. These must be computed before cross-chunk + // imports because of export alias renaming, which must consider all export + // aliases simultaneously to avoid collisions. 
+ for chunkIndex := range chunks { + chunk := &chunks[chunkIndex] + chunkRepr, ok := chunk.chunkRepr.(*chunkReprJS) + if !ok { + continue + } + + chunkRepr.exportsToOtherChunks = make(map[js_ast.Ref]string) + switch c.options.OutputFormat { + case config.FormatESModule: + r := renamer.ExportRenamer{} + var items []js_ast.ClauseItem + for _, export := range c.sortedCrossChunkExportItems(chunkMetas[chunkIndex].exports) { + var alias string + if c.options.MinifyIdentifiers { + alias = r.NextMinifiedName() + } else { + alias = r.NextRenamedName(c.graph.Symbols.Get(export.Ref).OriginalName) + } + items = append(items, js_ast.ClauseItem{Name: js_ast.LocRef{Ref: export.Ref}, Alias: alias}) + chunkRepr.exportsToOtherChunks[export.Ref] = alias + } + if len(items) > 0 { + chunkRepr.crossChunkSuffixStmts = []js_ast.Stmt{{Data: &js_ast.SExportClause{ + Items: items, + }}} + } + + default: + panic("Internal error") + } + } + + // Generate cross-chunk imports. These must be computed after cross-chunk + // exports because the export aliases must already be finalized so they can + // be embedded in the generated import statements. 
+ for chunkIndex := range chunks { + chunk := &chunks[chunkIndex] + chunkRepr, ok := chunk.chunkRepr.(*chunkReprJS) + if !ok { + continue + } + + var crossChunkPrefixStmts []js_ast.Stmt + + for _, crossChunkImport := range c.sortedCrossChunkImports(chunks, chunkRepr.importsFromOtherChunks) { + switch c.options.OutputFormat { + case config.FormatESModule: + var items []js_ast.ClauseItem + for _, item := range crossChunkImport.sortedImportItems { + items = append(items, js_ast.ClauseItem{Name: js_ast.LocRef{Ref: item.ref}, Alias: item.exportAlias}) + } + importRecordIndex := uint32(len(chunk.crossChunkImports)) + chunk.crossChunkImports = append(chunk.crossChunkImports, chunkImport{ + importKind: ast.ImportStmt, + chunkIndex: crossChunkImport.chunkIndex, + }) + if len(items) > 0 { + // "import {a, b} from './chunk.js'" + crossChunkPrefixStmts = append(crossChunkPrefixStmts, js_ast.Stmt{Data: &js_ast.SImport{ + Items: &items, + ImportRecordIndex: importRecordIndex, + }}) + } else { + // "import './chunk.js'" + crossChunkPrefixStmts = append(crossChunkPrefixStmts, js_ast.Stmt{Data: &js_ast.SImport{ + ImportRecordIndex: importRecordIndex, + }}) + } + + default: + panic("Internal error") + } + } + + chunkRepr.crossChunkPrefixStmts = crossChunkPrefixStmts + } +} + +type crossChunkImport struct { + chunkIndex uint32 + sortedImportItems crossChunkImportItemArray +} + +// This type is just so we can use Go's native sort function +type crossChunkImportArray []crossChunkImport + +func (a crossChunkImportArray) Len() int { return len(a) } +func (a crossChunkImportArray) Swap(i int, j int) { a[i], a[j] = a[j], a[i] } + +func (a crossChunkImportArray) Less(i int, j int) bool { + return a[i].chunkIndex < a[j].chunkIndex +} + +// Sort cross-chunk imports by chunk name for determinism +func (c *linkerContext) sortedCrossChunkImports(chunks []chunkInfo, importsFromOtherChunks map[uint32]crossChunkImportItemArray) crossChunkImportArray { + result := make(crossChunkImportArray, 0, 
len(importsFromOtherChunks)) + + for otherChunkIndex, importItems := range importsFromOtherChunks { + // Sort imports from a single chunk by alias for determinism + otherChunk := &chunks[otherChunkIndex] + exportsToOtherChunks := otherChunk.chunkRepr.(*chunkReprJS).exportsToOtherChunks + for i, item := range importItems { + importItems[i].exportAlias = exportsToOtherChunks[item.ref] + } + sort.Sort(importItems) + result = append(result, crossChunkImport{ + chunkIndex: otherChunkIndex, + sortedImportItems: importItems, + }) + } + + sort.Sort(result) + return result +} + +type crossChunkImportItem struct { + ref js_ast.Ref + exportAlias string +} + +// This type is just so we can use Go's native sort function +type crossChunkImportItemArray []crossChunkImportItem + +func (a crossChunkImportItemArray) Len() int { return len(a) } +func (a crossChunkImportItemArray) Swap(i int, j int) { a[i], a[j] = a[j], a[i] } + +func (a crossChunkImportItemArray) Less(i int, j int) bool { + return a[i].exportAlias < a[j].exportAlias +} + +// The sort order here is arbitrary but needs to be consistent between builds. +// The InnerIndex should be stable because the parser for a single file is +// single-threaded and deterministically assigns out InnerIndex values +// sequentially. But the SourceIndex should be unstable because the main thread +// assigns out source index values sequentially to newly-discovered dependencies +// in a multi-threaded producer/consumer relationship. So instead we use the +// index of the source in the DFS order over all entry points for stability. 
+type stableRef struct { + StableSourceIndex uint32 + Ref js_ast.Ref +} + +// This type is just so we can use Go's native sort function +type stableRefArray []stableRef + +func (a stableRefArray) Len() int { return len(a) } +func (a stableRefArray) Swap(i int, j int) { a[i], a[j] = a[j], a[i] } +func (a stableRefArray) Less(i int, j int) bool { + ai, aj := a[i], a[j] + return ai.StableSourceIndex < aj.StableSourceIndex || + (ai.StableSourceIndex == aj.StableSourceIndex && ai.Ref.InnerIndex < aj.Ref.InnerIndex) +} + +// Sort cross-chunk exports by chunk name for determinism +func (c *linkerContext) sortedCrossChunkExportItems(exportRefs map[js_ast.Ref]bool) stableRefArray { + result := make(stableRefArray, 0, len(exportRefs)) + for ref := range exportRefs { + result = append(result, stableRef{ + StableSourceIndex: c.graph.StableSourceIndices[ref.SourceIndex], + Ref: ref, + }) + } + sort.Sort(result) + return result +} + +func (c *linkerContext) scanImportsAndExports() { + c.timer.Begin("Scan imports and exports") + defer c.timer.End("Scan imports and exports") + + // Step 1: Figure out what modules must be CommonJS + c.timer.Begin("Step 1") + for _, sourceIndex := range c.graph.ReachableFiles { + file := &c.graph.Files[sourceIndex] + switch repr := file.InputFile.Repr.(type) { + case *graph.CSSRepr: + // Inline URLs for non-CSS files into the CSS file + var additionalFiles []graph.OutputFile + for importRecordIndex := range repr.AST.ImportRecords { + if record := &repr.AST.ImportRecords[importRecordIndex]; record.SourceIndex.IsValid() { + otherFile := &c.graph.Files[record.SourceIndex.GetIndex()] + if otherRepr, ok := otherFile.InputFile.Repr.(*graph.JSRepr); ok { + record.Path.Text = otherRepr.AST.URLForCSS + record.Path.Namespace = "" + record.SourceIndex = ast.Index32{} + + // Copy the additional files to the output directory + additionalFiles = append(additionalFiles, otherFile.InputFile.AdditionalFiles...) 
+ } + } + } + file.InputFile.AdditionalFiles = additionalFiles + + case *graph.JSRepr: + for importRecordIndex := range repr.AST.ImportRecords { + record := &repr.AST.ImportRecords[importRecordIndex] + if !record.SourceIndex.IsValid() { + continue + } + + otherFile := &c.graph.Files[record.SourceIndex.GetIndex()] + otherRepr := otherFile.InputFile.Repr.(*graph.JSRepr) + + switch record.Kind { + case ast.ImportStmt: + // Importing using ES6 syntax from a file without any ES6 syntax + // causes that module to be considered CommonJS-style, even if it + // doesn't have any CommonJS exports. + // + // That means the ES6 imports will become undefined instead of + // causing errors. This is for compatibility with older CommonJS- + // style bundlers. + // + // We emit a warning in this case but try to avoid turning the module + // into a CommonJS module if possible. This is possible with named + // imports (the module stays an ECMAScript module but the imports are + // rewritten with undefined) but is not possible with star or default + // imports: + // + // import * as ns from './empty-file' + // import defVal from './empty-file' + // console.log(ns, defVal) + // + // In that case the module *is* considered a CommonJS module because + // the namespace object must be created. 
+ if (record.ContainsImportStar || record.ContainsDefaultAlias) && otherRepr.AST.ExportsKind == js_ast.ExportsNone && !otherRepr.AST.HasLazyExport { + otherRepr.Meta.Wrap = graph.WrapCJS + otherRepr.AST.ExportsKind = js_ast.ExportsCommonJS + } + + case ast.ImportRequire: + // Files that are imported with require() must be CommonJS modules + if otherRepr.AST.ExportsKind == js_ast.ExportsESM { + otherRepr.Meta.Wrap = graph.WrapESM + } else { + otherRepr.Meta.Wrap = graph.WrapCJS + otherRepr.AST.ExportsKind = js_ast.ExportsCommonJS + } + + case ast.ImportDynamic: + if !c.options.CodeSplitting { + // If we're not splitting, then import() is just a require() that + // returns a promise, so the imported file must be a CommonJS module + if otherRepr.AST.ExportsKind == js_ast.ExportsESM { + otherRepr.Meta.Wrap = graph.WrapESM + } else { + otherRepr.Meta.Wrap = graph.WrapCJS + otherRepr.AST.ExportsKind = js_ast.ExportsCommonJS + } + } + } + } + + // If the output format doesn't have an implicit CommonJS wrapper, any file + // that uses CommonJS features will need to be wrapped, even though the + // resulting wrapper won't be invoked by other files. An exception is made + // for entry point files in CommonJS format (or when in pass-through mode). + if repr.AST.ExportsKind == js_ast.ExportsCommonJS && (!file.IsEntryPoint() || + c.options.OutputFormat == config.FormatIIFE || c.options.OutputFormat == config.FormatESModule) { + repr.Meta.Wrap = graph.WrapCJS + } + } + } + c.timer.End("Step 1") + + // Step 2: Propagate dynamic export status for export star statements that + // are re-exports from a module whose exports are not statically analyzable. + // In this case the export star must be evaluated at run time instead of at + // bundle time. 
+ c.timer.Begin("Step 2") + for _, sourceIndex := range c.graph.ReachableFiles { + repr, ok := c.graph.Files[sourceIndex].InputFile.Repr.(*graph.JSRepr) + if !ok { + continue + } + + if repr.Meta.Wrap != graph.WrapNone { + c.recursivelyWrapDependencies(sourceIndex) + } + + if len(repr.AST.ExportStarImportRecords) > 0 { + visited := make(map[uint32]bool) + c.hasDynamicExportsDueToExportStar(sourceIndex, visited) + } + + // Even if the output file is CommonJS-like, we may still need to wrap + // CommonJS-style files. Any file that imports a CommonJS-style file will + // cause that file to need to be wrapped. This is because the import + // method, whatever it is, will need to invoke the wrapper. Note that + // this can include entry points (e.g. an entry point that imports a file + // that imports that entry point). + for _, record := range repr.AST.ImportRecords { + if record.SourceIndex.IsValid() { + otherRepr := c.graph.Files[record.SourceIndex.GetIndex()].InputFile.Repr.(*graph.JSRepr) + if otherRepr.AST.ExportsKind == js_ast.ExportsCommonJS { + c.recursivelyWrapDependencies(record.SourceIndex.GetIndex()) + } + } + } + } + c.timer.End("Step 2") + + // Step 3: Resolve "export * from" statements. This must be done after we + // discover all modules that can have dynamic exports because export stars + // are ignored for those modules. + c.timer.Begin("Step 3") + exportStarStack := make([]uint32, 0, 32) + for _, sourceIndex := range c.graph.ReachableFiles { + repr, ok := c.graph.Files[sourceIndex].InputFile.Repr.(*graph.JSRepr) + if !ok { + continue + } + + // Expression-style loaders defer code generation until linking. Code + // generation is done here because at this point we know that the + // "ExportsKind" field has its final value and will not be changed. 
+ if repr.AST.HasLazyExport { + c.generateCodeForLazyExport(sourceIndex) + } + + // Propagate exports for export star statements + if len(repr.AST.ExportStarImportRecords) > 0 { + c.addExportsForExportStar(repr.Meta.ResolvedExports, sourceIndex, exportStarStack) + } + + // Also add a special export so import stars can bind to it. This must be + // done in this step because it must come after CommonJS module discovery + // but before matching imports with exports. + repr.Meta.ResolvedExportStar = &graph.ExportData{ + Ref: repr.AST.ExportsRef, + SourceIndex: sourceIndex, + } + } + c.timer.End("Step 3") + + // Step 4: Match imports with exports. This must be done after we process all + // export stars because imports can bind to export star re-exports. + c.timer.Begin("Step 4") + for _, sourceIndex := range c.graph.ReachableFiles { + file := &c.graph.Files[sourceIndex] + repr, ok := file.InputFile.Repr.(*graph.JSRepr) + if !ok { + continue + } + + if len(repr.AST.NamedImports) > 0 { + c.matchImportsWithExportsForFile(uint32(sourceIndex)) + } + + // If we're exporting as CommonJS and this file doesn't need a wrapper, + // then we'll be using the actual CommonJS "exports" and/or "module" + // symbols. In that case make sure to mark them as such so they don't + // get minified. + if (c.options.OutputFormat == config.FormatPreserve || c.options.OutputFormat == config.FormatCommonJS) && + repr.Meta.Wrap != graph.WrapCJS && file.IsEntryPoint() { + exportsRef := js_ast.FollowSymbols(c.graph.Symbols, repr.AST.ExportsRef) + moduleRef := js_ast.FollowSymbols(c.graph.Symbols, repr.AST.ModuleRef) + c.graph.Symbols.Get(exportsRef).Kind = js_ast.SymbolUnbound + c.graph.Symbols.Get(moduleRef).Kind = js_ast.SymbolUnbound + } + + // Create the wrapper part for wrapped files. This is needed by a later step. + c.createWrapperForFile(uint32(sourceIndex)) + } + c.timer.End("Step 4") + + // Step 5: Create namespace exports for every file. 
This is always necessary + // for CommonJS files, and is also necessary for other files if they are + // imported using an import star statement. + c.timer.Begin("Step 5") + waitGroup := sync.WaitGroup{} + for _, sourceIndex := range c.graph.ReachableFiles { + repr, ok := c.graph.Files[sourceIndex].InputFile.Repr.(*graph.JSRepr) + if !ok { + continue + } + + // This is the slowest step and is also parallelizable, so do this in parallel. + waitGroup.Add(1) + go func(sourceIndex uint32, repr *graph.JSRepr) { + // Now that all exports have been resolved, sort and filter them to create + // something we can iterate over later. + aliases := make([]string, 0, len(repr.Meta.ResolvedExports)) + nextAlias: + for alias, export := range repr.Meta.ResolvedExports { + // Re-exporting multiple symbols with the same name causes an ambiguous + // export. These names cannot be used and should not end up in generated code. + otherRepr := c.graph.Files[export.SourceIndex].InputFile.Repr.(*graph.JSRepr) + if len(export.PotentiallyAmbiguousExportStarRefs) > 0 { + mainRef := export.Ref + if imported, ok := otherRepr.Meta.ImportsToBind[export.Ref]; ok { + mainRef = imported.Ref + } + for _, ambiguousExport := range export.PotentiallyAmbiguousExportStarRefs { + ambiguousRepr := c.graph.Files[ambiguousExport.SourceIndex].InputFile.Repr.(*graph.JSRepr) + ambiguousRef := ambiguousExport.Ref + if imported, ok := ambiguousRepr.Meta.ImportsToBind[ambiguousExport.Ref]; ok { + ambiguousRef = imported.Ref + } + if mainRef != ambiguousRef { + continue nextAlias + } + } + } + + // Ignore re-exported imports in TypeScript files that failed to be + // resolved. These are probably just type-only imports so the best thing to + // do is to silently omit them from the export list. 
+ if otherRepr.Meta.IsProbablyTypeScriptType[export.Ref] { + continue + } + + aliases = append(aliases, alias) + } + sort.Strings(aliases) + repr.Meta.SortedAndFilteredExportAliases = aliases + + // Export creation uses "sortedAndFilteredExportAliases" so this must + // come second after we fill in that array + c.createExportsForFile(uint32(sourceIndex)) + + waitGroup.Done() + }(sourceIndex, repr) + } + waitGroup.Wait() + c.timer.End("Step 5") + + // Step 6: Bind imports to exports. This adds non-local dependencies on the + // parts that declare the export to all parts that use the import. Also + // generate wrapper parts for wrapped files. + c.timer.Begin("Step 6") + for _, sourceIndex := range c.graph.ReachableFiles { + file := &c.graph.Files[sourceIndex] + repr, ok := file.InputFile.Repr.(*graph.JSRepr) + if !ok { + continue + } + + // Pre-generate symbols for re-exports CommonJS symbols in case they + // are necessary later. This is done now because the symbols map cannot be + // mutated later due to parallelism. + if file.IsEntryPoint() && c.options.OutputFormat == config.FormatESModule { + copies := make([]js_ast.Ref, len(repr.Meta.SortedAndFilteredExportAliases)) + for i, alias := range repr.Meta.SortedAndFilteredExportAliases { + copies[i] = c.graph.GenerateNewSymbol(sourceIndex, js_ast.SymbolOther, "export_"+alias) + } + repr.Meta.CJSExportCopies = copies + } + + // Use "init_*" for ESM wrappers instead of "require_*" + if repr.Meta.Wrap == graph.WrapESM { + c.graph.Symbols.Get(repr.AST.WrapperRef).OriginalName = "init_" + file.InputFile.Source.IdentifierName + } + + // If this isn't CommonJS, then rename the unused "exports" and "module" + // variables to avoid them causing the identically-named variables in + // actual CommonJS files from being renamed. This is purely about + // aesthetics and is not about correctness. This is done here because by + // this point, we know the CommonJS status will not change further. 
+ if repr.Meta.Wrap != graph.WrapCJS && repr.AST.ExportsKind != js_ast.ExportsCommonJS && (!file.IsEntryPoint() || + c.options.OutputFormat != config.FormatCommonJS) { + name := file.InputFile.Source.IdentifierName + c.graph.Symbols.Get(repr.AST.ExportsRef).OriginalName = name + "_exports" + c.graph.Symbols.Get(repr.AST.ModuleRef).OriginalName = name + "_module" + } + + // Include the "__export" symbol from the runtime if it was used in the + // previous step. The previous step can't do this because it's running in + // parallel and can't safely mutate the "importsToBind" map of another file. + if repr.Meta.NeedsExportSymbolFromRuntime || repr.Meta.NeedsMarkAsModuleSymbolFromRuntime { + runtimeRepr := c.graph.Files[runtime.SourceIndex].InputFile.Repr.(*graph.JSRepr) + if repr.Meta.NeedsExportSymbolFromRuntime { + exportRef := runtimeRepr.AST.ModuleScope.Members["__export"].Ref + c.graph.GenerateSymbolImportAndUse(sourceIndex, js_ast.NSExportPartIndex, exportRef, 1, runtime.SourceIndex) + } + if repr.Meta.NeedsMarkAsModuleSymbolFromRuntime { + markAsModuleRef := runtimeRepr.AST.ModuleScope.Members["__markAsModule"].Ref + c.graph.GenerateSymbolImportAndUse(sourceIndex, js_ast.NSExportPartIndex, markAsModuleRef, 1, runtime.SourceIndex) + } + } + + for importRef, importData := range repr.Meta.ImportsToBind { + resolvedRepr := c.graph.Files[importData.SourceIndex].InputFile.Repr.(*graph.JSRepr) + partsDeclaringSymbol := resolvedRepr.TopLevelSymbolToParts(importData.Ref) + + for _, partIndex := range repr.AST.NamedImports[importRef].LocalPartsWithUses { + part := &repr.AST.Parts[partIndex] + + // Depend on the file containing the imported symbol + for _, resolvedPartIndex := range partsDeclaringSymbol { + part.Dependencies = append(part.Dependencies, js_ast.Dependency{ + SourceIndex: importData.SourceIndex, + PartIndex: resolvedPartIndex, + }) + } + + // Also depend on any files that re-exported this symbol in between the + // file containing the import and the file 
containing the imported symbol + part.Dependencies = append(part.Dependencies, importData.ReExports...) + } + + // Merge these symbols so they will share the same name + js_ast.MergeSymbols(c.graph.Symbols, importRef, importData.Ref) + } + + // If this is an entry point, depend on all exports so they are included + if file.IsEntryPoint() { + var dependencies []js_ast.Dependency + + for _, alias := range repr.Meta.SortedAndFilteredExportAliases { + export := repr.Meta.ResolvedExports[alias] + targetSourceIndex := export.SourceIndex + targetRef := export.Ref + + // If this is an import, then target what the import points to + targetRepr := c.graph.Files[targetSourceIndex].InputFile.Repr.(*graph.JSRepr) + if importData, ok := targetRepr.Meta.ImportsToBind[targetRef]; ok { + targetSourceIndex = importData.SourceIndex + targetRef = importData.Ref + targetRepr = c.graph.Files[targetSourceIndex].InputFile.Repr.(*graph.JSRepr) + dependencies = append(dependencies, importData.ReExports...) + } + + // Pull in all declarations of this symbol + for _, partIndex := range targetRepr.TopLevelSymbolToParts(targetRef) { + dependencies = append(dependencies, js_ast.Dependency{ + SourceIndex: targetSourceIndex, + PartIndex: partIndex, + }) + } + } + + // Ensure "exports" is included if the current output format needs it + if repr.Meta.ForceIncludeExportsForEntryPoint { + dependencies = append(dependencies, js_ast.Dependency{ + SourceIndex: sourceIndex, + PartIndex: js_ast.NSExportPartIndex, + }) + } + + // Include the wrapper if present + if repr.Meta.Wrap != graph.WrapNone { + dependencies = append(dependencies, js_ast.Dependency{ + SourceIndex: sourceIndex, + PartIndex: repr.Meta.WrapperPartIndex.GetIndex(), + }) + } + + // Represent these constraints with a dummy part + entryPointPartIndex := c.graph.AddPartToFile(sourceIndex, js_ast.Part{ + Dependencies: dependencies, + CanBeRemovedIfUnused: false, + }) + repr.Meta.EntryPointPartIndex = ast.MakeIndex32(entryPointPartIndex) + } + 
+ // Encode import-specific constraints in the dependency graph + for partIndex, part := range repr.AST.Parts { + toModuleUses := uint32(0) + runtimeRequireUses := uint32(0) + + // Imports of wrapped files must depend on the wrapper + for _, importRecordIndex := range part.ImportRecordIndices { + record := &repr.AST.ImportRecords[importRecordIndex] + + // Don't follow external imports (this includes import() expressions) + if !record.SourceIndex.IsValid() || c.isExternalDynamicImport(record, sourceIndex) { + // This is an external import. Check if it will be a "require()" call. + if record.Kind == ast.ImportRequire || !c.options.OutputFormat.KeepES6ImportExportSyntax() || + (record.Kind == ast.ImportDynamic && c.options.UnsupportedJSFeatures.Has(compat.DynamicImport)) { + // We should use "__require" instead of "require" if we're not + // generating a CommonJS output file, since it won't exist otherwise + if config.ShouldCallRuntimeRequire(c.options.Mode, c.options.OutputFormat) { + record.CallRuntimeRequire = true + runtimeRequireUses++ + } + + // It needs the "__toModule" wrapper if it wasn't originally a + // CommonJS import (i.e. it wasn't a "require()" call). 
+ if record.Kind != ast.ImportRequire { + record.WrapWithToModule = true + toModuleUses++ + } + } + continue + } + + otherSourceIndex := record.SourceIndex.GetIndex() + otherRepr := c.graph.Files[otherSourceIndex].InputFile.Repr.(*graph.JSRepr) + + if otherRepr.Meta.Wrap != graph.WrapNone { + // Depend on the automatically-generated require wrapper symbol + wrapperRef := otherRepr.AST.WrapperRef + c.graph.GenerateSymbolImportAndUse(sourceIndex, uint32(partIndex), wrapperRef, 1, otherSourceIndex) + + // This is an ES6 import of a CommonJS module, so it needs the + // "__toModule" wrapper as long as it's not a bare "require()" + if record.Kind != ast.ImportRequire && otherRepr.AST.ExportsKind == js_ast.ExportsCommonJS { + record.WrapWithToModule = true + toModuleUses++ + } + + // If this is an ESM wrapper, also depend on the exports object + // since the final code will contain an inline reference to it. + // This must be done for "require()" and "import()" expressions + // but does not need to be done for "import" statements since + // those just cause us to reference the exports directly. + if otherRepr.Meta.Wrap == graph.WrapESM && record.Kind != ast.ImportStmt { + c.graph.GenerateSymbolImportAndUse(sourceIndex, uint32(partIndex), otherRepr.AST.ExportsRef, 1, otherSourceIndex) + } + } else if record.Kind == ast.ImportStmt && otherRepr.AST.ExportsKind == js_ast.ExportsESMWithDynamicFallback { + // This is an import of a module that has a dynamic export fallback + // object. In that case we need to depend on that object in case + // something ends up needing to use it later. This could potentially + // be omitted in some cases with more advanced analysis if this + // dynamic export fallback object doesn't end up being needed. 
+ c.graph.GenerateSymbolImportAndUse(sourceIndex, uint32(partIndex), otherRepr.AST.ExportsRef, 1, otherSourceIndex) + } + } + + // If there's an ES6 import of a non-ES6 module, then we're going to need the + // "__toModule" symbol from the runtime to wrap the result of "require()" + c.graph.GenerateRuntimeSymbolImportAndUse(sourceIndex, uint32(partIndex), "__toModule", toModuleUses) + + // If there are unbundled calls to "require()" and we're not generating + // code for node, then substitute a "__require" wrapper for "require". + c.graph.GenerateRuntimeSymbolImportAndUse(sourceIndex, uint32(partIndex), "__require", runtimeRequireUses) + + // If there's an ES6 export star statement of a non-ES6 module, then we're + // going to need the "__reExport" symbol from the runtime + reExportUses := uint32(0) + for _, importRecordIndex := range repr.AST.ExportStarImportRecords { + record := &repr.AST.ImportRecords[importRecordIndex] + + // Is this export star evaluated at run time? + happensAtRunTime := !record.SourceIndex.IsValid() && (!file.IsEntryPoint() || !c.options.OutputFormat.KeepES6ImportExportSyntax()) + if record.SourceIndex.IsValid() { + otherSourceIndex := record.SourceIndex.GetIndex() + otherRepr := c.graph.Files[otherSourceIndex].InputFile.Repr.(*graph.JSRepr) + if otherSourceIndex != sourceIndex && otherRepr.AST.ExportsKind.IsDynamic() { + happensAtRunTime = true + } + if otherRepr.AST.ExportsKind == js_ast.ExportsESMWithDynamicFallback { + // This looks like "__reExport(exports_a, exports_b)". Make sure to + // pull in the "exports_b" symbol into this export star. This matters + // in code splitting situations where the "export_b" symbol might live + // in a different chunk than this export star. 
+ c.graph.GenerateSymbolImportAndUse(sourceIndex, uint32(partIndex), otherRepr.AST.ExportsRef, 1, otherSourceIndex) + } + } + if happensAtRunTime { + // Depend on this file's "exports" object for the first argument to "__reExport" + c.graph.GenerateSymbolImportAndUse(sourceIndex, uint32(partIndex), repr.AST.ExportsRef, 1, sourceIndex) + record.CallsRunTimeReExportFn = true + repr.AST.UsesExportsRef = true + reExportUses++ + } + } + c.graph.GenerateRuntimeSymbolImportAndUse(sourceIndex, uint32(partIndex), "__reExport", reExportUses) + } + } + c.timer.End("Step 6") +} + +func (c *linkerContext) generateCodeForLazyExport(sourceIndex uint32) { + file := &c.graph.Files[sourceIndex] + repr := file.InputFile.Repr.(*graph.JSRepr) + + // Grab the lazy expression + if len(repr.AST.Parts) < 1 { + panic("Internal error") + } + part := &repr.AST.Parts[1] + if len(part.Stmts) != 1 { + panic("Internal error") + } + lazy, ok := part.Stmts[0].Data.(*js_ast.SLazyExport) + if !ok { + panic("Internal error") + } + + // Use "module.exports = value" for CommonJS-style modules + if repr.AST.ExportsKind == js_ast.ExportsCommonJS { + part.Stmts = []js_ast.Stmt{js_ast.AssignStmt( + js_ast.Expr{Loc: lazy.Value.Loc, Data: &js_ast.EDot{ + Target: js_ast.Expr{Loc: lazy.Value.Loc, Data: &js_ast.EIdentifier{Ref: repr.AST.ModuleRef}}, + Name: "exports", + NameLoc: lazy.Value.Loc, + }}, + lazy.Value, + )} + c.graph.GenerateSymbolImportAndUse(sourceIndex, 0, repr.AST.ModuleRef, 1, sourceIndex) + return + } + + // Otherwise, generate ES6 export statements. These are added as additional + // parts so they can be tree shaken individually. 
+ part.Stmts = nil + + type prevExport struct { + ref js_ast.Ref + partIndex uint32 + } + + generateExport := func(name string, alias string, value js_ast.Expr) prevExport { + // Generate a new symbol + ref := c.graph.GenerateNewSymbol(sourceIndex, js_ast.SymbolOther, name) + + // Generate an ES6 export + var stmt js_ast.Stmt + if alias == "default" { + stmt = js_ast.Stmt{Loc: value.Loc, Data: &js_ast.SExportDefault{ + DefaultName: js_ast.LocRef{Loc: value.Loc, Ref: ref}, + Value: js_ast.Stmt{Loc: value.Loc, Data: &js_ast.SExpr{Value: value}}, + }} + } else { + stmt = js_ast.Stmt{Loc: value.Loc, Data: &js_ast.SLocal{ + IsExport: true, + Decls: []js_ast.Decl{{ + Binding: js_ast.Binding{Loc: value.Loc, Data: &js_ast.BIdentifier{Ref: ref}}, + ValueOrNil: value, + }}, + }} + } + + // Link the export into the graph for tree shaking + partIndex := c.graph.AddPartToFile(sourceIndex, js_ast.Part{ + Stmts: []js_ast.Stmt{stmt}, + DeclaredSymbols: []js_ast.DeclaredSymbol{{Ref: ref, IsTopLevel: true}}, + CanBeRemovedIfUnused: true, + }) + c.graph.GenerateSymbolImportAndUse(sourceIndex, partIndex, repr.AST.ModuleRef, 1, sourceIndex) + repr.Meta.ResolvedExports[alias] = graph.ExportData{Ref: ref, SourceIndex: sourceIndex} + return prevExport{ref: ref, partIndex: partIndex} + } + + // Unwrap JSON objects into separate top-level variables + var prevExports []js_ast.Ref + jsonValue := lazy.Value + if object, ok := jsonValue.Data.(*js_ast.EObject); ok { + clone := *object + clone.Properties = append(make([]js_ast.Property, 0, len(clone.Properties)), clone.Properties...) 
+ for i, property := range clone.Properties { + if str, ok := property.Key.Data.(*js_ast.EString); ok && + (!file.IsEntryPoint() || js_lexer.IsIdentifierUTF16(str.Value) || + !c.options.UnsupportedJSFeatures.Has(compat.ArbitraryModuleNamespaceNames)) { + name := js_lexer.UTF16ToString(str.Value) + exportRef := generateExport(name, name, property.ValueOrNil).ref + prevExports = append(prevExports, exportRef) + clone.Properties[i].ValueOrNil = js_ast.Expr{Loc: property.Key.Loc, Data: &js_ast.EIdentifier{Ref: exportRef}} + } + } + jsonValue.Data = &clone + } + + // Generate the default export + finalExportPartIndex := generateExport(file.InputFile.Source.IdentifierName+"_default", "default", jsonValue).partIndex + + // The default export depends on all of the previous exports + for _, exportRef := range prevExports { + c.graph.GenerateSymbolImportAndUse(sourceIndex, finalExportPartIndex, exportRef, 1, sourceIndex) + } +} + +func (c *linkerContext) createExportsForFile(sourceIndex uint32) { + //////////////////////////////////////////////////////////////////////////////// + // WARNING: This method is run in parallel over all files. Do not mutate data + // for other files within this method or you will create a data race. + //////////////////////////////////////////////////////////////////////////////// + + file := &c.graph.Files[sourceIndex] + repr := file.InputFile.Repr.(*graph.JSRepr) + + // Generate a getter per export + properties := []js_ast.Property{} + nsExportDependencies := []js_ast.Dependency{} + nsExportSymbolUses := make(map[js_ast.Ref]js_ast.SymbolUse) + for _, alias := range repr.Meta.SortedAndFilteredExportAliases { + export := repr.Meta.ResolvedExports[alias] + + // If this is an export of an import, reference the symbol that the import + // was eventually resolved to. We need to do this because imports have + // already been resolved by this point, so we can't generate a new import + // and have that be resolved later. 
+ if importData, ok := c.graph.Files[export.SourceIndex].InputFile.Repr.(*graph.JSRepr).Meta.ImportsToBind[export.Ref]; ok { + export.Ref = importData.Ref + export.SourceIndex = importData.SourceIndex + nsExportDependencies = append(nsExportDependencies, importData.ReExports...) + } + + // Exports of imports need EImportIdentifier in case they need to be re- + // written to a property access later on + var value js_ast.Expr + if c.graph.Symbols.Get(export.Ref).NamespaceAlias != nil { + value = js_ast.Expr{Data: &js_ast.EImportIdentifier{Ref: export.Ref}} + } else { + value = js_ast.Expr{Data: &js_ast.EIdentifier{Ref: export.Ref}} + } + + // Add a getter property + var getter js_ast.Expr + body := js_ast.FnBody{Stmts: []js_ast.Stmt{{Loc: value.Loc, Data: &js_ast.SReturn{ValueOrNil: value}}}} + if c.options.UnsupportedJSFeatures.Has(compat.Arrow) { + getter = js_ast.Expr{Data: &js_ast.EFunction{Fn: js_ast.Fn{Body: body}}} + } else { + getter = js_ast.Expr{Data: &js_ast.EArrow{PreferExpr: true, Body: body}} + } + properties = append(properties, js_ast.Property{ + Key: js_ast.Expr{Data: &js_ast.EString{Value: js_lexer.StringToUTF16(alias)}}, + ValueOrNil: getter, + }) + nsExportSymbolUses[export.Ref] = js_ast.SymbolUse{CountEstimate: 1} + + // Make sure the part that declares the export is included + for _, partIndex := range c.graph.Files[export.SourceIndex].InputFile.Repr.(*graph.JSRepr).TopLevelSymbolToParts(export.Ref) { + // Use a non-local dependency since this is likely from a different + // file if it came in through an export star + nsExportDependencies = append(nsExportDependencies, js_ast.Dependency{ + SourceIndex: export.SourceIndex, + PartIndex: partIndex, + }) + } + } + + declaredSymbols := []js_ast.DeclaredSymbol{} + var nsExportStmts []js_ast.Stmt + + // Prefix this part with "var exports = {}" if this isn't a CommonJS module + needsExportsVariable := repr.AST.ExportsKind != js_ast.ExportsCommonJS && + (!file.IsEntryPoint() || c.options.OutputFormat != 
config.FormatCommonJS) + if needsExportsVariable { + nsExportStmts = append(nsExportStmts, js_ast.Stmt{Data: &js_ast.SLocal{Decls: []js_ast.Decl{{ + Binding: js_ast.Binding{Data: &js_ast.BIdentifier{Ref: repr.AST.ExportsRef}}, + ValueOrNil: js_ast.Expr{Data: &js_ast.EObject{}}, + }}}}) + declaredSymbols = append(declaredSymbols, js_ast.DeclaredSymbol{ + Ref: repr.AST.ExportsRef, + IsTopLevel: true, + }) + } + + // If this file was originally ESM but is now in CommonJS, add a call to + // "__markAsModule" which sets the "__esModule" property to true. This must + // be done before any to "require()" or circular imports of multiple modules + // that have been each converted from ESM to CommonJS may not work correctly. + needsMarkAsModule := + (repr.AST.ExportKeyword.Len > 0 && (repr.AST.ExportsKind == js_ast.ExportsCommonJS || + (file.IsEntryPoint() && c.options.OutputFormat == config.FormatCommonJS))) || + needsExportsVariable + + // Avoid calling "__markAsModule" if we call "__export" since the function + // "__export" already calls "__markAsModule". This is an optimization to + // reduce generated code size. 
+ needsExportCall := len(properties) > 0 + if needsMarkAsModule && needsExportCall { + needsMarkAsModule = false + } + + if needsMarkAsModule { + runtimeRepr := c.graph.Files[runtime.SourceIndex].InputFile.Repr.(*graph.JSRepr) + markAsModuleRef := runtimeRepr.AST.ModuleScope.Members["__markAsModule"].Ref + nsExportStmts = append(nsExportStmts, js_ast.Stmt{Data: &js_ast.SExpr{Value: js_ast.Expr{Data: &js_ast.ECall{ + Target: js_ast.Expr{Data: &js_ast.EIdentifier{Ref: markAsModuleRef}}, + Args: []js_ast.Expr{{Data: &js_ast.EIdentifier{Ref: repr.AST.ExportsRef}}}, + }}}}) + + // Make sure this file depends on the "__markAsModule" symbol + for _, partIndex := range runtimeRepr.TopLevelSymbolToParts(markAsModuleRef) { + nsExportDependencies = append(nsExportDependencies, js_ast.Dependency{ + SourceIndex: runtime.SourceIndex, + PartIndex: partIndex, + }) + } + + // Pull in the "__markAsModule" symbol later. Also make sure the "exports" + // variable is marked as used because we used it above. 
+ repr.Meta.NeedsMarkAsModuleSymbolFromRuntime = true + repr.AST.UsesExportsRef = true + } + + // "__export(exports, { foo: () => foo })" + exportRef := js_ast.InvalidRef + if needsExportCall { + runtimeRepr := c.graph.Files[runtime.SourceIndex].InputFile.Repr.(*graph.JSRepr) + exportRef = runtimeRepr.AST.ModuleScope.Members["__export"].Ref + nsExportStmts = append(nsExportStmts, js_ast.Stmt{Data: &js_ast.SExpr{Value: js_ast.Expr{Data: &js_ast.ECall{ + Target: js_ast.Expr{Data: &js_ast.EIdentifier{Ref: exportRef}}, + Args: []js_ast.Expr{ + {Data: &js_ast.EIdentifier{Ref: repr.AST.ExportsRef}}, + {Data: &js_ast.EObject{ + Properties: properties, + }}, + }, + }}}}) + + // Make sure this file depends on the "__export" symbol + for _, partIndex := range runtimeRepr.TopLevelSymbolToParts(exportRef) { + nsExportDependencies = append(nsExportDependencies, js_ast.Dependency{ + SourceIndex: runtime.SourceIndex, + PartIndex: partIndex, + }) + } + + // Make sure the CommonJS closure, if there is one, includes "exports" + repr.AST.UsesExportsRef = true + } + + // No need to generate a part if it'll be empty + if len(nsExportStmts) > 0 { + // Initialize the part that was allocated for us earlier. The information + // here will be used after this during tree shaking. 
		repr.AST.Parts[js_ast.NSExportPartIndex] = js_ast.Part{
			Stmts:           nsExportStmts,
			SymbolUses:      nsExportSymbolUses,
			Dependencies:    nsExportDependencies,
			DeclaredSymbols: declaredSymbols,

			// This can be removed if nothing uses it
			CanBeRemovedIfUnused: true,

			// Make sure this is trimmed if unused even if tree shaking is disabled
			ForceTreeShaking: true,
		}

		// Pull in the "__export" symbol if it was used
		if exportRef != js_ast.InvalidRef {
			repr.Meta.NeedsExportSymbolFromRuntime = true
		}
	}
}

// createWrapperForFile appends a synthetic "dummy" part to a file that needs
// a wrapper closure. The part declares the wrapper symbol ("require_foo" for
// CommonJS, "init_foo" for lazy ESM) and depends on the matching runtime
// helper ("__commonJS" / "__esm") so that the general-purpose reachability
// analysis pulls both in. Files with Wrap == WrapNone are left untouched.
func (c *linkerContext) createWrapperForFile(sourceIndex uint32) {
	repr := c.graph.Files[sourceIndex].InputFile.Repr.(*graph.JSRepr)

	switch repr.Meta.Wrap {
	// If this is a CommonJS file, we're going to need to generate a wrapper
	// for the CommonJS closure. That will end up looking something like this:
	//
	//   var require_foo = __commonJS((exports, module) => {
	//     ...
	//   });
	//
	// However, that generation is special-cased for various reasons and is
	// done later on. Still, we're going to need to ensure that this file
	// both depends on the "__commonJS" symbol and declares the "require_foo"
	// symbol. Instead of special-casing this during the reachability analysis
	// below, we just append a dummy part to the end of the file with these
	// dependencies and let the general-purpose reachability analysis take care
	// of it.
	case graph.WrapCJS:
		runtimeRepr := c.graph.Files[runtime.SourceIndex].InputFile.Repr.(*graph.JSRepr)
		commonJSParts := runtimeRepr.TopLevelSymbolToParts(c.cjsRuntimeRef)

		// Generate the dummy part
		dependencies := make([]js_ast.Dependency, len(commonJSParts))
		for i, partIndex := range commonJSParts {
			dependencies[i] = js_ast.Dependency{
				SourceIndex: runtime.SourceIndex,
				PartIndex:   partIndex,
			}
		}
		partIndex := c.graph.AddPartToFile(sourceIndex, js_ast.Part{
			SymbolUses: map[js_ast.Ref]js_ast.SymbolUse{
				repr.AST.WrapperRef: {CountEstimate: 1},
			},
			DeclaredSymbols: []js_ast.DeclaredSymbol{
				{Ref: repr.AST.ExportsRef, IsTopLevel: true},
				{Ref: repr.AST.ModuleRef, IsTopLevel: true},
				{Ref: repr.AST.WrapperRef, IsTopLevel: true},
			},
			Dependencies: dependencies,
		})
		repr.Meta.WrapperPartIndex = ast.MakeIndex32(partIndex)
		c.graph.GenerateSymbolImportAndUse(sourceIndex, partIndex, c.cjsRuntimeRef, 1, runtime.SourceIndex)

	// If this is a lazily-initialized ESM file, we're going to need to
	// generate a wrapper for the ESM closure. That will end up looking
	// something like this:
	//
	//   var init_foo = __esm(() => {
	//     ...
	//   });
	//
	// This depends on the "__esm" symbol and declares the "init_foo" symbol
	// for similar reasons to the CommonJS closure above.
	case graph.WrapESM:
		runtimeRepr := c.graph.Files[runtime.SourceIndex].InputFile.Repr.(*graph.JSRepr)
		esmParts := runtimeRepr.TopLevelSymbolToParts(c.esmRuntimeRef)

		// Generate the dummy part
		dependencies := make([]js_ast.Dependency, len(esmParts))
		for i, partIndex := range esmParts {
			dependencies[i] = js_ast.Dependency{
				SourceIndex: runtime.SourceIndex,
				PartIndex:   partIndex,
			}
		}
		partIndex := c.graph.AddPartToFile(sourceIndex, js_ast.Part{
			SymbolUses: map[js_ast.Ref]js_ast.SymbolUse{
				repr.AST.WrapperRef: {CountEstimate: 1},
			},
			DeclaredSymbols: []js_ast.DeclaredSymbol{
				{Ref: repr.AST.WrapperRef, IsTopLevel: true},
			},
			Dependencies: dependencies,
		})
		repr.Meta.WrapperPartIndex = ast.MakeIndex32(partIndex)
		c.graph.GenerateSymbolImportAndUse(sourceIndex, partIndex, c.esmRuntimeRef, 1, runtime.SourceIndex)
	}
}

// matchImportsWithExportsForFile resolves every named import in this file to
// its ultimate export (following chains of re-exports) and records the result
// in Meta.ImportsToBind / NamespaceAlias, logging errors for cycles, missing
// exports, and ambiguous "export * from" collisions.
func (c *linkerContext) matchImportsWithExportsForFile(sourceIndex uint32) {
	file := &c.graph.Files[sourceIndex]
	repr := file.InputFile.Repr.(*graph.JSRepr)

	// Sort imports for determinism. Otherwise our unit tests will randomly
	// fail sometimes when error messages are reordered.
	sortedImportRefs := make([]int, 0, len(repr.AST.NamedImports))
	for ref := range repr.AST.NamedImports {
		sortedImportRefs = append(sortedImportRefs, int(ref.InnerIndex))
	}
	sort.Ints(sortedImportRefs)

	// Pair imports with their matching exports
	for _, innerIndex := range sortedImportRefs {
		// Re-use memory for the cycle detector
		c.cycleDetector = c.cycleDetector[:0]

		importRef := js_ast.Ref{SourceIndex: sourceIndex, InnerIndex: uint32(innerIndex)}
		result, reExports := c.matchImportWithExport(importTracker{sourceIndex: sourceIndex, importRef: importRef}, nil)
		switch result.kind {
		case matchImportIgnore:
			// External or undefined: nothing to bind

		case matchImportNormal:
			repr.Meta.ImportsToBind[importRef] = graph.ImportData{
				ReExports:   reExports,
				SourceIndex: result.sourceIndex,
				Ref:         result.ref,
			}

		case matchImportNamespace:
			c.graph.Symbols.Get(importRef).NamespaceAlias = &js_ast.NamespaceAlias{
				NamespaceRef: result.namespaceRef,
				Alias:        result.alias,
			}

		case matchImportNormalAndNamespace:
			repr.Meta.ImportsToBind[importRef] = graph.ImportData{
				ReExports:   reExports,
				SourceIndex: result.sourceIndex,
				Ref:         result.ref,
			}

			c.graph.Symbols.Get(importRef).NamespaceAlias = &js_ast.NamespaceAlias{
				NamespaceRef: result.namespaceRef,
				Alias:        result.alias,
			}

		case matchImportCycle:
			namedImport := repr.AST.NamedImports[importRef]
			c.log.AddRangeError(file.LineColumnTracker(), js_lexer.RangeOfIdentifier(file.InputFile.Source, namedImport.AliasLoc),
				fmt.Sprintf("Detected cycle while resolving import %q", namedImport.Alias))

		case matchImportProbablyTypeScriptType:
			repr.Meta.IsProbablyTypeScriptType[importRef] = true

		case matchImportAmbiguous:
			namedImport := repr.AST.NamedImports[importRef]
			r := js_lexer.RangeOfIdentifier(file.InputFile.Source, namedImport.AliasLoc)
			var notes []logger.MsgData

			// Provide the locations of both ambiguous exports if possible
			if result.nameLoc.Start != 0 && result.otherNameLoc.Start != 0 {
				a := c.graph.Files[result.sourceIndex]
				b := c.graph.Files[result.otherSourceIndex]
				ra := js_lexer.RangeOfIdentifier(a.InputFile.Source, result.nameLoc)
				rb := js_lexer.RangeOfIdentifier(b.InputFile.Source, result.otherNameLoc)
				notes = []logger.MsgData{
					logger.RangeData(a.LineColumnTracker(), ra, "One matching export is here"),
					logger.RangeData(b.LineColumnTracker(), rb, "Another matching export is here"),
				}
			}

			symbol := c.graph.Symbols.Get(importRef)
			if symbol.ImportItemStatus == js_ast.ImportItemGenerated {
				// This is a warning instead of an error because although it appears
				// to be a named import, it's actually an automatically-generated
				// named import that was originally a property access on an import
				// star namespace object. Normally this property access would just
				// resolve to undefined at run-time instead of failing at binding-
				// time, so we emit a warning and rewrite the value to the literal
				// "undefined" instead of emitting an error.
				symbol.ImportItemStatus = js_ast.ImportItemMissing
				msg := fmt.Sprintf("Import %q will always be undefined because there are multiple matching exports", namedImport.Alias)
				c.log.AddRangeWarningWithNotes(file.LineColumnTracker(), r, msg, notes)
			} else {
				msg := fmt.Sprintf("Ambiguous import %q has multiple matching exports", namedImport.Alias)
				c.log.AddRangeErrorWithNotes(file.LineColumnTracker(), r, msg, notes)
			}
		}
	}
}

// matchImportKind classifies the outcome of resolving one named import; it
// determines which fields of matchImportResult are meaningful.
type matchImportKind uint8

const (
	// The import is either external or undefined
	matchImportIgnore matchImportKind = iota

	// "sourceIndex" and "ref" are in use
	matchImportNormal

	// "namespaceRef" and "alias" are in use
	matchImportNamespace

	// Both "matchImportNormal" and "matchImportNamespace"
	matchImportNormalAndNamespace

	// The import could not be evaluated due to a cycle
	matchImportCycle

	// The import is missing but came from a TypeScript file
	matchImportProbablyTypeScriptType

	// The import resolved to multiple
	// symbols via "export * from"
	matchImportAmbiguous
)

// matchImportResult describes where a single import resolved to. Which
// fields are meaningful depends on "kind" (see matchImportKind).
type matchImportResult struct {
	kind             matchImportKind
	namespaceRef     js_ast.Ref
	alias            string
	sourceIndex      uint32
	nameLoc          logger.Loc // Optional, goes with sourceIndex, ignore if zero
	otherSourceIndex uint32
	otherNameLoc     logger.Loc // Optional, goes with otherSourceIndex, ignore if zero
	ref              js_ast.Ref
}

// matchImportWithExport follows a chain of imports/re-exports starting at
// "tracker" until it reaches a terminal export (or an error condition such as
// a cycle, a missing export, or an ambiguous "export * from" match). It
// accumulates the parts that must be included for each hop into "reExports"
// and returns the final resolution. It uses c.cycleDetector as scratch state;
// the caller resets it between top-level imports.
func (c *linkerContext) matchImportWithExport(
	tracker importTracker, reExportsIn []js_ast.Dependency,
) (result matchImportResult, reExports []js_ast.Dependency) {
	var ambiguousResults []matchImportResult
	reExports = reExportsIn

loop:
	for {
		// Make sure we avoid infinite loops trying to resolve cycles:
		//
		//   // foo.js
		//   export {a as b} from './foo.js'
		//   export {b as c} from './foo.js'
		//   export {c as a} from './foo.js'
		//
		// This uses a O(n^2) array scan instead of a O(n) map because the vast
		// majority of cases have one or two elements and Go arrays are cheap to
		// reuse without allocating.
		for _, previousTracker := range c.cycleDetector {
			if tracker == previousTracker {
				result = matchImportResult{kind: matchImportCycle}
				break loop
			}
		}
		c.cycleDetector = append(c.cycleDetector, tracker)

		// Resolve the import by one step
		nextTracker, status, potentiallyAmbiguousExportStarRefs := c.advanceImportTracker(tracker)
		switch status {
		case importCommonJS, importCommonJSWithoutExports, importExternal, importDisabled:
			if status == importExternal && c.options.OutputFormat.KeepES6ImportExportSyntax() {
				// Imports from external modules should not be converted to CommonJS
				// if the output format preserves the original ES6 import statements
				break
			}

			// If it's a CommonJS or external file, rewrite the import to a
			// property access. Don't do this if the namespace reference is invalid
			// though. This is the case for star imports, where the import is the
			// namespace.
			trackerFile := &c.graph.Files[tracker.sourceIndex]
			namedImport := trackerFile.InputFile.Repr.(*graph.JSRepr).AST.NamedImports[tracker.importRef]
			if namedImport.NamespaceRef != js_ast.InvalidRef {
				if result.kind == matchImportNormal {
					result.kind = matchImportNormalAndNamespace
					result.namespaceRef = namedImport.NamespaceRef
					result.alias = namedImport.Alias
				} else {
					result = matchImportResult{
						kind:         matchImportNamespace,
						namespaceRef: namedImport.NamespaceRef,
						alias:        namedImport.Alias,
					}
				}
			}

			// Warn about importing from a file that is known to not have any exports
			if status == importCommonJSWithoutExports {
				symbol := c.graph.Symbols.Get(tracker.importRef)
				symbol.ImportItemStatus = js_ast.ImportItemMissing
				c.log.AddRangeWarning(
					trackerFile.LineColumnTracker(),
					js_lexer.RangeOfIdentifier(trackerFile.InputFile.Source, namedImport.AliasLoc),
					fmt.Sprintf("Import %q will always be undefined because the file %q has no exports",
						namedImport.Alias, c.graph.Files[nextTracker.sourceIndex].InputFile.Source.PrettyPath))
			}

		case importDynamicFallback:
			// If it's a file with dynamic export fallback, rewrite the import to a property access
			trackerFile := &c.graph.Files[tracker.sourceIndex]
			namedImport := trackerFile.InputFile.Repr.(*graph.JSRepr).AST.NamedImports[tracker.importRef]
			if result.kind == matchImportNormal {
				result.kind = matchImportNormalAndNamespace
				result.namespaceRef = nextTracker.importRef
				result.alias = namedImport.Alias
			} else {
				result = matchImportResult{
					kind:         matchImportNamespace,
					namespaceRef: nextTracker.importRef,
					alias:        namedImport.Alias,
				}
			}

		case importNoMatch:
			symbol := c.graph.Symbols.Get(tracker.importRef)
			trackerFile := &c.graph.Files[tracker.sourceIndex]
			namedImport := trackerFile.InputFile.Repr.(*graph.JSRepr).AST.NamedImports[tracker.importRef]
			r := js_lexer.RangeOfIdentifier(trackerFile.InputFile.Source, namedImport.AliasLoc)

			// Report mismatched imports and exports
			if symbol.ImportItemStatus == js_ast.ImportItemGenerated {
				// This is a warning instead of an error because although it appears
				// to be a named import, it's actually an automatically-generated
				// named import that was originally a property access on an import
				// star namespace object. Normally this property access would just
				// resolve to undefined at run-time instead of failing at binding-
				// time, so we emit a warning and rewrite the value to the literal
				// "undefined" instead of emitting an error.
				symbol.ImportItemStatus = js_ast.ImportItemMissing
				c.log.AddRangeWarning(trackerFile.LineColumnTracker(), r, fmt.Sprintf(
					"Import %q will always be undefined because there is no matching export", namedImport.Alias))
			} else {
				c.log.AddRangeError(trackerFile.LineColumnTracker(), r, fmt.Sprintf("No matching export in %q for import %q",
					c.graph.Files[nextTracker.sourceIndex].InputFile.Source.PrettyPath, namedImport.Alias))
			}

		case importProbablyTypeScriptType:
			// Omit this import from any namespace export code we generate for
			// import star statements (i.e. "import * as ns from 'path'")
			result = matchImportResult{kind: matchImportProbablyTypeScriptType}

		case importFound:
			// If there are multiple ambiguous results due to use of "export * from"
			// statements, trace them all to see if they point to different things.
			for _, ambiguousTracker := range potentiallyAmbiguousExportStarRefs {
				// If this is a re-export of another import, follow the import
				if _, ok := c.graph.Files[ambiguousTracker.SourceIndex].InputFile.Repr.(*graph.JSRepr).AST.NamedImports[ambiguousTracker.Ref]; ok {
					// Save and restore the cycle detector to avoid mixing information
					oldCycleDetector := c.cycleDetector
					ambiguousResult, newReExportFiles := c.matchImportWithExport(importTracker{
						sourceIndex: ambiguousTracker.SourceIndex,
						importRef:   ambiguousTracker.Ref,
					}, reExports)
					c.cycleDetector = oldCycleDetector
					ambiguousResults = append(ambiguousResults, ambiguousResult)
					reExports = newReExportFiles
				} else {
					ambiguousResults = append(ambiguousResults, matchImportResult{
						kind:        matchImportNormal,
						sourceIndex: ambiguousTracker.SourceIndex,
						ref:         ambiguousTracker.Ref,
						nameLoc:     ambiguousTracker.NameLoc,
					})
				}
			}

			// Defer the actual binding of this import until after we generate
			// namespace export code for all files. This has to be done for all
			// import-to-export matches, not just the initial import to the final
			// export, since all imports and re-exports must be merged together
			// for correctness.
			result = matchImportResult{
				kind:        matchImportNormal,
				sourceIndex: nextTracker.sourceIndex,
				ref:         nextTracker.importRef,
				nameLoc:     nextTracker.nameLoc,
			}

			// Depend on the statement(s) that declared this import symbol in the
			// original file
			for _, resolvedPartIndex := range c.graph.Files[tracker.sourceIndex].InputFile.Repr.(*graph.JSRepr).TopLevelSymbolToParts(tracker.importRef) {
				reExports = append(reExports, js_ast.Dependency{
					SourceIndex: tracker.sourceIndex,
					PartIndex:   resolvedPartIndex,
				})
			}

			// If this is a re-export of another import, continue for another
			// iteration of the loop to resolve that import as well
			if _, ok := c.graph.Files[nextTracker.sourceIndex].InputFile.Repr.(*graph.JSRepr).AST.NamedImports[nextTracker.importRef]; ok {
				tracker = nextTracker
				continue
			}

		default:
			panic("Internal error")
		}

		// Stop now if we didn't explicitly "continue" above
		break
	}

	// If there is a potential ambiguity, all results must be the same
	for _, ambiguousResult := range ambiguousResults {
		if ambiguousResult != result {
			if result.kind == matchImportNormal && ambiguousResult.kind == matchImportNormal &&
				result.nameLoc.Start != 0 && ambiguousResult.nameLoc.Start != 0 {
				return matchImportResult{
					kind:             matchImportAmbiguous,
					sourceIndex:      result.sourceIndex,
					nameLoc:          result.nameLoc,
					otherSourceIndex: ambiguousResult.sourceIndex,
					otherNameLoc:     ambiguousResult.nameLoc,
				}, nil
			}
			return matchImportResult{kind: matchImportAmbiguous}, nil
		}
	}

	return
}

// recursivelyWrapDependencies marks this file and (transitively) everything
// it imports as needing a wrapper closure: CommonJS files get WrapCJS, all
// others WrapESM. The runtime file is never wrapped since it always comes
// first. DidWrapDependencies makes the recursion terminate on cycles.
func (c *linkerContext) recursivelyWrapDependencies(sourceIndex uint32) {
	repr := c.graph.Files[sourceIndex].InputFile.Repr.(*graph.JSRepr)
	if repr.Meta.DidWrapDependencies {
		return
	}
	repr.Meta.DidWrapDependencies = true

	// Never wrap the runtime file since it always comes first
	if sourceIndex == runtime.SourceIndex {
		return
	}

	// This module must be wrapped
	if repr.Meta.Wrap == graph.WrapNone {
		if repr.AST.ExportsKind == js_ast.ExportsCommonJS {
			repr.Meta.Wrap = graph.WrapCJS
		} else {
			repr.Meta.Wrap = graph.WrapESM
		}
	}

	// All dependencies must also be wrapped
	for _, record := range repr.AST.ImportRecords {
		if record.SourceIndex.IsValid() {
			c.recursivelyWrapDependencies(record.SourceIndex.GetIndex())
		}
	}
}

// hasDynamicExportsDueToExportStar reports whether this file's exports cannot
// be statically enumerated because of an "export * from" chain that ends at a
// CommonJS or external file. As a side effect it upgrades ExportsKind to
// ExportsESMWithDynamicFallback when that is the case. "visited" guards
// against cycles in the export star graph.
func (c *linkerContext) hasDynamicExportsDueToExportStar(sourceIndex uint32, visited map[uint32]bool) bool {
	// Terminate the traversal now if this file already has dynamic exports
	repr := c.graph.Files[sourceIndex].InputFile.Repr.(*graph.JSRepr)
	if repr.AST.ExportsKind == js_ast.ExportsCommonJS || repr.AST.ExportsKind == js_ast.ExportsESMWithDynamicFallback {
		return true
	}

	// Avoid infinite loops due to cycles in the export star graph
	if visited[sourceIndex] {
		return false
	}
	visited[sourceIndex] = true

	// Scan over the export star graph
	for _, importRecordIndex := range repr.AST.ExportStarImportRecords {
		record := &repr.AST.ImportRecords[importRecordIndex]

		// This file has dynamic exports if the exported imports are from a file
		// that either has dynamic exports directly or transitively by itself
		// having an export star from a file with dynamic exports.
		if (!record.SourceIndex.IsValid() && (!c.graph.Files[sourceIndex].IsEntryPoint() || !c.options.OutputFormat.KeepES6ImportExportSyntax())) ||
			(record.SourceIndex.IsValid() && record.SourceIndex.GetIndex() != sourceIndex && c.hasDynamicExportsDueToExportStar(record.SourceIndex.GetIndex(), visited)) {
			repr.AST.ExportsKind = js_ast.ExportsESMWithDynamicFallback
			return true
		}
	}

	return false
}

// addExportsForExportStar accumulates into "resolvedExports" every name
// re-exported by this file's "export * from" statements, recursing through
// nested export stars. "sourceIndexStack" both guards against cycles and lets
// real named exports anywhere in the stack shadow star re-exports. Colliding
// re-exports from different files are recorded as potentially ambiguous
// rather than resolved.
func (c *linkerContext) addExportsForExportStar(
	resolvedExports map[string]graph.ExportData,
	sourceIndex uint32,
	sourceIndexStack []uint32,
) {
	// Avoid infinite loops due to cycles in the export star graph
	for _, prevSourceIndex := range sourceIndexStack {
		if prevSourceIndex == sourceIndex {
			return
		}
	}
	sourceIndexStack = append(sourceIndexStack, sourceIndex)
	repr := c.graph.Files[sourceIndex].InputFile.Repr.(*graph.JSRepr)

	for _, importRecordIndex := range repr.AST.ExportStarImportRecords {
		record := &repr.AST.ImportRecords[importRecordIndex]
		if !record.SourceIndex.IsValid() {
			// This will be resolved at run time instead
			continue
		}
		otherSourceIndex := record.SourceIndex.GetIndex()

		// Export stars from a CommonJS module don't work because they can't be
		// statically discovered. Just silently ignore them in this case.
		//
		// We could attempt to check whether the imported file still has ES6
		// exports even though it still uses CommonJS features. However, when
		// doing this we'd also have to rewrite any imports of these export star
		// re-exports as property accesses off of a generated require() call.
		otherRepr := c.graph.Files[otherSourceIndex].InputFile.Repr.(*graph.JSRepr)
		if otherRepr.AST.ExportsKind == js_ast.ExportsCommonJS {
			// All exports will be resolved at run time instead
			continue
		}

		// Accumulate this file's exports
	nextExport:
		for alias, name := range otherRepr.AST.NamedExports {
			// ES6 export star statements ignore exports named "default"
			if alias == "default" {
				continue
			}

			// This export star is shadowed if any file in the stack has a matching real named export
			for _, prevSourceIndex := range sourceIndexStack {
				prevRepr := c.graph.Files[prevSourceIndex].InputFile.Repr.(*graph.JSRepr)
				if _, ok := prevRepr.AST.NamedExports[alias]; ok {
					continue nextExport
				}
			}

			if existing, ok := resolvedExports[alias]; !ok {
				// Initialize the re-export
				resolvedExports[alias] = graph.ExportData{
					Ref:         name.Ref,
					SourceIndex: otherSourceIndex,
					NameLoc:     name.AliasLoc,
				}

				// Make sure the symbol is marked as imported so that code splitting
				// imports it correctly if it ends up being shared with another chunk
				repr.Meta.ImportsToBind[name.Ref] = graph.ImportData{
					Ref:         name.Ref,
					SourceIndex: otherSourceIndex,
				}
			} else if existing.SourceIndex != otherSourceIndex {
				// Two different re-exports colliding makes it potentially ambiguous
				existing.PotentiallyAmbiguousExportStarRefs =
					append(existing.PotentiallyAmbiguousExportStarRefs, graph.ImportData{
						SourceIndex: otherSourceIndex,
						Ref:         name.Ref,
						NameLoc:     name.AliasLoc,
					})
				resolvedExports[alias] = existing
			}
		}

		// Search further through this file's export stars
		c.addExportsForExportStar(resolvedExports, otherSourceIndex, sourceIndexStack)
	}
}

// importTracker identifies one named import symbol in a specific file while
// walking a chain of re-exports.
type importTracker struct {
	sourceIndex uint32
	nameLoc     logger.Loc // Optional, goes with sourceIndex, ignore if zero
	importRef   js_ast.Ref
}

// importStatus is the outcome of advancing an importTracker by one step.
type importStatus uint8

const (
	// The imported file has no matching export
	importNoMatch importStatus = iota

	// The imported file has
	// a matching export
	importFound

	// The imported file is CommonJS and has unknown exports
	importCommonJS

	// The import is missing but there is a dynamic fallback object
	importDynamicFallback

	// The import was treated as a CommonJS import but the file is known to have no exports
	importCommonJSWithoutExports

	// The imported file was disabled by mapping it to false in the "browser"
	// field of package.json
	importDisabled

	// The imported file is external and has unknown exports
	importExternal

	// This is a missing re-export in a TypeScript file, so it's probably a type
	importProbablyTypeScriptType
)

// advanceImportTracker resolves "tracker" by a single hop: it looks up the
// import's target file and returns where the import leads next, a status
// describing what was found, and (on importFound) any potentially ambiguous
// "export * from" matches that the caller must also trace.
func (c *linkerContext) advanceImportTracker(tracker importTracker) (importTracker, importStatus, []graph.ImportData) {
	file := &c.graph.Files[tracker.sourceIndex]
	repr := file.InputFile.Repr.(*graph.JSRepr)
	namedImport := repr.AST.NamedImports[tracker.importRef]

	// Is this an external file?
	record := &repr.AST.ImportRecords[namedImport.ImportRecordIndex]
	if !record.SourceIndex.IsValid() {
		return importTracker{}, importExternal, nil
	}

	// Is this a disabled file?
	otherSourceIndex := record.SourceIndex.GetIndex()
	if c.graph.Files[otherSourceIndex].InputFile.Source.KeyPath.IsDisabled() {
		return importTracker{sourceIndex: otherSourceIndex, importRef: js_ast.InvalidRef}, importDisabled, nil
	}

	// Is this a named import of a file without any exports?
	otherRepr := c.graph.Files[otherSourceIndex].InputFile.Repr.(*graph.JSRepr)
	if !namedImport.AliasIsStar && !otherRepr.AST.HasLazyExport &&
		// CommonJS exports
		otherRepr.AST.ExportKeyword.Len == 0 && namedImport.Alias != "default" &&
		// ESM exports
		!otherRepr.AST.UsesExportsRef && !otherRepr.AST.UsesModuleRef {
		// Just warn about it and replace the import with "undefined"
		return importTracker{sourceIndex: otherSourceIndex, importRef: js_ast.InvalidRef}, importCommonJSWithoutExports, nil
	}

	// Is this a CommonJS file?
	if otherRepr.AST.ExportsKind == js_ast.ExportsCommonJS {
		return importTracker{sourceIndex: otherSourceIndex, importRef: js_ast.InvalidRef}, importCommonJS, nil
	}

	// Match this import star with an export star from the imported file
	if matchingExport := otherRepr.Meta.ResolvedExportStar; namedImport.AliasIsStar && matchingExport != nil {
		// Check to see if this is a re-export of another import
		return importTracker{
			sourceIndex: matchingExport.SourceIndex,
			importRef:   matchingExport.Ref,
			nameLoc:     matchingExport.NameLoc,
		}, importFound, matchingExport.PotentiallyAmbiguousExportStarRefs
	}

	// Match this import up with an export from the imported file
	if matchingExport, ok := otherRepr.Meta.ResolvedExports[namedImport.Alias]; ok {
		// Check to see if this is a re-export of another import
		return importTracker{
			sourceIndex: matchingExport.SourceIndex,
			importRef:   matchingExport.Ref,
			nameLoc:     matchingExport.NameLoc,
		}, importFound, matchingExport.PotentiallyAmbiguousExportStarRefs
	}

	// Is this a file with dynamic exports?
	if otherRepr.AST.ExportsKind == js_ast.ExportsESMWithDynamicFallback {
		return importTracker{sourceIndex: otherSourceIndex, importRef: otherRepr.AST.ExportsRef}, importDynamicFallback, nil
	}

	// Missing re-exports in TypeScript files are indistinguishable from types
	if file.InputFile.Loader.IsTypeScript() && namedImport.IsExported {
		return importTracker{}, importProbablyTypeScriptType, nil
	}

	return importTracker{sourceIndex: otherSourceIndex}, importNoMatch, nil
}

// treeShakingAndCodeSplitting first marks all live files/parts reachable from
// every entry point (tree shaking), then records which entry points can reach
// which files (code splitting).
func (c *linkerContext) treeShakingAndCodeSplitting() {
	// Tree shaking: Each entry point marks all files reachable from itself
	c.timer.Begin("Tree shaking")
	for _, entryPoint := range c.graph.EntryPoints() {
		c.markFileLiveForTreeShaking(entryPoint.SourceIndex)
	}
	c.timer.End("Tree shaking")

	// Code splitting: Determine which entry points can reach which files.
	// This
	// has to happen after tree shaking because there is an implicit dependency
	// between live parts within the same file. All liveness has to be computed
	// first before determining which entry points can reach which files.
	c.timer.Begin("Code splitting")
	for i, entryPoint := range c.graph.EntryPoints() {
		c.markFileReachableForCodeSplitting(entryPoint.SourceIndex, uint(i), 0)
	}
	c.timer.End("Code splitting")
}

// markFileReachableForCodeSplitting flags this live file (and everything it
// transitively depends on) as reachable from the given entry point, while
// tracking the minimum distance from any entry point. If a shorter distance
// is found for an already-visited file, the traversal repeats to propagate it.
func (c *linkerContext) markFileReachableForCodeSplitting(sourceIndex uint32, entryPointBit uint, distanceFromEntryPoint uint32) {
	file := &c.graph.Files[sourceIndex]
	if !file.IsLive {
		return
	}
	traverseAgain := false

	// Track the minimum distance to an entry point
	if distanceFromEntryPoint < file.DistanceFromEntryPoint {
		file.DistanceFromEntryPoint = distanceFromEntryPoint
		traverseAgain = true
	}
	distanceFromEntryPoint++

	// Don't mark this file more than once
	if file.EntryBits.HasBit(entryPointBit) && !traverseAgain {
		return
	}
	file.EntryBits.SetBit(entryPointBit)

	switch repr := file.InputFile.Repr.(type) {
	case *graph.JSRepr:
		// If the JavaScript stub for a CSS file is included, also include the CSS file
		if repr.CSSSourceIndex.IsValid() {
			c.markFileReachableForCodeSplitting(repr.CSSSourceIndex.GetIndex(), entryPointBit, distanceFromEntryPoint)
		}

		// Traverse into all imported files
		for _, record := range repr.AST.ImportRecords {
			if record.SourceIndex.IsValid() && !c.isExternalDynamicImport(&record, sourceIndex) {
				c.markFileReachableForCodeSplitting(record.SourceIndex.GetIndex(), entryPointBit, distanceFromEntryPoint)
			}
		}

		// Traverse into all dependencies of all parts in this file
		for _, part := range repr.AST.Parts {
			for _, dependency := range part.Dependencies {
				if dependency.SourceIndex != sourceIndex {
					c.markFileReachableForCodeSplitting(dependency.SourceIndex, entryPointBit, distanceFromEntryPoint)
				}
			}
		}

	case *graph.CSSRepr:
		// Traverse into all dependencies
		for _, record := range repr.AST.ImportRecords {
			if record.SourceIndex.IsValid() {
				c.markFileReachableForCodeSplitting(record.SourceIndex.GetIndex(), entryPointBit, distanceFromEntryPoint)
			}
		}
	}
}

// markFileLiveForTreeShaking marks this file as live and includes the parts
// within it that must be kept: parts with side effects, statement-level
// imports of files with side effects, and (for entry points with tree
// shaking disabled) everything that isn't force-tree-shaken.
func (c *linkerContext) markFileLiveForTreeShaking(sourceIndex uint32) {
	file := &c.graph.Files[sourceIndex]

	// Don't mark this file more than once
	if file.IsLive {
		return
	}
	file.IsLive = true

	switch repr := file.InputFile.Repr.(type) {
	case *graph.JSRepr:
		isTreeShakingEnabled := config.IsTreeShakingEnabled(c.options.Mode, c.options.OutputFormat)

		// If the JavaScript stub for a CSS file is included, also include the CSS file
		if repr.CSSSourceIndex.IsValid() {
			c.markFileLiveForTreeShaking(repr.CSSSourceIndex.GetIndex())
		}

		for partIndex, part := range repr.AST.Parts {
			canBeRemovedIfUnused := part.CanBeRemovedIfUnused

			// Also include any statement-level imports
			for _, importRecordIndex := range part.ImportRecordIndices {
				record := &repr.AST.ImportRecords[importRecordIndex]
				if record.Kind != ast.ImportStmt {
					continue
				}

				if record.SourceIndex.IsValid() {
					otherSourceIndex := record.SourceIndex.GetIndex()

					// Don't include this module for its side effects if it can be
					// considered to have no side effects
					if otherFile := &c.graph.Files[otherSourceIndex]; otherFile.InputFile.SideEffects.Kind != graph.HasSideEffects && !c.options.IgnoreDCEAnnotations {
						continue
					}

					// Otherwise, include this module for its side effects
					c.markFileLiveForTreeShaking(otherSourceIndex)
				}

				// If we get here then the import was included for its side effects, so
				// we must also keep this part
				canBeRemovedIfUnused = false
			}

			// Include all parts in this file with side effects, or just include
			// everything if tree-shaking is disabled. Note that we still want to
			// perform tree-shaking on the runtime even if tree-shaking is disabled.
// NOTE(review): this continues the part loop of markFileLiveForTreeShaking
// started above. The "file.IsEntryPoint()" term keeps whole entry-point files
// when tree shaking is off while still allowing non-entry files (such as the
// runtime) to be shaken; "ForceTreeShaking" opts a part out of that exemption.
			if !canBeRemovedIfUnused || (!part.ForceTreeShaking && !isTreeShakingEnabled && file.IsEntryPoint()) {
				c.markPartLiveForTreeShaking(sourceIndex, uint32(partIndex))
			}
		}

	case *graph.CSSRepr:
		// Include all "@import" rules
		for _, record := range repr.AST.ImportRecords {
			if record.SourceIndex.IsValid() {
				c.markFileLiveForTreeShaking(record.SourceIndex.GetIndex())
			}
		}
	}
}

// isExternalDynamicImport reports whether this import record is a dynamic
// import of a file that is itself an entry point (and not a self-import).
// Such imports are treated as references to separate chunks rather than
// being traversed into. Callers must ensure record.SourceIndex is valid.
func (c *linkerContext) isExternalDynamicImport(record *ast.ImportRecord, sourceIndex uint32) bool {
	return record.Kind == ast.ImportDynamic && c.graph.Files[record.SourceIndex.GetIndex()].IsEntryPoint() && record.SourceIndex.GetIndex() != sourceIndex
}

// markPartLiveForTreeShaking marks one part of a JS file as live, marks the
// containing file live, and recursively marks every dependency of the part.
// Parts are processed at most once via the IsLive flag.
func (c *linkerContext) markPartLiveForTreeShaking(sourceIndex uint32, partIndex uint32) {
	file := &c.graph.Files[sourceIndex]
	// Only JS files have parts; this cast is expected to always succeed here
	repr := file.InputFile.Repr.(*graph.JSRepr)
	part := &repr.AST.Parts[partIndex]

	// Don't mark this part more than once
	if part.IsLive {
		return
	}
	part.IsLive = true

	// Include the file containing this part
	c.markFileLiveForTreeShaking(sourceIndex)

	// Also include any dependencies (possibly parts of other files)
	for _, dep := range part.Dependencies {
		c.markPartLiveForTreeShaking(dep.SourceIndex, dep.PartIndex)
	}
}

// sanitizeFilePathForVirtualModulePath turns an arbitrary module path into a
// base name that is safe to use as a file name on both Unix and Windows.
// Runs of forbidden characters collapse into a single '_'.
func sanitizeFilePathForVirtualModulePath(path string) string {
	// Convert it to a safe file path.
// See: https://stackoverflow.com/a/31976060
	sb := strings.Builder{}
	// needsGap is set once at least one valid rune has been written and a
	// forbidden rune was seen since, so runs of bad runes become one '_'
	needsGap := false
	for _, c := range path {
		switch c {
		case 0:
			// These characters are forbidden on Unix and Windows

		case '<', '>', ':', '"', '|', '?', '*':
			// These characters are forbidden on Windows

		default:
			if c < 0x20 {
				// These characters are forbidden on Windows
				// ("break" leaves the switch, falling through to the
				// needsGap bookkeeping below)
				break
			}

			// Turn runs of invalid characters into a '_'
			if needsGap {
				sb.WriteByte('_')
				needsGap = false
			}

			sb.WriteRune(c)
			continue
		}

		// Only remember the gap once something has been written, so leading
		// invalid characters don't produce a leading '_'
		if sb.Len() > 0 {
			needsGap = true
		}
	}

	// Make sure the name isn't empty
	if sb.Len() == 0 {
		return "_"
	}

	// Note: An extension will be added to this base name, so there is no need to
	// avoid forbidden file names such as ".." since ".js" is a valid file name.
	return sb.String()
}

// JavaScript modules are traversed in depth-first postorder. This is the
// order that JavaScript modules were evaluated in before the top-level await
// feature was introduced.
//
//       A
//      / \
//     B   C
//      \ /
//       D
//
// If A imports B and then C, B imports D, and C imports D, then the JavaScript
// traversal order is D B C A.
//
// This function may deviate from ESM import order for dynamic imports (both
// "require()" and "import()"). This is because the import order is impossible
// to determine since the imports happen at run-time instead of compile-time.
// In this case we just pick an arbitrary but consistent order.
+func (c *linkerContext) findImportedCSSFilesInJSOrder(entryPoint uint32) (order []uint32) { + visited := make(map[uint32]bool) + var visit func(uint32, ast.Index32) + + // Include this file and all files it imports + visit = func(sourceIndex uint32, importerIndex ast.Index32) { + if visited[sourceIndex] { + return + } + visited[sourceIndex] = true + file := &c.graph.Files[sourceIndex] + repr := file.InputFile.Repr.(*graph.JSRepr) + + // Iterate over each part in the file in order + for _, part := range repr.AST.Parts { + // Ignore dead code that has been removed from the bundle. Any code + // that's reachable from the entry point, even through lazy dynamic + // imports, could end up being activated by the bundle and needs its + // CSS to be included. This may change if/when code splitting is + // supported for CSS. + if !part.IsLive { + continue + } + + // Traverse any files imported by this part. Note that CommonJS calls + // to "require()" count as imports too, sort of as if the part has an + // ESM "import" statement in it. This may seem weird because ESM imports + // are a compile-time concept while CommonJS imports are a run-time + // concept. But we don't want to manipulate