From ad756c73a07cb455d3845c0da4cd38bab6af8f38 Mon Sep 17 00:00:00 2001 From: Suzy Mueller Date: Wed, 8 Jun 2022 10:18:05 -0400 Subject: [PATCH 001/136] internal/lsp: initial test set up for inlay hints Set up the tests for inlay hints. We test inlay hints by converting them to text edits and verifying the output is as we expected it. This change does not yet deal with making sure the server settings are correct. Change-Id: I136f971a87bf9936fd44047d45fe0a3f03c9164e Reviewed-on: https://go-review.googlesource.com/c/tools/+/411095 Run-TryBot: Suzy Mueller gopls-CI: kokoro Reviewed-by: Jamal Carvalho --- internal/lsp/cmd/test/cmdtest.go | 4 ++ internal/lsp/lsp_test.go | 42 +++++++++++++++++++++ internal/lsp/source/source_test.go | 4 ++ internal/lsp/testdata/inlayHint/a.go | 9 +++++ internal/lsp/testdata/inlayHint/a.go.golden | 11 ++++++ internal/lsp/tests/tests.go | 19 ++++++++++ 6 files changed, 89 insertions(+) create mode 100644 internal/lsp/testdata/inlayHint/a.go create mode 100644 internal/lsp/testdata/inlayHint/a.go.golden diff --git a/internal/lsp/cmd/test/cmdtest.go b/internal/lsp/cmd/test/cmdtest.go index 312f7b8b435..ff0461b333f 100644 --- a/internal/lsp/cmd/test/cmdtest.go +++ b/internal/lsp/cmd/test/cmdtest.go @@ -113,6 +113,10 @@ func (r *runner) Hover(t *testing.T, spn span.Span, info string) { //TODO: hovering not supported on command line } +func (r *runner) InlayHints(t *testing.T, spn span.Span) { + // TODO: inlayHints not supported on command line +} + func (r *runner) runGoplsCmd(t testing.TB, args ...string) (string, string) { rStdout, wStdout, err := os.Pipe() if err != nil { diff --git a/internal/lsp/lsp_test.go b/internal/lsp/lsp_test.go index ee364b8b034..e097100c2ff 100644 --- a/internal/lsp/lsp_test.go +++ b/internal/lsp/lsp_test.go @@ -932,6 +932,48 @@ func (r *runner) References(t *testing.T, src span.Span, itemList []span.Span) { } } +func (r *runner) InlayHints(t *testing.T, spn span.Span) { + uri := spn.URI() + filename := uri.Filename() + + hints, err := r.server.InlayHint(r.ctx, &protocol.InlayHintParams{ + TextDocument: protocol.TextDocumentIdentifier{ + URI: protocol.URIFromSpanURI(uri), + }, + // TODO: add ViewPort + }) + if err != nil { + t.Fatal(err) + } + + // Map inlay hints to text edits. 
+ edits := make([]protocol.TextEdit, len(hints)) + for i, hint := range hints { + edits[i] = protocol.TextEdit{ + Range: protocol.Range{Start: *hint.Position, End: *hint.Position}, + NewText: fmt.Sprintf("<%s>", hint.Label[0].Value), + } + } + + m, err := r.data.Mapper(uri) + if err != nil { + t.Fatal(err) + } + sedits, err := source.FromProtocolEdits(m, edits) + if err != nil { + t.Error(err) + } + got := diff.ApplyEdits(string(m.Content), sedits) + + withinlayHints := string(r.data.Golden("inlayHint", filename, func() ([]byte, error) { + return []byte(got), nil + })) + + if withinlayHints != got { + t.Errorf("format failed for %s, expected:\n%v\ngot:\n%v", filename, withinlayHints, got) + } +} + func (r *runner) Rename(t *testing.T, spn span.Span, newText string) { tag := fmt.Sprintf("%s-rename", newText) diff --git a/internal/lsp/source/source_test.go b/internal/lsp/source/source_test.go index 426bffc97b5..9218f9ddc1a 100644 --- a/internal/lsp/source/source_test.go +++ b/internal/lsp/source/source_test.go @@ -685,6 +685,10 @@ func (r *runner) Highlight(t *testing.T, src span.Span, locations []span.Span) { } } +func (r *runner) InlayHints(t *testing.T, src span.Span) { + // TODO(golang/go#53315): add source test +} + func (r *runner) Hover(t *testing.T, src span.Span, text string) { ctx := r.ctx _, srcRng, err := spanToRange(r.data, src) diff --git a/internal/lsp/testdata/inlayHint/a.go b/internal/lsp/testdata/inlayHint/a.go new file mode 100644 index 00000000000..90ef7c41d1d --- /dev/null +++ b/internal/lsp/testdata/inlayHint/a.go @@ -0,0 +1,9 @@ +package inlayHint //@inlayHint("package") + +func hello(name string) string { + return "Hello " + name +} + +func helloWorld() string { + return hello("World") +} diff --git a/internal/lsp/testdata/inlayHint/a.go.golden b/internal/lsp/testdata/inlayHint/a.go.golden new file mode 100644 index 00000000000..e4e6cc0c0cc --- /dev/null +++ b/internal/lsp/testdata/inlayHint/a.go.golden @@ -0,0 +1,11 @@ +-- inlayHint -- +package inlayHint //@inlayHint("package") + +func hello(name string) string { + return "Hello " + name +} + +func helloWorld() string { + return hello("World") +} + diff --git a/internal/lsp/tests/tests.go b/internal/lsp/tests/tests.go index 8265cf2e9b1..81a5d399029 100644 --- a/internal/lsp/tests/tests.go +++ b/internal/lsp/tests/tests.go @@ -81,6 +81,7 @@ type PrepareRenames map[span.Span]*source.PrepareItem type Symbols map[span.URI][]protocol.DocumentSymbol type SymbolsChildren map[string][]protocol.DocumentSymbol type SymbolInformation map[span.Span]protocol.SymbolInformation +type InlayHints []span.Span type WorkspaceSymbols map[WorkspaceSymbolsTestType]map[span.URI][]string type Signatures map[span.Span]*protocol.SignatureHelp type Links map[span.URI][]Link @@ -113,6 +114,7 @@ type Data struct { Highlights Highlights References References Renames Renames + InlayHints InlayHints PrepareRenames PrepareRenames Symbols Symbols symbolsChildren SymbolsChildren @@ -156,6 +158,7 @@ type Tests interface { Definition(*testing.T, span.Span, Definition) Implementation(*testing.T, span.Span, []span.Span) Highlight(*testing.T, span.Span, []span.Span) + InlayHints(*testing.T, span.Span) References(*testing.T, span.Span, []span.Span) Rename(*testing.T, span.Span, string) PrepareRename(*testing.T, span.Span, *source.PrepareItem) @@ -466,6 +469,7 @@ func load(t testing.TB, mode string, dir string) *Data { "hoverdef": datum.collectHoverDefinitions, "hover": datum.collectHovers, "highlight": datum.collectHighlights, + "inlayHint": 
datum.collectInlayHints, "refs": datum.collectReferences, "rename": datum.collectRenames, "prepare": datum.collectPrepareRenames, @@ -782,6 +786,17 @@ func Run(t *testing.T, tests Tests, data *Data) { } }) + t.Run("InlayHints", func(t *testing.T) { + t.Skip("Inlay Hints not yet implemented") + t.Helper() + for _, src := range data.InlayHints { + t.Run(SpanName(src), func(t *testing.T) { + t.Helper() + tests.InlayHints(t, src) + }) + } + }) + t.Run("References", func(t *testing.T) { t.Helper() for src, itemList := range data.References { @@ -1292,6 +1307,10 @@ func (data *Data) collectHighlights(src span.Span, expected []span.Span) { data.Highlights[src] = append(data.Highlights[src], expected...) } +func (data *Data) collectInlayHints(src span.Span) { + data.InlayHints = append(data.InlayHints, src) +} + func (data *Data) collectReferences(src span.Span, expected []span.Span) { data.References[src] = expected } From 697795d6a801a72ad67b8c4fab6fdd74bc9150d1 Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Sun, 8 Aug 2021 20:12:22 -0400 Subject: [PATCH 002/136] internal/lsp/regtest: don't run the connection on the test context When test awaiting fails, we often fail to shut down the server because the pipe is closed. Fix this by using a detached context for running the connection. Also clean up some unnecessary context arguments. Change-Id: I535c1cc1606e44df5f8e2177c92293d57836f992 Reviewed-on: https://go-review.googlesource.com/c/tools/+/340850 TryBot-Result: Gopher Robot Run-TryBot: Robert Findley gopls-CI: kokoro Reviewed-by: Alan Donovan --- gopls/internal/regtest/misc/shared_test.go | 3 ++- internal/jsonrpc2/servertest/servertest.go | 2 +- internal/jsonrpc2/servertest/servertest_test.go | 2 +- internal/lsp/lsprpc/lsprpc_test.go | 7 +++---- internal/lsp/regtest/env.go | 14 ++++++++++---- internal/lsp/regtest/runner.go | 17 +++++++++-------- 6 files changed, 26 insertions(+), 19 deletions(-) diff --git a/gopls/internal/regtest/misc/shared_test.go b/gopls/internal/regtest/misc/shared_test.go index 6861743ff42..a6b0cd87ef1 100644 --- a/gopls/internal/regtest/misc/shared_test.go +++ b/gopls/internal/regtest/misc/shared_test.go @@ -30,7 +30,8 @@ func runShared(t *testing.T, testFunc func(env1 *Env, env2 *Env)) { WithOptions(Modes(modes)).Run(t, sharedProgram, func(t *testing.T, env1 *Env) { // Create a second test session connected to the same workspace and server // as the first. - env2 := NewEnv(env1.Ctx, t, env1.Sandbox, env1.Server, env1.Editor.Config, true) + env2, cleanup := NewEnv(env1.Ctx, t, env1.Sandbox, env1.Server, env1.Editor.Config, true) + defer cleanup() env2.Await(InitialWorkspaceLoad) testFunc(env1, env2) }) diff --git a/internal/jsonrpc2/servertest/servertest.go b/internal/jsonrpc2/servertest/servertest.go index 392e084a9ad..b879ebdf181 100644 --- a/internal/jsonrpc2/servertest/servertest.go +++ b/internal/jsonrpc2/servertest/servertest.go @@ -68,7 +68,7 @@ type PipeServer struct { } // NewPipeServer returns a test server that can be connected to via io.Pipes. 
-func NewPipeServer(ctx context.Context, server jsonrpc2.StreamServer, framer jsonrpc2.Framer) *PipeServer { +func NewPipeServer(server jsonrpc2.StreamServer, framer jsonrpc2.Framer) *PipeServer { if framer == nil { framer = jsonrpc2.NewRawStream } diff --git a/internal/jsonrpc2/servertest/servertest_test.go b/internal/jsonrpc2/servertest/servertest_test.go index 38fa21a24d9..1780d4f9147 100644 --- a/internal/jsonrpc2/servertest/servertest_test.go +++ b/internal/jsonrpc2/servertest/servertest_test.go @@ -26,7 +26,7 @@ func TestTestServer(t *testing.T) { server := jsonrpc2.HandlerServer(fakeHandler) tcpTS := NewTCPServer(ctx, server, nil) defer tcpTS.Close() - pipeTS := NewPipeServer(ctx, server, nil) + pipeTS := NewPipeServer(server, nil) defer pipeTS.Close() tests := []struct { diff --git a/internal/lsp/lsprpc/lsprpc_test.go b/internal/lsp/lsprpc/lsprpc_test.go index 795c887e4b4..cde641c920b 100644 --- a/internal/lsp/lsprpc/lsprpc_test.go +++ b/internal/lsp/lsprpc/lsprpc_test.go @@ -60,7 +60,7 @@ func TestClientLogging(t *testing.T) { ctx = debug.WithInstance(ctx, "", "") ss := NewStreamServer(cache.New(nil), false) ss.serverForTest = server - ts := servertest.NewPipeServer(ctx, ss, nil) + ts := servertest.NewPipeServer(ss, nil) defer checkClose(t, ts.Close) cc := ts.Connect(ctx) cc.Go(ctx, protocol.ClientHandler(client, jsonrpc2.MethodNotFound)) @@ -125,12 +125,11 @@ func setupForwarding(ctx context.Context, t *testing.T, s protocol.Server) (dire ss.serverForTest = s tsDirect := servertest.NewTCPServer(serveCtx, ss, nil) - forwarderCtx := debug.WithInstance(ctx, "", "") forwarder, err := NewForwarder("tcp;"+tsDirect.Addr, nil) if err != nil { t.Fatal(err) } - tsForwarded := servertest.NewPipeServer(forwarderCtx, forwarder, nil) + tsForwarded := servertest.NewPipeServer(forwarder, nil) return tsDirect, tsForwarded, func() { checkClose(t, tsDirect.Close) checkClose(t, tsForwarded.Close) @@ -225,7 +224,7 @@ func TestDebugInfoLifecycle(t *testing.T) { if err != nil { t.Fatal(err) } - tsForwarder := servertest.NewPipeServer(clientCtx, forwarder, nil) + tsForwarder := servertest.NewPipeServer(forwarder, nil) conn1 := tsForwarder.Connect(clientCtx) ed1, err := fake.NewEditor(sb, fake.EditorConfig{}).Connect(clientCtx, conn1, fake.ClientHooks{}) diff --git a/internal/lsp/regtest/env.go b/internal/lsp/regtest/env.go index f095c38f285..a37cbf66611 100644 --- a/internal/lsp/regtest/env.go +++ b/internal/lsp/regtest/env.go @@ -14,6 +14,7 @@ import ( "golang.org/x/tools/internal/jsonrpc2/servertest" "golang.org/x/tools/internal/lsp/fake" "golang.org/x/tools/internal/lsp/protocol" + "golang.org/x/tools/internal/xcontext" ) // Env holds an initialized fake Editor, Workspace, and Server, which may be @@ -109,9 +110,14 @@ type condition struct { // NewEnv creates a new test environment using the given scratch environment // and gopls server. -func NewEnv(ctx context.Context, tb testing.TB, sandbox *fake.Sandbox, ts servertest.Connector, editorConfig fake.EditorConfig, withHooks bool) *Env { +// +// The resulting func must be called to close the jsonrpc2 connection. 
+func NewEnv(ctx context.Context, tb testing.TB, sandbox *fake.Sandbox, ts servertest.Connector, editorConfig fake.EditorConfig, withHooks bool) (_ *Env, cleanup func()) { tb.Helper() - conn := ts.Connect(ctx) + + bgCtx, cleanupConn := context.WithCancel(xcontext.Detach(ctx)) + conn := ts.Connect(bgCtx) + env := &Env{ T: tb, Ctx: ctx, @@ -138,12 +144,12 @@ func NewEnv(ctx context.Context, tb testing.TB, sandbox *fake.Sandbox, ts server OnUnregistration: env.onUnregistration, } } - editor, err := fake.NewEditor(sandbox, editorConfig).Connect(ctx, conn, hooks) + editor, err := fake.NewEditor(sandbox, editorConfig).Connect(bgCtx, conn, hooks) if err != nil { tb.Fatal(err) } env.Editor = editor - return env + return env, cleanupConn } func (e *Env) onDiagnostics(_ context.Context, d *protocol.PublishDiagnosticsParams) error { diff --git a/internal/lsp/regtest/runner.go b/internal/lsp/regtest/runner.go index 3cfeb772a19..bebec53c527 100644 --- a/internal/lsp/regtest/runner.go +++ b/internal/lsp/regtest/runner.go @@ -234,7 +234,7 @@ func (r *Runner) Run(t *testing.T, files string, test TestFunc, opts ...RunOptio tests := []struct { name string mode Mode - getServer func(context.Context, *testing.T, func(*source.Options)) jsonrpc2.StreamServer + getServer func(*testing.T, func(*source.Options)) jsonrpc2.StreamServer }{ {"singleton", Singleton, singletonServer}, {"forwarded", Forwarded, r.forwardedServer}, @@ -301,14 +301,15 @@ func (r *Runner) Run(t *testing.T, files string, test TestFunc, opts ...RunOptio // better solution to ensure that all Go processes started by gopls have // exited before we clean up. r.AddCloser(sandbox) - ss := tc.getServer(ctx, t, config.optionsHook) + ss := tc.getServer(t, config.optionsHook) framer := jsonrpc2.NewRawStream ls := &loggingFramer{} if !config.skipLogs { framer = ls.framer(jsonrpc2.NewRawStream) } - ts := servertest.NewPipeServer(ctx, ss, framer) - env := NewEnv(ctx, t, sandbox, ts, config.editor, !config.skipHooks) + ts := servertest.NewPipeServer(ss, framer) + env, cleanup := NewEnv(ctx, t, sandbox, ts, config.editor, !config.skipHooks) + defer cleanup() defer func() { if t.Failed() && r.PrintGoroutinesOnFailure { pprof.Lookup("goroutine").WriteTo(os.Stderr, 1) @@ -406,11 +407,11 @@ func (s *loggingFramer) printBuffers(testname string, w io.Writer) { fmt.Fprintf(os.Stderr, "#### End Gopls Test Logs for %q\n", testname) } -func singletonServer(ctx context.Context, t *testing.T, optsHook func(*source.Options)) jsonrpc2.StreamServer { +func singletonServer(t *testing.T, optsHook func(*source.Options)) jsonrpc2.StreamServer { return lsprpc.NewStreamServer(cache.New(optsHook), false) } -func experimentalServer(_ context.Context, t *testing.T, optsHook func(*source.Options)) jsonrpc2.StreamServer { +func experimentalServer(t *testing.T, optsHook func(*source.Options)) jsonrpc2.StreamServer { options := func(o *source.Options) { optsHook(o) o.EnableAllExperiments() @@ -421,7 +422,7 @@ func experimentalServer(_ context.Context, t *testing.T, optsHook func(*source.O return lsprpc.NewStreamServer(cache.New(options), false) } -func (r *Runner) forwardedServer(ctx context.Context, t *testing.T, optsHook func(*source.Options)) jsonrpc2.StreamServer { +func (r *Runner) forwardedServer(t *testing.T, optsHook func(*source.Options)) jsonrpc2.StreamServer { ts := r.getTestServer(optsHook) return newForwarder("tcp", ts.Addr) } @@ -440,7 +441,7 @@ func (r *Runner) getTestServer(optsHook func(*source.Options)) *servertest.TCPSe return r.ts } -func (r *Runner) 
separateProcessServer(ctx context.Context, t *testing.T, optsHook func(*source.Options)) jsonrpc2.StreamServer { +func (r *Runner) separateProcessServer(t *testing.T, optsHook func(*source.Options)) jsonrpc2.StreamServer { // TODO(rfindley): can we use the autostart behavior here, instead of // pre-starting the remote? socket := r.getRemoteSocket(t) From 9651276d64be221532f972dbbbd8e6186c784fde Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Fri, 10 Jun 2022 13:34:52 -0400 Subject: [PATCH 003/136] internal/lsp/cache: optimize Snapshot.clone This change replaces the single large map used for snapshot.goFiles by a map of 256 stripes, each of which becomes immutable once shared. This optimizes the common case in which the copy is nearly identical to the original. We still need to visit each map entry to see whether it needs to be deleted (which is rare) and to inherit the handle in the usual case. This is now done concurrently. Also, share the (logically immutable) []PackageIDs slices across old and new snapshots. This was worth 5% of CPU and 1/3 of allocations (all small). Benchmark on darwin/arm64 shows a 29% reduction for DidChange. $ go test -v ./gopls/internal/regtest/bench -run=TestBenchmarkDidChange -didchange_dir=$HOME/w/kubernetes -didchange_file=pkg/util/hash/hash.go Before: BenchmarkStatistics 100 22955469 ns/op 11308095 B/op 47412 allocs/op BenchmarkStatistics 100 23454630 ns/op 11226742 B/op 46882 allocs/op BenchmarkStatistics 100 23618532 ns/op 11258619 B/op 47068 allocs/op After goFilesMap: BenchmarkStatistics 100 16643972 ns/op 8770787 B/op 46238 allocs/op BenchmarkStatistics 100 17805864 ns/op 8862926 B/op 46762 allocs/op BenchmarkStatistics 100 18618255 ns/op 9308864 B/op 49776 allocs/op After goFilesMap and ids sharing: BenchmarkStatistics 100 16703623 ns/op 8772626 B/op 33812 allocs/op BenchmarkStatistics 100 16927378 ns/op 8529491 B/op 32328 allocs/op BenchmarkStatistics 100 16632762 ns/op 8557533 B/op 32497 allocs/op Also: - Add comments documenting findings of profiling. - preallocate slice for knownSubdirs. - remove unwanted loop over slice in Generation.Inherit Updates golang/go#45686 Change-Id: Id953699191b8404cf36ba3a7ab9cd78b1d19c0a2 Reviewed-on: https://go-review.googlesource.com/c/tools/+/410176 TryBot-Result: Gopher Robot Reviewed-by: Robert Findley gopls-CI: kokoro Run-TryBot: Alan Donovan --- internal/lsp/cache/cache.go | 8 +- internal/lsp/cache/session.go | 2 +- internal/lsp/cache/snapshot.go | 184 +++++++++++++++++++++++++++---- internal/lsp/source/view.go | 2 +- internal/memoize/memoize.go | 21 ++-- internal/memoize/memoize_test.go | 3 +- internal/span/uri.go | 8 ++ 7 files changed, 191 insertions(+), 37 deletions(-) diff --git a/internal/lsp/cache/cache.go b/internal/lsp/cache/cache.go index ac670b573e5..f5796dfefa2 100644 --- a/internal/lsp/cache/cache.go +++ b/internal/lsp/cache/cache.go @@ -183,8 +183,14 @@ func (h *fileHandle) Read() ([]byte, error) { return h.bytes, h.err } +// hashContents returns a string of hex digits denoting the hash of contents. +// +// TODO(adonovan): opt: use [32]byte array as a value more widely and convert +// to hex digits on demand (rare). The array is larger when it appears as a +// struct field (32B vs 16B) but smaller overall (string data is 64B), has +// better locality, and is more efficiently hashed by runtime maps. 
func hashContents(contents []byte) string { - return fmt.Sprintf("%x", sha256.Sum256(contents)) + return fmt.Sprintf("%64x", sha256.Sum256(contents)) } var cacheIndex, sessionIndex, viewIndex int64 diff --git a/internal/lsp/cache/session.go b/internal/lsp/cache/session.go index e018cb33bd8..9da5c1e69f9 100644 --- a/internal/lsp/cache/session.go +++ b/internal/lsp/cache/session.go @@ -234,7 +234,7 @@ func (s *Session) createView(ctx context.Context, name string, folder span.URI, packages: make(map[packageKey]*packageHandle), meta: NewMetadataGraph(), files: make(map[span.URI]source.VersionedFileHandle), - goFiles: make(map[parseKey]*parseGoHandle), + goFiles: newGoFileMap(), symbols: make(map[span.URI]*symbolHandle), actions: make(map[actionKey]*actionHandle), workspacePackages: make(map[PackageID]PackagePath), diff --git a/internal/lsp/cache/snapshot.go b/internal/lsp/cache/snapshot.go index a219935aa66..0d3c869cd2e 100644 --- a/internal/lsp/cache/snapshot.go +++ b/internal/lsp/cache/snapshot.go @@ -76,7 +76,7 @@ type snapshot struct { files map[span.URI]source.VersionedFileHandle // goFiles maps a parseKey to its parseGoHandle. - goFiles map[parseKey]*parseGoHandle + goFiles *goFileMap // TODO(rfindley): consider merging this with files to reduce burden on clone. symbols map[span.URI]*symbolHandle @@ -663,16 +663,17 @@ func (s *snapshot) transitiveReverseDependencies(id PackageID, ids map[PackageID func (s *snapshot) getGoFile(key parseKey) *parseGoHandle { s.mu.Lock() defer s.mu.Unlock() - return s.goFiles[key] + return s.goFiles.get(key) } func (s *snapshot) addGoFile(key parseKey, pgh *parseGoHandle) *parseGoHandle { s.mu.Lock() defer s.mu.Unlock() - if existing, ok := s.goFiles[key]; ok { - return existing + + if prev := s.goFiles.get(key); prev != nil { + return prev } - s.goFiles[key] = pgh + s.goFiles.set(key, pgh) return pgh } @@ -811,6 +812,8 @@ func (s *snapshot) fileWatchingGlobPatterns(ctx context.Context) map[string]stru patterns := map[string]struct{}{ fmt.Sprintf("**/*.{%s}", extensions): {}, } + + // Add a pattern for each Go module in the workspace that is not within the view. dirs := s.workspace.dirs(ctx, s) for _, dir := range dirs { dirName := dir.Filename() @@ -830,14 +833,19 @@ func (s *snapshot) fileWatchingGlobPatterns(ctx context.Context) map[string]stru // contain Go code (golang/go#42348). To handle this, explicitly watch all // of the directories in the workspace. We find them by adding the // directories of every file in the snapshot's workspace directories. - var dirNames []string - for _, uri := range s.getKnownSubdirs(dirs) { - dirNames = append(dirNames, uri.Filename()) - } - sort.Strings(dirNames) - if len(dirNames) > 0 { + // There may be thousands. + knownSubdirs := s.getKnownSubdirs(dirs) + if n := len(knownSubdirs); n > 0 { + dirNames := make([]string, 0, n) + for _, uri := range knownSubdirs { + dirNames = append(dirNames, uri.Filename()) + } + sort.Strings(dirNames) + // The double allocation of Sprintf(Join()) accounts for 8% + // of DidChange, but specializing doesn't appear to help. 
:( patterns[fmt.Sprintf("{%s}", strings.Join(dirNames, ","))] = struct{}{} } + return patterns } @@ -874,7 +882,7 @@ func (s *snapshot) getKnownSubdirs(wsDirs []span.URI) []span.URI { } s.unprocessedSubdirChanges = nil - var result []span.URI + result := make([]span.URI, 0, len(s.knownSubdirs)) for uri := range s.knownSubdirs { result = append(result, uri) } @@ -1719,7 +1727,7 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC packages: make(map[packageKey]*packageHandle, len(s.packages)), actions: make(map[actionKey]*actionHandle, len(s.actions)), files: make(map[span.URI]source.VersionedFileHandle, len(s.files)), - goFiles: make(map[parseKey]*parseGoHandle, len(s.goFiles)), + goFiles: s.goFiles.clone(), symbols: make(map[span.URI]*symbolHandle, len(s.symbols)), workspacePackages: make(map[PackageID]PackagePath, len(s.workspacePackages)), unloadableFiles: make(map[span.URI]struct{}, len(s.unloadableFiles)), @@ -1764,12 +1772,27 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC result.parseWorkHandles[k] = v } - for k, v := range s.goFiles { - if _, ok := changes[k.file.URI]; ok { - continue + // Copy the handles of all Go source files. + // There may be tens of thousands of files, + // but changes are typically few, so we + // use a striped map optimized for this case + // and visit its stripes in parallel. + var ( + toDeleteMu sync.Mutex + toDelete []parseKey + ) + s.goFiles.forEachConcurrent(func(k parseKey, v *parseGoHandle) { + if changes[k.file.URI] == nil { + // no change (common case) + newGen.Inherit(v.handle) + } else { + toDeleteMu.Lock() + toDelete = append(toDelete, k) + toDeleteMu.Unlock() } - newGen.Inherit(v.handle) - result.goFiles[k] = v + }) + for _, k := range toDelete { + result.goFiles.delete(k) } // Copy all of the go.mod-related handles. They may be invalidated later, @@ -1975,21 +1998,34 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC deleteInvalidMetadata := forceReloadMetadata || workspaceModeChanged idsInSnapshot := map[PackageID]bool{} // track all known IDs for uri, ids := range s.meta.ids { - var resultIDs []PackageID - for _, id := range ids { + // Optimization: ids slices are typically numerous, short (<3), + // and rarely modified by this loop, so don't allocate copies + // until necessary. + var resultIDs []PackageID // nil implies equal to ids[:i:i] + for i, id := range ids { if skipID[id] || deleteInvalidMetadata && idsToInvalidate[id] { + resultIDs = ids[:i:i] // unshare continue } // The ID is not reachable from any workspace package, so it should // be deleted. if !reachableID[id] { + resultIDs = ids[:i:i] // unshare continue } idsInSnapshot[id] = true - resultIDs = append(resultIDs, id) + if resultIDs != nil { + resultIDs = append(resultIDs, id) + } + } + if resultIDs == nil { + resultIDs = ids } result.meta.ids[uri] = resultIDs } + // TODO(adonovan): opt: represent PackageID as an index into a process-global + // dup-free list of all package names ever seen, then use a bitmap instead of + // a hash table for "PackageSet" (e.g. idsInSnapshot). // Copy the package metadata. We only need to invalidate packages directly // containing the affected file, and only if it changed in a relevant way. @@ -2259,7 +2295,7 @@ func metadataChanges(ctx context.Context, lockedSnapshot *snapshot, oldFH, newFH // lockedSnapshot must be locked. 
func peekOrParse(ctx context.Context, lockedSnapshot *snapshot, fh source.FileHandle, mode source.ParseMode) (*source.ParsedGoFile, error) { key := parseKey{file: fh.FileIdentity(), mode: mode} - if pgh := lockedSnapshot.goFiles[key]; pgh != nil { + if pgh := lockedSnapshot.goFiles.get(key); pgh != nil { cached := pgh.handle.Cached(lockedSnapshot.generation) if cached != nil { cached := cached.(*parseGoData) @@ -2547,3 +2583,107 @@ func readGoSum(dst map[module.Version][]string, file string, data []byte) error } return nil } + +// -- goFileMap -- + +// A goFileMap is conceptually a map[parseKey]*parseGoHandle, +// optimized for cloning all or nearly all entries. +type goFileMap struct { + // The map is represented as a map of 256 stripes, one per + // distinct value of the top 8 bits of key.file.Hash. + // Each stripe has an associated boolean indicating whether it + // is shared, and thus immutable, and thus must be copied before any update. + // (The bits could be packed but it hasn't been worth it yet.) + stripes [256]map[parseKey]*parseGoHandle + exclusive [256]bool // exclusive[i] means stripe[i] is not shared and may be safely mutated +} + +// newGoFileMap returns a new empty goFileMap. +func newGoFileMap() *goFileMap { + return new(goFileMap) // all stripes are shared (non-exclusive) nil maps +} + +// clone returns a copy of m. +// For concurrency, it counts as an update to m. +func (m *goFileMap) clone() *goFileMap { + m.exclusive = [256]bool{} // original and copy are now nonexclusive + copy := *m + return © +} + +// get returns the value for key k. +func (m *goFileMap) get(k parseKey) *parseGoHandle { + return m.stripes[m.hash(k)][k] +} + +// set updates the value for key k to v. +func (m *goFileMap) set(k parseKey, v *parseGoHandle) { + m.unshare(k)[k] = v +} + +// delete deletes the value for key k, if any. +func (m *goFileMap) delete(k parseKey) { + // TODO(adonovan): opt?: skip unshare if k isn't present. + delete(m.unshare(k), k) +} + +// forEachConcurrent calls f for each entry in the map. +// Calls may be concurrent. +// f must not modify m. +func (m *goFileMap) forEachConcurrent(f func(parseKey, *parseGoHandle)) { + // Visit stripes in parallel chunks. + const p = 16 // concurrency level + var wg sync.WaitGroup + wg.Add(p) + for i := 0; i < p; i++ { + chunk := m.stripes[i*p : (i+1)*p] + go func() { + for _, stripe := range chunk { + for k, v := range stripe { + f(k, v) + } + } + wg.Done() + }() + } + wg.Wait() +} + +// -- internal-- + +// hash returns 8 bits from the key's file digest. +func (m *goFileMap) hash(k parseKey) int { + h := k.file.Hash + if h == "" { + // Sadly the Hash isn't always a hash because cache.GetFile may + // successfully return a *fileHandle containing an error and no hash. + // Lump the duds together for now. + // TODO(adonovan): fix the underlying bug. + return 0 + } + return unhex(h[0])<<4 | unhex(h[1]) +} + +// unhex returns the value of a valid hex digit. +func unhex(b byte) int { + if '0' <= b && b <= '9' { + return int(b - '0') + } + return int(b) & ^0x20 - 'A' + 0xA // [a-fA-F] +} + +// unshare makes k's stripe exclusive, allocating a copy if needed, and returns it. +func (m *goFileMap) unshare(k parseKey) map[parseKey]*parseGoHandle { + i := m.hash(k) + if !m.exclusive[i] { + m.exclusive[i] = true + + // Copy the map. 
+ copy := make(map[parseKey]*parseGoHandle, len(m.stripes[i])) + for k, v := range m.stripes[i] { + copy[k] = v + } + m.stripes[i] = copy + } + return m.stripes[i] +} diff --git a/internal/lsp/source/view.go b/internal/lsp/source/view.go index 94037f33fe3..5b908bc721c 100644 --- a/internal/lsp/source/view.go +++ b/internal/lsp/source/view.go @@ -532,7 +532,7 @@ type FileHandle interface { type FileIdentity struct { URI span.URI - // Identifier represents a unique identifier for the file's content. + // Hash is a string of hex digits denoting the cryptographic digest of the file's content. Hash string } diff --git a/internal/memoize/memoize.go b/internal/memoize/memoize.go index 89f79c68b7d..dec2fff6836 100644 --- a/internal/memoize/memoize.go +++ b/internal/memoize/memoize.go @@ -234,19 +234,18 @@ func (s *Store) DebugOnlyIterate(f func(k, v interface{})) { } } -func (g *Generation) Inherit(hs ...*Handle) { - for _, h := range hs { - if atomic.LoadUint32(&g.destroyed) != 0 { - panic("inherit on generation " + g.name + " destroyed by " + g.destroyedBy) - } +// Inherit makes h valid in generation g. It is concurrency-safe. +func (g *Generation) Inherit(h *Handle) { + if atomic.LoadUint32(&g.destroyed) != 0 { + panic("inherit on generation " + g.name + " destroyed by " + g.destroyedBy) + } - h.mu.Lock() - if h.state == stateDestroyed { - panic(fmt.Sprintf("inheriting destroyed handle %#v (type %T) into generation %v", h.key, h.key, g.name)) - } - h.generations[g] = struct{}{} - h.mu.Unlock() + h.mu.Lock() + if h.state == stateDestroyed { + panic(fmt.Sprintf("inheriting destroyed handle %#v (type %T) into generation %v", h.key, h.key, g.name)) } + h.generations[g] = struct{}{} + h.mu.Unlock() } // Cached returns the value associated with a handle. diff --git a/internal/memoize/memoize_test.go b/internal/memoize/memoize_test.go index f05966b4614..ee0fd23ea1d 100644 --- a/internal/memoize/memoize_test.go +++ b/internal/memoize/memoize_test.go @@ -87,7 +87,8 @@ func TestCleanup(t *testing.T) { expectGet(t, h1, g1, &v1) expectGet(t, h2, g1, &v2) g2 := s.Generation("g2") - g2.Inherit(h1, h2) + g2.Inherit(h1) + g2.Inherit(h2) g1.Destroy("TestCleanup") expectGet(t, h1, g2, &v1) diff --git a/internal/span/uri.go b/internal/span/uri.go index a9777ff8598..f2b39ca424e 100644 --- a/internal/span/uri.go +++ b/internal/span/uri.go @@ -35,6 +35,10 @@ func (uri URI) Filename() string { } func filename(uri URI) (string, error) { + // This function is frequently called and its cost is + // dominated by the allocation of a net.URL. + // TODO(adonovan): opt: replace by a bespoke parseFileURI + // function that doesn't allocate. if uri == "" { return "", nil } @@ -80,6 +84,10 @@ func URIFromURI(s string) URI { return URI(u.String()) } +// CompareURI performs a three-valued comparison of two URIs. +// Lexically unequal URIs may compare equal if they are "file:" URIs +// that share the same base name (ignoring case) and denote the same +// file device/inode, according to stat(2). func CompareURI(a, b URI) int { if equalURI(a, b) { return 0 From 65c0181b23a8a3e8980181af0d8a7bfbc35775a4 Mon Sep 17 00:00:00 2001 From: Jamal Carvalho Date: Wed, 8 Jun 2022 18:52:37 +0000 Subject: [PATCH 004/136] internal/lsp: support textDocument/inlayHint for parameter names This change implements support for textDocument/inlayHint and adds inlay hints for parameter names. For golang/go#52343. For golang/vscode-go#1631. 
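As a rough illustration (using the kase function from the new testdata
in this change, with hint labels written inline where an editor would
render them):

    func kase(foo int, bar bool, baz ...string)

    kase(0, true, "c", "d", "e")
    // is displayed approximately as
    // kase(foo: 0, bar: true, baz...: "c", "d", "e")

A hint is emitted only for arguments that line up with a named
parameter: the final variadic parameter is labeled with a trailing
"...", arguments beyond it are left unannotated, and unnamed
parameters (as for builtins like append) produce no hint.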
Change-Id: I3f989838b86cef4fd2b4076cb6340010fff7c24c Reviewed-on: https://go-review.googlesource.com/c/tools/+/411094 gopls-CI: kokoro Reviewed-by: Hyang-Ah Hana Kim Reviewed-by: Suzy Mueller Run-TryBot: Jamal Carvalho TryBot-Result: Gopher Robot --- internal/lsp/inlay_hint.go | 21 +++++ internal/lsp/server_gen.go | 4 +- internal/lsp/source/inlay_hint.go | 90 +++++++++++++++++++ .../testdata/inlay_hint/parameter_names.go | 45 ++++++++++ .../inlay_hint/parameter_names.go.golden | 47 ++++++++++ 5 files changed, 205 insertions(+), 2 deletions(-) create mode 100644 internal/lsp/inlay_hint.go create mode 100644 internal/lsp/source/inlay_hint.go create mode 100644 internal/lsp/testdata/inlay_hint/parameter_names.go create mode 100644 internal/lsp/testdata/inlay_hint/parameter_names.go.golden diff --git a/internal/lsp/inlay_hint.go b/internal/lsp/inlay_hint.go new file mode 100644 index 00000000000..b2fd028d728 --- /dev/null +++ b/internal/lsp/inlay_hint.go @@ -0,0 +1,21 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lsp + +import ( + "context" + + "golang.org/x/tools/internal/lsp/protocol" + "golang.org/x/tools/internal/lsp/source" +) + +func (s *Server) inlayHint(ctx context.Context, params *protocol.InlayHintParams) ([]protocol.InlayHint, error) { + snapshot, fh, ok, release, err := s.beginFileRequest(ctx, params.TextDocument.URI, source.Go) + defer release() + if !ok { + return nil, err + } + return source.InlayHint(ctx, snapshot, fh, params.ViewPort) +} diff --git a/internal/lsp/server_gen.go b/internal/lsp/server_gen.go index 93b2f9913b8..4e9db0efa19 100644 --- a/internal/lsp/server_gen.go +++ b/internal/lsp/server_gen.go @@ -160,8 +160,8 @@ func (s *Server) Initialized(ctx context.Context, params *protocol.InitializedPa return s.initialized(ctx, params) } -func (s *Server) InlayHint(context.Context, *protocol.InlayHintParams) ([]protocol.InlayHint, error) { - return nil, notImplemented("InlayHint") +func (s *Server) InlayHint(ctx context.Context, params *protocol.InlayHintParams) ([]protocol.InlayHint, error) { + return s.inlayHint(ctx, params) } func (s *Server) InlayHintRefresh(context.Context) error { diff --git a/internal/lsp/source/inlay_hint.go b/internal/lsp/source/inlay_hint.go new file mode 100644 index 00000000000..94dc1372d04 --- /dev/null +++ b/internal/lsp/source/inlay_hint.go @@ -0,0 +1,90 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package source + +import ( + "context" + "fmt" + "go/ast" + "go/types" + + "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/lsp/lsppos" + "golang.org/x/tools/internal/lsp/protocol" +) + +const ( + maxLabelLength = 28 +) + +func InlayHint(ctx context.Context, snapshot Snapshot, fh FileHandle, _ protocol.Range) ([]protocol.InlayHint, error) { + ctx, done := event.Start(ctx, "source.InlayHint") + defer done() + + pkg, pgf, err := GetParsedFile(ctx, snapshot, fh, NarrowestPackage) + if err != nil { + return nil, fmt.Errorf("getting file for InlayHint: %w", err) + } + + tmap := lsppos.NewTokenMapper(pgf.Src, pgf.Tok) + info := pkg.GetTypesInfo() + + var hints []protocol.InlayHint + ast.Inspect(pgf.File, func(node ast.Node) bool { + switch n := node.(type) { + case *ast.CallExpr: + hints = append(hints, parameterNames(n, tmap, info)...) 
+ } + return true + }) + return hints, nil +} + +func parameterNames(node *ast.CallExpr, tmap *lsppos.TokenMapper, info *types.Info) []protocol.InlayHint { + signature, ok := info.TypeOf(node.Fun).(*types.Signature) + if !ok { + return nil + } + + var hints []protocol.InlayHint + for i, v := range node.Args { + start, ok := tmap.Position(v.Pos()) + if !ok { + continue + } + params := signature.Params() + // When a function has variadic params, we skip args after + // params.Len(). + if i > params.Len()-1 { + break + } + value := params.At(i).Name() + // param.Name is empty for built-ins like append + if value == "" { + continue + } + if signature.Variadic() && i == params.Len()-1 { + value = value + "..." + } + hints = append(hints, protocol.InlayHint{ + Position: &start, + Label: buildLabel(value + ":"), + Kind: protocol.Parameter, + PaddingRight: true, + }) + } + return hints +} + +func buildLabel(s string) []protocol.InlayHintLabelPart { + label := protocol.InlayHintLabelPart{ + Value: s, + } + if len(s) > maxLabelLength { + label.Value = s[:maxLabelLength] + "..." + label.Tooltip = s + } + return []protocol.InlayHintLabelPart{label} +} diff --git a/internal/lsp/testdata/inlay_hint/parameter_names.go b/internal/lsp/testdata/inlay_hint/parameter_names.go new file mode 100644 index 00000000000..6fba23530aa --- /dev/null +++ b/internal/lsp/testdata/inlay_hint/parameter_names.go @@ -0,0 +1,45 @@ +package inlayHint //@inlayHint("package") + +import "fmt" + +func hello(name string) string { + return "Hello " + name +} + +func helloWorld() string { + return hello("World") +} + +type foo struct{} + +func (*foo) bar(baz string, qux int) int { + if baz != "" { + return qux + 1 + } + return qux +} + +func kase(foo int, bar bool, baz ...string) { + fmt.Println(foo, bar, baz) +} + +func kipp(foo string, bar, baz string) { + fmt.Println(foo, bar, baz) +} + +func plex(foo, bar string, baz string) { + fmt.Println(foo, bar, baz) +} + +func tars(foo string, bar, baz string) { + fmt.Println(foo, bar, baz) +} + +func foobar() { + var x foo + x.bar("", 1) + kase(0, true, "c", "d", "e") + kipp("a", "b", "c") + plex("a", "b", "c") + tars("a", "b", "c") +} diff --git a/internal/lsp/testdata/inlay_hint/parameter_names.go.golden b/internal/lsp/testdata/inlay_hint/parameter_names.go.golden new file mode 100644 index 00000000000..66351e48300 --- /dev/null +++ b/internal/lsp/testdata/inlay_hint/parameter_names.go.golden @@ -0,0 +1,47 @@ +-- inlayHint -- +package inlayHint //@inlayHint("package") + +import "fmt" + +func hello(name string) string { + return "Hello " + name +} + +func helloWorld() string { + return hello("World") +} + +type foo struct{} + +func (*foo) bar(baz string, qux int) int { + if baz != "" { + return qux + 1 + } + return qux +} + +func kase(foo int, bar bool, baz ...string) { + fmt.Println(foo, bar, baz) +} + +func kipp(foo string, bar, baz string) { + fmt.Println(foo, bar, baz) +} + +func plex(foo, bar string, baz string) { + fmt.Println(foo, bar, baz) +} + +func tars(foo string, bar, baz string) { + fmt.Println(foo, bar, baz) +} + +func foobar() { + var x foo + x.bar("", 1) + kase(0, true, "c", "d", "e") + kipp("a", "b", "c") + plex("a", "b", "c") + tars("a", "b", "c") +} + From ecc147927830bfc0ccc572bd31fa6ade1679b72b Mon Sep 17 00:00:00 2001 From: Jamal Carvalho Date: Wed, 8 Jun 2022 18:57:57 +0000 Subject: [PATCH 005/136] internal/lsp: add inlay hints for variable types For golang/go#52343. For golang/vscode-go#1631. 
Change-Id: I94a1b3c389d8bfaa48754e28a52ef76c29eb6ead Reviewed-on: https://go-review.googlesource.com/c/tools/+/411100 Run-TryBot: Jamal Carvalho gopls-CI: kokoro TryBot-Result: Gopher Robot Reviewed-by: Hyang-Ah Hana Kim Reviewed-by: Suzy Mueller --- internal/lsp/source/inlay_hint.go | 47 +++++++++++++++++++ .../lsp/testdata/inlay_hint/variable_types.go | 20 ++++++++ .../inlay_hint/variable_types.go.golden | 22 +++++++++ 3 files changed, 89 insertions(+) create mode 100644 internal/lsp/testdata/inlay_hint/variable_types.go create mode 100644 internal/lsp/testdata/inlay_hint/variable_types.go.golden diff --git a/internal/lsp/source/inlay_hint.go b/internal/lsp/source/inlay_hint.go index 94dc1372d04..00a2b009db1 100644 --- a/internal/lsp/source/inlay_hint.go +++ b/internal/lsp/source/inlay_hint.go @@ -8,6 +8,7 @@ import ( "context" "fmt" "go/ast" + "go/token" "go/types" "golang.org/x/tools/internal/event" @@ -30,12 +31,17 @@ func InlayHint(ctx context.Context, snapshot Snapshot, fh FileHandle, _ protocol tmap := lsppos.NewTokenMapper(pgf.Src, pgf.Tok) info := pkg.GetTypesInfo() + q := Qualifier(pgf.File, pkg.GetTypes(), info) var hints []protocol.InlayHint ast.Inspect(pgf.File, func(node ast.Node) bool { switch n := node.(type) { case *ast.CallExpr: hints = append(hints, parameterNames(n, tmap, info)...) + case *ast.AssignStmt: + hints = append(hints, assignVariableTypes(n, tmap, info, &q)...) + case *ast.RangeStmt: + hints = append(hints, rangeVariableTypes(n, tmap, info, &q)...) } return true }) @@ -78,6 +84,47 @@ func parameterNames(node *ast.CallExpr, tmap *lsppos.TokenMapper, info *types.In return hints } +func assignVariableTypes(node *ast.AssignStmt, tmap *lsppos.TokenMapper, info *types.Info, q *types.Qualifier) []protocol.InlayHint { + if node.Tok != token.DEFINE { + return nil + } + var hints []protocol.InlayHint + for _, v := range node.Lhs { + if h := variableType(v, tmap, info, q); h != nil { + hints = append(hints, *h) + } + } + return hints +} + +func rangeVariableTypes(node *ast.RangeStmt, tmap *lsppos.TokenMapper, info *types.Info, q *types.Qualifier) []protocol.InlayHint { + var hints []protocol.InlayHint + if h := variableType(node.Key, tmap, info, q); h != nil { + hints = append(hints, *h) + } + if h := variableType(node.Value, tmap, info, q); h != nil { + hints = append(hints, *h) + } + return hints +} + +func variableType(e ast.Expr, tmap *lsppos.TokenMapper, info *types.Info, q *types.Qualifier) *protocol.InlayHint { + typ := info.TypeOf(e) + if typ == nil { + return nil + } + end, ok := tmap.Position(e.End()) + if !ok { + return nil + } + return &protocol.InlayHint{ + Position: &end, + Label: buildLabel(types.TypeString(typ, *q)), + Kind: protocol.Type, + PaddingLeft: true, + } +} + func buildLabel(s string) []protocol.InlayHintLabelPart { label := protocol.InlayHintLabelPart{ Value: s, diff --git a/internal/lsp/testdata/inlay_hint/variable_types.go b/internal/lsp/testdata/inlay_hint/variable_types.go new file mode 100644 index 00000000000..219af7059c7 --- /dev/null +++ b/internal/lsp/testdata/inlay_hint/variable_types.go @@ -0,0 +1,20 @@ +package inlayHint //@inlayHint("package") + +func assignTypes() { + i, j := 0, len([]string{})-1 + println(i, j) +} + +func rangeTypes() { + for k, v := range []string{} { + println(k, v) + } +} + +func funcLitType() { + myFunc := func(a string) string { return "" } +} + +func compositeLitType() { + foo := map[string]interface{}{"": ""} +} diff --git a/internal/lsp/testdata/inlay_hint/variable_types.go.golden 
b/internal/lsp/testdata/inlay_hint/variable_types.go.golden new file mode 100644 index 00000000000..70c019caa1f --- /dev/null +++ b/internal/lsp/testdata/inlay_hint/variable_types.go.golden @@ -0,0 +1,22 @@ +-- inlayHint -- +package inlayHint //@inlayHint("package") + +func assignTypes() { + i, j := 0, len([]string{})-1 + println(i, j) +} + +func rangeTypes() { + for k, v := range []string{} { + println(k, v) + } +} + +func funcLitType() { + myFunc := func(a string) string { return "" } +} + +func compositeLitType() { + foo := map[string]interface{}{"": ""} +} + From 83b0675060419168e3e46f9dc821d9dd9a358f63 Mon Sep 17 00:00:00 2001 From: Jamal Carvalho Date: Wed, 8 Jun 2022 18:58:45 +0000 Subject: [PATCH 006/136] internal/lsp: add inlay hints for constant values For golang/go#52343. For golang/vscode-go#1631. Change-Id: Iaef0beab2837502f6428767f457d1da21848fcb6 Reviewed-on: https://go-review.googlesource.com/c/tools/+/411101 Run-TryBot: Jamal Carvalho TryBot-Result: Gopher Robot Reviewed-by: Suzy Mueller gopls-CI: kokoro --- internal/lsp/source/inlay_hint.go | 54 +++++++++++++++++++ .../testdata/inlay_hint/constant_values.go | 45 ++++++++++++++++ .../inlay_hint/constant_values.go.golden | 47 ++++++++++++++++ 3 files changed, 146 insertions(+) create mode 100644 internal/lsp/testdata/inlay_hint/constant_values.go create mode 100644 internal/lsp/testdata/inlay_hint/constant_values.go.golden diff --git a/internal/lsp/source/inlay_hint.go b/internal/lsp/source/inlay_hint.go index 00a2b009db1..95df237ad21 100644 --- a/internal/lsp/source/inlay_hint.go +++ b/internal/lsp/source/inlay_hint.go @@ -8,8 +8,10 @@ import ( "context" "fmt" "go/ast" + "go/constant" "go/token" "go/types" + "strings" "golang.org/x/tools/internal/event" "golang.org/x/tools/internal/lsp/lsppos" @@ -42,6 +44,8 @@ func InlayHint(ctx context.Context, snapshot Snapshot, fh FileHandle, _ protocol hints = append(hints, assignVariableTypes(n, tmap, info, &q)...) case *ast.RangeStmt: hints = append(hints, rangeVariableTypes(n, tmap, info, &q)...) + case *ast.GenDecl: + hints = append(hints, constantValues(n, tmap, info)...) } return true }) @@ -125,6 +129,56 @@ func variableType(e ast.Expr, tmap *lsppos.TokenMapper, info *types.Info, q *typ } } +func constantValues(node *ast.GenDecl, tmap *lsppos.TokenMapper, info *types.Info) []protocol.InlayHint { + if node.Tok != token.CONST { + return nil + } + + var hints []protocol.InlayHint + for _, v := range node.Specs { + spec, ok := v.(*ast.ValueSpec) + if !ok { + continue + } + end, ok := tmap.Position(v.End()) + if !ok { + continue + } + // Show hints when values are missing or at least one value is not + // a basic literal. 
+ showHints := len(spec.Values) == 0 + checkValues := len(spec.Names) == len(spec.Values) + var values []string + for i, w := range spec.Names { + obj, ok := info.ObjectOf(w).(*types.Const) + if !ok || obj.Val().Kind() == constant.Unknown { + return nil + } + if checkValues { + switch spec.Values[i].(type) { + case *ast.BadExpr: + return nil + case *ast.BasicLit: + default: + if obj.Val().Kind() != constant.Bool { + showHints = true + } + } + } + values = append(values, fmt.Sprintf("%v", obj.Val())) + } + if !showHints || len(values) == 0 { + continue + } + hints = append(hints, protocol.InlayHint{ + Position: &end, + Label: buildLabel("= " + strings.Join(values, ", ")), + PaddingLeft: true, + }) + } + return hints +} + func buildLabel(s string) []protocol.InlayHintLabelPart { label := protocol.InlayHintLabelPart{ Value: s, diff --git a/internal/lsp/testdata/inlay_hint/constant_values.go b/internal/lsp/testdata/inlay_hint/constant_values.go new file mode 100644 index 00000000000..e3339b0f303 --- /dev/null +++ b/internal/lsp/testdata/inlay_hint/constant_values.go @@ -0,0 +1,45 @@ +package inlayHint //@inlayHint("package") + +const True = true + +type Kind int + +const ( + KindNone Kind = iota + KindPrint + KindPrintf + KindErrorf +) + +const ( + u = iota * 4 + v float64 = iota * 42 + w = iota * 42 +) + +const ( + a, b = 1, 2 + c, d + e, f = 5 * 5, "hello" + "world" + g, h + i, j = true, f +) + +// No hint +const ( + Int = 3 + Float = 3.14 + Bool = true + Rune = '3' + Complex = 2.7i + String = "Hello, world!" +) + +var ( + varInt = 3 + varFloat = 3.14 + varBool = true + varRune = '3' + '4' + varComplex = 2.7i + varString = "Hello, world!" +) diff --git a/internal/lsp/testdata/inlay_hint/constant_values.go.golden b/internal/lsp/testdata/inlay_hint/constant_values.go.golden new file mode 100644 index 00000000000..69481b10d64 --- /dev/null +++ b/internal/lsp/testdata/inlay_hint/constant_values.go.golden @@ -0,0 +1,47 @@ +-- inlayHint -- +package inlayHint //@inlayHint("package") + +const True = true + +type Kind int + +const ( + KindNone Kind = iota<= 0> + KindPrint<= 1> + KindPrintf<= 2> + KindErrorf<= 3> +) + +const ( + u = iota * 4<= 0> + v float64 = iota * 42<= 42> + w = iota * 42<= 84> +) + +const ( + a, b = 1, 2 + c, d<= 1, 2> + e, f = 5 * 5, "hello" + "world"<= 25, "helloworld"> + g, h<= 25, "helloworld"> + i, j = true, f<= true, "helloworld"> +) + +// No hint +const ( + Int = 3 + Float = 3.14 + Bool = true + Rune = '3' + Complex = 2.7i + String = "Hello, world!" +) + +var ( + varInt = 3 + varFloat = 3.14 + varBool = true + varRune = '3' + '4' + varComplex = 2.7i + varString = "Hello, world!" +) + From 5e48d261e2d147593b5464e78d5a95dd9442f070 Mon Sep 17 00:00:00 2001 From: Jamal Carvalho Date: Wed, 8 Jun 2022 19:00:34 +0000 Subject: [PATCH 007/136] internal/lsp: add inlay hints for composite literal names For golang/go#52343. For golang/vscode-go#1631. 
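As a rough illustration (using the struct literal from the new
testdata in this change), elements of an unkeyed struct literal are
labeled with the corresponding field names:

    []struct{ in, want string }{
        {"Hello, world", "dlrow ,olleH"},
        // is displayed approximately as
        // {in: "Hello, world", want: "dlrow ,olleH"},
    }

Elements already written as key: value pairs are not annotated, and
composite literals whose underlying type is not a struct are skipped.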
Change-Id: I8fba5ddf0bd25ba0fc20f3305ce13868f426087c Reviewed-on: https://go-review.googlesource.com/c/tools/+/411102 Run-TryBot: Jamal Carvalho TryBot-Result: Gopher Robot Reviewed-by: Suzy Mueller gopls-CI: kokoro --- internal/lsp/source/inlay_hint.go | 33 +++++++++++++++++++ .../testdata/inlay_hint/composite_literals.go | 15 +++++++++ .../inlay_hint/composite_literals.go.golden | 17 ++++++++++ 3 files changed, 65 insertions(+) create mode 100644 internal/lsp/testdata/inlay_hint/composite_literals.go create mode 100644 internal/lsp/testdata/inlay_hint/composite_literals.go.golden diff --git a/internal/lsp/source/inlay_hint.go b/internal/lsp/source/inlay_hint.go index 95df237ad21..406e4ae80e8 100644 --- a/internal/lsp/source/inlay_hint.go +++ b/internal/lsp/source/inlay_hint.go @@ -46,6 +46,8 @@ func InlayHint(ctx context.Context, snapshot Snapshot, fh FileHandle, _ protocol hints = append(hints, rangeVariableTypes(n, tmap, info, &q)...) case *ast.GenDecl: hints = append(hints, constantValues(n, tmap, info)...) + case *ast.CompositeLit: + hints = append(hints, compositeLiterals(n, tmap, info)...) } return true }) @@ -179,6 +181,37 @@ func constantValues(node *ast.GenDecl, tmap *lsppos.TokenMapper, info *types.Inf return hints } +func compositeLiterals(node *ast.CompositeLit, tmap *lsppos.TokenMapper, info *types.Info) []protocol.InlayHint { + typ := info.TypeOf(node) + if typ == nil { + return nil + } + strct, ok := typ.Underlying().(*types.Struct) + if !ok { + return nil + } + + var hints []protocol.InlayHint + for i, v := range node.Elts { + if _, ok := v.(*ast.KeyValueExpr); !ok { + start, ok := tmap.Position(v.Pos()) + if !ok { + continue + } + if i > strct.NumFields()-1 { + break + } + hints = append(hints, protocol.InlayHint{ + Position: &start, + Label: buildLabel(strct.Field(i).Name() + ":"), + Kind: protocol.Parameter, + PaddingRight: true, + }) + } + } + return hints +} + func buildLabel(s string) []protocol.InlayHintLabelPart { label := protocol.InlayHintLabelPart{ Value: s, diff --git a/internal/lsp/testdata/inlay_hint/composite_literals.go b/internal/lsp/testdata/inlay_hint/composite_literals.go new file mode 100644 index 00000000000..7eeed03e81a --- /dev/null +++ b/internal/lsp/testdata/inlay_hint/composite_literals.go @@ -0,0 +1,15 @@ +package inlayHint //@inlayHint("package") + +import "fmt" + +func fieldNames() { + for _, c := range []struct { + in, want string + }{ + {"Hello, world", "dlrow ,olleH"}, + {"Hello, 世界", "界世 ,olleH"}, + {"", ""}, + } { + fmt.Println(c.in == c.want) + } +} diff --git a/internal/lsp/testdata/inlay_hint/composite_literals.go.golden b/internal/lsp/testdata/inlay_hint/composite_literals.go.golden new file mode 100644 index 00000000000..efa87b0fea8 --- /dev/null +++ b/internal/lsp/testdata/inlay_hint/composite_literals.go.golden @@ -0,0 +1,17 @@ +-- inlayHint -- +package inlayHint //@inlayHint("package") + +import "fmt" + +func fieldNames() { + for _, c := range []struct { + in, want string + }{ + {"Hello, world", "dlrow ,olleH"}, + {"Hello, 世界", "界世 ,olleH"}, + {"", ""}, + } { + fmt.Println(c.in == c.want) + } +} + From c41ddceaa4e81aad291932bf356dca891eb9488a Mon Sep 17 00:00:00 2001 From: Jamal Carvalho Date: Fri, 10 Jun 2022 13:48:16 +0000 Subject: [PATCH 008/136] internal/lsp: include padding in inlay hint marker tests The marker tests are updated to include padding values when mapping inlay hints to text edits. 
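As a rough illustration of the marker change (the left-hand forms
follow from the earlier <%s> marker format, the right-hand forms from
the new format with padding): parameter and field-name hints carry
PaddingRight, while type and constant-value hints carry PaddingLeft,
so for example:

    hello(<name:>"World")   becomes   hello(<name: >"World")
    i<int>                  becomes   i< int>
    iota<= 0>               becomes   iota< = 0>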
Change-Id: Ieb421088238c65b07abdad12763816d3d1e757c8 Reviewed-on: https://go-review.googlesource.com/c/tools/+/411654 Run-TryBot: Jamal Carvalho gopls-CI: kokoro TryBot-Result: Gopher Robot Reviewed-by: Suzy Mueller --- internal/lsp/lsp_test.go | 9 +++++++- .../inlay_hint/composite_literals.go.golden | 10 ++++----- .../inlay_hint/constant_values.go.golden | 22 +++++++++---------- .../inlay_hint/parameter_names.go.golden | 20 ++++++++--------- .../inlay_hint/variable_types.go.golden | 8 +++---- 5 files changed, 38 insertions(+), 31 deletions(-) diff --git a/internal/lsp/lsp_test.go b/internal/lsp/lsp_test.go index e097100c2ff..2f46ff304bb 100644 --- a/internal/lsp/lsp_test.go +++ b/internal/lsp/lsp_test.go @@ -949,9 +949,16 @@ func (r *runner) InlayHints(t *testing.T, spn span.Span) { // Map inlay hints to text edits. edits := make([]protocol.TextEdit, len(hints)) for i, hint := range hints { + var paddingLeft, paddingRight string + if hint.PaddingLeft { + paddingLeft = " " + } + if hint.PaddingRight { + paddingRight = " " + } edits[i] = protocol.TextEdit{ Range: protocol.Range{Start: *hint.Position, End: *hint.Position}, - NewText: fmt.Sprintf("<%s>", hint.Label[0].Value), + NewText: fmt.Sprintf("<%s%s%s>", paddingLeft, hint.Label[0].Value, paddingRight), } } diff --git a/internal/lsp/testdata/inlay_hint/composite_literals.go.golden b/internal/lsp/testdata/inlay_hint/composite_literals.go.golden index efa87b0fea8..ecff7800387 100644 --- a/internal/lsp/testdata/inlay_hint/composite_literals.go.golden +++ b/internal/lsp/testdata/inlay_hint/composite_literals.go.golden @@ -4,14 +4,14 @@ package inlayHint //@inlayHint("package") import "fmt" func fieldNames() { - for _, c := range []struct { + for _< int>, c< struct{in string; want strin...> := range []struct { in, want string }{ - {"Hello, world", "dlrow ,olleH"}, - {"Hello, 世界", "界世 ,olleH"}, - {"", ""}, + {"Hello, world", "dlrow ,olleH"}, + {"Hello, 世界", "界世 ,olleH"}, + {"", ""}, } { - fmt.Println(c.in == c.want) + fmt.Println(c.in == c.want) } } diff --git a/internal/lsp/testdata/inlay_hint/constant_values.go.golden b/internal/lsp/testdata/inlay_hint/constant_values.go.golden index 69481b10d64..edc46debc37 100644 --- a/internal/lsp/testdata/inlay_hint/constant_values.go.golden +++ b/internal/lsp/testdata/inlay_hint/constant_values.go.golden @@ -6,24 +6,24 @@ const True = true type Kind int const ( - KindNone Kind = iota<= 0> - KindPrint<= 1> - KindPrintf<= 2> - KindErrorf<= 3> + KindNone Kind = iota< = 0> + KindPrint< = 1> + KindPrintf< = 2> + KindErrorf< = 3> ) const ( - u = iota * 4<= 0> - v float64 = iota * 42<= 42> - w = iota * 42<= 84> + u = iota * 4< = 0> + v float64 = iota * 42< = 42> + w = iota * 42< = 84> ) const ( a, b = 1, 2 - c, d<= 1, 2> - e, f = 5 * 5, "hello" + "world"<= 25, "helloworld"> - g, h<= 25, "helloworld"> - i, j = true, f<= true, "helloworld"> + c, d< = 1, 2> + e, f = 5 * 5, "hello" + "world"< = 25, "helloworld"> + g, h< = 25, "helloworld"> + i, j = true, f< = true, "helloworld"> ) // No hint diff --git a/internal/lsp/testdata/inlay_hint/parameter_names.go.golden b/internal/lsp/testdata/inlay_hint/parameter_names.go.golden index 66351e48300..46d3ea4e9bf 100644 --- a/internal/lsp/testdata/inlay_hint/parameter_names.go.golden +++ b/internal/lsp/testdata/inlay_hint/parameter_names.go.golden @@ -8,7 +8,7 @@ func hello(name string) string { } func helloWorld() string { - return hello("World") + return hello("World") } type foo struct{} @@ -21,27 +21,27 @@ func (*foo) bar(baz string, qux int) int { } func kase(foo int, 
bar bool, baz ...string) { - fmt.Println(foo, bar, baz) + fmt.Println(foo, bar, baz) } func kipp(foo string, bar, baz string) { - fmt.Println(foo, bar, baz) + fmt.Println(foo, bar, baz) } func plex(foo, bar string, baz string) { - fmt.Println(foo, bar, baz) + fmt.Println(foo, bar, baz) } func tars(foo string, bar, baz string) { - fmt.Println(foo, bar, baz) + fmt.Println(foo, bar, baz) } func foobar() { var x foo - x.bar("", 1) - kase(0, true, "c", "d", "e") - kipp("a", "b", "c") - plex("a", "b", "c") - tars("a", "b", "c") + x.bar("", 1) + kase(0, true, "c", "d", "e") + kipp("a", "b", "c") + plex("a", "b", "c") + tars("a", "b", "c") } diff --git a/internal/lsp/testdata/inlay_hint/variable_types.go.golden b/internal/lsp/testdata/inlay_hint/variable_types.go.golden index 70c019caa1f..6039950d5f3 100644 --- a/internal/lsp/testdata/inlay_hint/variable_types.go.golden +++ b/internal/lsp/testdata/inlay_hint/variable_types.go.golden @@ -2,21 +2,21 @@ package inlayHint //@inlayHint("package") func assignTypes() { - i, j := 0, len([]string{})-1 + i< int>, j< int> := 0, len([]string{})-1 println(i, j) } func rangeTypes() { - for k, v := range []string{} { + for k< int>, v< string> := range []string{} { println(k, v) } } func funcLitType() { - myFunc := func(a string) string { return "" } + myFunc< func(a string) string> := func(a string) string { return "" } } func compositeLitType() { - foo := map[string]interface{}{"": ""} + foo< map[string]interface{}> := map[string]interface{}{"": ""} } From a41fc9869a5ab7b215c33cb7549d3900adc27c5b Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Thu, 9 Jun 2022 18:38:26 -0400 Subject: [PATCH 009/136] internal/lsp/cache: use [256]byte Hash instead of hex digit string I had hoped to see a reduction in total allocation, but it does not appear to be significant according to the included crude benchmark. Nonetheless this is a slight code clarity improvement. Change-Id: I94a503b377dd1146eb371ff11222a351cb5a43b7 Reviewed-on: https://go-review.googlesource.com/c/tools/+/411655 TryBot-Result: Gopher Robot Run-TryBot: Robert Findley gopls-CI: kokoro Reviewed-by: Robert Findley --- gopls/internal/regtest/bench/bench_test.go | 30 +++++++++++++++++++++ internal/lsp/cache/analysis.go | 4 +-- internal/lsp/cache/cache.go | 19 ++----------- internal/lsp/cache/check.go | 31 +++++++++++----------- internal/lsp/cache/imports.go | 6 ++--- internal/lsp/cache/mod.go | 8 +++--- internal/lsp/cache/mod_tidy.go | 17 +++++------- internal/lsp/cache/session.go | 4 +-- internal/lsp/cache/snapshot.go | 24 +++-------------- internal/lsp/cache/symbols.go | 2 +- internal/lsp/cache/view.go | 2 +- internal/lsp/debug/serve.go | 3 ++- internal/lsp/source/view.go | 29 +++++++++++++++++--- 13 files changed, 98 insertions(+), 81 deletions(-) diff --git a/gopls/internal/regtest/bench/bench_test.go b/gopls/internal/regtest/bench/bench_test.go index 5e4eb5fc23a..22f157f4719 100644 --- a/gopls/internal/regtest/bench/bench_test.go +++ b/gopls/internal/regtest/bench/bench_test.go @@ -8,6 +8,7 @@ import ( "flag" "fmt" "os" + "runtime" "runtime/pprof" "testing" @@ -66,6 +67,7 @@ func TestBenchmarkIWL(t *testing.T) { results := testing.Benchmark(func(b *testing.B) { for i := 0; i < b.N; i++ { WithOptions(opts...).Run(t, "", func(t *testing.T, env *Env) {}) + } }) @@ -192,3 +194,31 @@ func TestBenchmarkDidChange(t *testing.T) { printBenchmarkResults(result) }) } + +// TestPrintMemStats measures the memory usage of loading a project. +// It uses the same -didchange_dir flag as above. 
+// Always run it in isolation since it measures global heap usage. +// +// Kubernetes example: +// $ go test -run=TestPrintMemStats -didchange_dir=$HOME/w/kubernetes +// TotalAlloc: 5766 MB +// HeapAlloc: 1984 MB +// +// Both figures exhibit variance of less than 1%. +func TestPrintMemStats(t *testing.T) { + if *benchDir == "" { + t.Skip("-didchange_dir is not set") + } + + // Load the program... + opts := benchmarkOptions(*benchDir) + WithOptions(opts...).Run(t, "", func(_ *testing.T, env *Env) { + // ...and print the memory usage. + runtime.GC() + runtime.GC() + var mem runtime.MemStats + runtime.ReadMemStats(&mem) + t.Logf("TotalAlloc:\t%d MB", mem.TotalAlloc/1e6) + t.Logf("HeapAlloc:\t%d MB", mem.HeapAlloc/1e6) + }) +} diff --git a/internal/lsp/cache/analysis.go b/internal/lsp/cache/analysis.go index e882fb46f07..9f7a19c5c60 100644 --- a/internal/lsp/cache/analysis.go +++ b/internal/lsp/cache/analysis.go @@ -54,7 +54,7 @@ func (s *snapshot) Analyze(ctx context.Context, id string, analyzers []*source.A return results, nil } -type actionHandleKey string +type actionHandleKey source.Hash // An action represents one unit of analysis work: the application of // one analysis to one package. Actions form a DAG, both within a @@ -170,7 +170,7 @@ func (act *actionHandle) analyze(ctx context.Context, snapshot *snapshot) ([]*so } func buildActionKey(a *analysis.Analyzer, ph *packageHandle) actionHandleKey { - return actionHandleKey(hashContents([]byte(fmt.Sprintf("%p %s", a, string(ph.key))))) + return actionHandleKey(source.Hashf("%p%s", a, ph.key[:])) } func (act *actionHandle) String() string { diff --git a/internal/lsp/cache/cache.go b/internal/lsp/cache/cache.go index f5796dfefa2..2a8a169d510 100644 --- a/internal/lsp/cache/cache.go +++ b/internal/lsp/cache/cache.go @@ -6,7 +6,6 @@ package cache import ( "context" - "crypto/sha256" "fmt" "go/ast" "go/token" @@ -55,7 +54,7 @@ type fileHandle struct { modTime time.Time uri span.URI bytes []byte - hash string + hash source.Hash err error // size is the file length as reported by Stat, for the purpose of @@ -139,7 +138,7 @@ func readFile(ctx context.Context, uri span.URI, fi os.FileInfo) (*fileHandle, e size: fi.Size(), uri: uri, bytes: data, - hash: hashContents(data), + hash: source.HashOf(data), }, nil } @@ -168,10 +167,6 @@ func (h *fileHandle) URI() span.URI { return h.uri } -func (h *fileHandle) Hash() string { - return h.hash -} - func (h *fileHandle) FileIdentity() source.FileIdentity { return source.FileIdentity{ URI: h.uri, @@ -183,16 +178,6 @@ func (h *fileHandle) Read() ([]byte, error) { return h.bytes, h.err } -// hashContents returns a string of hex digits denoting the hash of contents. -// -// TODO(adonovan): opt: use [32]byte array as a value more widely and convert -// to hex digits on demand (rare). The array is larger when it appears as a -// struct field (32B vs 16B) but smaller overall (string data is 64B), has -// better locality, and is more efficiently hashed by runtime maps. 
-func hashContents(contents []byte) string { - return fmt.Sprintf("%64x", sha256.Sum256(contents)) -} - var cacheIndex, sessionIndex, viewIndex int64 func (c *Cache) ID() string { return c.id } diff --git a/internal/lsp/cache/check.go b/internal/lsp/cache/check.go index b8a3655a9d4..f09fc298a98 100644 --- a/internal/lsp/cache/check.go +++ b/internal/lsp/cache/check.go @@ -32,7 +32,7 @@ import ( "golang.org/x/tools/internal/typesinternal" ) -type packageHandleKey string +type packageHandleKey source.Hash type packageHandle struct { handle *memoize.Handle @@ -187,7 +187,7 @@ func (s *snapshot) buildKey(ctx context.Context, id PackageID, mode source.Parse } // One bad dependency should not prevent us from checking the entire package. // Add a special key to mark a bad dependency. - depKeys = append(depKeys, packageHandleKey(fmt.Sprintf("%s import not found", depID))) + depKeys = append(depKeys, packageHandleKey(source.Hashf("%s import not found", depID))) continue } deps[depHandle.m.PkgPath] = depHandle @@ -215,6 +215,8 @@ func (s *snapshot) workspaceParseMode(id PackageID) source.ParseMode { } func checkPackageKey(id PackageID, pghs []*parseGoHandle, m *KnownMetadata, deps []packageHandleKey, mode source.ParseMode, experimentalKey bool) packageHandleKey { + // TODO(adonovan): opt: no need to materalize the bytes; hash them directly. + // Also, use field separators to avoid spurious collisions. b := bytes.NewBuffer(nil) b.WriteString(string(id)) if m.Module != nil { @@ -225,38 +227,37 @@ func checkPackageKey(id PackageID, pghs []*parseGoHandle, m *KnownMetadata, deps // files, and deps). It should not otherwise affect the inputs to the type // checker, so this experiment omits it. This should increase cache hits on // the daemon as cfg contains the environment and working directory. - b.WriteString(hashConfig(m.Config)) + hc := hashConfig(m.Config) + b.Write(hc[:]) } b.WriteByte(byte(mode)) for _, dep := range deps { - b.WriteString(string(dep)) + b.Write(dep[:]) } for _, cgf := range pghs { b.WriteString(cgf.file.FileIdentity().String()) } - return packageHandleKey(hashContents(b.Bytes())) + return packageHandleKey(source.HashOf(b.Bytes())) } // hashEnv returns a hash of the snapshot's configuration. -func hashEnv(s *snapshot) string { +func hashEnv(s *snapshot) source.Hash { s.view.optionsMu.Lock() env := s.view.options.EnvSlice() s.view.optionsMu.Unlock() - b := &bytes.Buffer{} - for _, e := range env { - b.WriteString(e) - } - return hashContents(b.Bytes()) + return source.Hashf("%s", env) } // hashConfig returns the hash for the *packages.Config. -func hashConfig(config *packages.Config) string { - b := bytes.NewBuffer(nil) +func hashConfig(config *packages.Config) source.Hash { + // TODO(adonovan): opt: don't materialize the bytes; hash them directly. + // Also, use sound field separators to avoid collisions. + var b bytes.Buffer // Dir, Mode, Env, BuildFlags are the parts of the config that can change. 
b.WriteString(config.Dir) - b.WriteString(string(rune(config.Mode))) + b.WriteRune(rune(config.Mode)) for _, e := range config.Env { b.WriteString(e) @@ -264,7 +265,7 @@ func hashConfig(config *packages.Config) string { for _, f := range config.BuildFlags { b.WriteString(f) } - return hashContents(b.Bytes()) + return source.HashOf(b.Bytes()) } func (ph *packageHandle) Check(ctx context.Context, s source.Snapshot) (source.Package, error) { diff --git a/internal/lsp/cache/imports.go b/internal/lsp/cache/imports.go index 01a2468ef34..f333f700ddf 100644 --- a/internal/lsp/cache/imports.go +++ b/internal/lsp/cache/imports.go @@ -27,7 +27,7 @@ type importsState struct { cleanupProcessEnv func() cacheRefreshDuration time.Duration cacheRefreshTimer *time.Timer - cachedModFileHash string + cachedModFileHash source.Hash cachedBuildFlags []string } @@ -38,7 +38,7 @@ func (s *importsState) runProcessEnvFunc(ctx context.Context, snapshot *snapshot // Find the hash of the active mod file, if any. Using the unsaved content // is slightly wasteful, since we'll drop caches a little too often, but // the mod file shouldn't be changing while people are autocompleting. - var modFileHash string + var modFileHash source.Hash // If we are using 'legacyWorkspace' mode, we can just read the modfile from // the snapshot. Otherwise, we need to get the synthetic workspace mod file. // @@ -61,7 +61,7 @@ func (s *importsState) runProcessEnvFunc(ctx context.Context, snapshot *snapshot if err != nil { return err } - modFileHash = hashContents(modBytes) + modFileHash = source.HashOf(modBytes) } // view.goEnv is immutable -- changes make a new view. Options can change. diff --git a/internal/lsp/cache/mod.go b/internal/lsp/cache/mod.go index 5ac199bd96b..c076f424dc9 100644 --- a/internal/lsp/cache/mod.go +++ b/internal/lsp/cache/mod.go @@ -201,9 +201,11 @@ func sumFilename(modURI span.URI) string { // modKey is uniquely identifies cached data for `go mod why` or dependencies // to upgrade. 
type modKey struct { - sessionID, env, view string - mod source.FileIdentity - verb modAction + sessionID string + env source.Hash + view string + mod source.FileIdentity + verb modAction } type modAction int diff --git a/internal/lsp/cache/mod_tidy.go b/internal/lsp/cache/mod_tidy.go index aa525e7413d..bd2ff0c5f88 100644 --- a/internal/lsp/cache/mod_tidy.go +++ b/internal/lsp/cache/mod_tidy.go @@ -29,10 +29,10 @@ import ( type modTidyKey struct { sessionID string - env string + env source.Hash gomod source.FileIdentity - imports string - unsavedOverlays string + imports source.Hash + unsavedOverlays source.Hash view string } @@ -81,10 +81,6 @@ func (s *snapshot) ModTidy(ctx context.Context, pm *source.ParsedModule) (*sourc if err != nil { return nil, err } - importHash, err := s.hashImports(ctx, workspacePkgs) - if err != nil { - return nil, err - } s.mu.Lock() overlayHash := hashUnsavedOverlays(s.files) @@ -93,7 +89,7 @@ func (s *snapshot) ModTidy(ctx context.Context, pm *source.ParsedModule) (*sourc key := modTidyKey{ sessionID: s.view.session.id, view: s.view.folder.Filename(), - imports: importHash, + imports: s.hashImports(ctx, workspacePkgs), unsavedOverlays: overlayHash, gomod: fh.FileIdentity(), env: hashEnv(s), @@ -152,7 +148,7 @@ func (s *snapshot) ModTidy(ctx context.Context, pm *source.ParsedModule) (*sourc return mth.tidy(ctx, s) } -func (s *snapshot) hashImports(ctx context.Context, wsPackages []*packageHandle) (string, error) { +func (s *snapshot) hashImports(ctx context.Context, wsPackages []*packageHandle) source.Hash { seen := map[string]struct{}{} var imports []string for _, ph := range wsPackages { @@ -164,8 +160,7 @@ func (s *snapshot) hashImports(ctx context.Context, wsPackages []*packageHandle) } } sort.Strings(imports) - hashed := strings.Join(imports, ",") - return hashContents([]byte(hashed)), nil + return source.Hashf("%s", imports) } // modTidyDiagnostics computes the differences between the original and tidied diff --git a/internal/lsp/cache/session.go b/internal/lsp/cache/session.go index 9da5c1e69f9..cbb58740621 100644 --- a/internal/lsp/cache/session.go +++ b/internal/lsp/cache/session.go @@ -44,7 +44,7 @@ type overlay struct { session *Session uri span.URI text []byte - hash string + hash source.Hash version int32 kind source.FileKind @@ -637,7 +637,7 @@ func (s *Session) updateOverlays(ctx context.Context, changes []source.FileModif if c.OnDisk || c.Action == source.Save { version = o.version } - hash := hashContents(text) + hash := source.HashOf(text) var sameContentOnDisk bool switch c.Action { case source.Delete: diff --git a/internal/lsp/cache/snapshot.go b/internal/lsp/cache/snapshot.go index 0d3c869cd2e..6edd1dbe658 100644 --- a/internal/lsp/cache/snapshot.go +++ b/internal/lsp/cache/snapshot.go @@ -466,7 +466,7 @@ func (s *snapshot) buildOverlay() map[string][]byte { return overlays } -func hashUnsavedOverlays(files map[span.URI]source.VersionedFileHandle) string { +func hashUnsavedOverlays(files map[span.URI]source.VersionedFileHandle) source.Hash { var unsaved []string for uri, fh := range files { if overlay, ok := fh.(*overlay); ok && !overlay.saved { @@ -474,7 +474,7 @@ func hashUnsavedOverlays(files map[span.URI]source.VersionedFileHandle) string { } } sort.Strings(unsaved) - return hashContents([]byte(strings.Join(unsaved, ""))) + return source.Hashf("%s", unsaved) } func (s *snapshot) PackagesForFile(ctx context.Context, uri span.URI, mode source.TypecheckMode, includeTestVariants bool) ([]source.Package, error) { @@ -2652,25 +2652,7 @@ func 
(m *goFileMap) forEachConcurrent(f func(parseKey, *parseGoHandle)) { // -- internal-- // hash returns 8 bits from the key's file digest. -func (m *goFileMap) hash(k parseKey) int { - h := k.file.Hash - if h == "" { - // Sadly the Hash isn't always a hash because cache.GetFile may - // successfully return a *fileHandle containing an error and no hash. - // Lump the duds together for now. - // TODO(adonovan): fix the underlying bug. - return 0 - } - return unhex(h[0])<<4 | unhex(h[1]) -} - -// unhex returns the value of a valid hex digit. -func unhex(b byte) int { - if '0' <= b && b <= '9' { - return int(b - '0') - } - return int(b) & ^0x20 - 'A' + 0xA // [a-fA-F] -} +func (*goFileMap) hash(k parseKey) byte { return k.file.Hash[0] } // unshare makes k's stripe exclusive, allocating a copy if needed, and returns it. func (m *goFileMap) unshare(k parseKey) map[parseKey]*parseGoHandle { diff --git a/internal/lsp/cache/symbols.go b/internal/lsp/cache/symbols.go index db68912015e..bf5e00b1648 100644 --- a/internal/lsp/cache/symbols.go +++ b/internal/lsp/cache/symbols.go @@ -33,7 +33,7 @@ type symbolData struct { err error } -type symbolHandleKey string +type symbolHandleKey source.Hash func (s *snapshot) buildSymbolHandle(ctx context.Context, fh source.FileHandle) *symbolHandle { if h := s.getSymbolHandle(fh.URI()); h != nil { diff --git a/internal/lsp/cache/view.go b/internal/lsp/cache/view.go index b0390a3fbde..0ed9883451b 100644 --- a/internal/lsp/cache/view.go +++ b/internal/lsp/cache/view.go @@ -165,7 +165,7 @@ func (v *View) ID() string { return v.id } // given go.mod file. It is the caller's responsibility to clean up the files // when they are done using them. func tempModFile(modFh source.FileHandle, gosum []byte) (tmpURI span.URI, cleanup func(), err error) { - filenameHash := hashContents([]byte(modFh.URI().Filename())) + filenameHash := source.Hashf("%s", modFh.URI().Filename()) tmpMod, err := ioutil.TempFile("", fmt.Sprintf("go.%s.*.mod", filenameHash)) if err != nil { return "", nil, err diff --git a/internal/lsp/debug/serve.go b/internal/lsp/debug/serve.go index 0bdee92c5e0..d343a6d65a2 100644 --- a/internal/lsp/debug/serve.go +++ b/internal/lsp/debug/serve.go @@ -320,7 +320,8 @@ func (i *Instance) getFile(r *http.Request) interface{} { return nil } for _, o := range s.Overlays() { - if o.FileIdentity().Hash == identifier { + // TODO(adonovan): understand and document this comparison. + if o.FileIdentity().Hash.String() == identifier { return o } } diff --git a/internal/lsp/source/view.go b/internal/lsp/source/view.go index 5b908bc721c..7960b0c0368 100644 --- a/internal/lsp/source/view.go +++ b/internal/lsp/source/view.go @@ -7,6 +7,7 @@ package source import ( "bytes" "context" + "crypto/sha256" "errors" "fmt" "go/ast" @@ -528,12 +529,32 @@ type FileHandle interface { Saved() bool } +// A Hash is a cryptographic digest of the contents of a file. +// (Although at 32B it is larger than a 16B string header, it is smaller +// and has better locality than the string header + 64B of hex digits.) +type Hash [sha256.Size]byte + +// HashOf returns the hash of some data. +func HashOf(data []byte) Hash { + return Hash(sha256.Sum256(data)) +} + +// Hashf returns the hash of a printf-formatted string. +func Hashf(format string, args ...interface{}) Hash { + // Although this looks alloc-heavy, it is faster than using + // Fprintf on sha256.New() because the allocations don't escape. 
+ return HashOf([]byte(fmt.Sprintf(format, args...))) +} + +// String returns the digest as a string of hex digits. +func (h Hash) String() string { + return fmt.Sprintf("%64x", [sha256.Size]byte(h)) +} + // FileIdentity uniquely identifies a file at a version from a FileSystem. type FileIdentity struct { - URI span.URI - - // Hash is a string of hex digits denoting the cryptographic digest of the file's content. - Hash string + URI span.URI + Hash Hash // digest of file contents } func (id FileIdentity) String() string { From 034398994d5001d97b2e657118f00f7540c8d8fc Mon Sep 17 00:00:00 2001 From: Suzy Mueller Date: Mon, 13 Jun 2022 14:47:03 -0400 Subject: [PATCH 010/136] internal/lsp: fix error message for inlay hints Fix the error message to describe inlay hints failure. Change-Id: If4597bc3e513c4dce344f11f6fa92ba20e29681a Reviewed-on: https://go-review.googlesource.com/c/tools/+/411899 gopls-CI: kokoro Run-TryBot: Suzy Mueller TryBot-Result: Gopher Robot Reviewed-by: Hyang-Ah Hana Kim --- internal/lsp/lsp_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/lsp/lsp_test.go b/internal/lsp/lsp_test.go index 2f46ff304bb..56356e9b5a4 100644 --- a/internal/lsp/lsp_test.go +++ b/internal/lsp/lsp_test.go @@ -977,7 +977,7 @@ func (r *runner) InlayHints(t *testing.T, spn span.Span) { })) if withinlayHints != got { - t.Errorf("format failed for %s, expected:\n%v\ngot:\n%v", filename, withinlayHints, got) + t.Errorf("inlay hints failed for %s, expected:\n%v\ngot:\n%v", filename, withinlayHints, got) } } From c15c04572c9db1074d3b456ded7917ba493e8f44 Mon Sep 17 00:00:00 2001 From: Suzy Mueller Date: Mon, 13 Jun 2022 14:41:22 -0400 Subject: [PATCH 011/136] internal/lsp: enable inlay hint tests The change to implement inlay hints has been merged, so we need to enable the tests. 
Change-Id: I47e7ab343d0ab10283caac0a3d6677dd69c7504a Reviewed-on: https://go-review.googlesource.com/c/tools/+/411898 Run-TryBot: Suzy Mueller gopls-CI: kokoro TryBot-Result: Gopher Robot Reviewed-by: Hyang-Ah Hana Kim --- internal/lsp/testdata/inlayHint/a.go | 9 --------- internal/lsp/testdata/inlayHint/a.go.golden | 11 ----------- internal/lsp/tests/tests.go | 1 - 3 files changed, 21 deletions(-) delete mode 100644 internal/lsp/testdata/inlayHint/a.go delete mode 100644 internal/lsp/testdata/inlayHint/a.go.golden diff --git a/internal/lsp/testdata/inlayHint/a.go b/internal/lsp/testdata/inlayHint/a.go deleted file mode 100644 index 90ef7c41d1d..00000000000 --- a/internal/lsp/testdata/inlayHint/a.go +++ /dev/null @@ -1,9 +0,0 @@ -package inlayHint //@inlayHint("package") - -func hello(name string) string { - return "Hello " + name -} - -func helloWorld() string { - return hello("World") -} diff --git a/internal/lsp/testdata/inlayHint/a.go.golden b/internal/lsp/testdata/inlayHint/a.go.golden deleted file mode 100644 index e4e6cc0c0cc..00000000000 --- a/internal/lsp/testdata/inlayHint/a.go.golden +++ /dev/null @@ -1,11 +0,0 @@ --- inlayHint -- -package inlayHint //@inlayHint("package") - -func hello(name string) string { - return "Hello " + name -} - -func helloWorld() string { - return hello("World") -} - diff --git a/internal/lsp/tests/tests.go b/internal/lsp/tests/tests.go index 81a5d399029..f2766a2e319 100644 --- a/internal/lsp/tests/tests.go +++ b/internal/lsp/tests/tests.go @@ -787,7 +787,6 @@ func Run(t *testing.T, tests Tests, data *Data) { }) t.Run("InlayHints", func(t *testing.T) { - t.Skip("Inlay Hints not yet implemented") t.Helper() for _, src := range data.InlayHints { t.Run(SpanName(src), func(t *testing.T) { From ebc084af8ba794babff1d58912b41608629acd72 Mon Sep 17 00:00:00 2001 From: Suzy Mueller Date: Mon, 13 Jun 2022 17:08:38 -0400 Subject: [PATCH 012/136] internal/lsp: add inlay hints count to test summary Change-Id: Ia74f4a43a114715a6011405bf70f9dfa269c3318 Reviewed-on: https://go-review.googlesource.com/c/tools/+/411901 TryBot-Result: Gopher Robot gopls-CI: kokoro Reviewed-by: Hyang-Ah Hana Kim Run-TryBot: Suzy Mueller --- internal/lsp/testdata/summary.txt.golden | 1 + internal/lsp/testdata/summary_go1.18.txt.golden | 1 + internal/lsp/tests/tests.go | 1 + 3 files changed, 3 insertions(+) diff --git a/internal/lsp/testdata/summary.txt.golden b/internal/lsp/testdata/summary.txt.golden index 9e1d84d1d56..0247551f8b9 100644 --- a/internal/lsp/testdata/summary.txt.golden +++ b/internal/lsp/testdata/summary.txt.golden @@ -19,6 +19,7 @@ MethodExtractionCount = 6 DefinitionsCount = 95 TypeDefinitionsCount = 18 HighlightsCount = 69 +InlayHintsCount = 4 ReferencesCount = 27 RenamesCount = 41 PrepareRenamesCount = 7 diff --git a/internal/lsp/testdata/summary_go1.18.txt.golden b/internal/lsp/testdata/summary_go1.18.txt.golden index 1c6ad922c36..28a2672db50 100644 --- a/internal/lsp/testdata/summary_go1.18.txt.golden +++ b/internal/lsp/testdata/summary_go1.18.txt.golden @@ -19,6 +19,7 @@ MethodExtractionCount = 6 DefinitionsCount = 108 TypeDefinitionsCount = 18 HighlightsCount = 69 +InlayHintsCount = 4 ReferencesCount = 27 RenamesCount = 48 PrepareRenamesCount = 7 diff --git a/internal/lsp/tests/tests.go b/internal/lsp/tests/tests.go index f2766a2e319..ec804e5e79e 100644 --- a/internal/lsp/tests/tests.go +++ b/internal/lsp/tests/tests.go @@ -984,6 +984,7 @@ func checkData(t *testing.T, data *Data) { fmt.Fprintf(buf, "DefinitionsCount = %v\n", definitionCount) fmt.Fprintf(buf, 
"TypeDefinitionsCount = %v\n", typeDefinitionCount) fmt.Fprintf(buf, "HighlightsCount = %v\n", len(data.Highlights)) + fmt.Fprintf(buf, "InlayHintsCount = %v\n", len(data.InlayHints)) fmt.Fprintf(buf, "ReferencesCount = %v\n", len(data.References)) fmt.Fprintf(buf, "RenamesCount = %v\n", len(data.Renames)) fmt.Fprintf(buf, "PrepareRenamesCount = %v\n", len(data.PrepareRenames)) From ed276111079290e7e1b77db2343972dac95c4bc9 Mon Sep 17 00:00:00 2001 From: Ruslan Nigmatullin Date: Mon, 13 Jun 2022 19:52:45 +0000 Subject: [PATCH 013/136] internal/lsp/cache: cache known subdirs pattern MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Known subdirs change rarely and it's quite expensive to compute a glob pattern derived from them, so cache computation result and inherit it across generations. Computation cost is divided ≈evenly between `sort.Sort` and `span.URI.Filename` calls, and there is no trivial way to optimize them away besides caching. Benchmark (didChange in kubernetes): ~37ms->30ms Change-Id: Idb1691c76b8ff163dc61f637f07229498888606c GitHub-Last-Rev: cd99a9ce5c797afb5aaa9b478fcf433edd0dc03c GitHub-Pull-Request: golang/tools#383 Reviewed-on: https://go-review.googlesource.com/c/tools/+/411636 Reviewed-by: Alan Donovan Reviewed-by: Hyang-Ah Hana Kim --- internal/lsp/cache/snapshot.go | 56 +++++++++++++++++++++++++--------- 1 file changed, 41 insertions(+), 15 deletions(-) diff --git a/internal/lsp/cache/snapshot.go b/internal/lsp/cache/snapshot.go index 6edd1dbe658..7b73f4b2794 100644 --- a/internal/lsp/cache/snapshot.go +++ b/internal/lsp/cache/snapshot.go @@ -114,7 +114,8 @@ type snapshot struct { // knownSubdirs is the set of subdirectories in the workspace, used to // create glob patterns for file watching. - knownSubdirs map[span.URI]struct{} + knownSubdirs map[span.URI]struct{} + knownSubdirsPatternCache string // unprocessedSubdirChanges are any changes that might affect the set of // subdirectories in the workspace. They are not reflected to knownSubdirs // during the snapshot cloning step as it can slow down cloning. @@ -834,19 +835,36 @@ func (s *snapshot) fileWatchingGlobPatterns(ctx context.Context) map[string]stru // of the directories in the workspace. We find them by adding the // directories of every file in the snapshot's workspace directories. // There may be thousands. - knownSubdirs := s.getKnownSubdirs(dirs) - if n := len(knownSubdirs); n > 0 { - dirNames := make([]string, 0, n) - for _, uri := range knownSubdirs { + if pattern := s.getKnownSubdirsPattern(dirs); pattern != "" { + patterns[pattern] = struct{}{} + } + + return patterns +} + +func (s *snapshot) getKnownSubdirsPattern(wsDirs []span.URI) string { + s.mu.Lock() + defer s.mu.Unlock() + + // First, process any pending changes and update the set of known + // subdirectories. + // It may change list of known subdirs and therefore invalidate the cache. + s.applyKnownSubdirsChangesLocked(wsDirs) + + if len(s.knownSubdirs) == 0 { + return "" + } + + if s.knownSubdirsPatternCache == "" { + dirNames := make([]string, 0, len(s.knownSubdirs)) + for uri := range s.knownSubdirs { dirNames = append(dirNames, uri.Filename()) } sort.Strings(dirNames) - // The double allocation of Sprintf(Join()) accounts for 8% - // of DidChange, but specializing doesn't appear to help. 
:( - patterns[fmt.Sprintf("{%s}", strings.Join(dirNames, ","))] = struct{}{} + s.knownSubdirsPatternCache = fmt.Sprintf("{%s}", strings.Join(dirNames, ",")) } - return patterns + return s.knownSubdirsPatternCache } // collectAllKnownSubdirs collects all of the subdirectories within the @@ -859,6 +877,7 @@ func (s *snapshot) collectAllKnownSubdirs(ctx context.Context) { defer s.mu.Unlock() s.knownSubdirs = map[span.URI]struct{}{} + s.knownSubdirsPatternCache = "" for uri := range s.files { s.addKnownSubdirLocked(uri, dirs) } @@ -870,6 +889,16 @@ func (s *snapshot) getKnownSubdirs(wsDirs []span.URI) []span.URI { // First, process any pending changes and update the set of known // subdirectories. + s.applyKnownSubdirsChangesLocked(wsDirs) + + result := make([]span.URI, 0, len(s.knownSubdirs)) + for uri := range s.knownSubdirs { + result = append(result, uri) + } + return result +} + +func (s *snapshot) applyKnownSubdirsChangesLocked(wsDirs []span.URI) { for _, c := range s.unprocessedSubdirChanges { if c.isUnchanged { continue @@ -881,12 +910,6 @@ func (s *snapshot) getKnownSubdirs(wsDirs []span.URI) []span.URI { } } s.unprocessedSubdirChanges = nil - - result := make([]span.URI, 0, len(s.knownSubdirs)) - for uri := range s.knownSubdirs { - result = append(result, uri) - } - return result } func (s *snapshot) addKnownSubdirLocked(uri span.URI, dirs []span.URI) { @@ -917,6 +940,7 @@ func (s *snapshot) addKnownSubdirLocked(uri span.URI, dirs []span.URI) { } s.knownSubdirs[uri] = struct{}{} dir = filepath.Dir(dir) + s.knownSubdirsPatternCache = "" } } @@ -929,6 +953,7 @@ func (s *snapshot) removeKnownSubdirLocked(uri span.URI) { } if info, _ := os.Stat(dir); info == nil { delete(s.knownSubdirs, uri) + s.knownSubdirsPatternCache = "" } dir = filepath.Dir(dir) } @@ -1816,6 +1841,7 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC for k, v := range s.knownSubdirs { result.knownSubdirs[k] = v } + result.knownSubdirsPatternCache = s.knownSubdirsPatternCache for _, c := range changes { result.unprocessedSubdirChanges = append(result.unprocessedSubdirChanges, c) } From c993be69238f2ed9aca41100e9ac8de62cf502a7 Mon Sep 17 00:00:00 2001 From: "Hana (Hyang-Ah) Kim" Date: Mon, 6 Jun 2022 18:50:18 -0400 Subject: [PATCH 014/136] go/analysis/internal/checker: log codeFact error, remove unused action.inputs Change-Id: I39ac785ed7666a5a1373443a2f56a1742a8c0858 Reviewed-on: https://go-review.googlesource.com/c/tools/+/410368 Reviewed-by: Alan Donovan --- go/analysis/internal/checker/checker.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/go/analysis/internal/checker/checker.go b/go/analysis/internal/checker/checker.go index 51cbf689ac0..b5148ff26db 100644 --- a/go/analysis/internal/checker/checker.go +++ b/go/analysis/internal/checker/checker.go @@ -578,7 +578,6 @@ type action struct { deps []*action objectFacts map[objectFactKey]analysis.Fact packageFacts map[packageFactKey]analysis.Fact - inputs map[*analysis.Analyzer]interface{} result interface{} diagnostics []analysis.Diagnostic err error @@ -766,7 +765,7 @@ func inheritFacts(act, dep *action) { if serialize { encodedFact, err := codeFact(fact) if err != nil { - log.Panicf("internal error: encoding of %T fact failed in %v", fact, act) + log.Panicf("internal error: encoding of %T fact failed in %v: %v", fact, act, err) } fact = encodedFact } @@ -894,7 +893,7 @@ func (act *action) exportObjectFact(obj types.Object, fact analysis.Fact) { func (act *action) allObjectFacts() []analysis.ObjectFact { 
facts := make([]analysis.ObjectFact, 0, len(act.objectFacts)) for k := range act.objectFacts { - facts = append(facts, analysis.ObjectFact{k.obj, act.objectFacts[k]}) + facts = append(facts, analysis.ObjectFact{Object: k.obj, Fact: act.objectFacts[k]}) } return facts } @@ -940,7 +939,7 @@ func factType(fact analysis.Fact) reflect.Type { func (act *action) allPackageFacts() []analysis.PackageFact { facts := make([]analysis.PackageFact, 0, len(act.packageFacts)) for k := range act.packageFacts { - facts = append(facts, analysis.PackageFact{k.pkg, act.packageFacts[k]}) + facts = append(facts, analysis.PackageFact{Package: k.pkg, Fact: act.packageFacts[k]}) } return facts } From 27db7f40b912ea6504f06b93c1776b67550729c4 Mon Sep 17 00:00:00 2001 From: "Hana (Hyang-Ah) Kim" Date: Tue, 14 Jun 2022 11:11:56 -0400 Subject: [PATCH 015/136] gopls: update golang.org/x/vuln to latest @4eb5ba4 This picks up the recent performance improvement work in vulncheck api like https://go-review.googlesource.com/c/vuln/+/410897 and fix like https://go-review.googlesource.com/c/vuln/+/411354 Change-Id: Ie595fdb14ae27bd18b5cdd69ca9977d7c14d384c Reviewed-on: https://go-review.googlesource.com/c/tools/+/411908 gopls-CI: kokoro TryBot-Result: Gopher Robot Run-TryBot: Hyang-Ah Hana Kim Reviewed-by: Jonathan Amsterdam --- gopls/go.mod | 4 ++-- gopls/go.sum | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/gopls/go.mod b/gopls/go.mod index 85fb4301c02..5dc62d3df0a 100644 --- a/gopls/go.mod +++ b/gopls/go.mod @@ -9,8 +9,8 @@ require ( github.com/sergi/go-diff v1.1.0 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 golang.org/x/sys v0.0.0-20220209214540-3681064d5158 - golang.org/x/tools v0.1.11-0.20220330174940-8e193c2ba95e - golang.org/x/vuln v0.0.0-20220503210553-a5481fb0c8be + golang.org/x/tools v0.1.11-0.20220523181440-ccb10502d1a5 + golang.org/x/vuln v0.0.0-20220613164644-4eb5ba49563c honnef.co/go/tools v0.3.0 mvdan.cc/gofumpt v0.3.0 mvdan.cc/xurls/v2 v2.4.0 diff --git a/gopls/go.sum b/gopls/go.sum index 5873afa1968..91f552ef905 100644 --- a/gopls/go.sum +++ b/gopls/go.sum @@ -73,6 +73,8 @@ golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/vuln v0.0.0-20220503210553-a5481fb0c8be h1:jokAF1mfylAi1iTQx7C44B7vyXUcSEMw8eDv0PzNu8s= golang.org/x/vuln v0.0.0-20220503210553-a5481fb0c8be/go.mod h1:twca1SxmF6/i2wHY/mj1vLIkkHdp+nil/yA32ZOP4kg= +golang.org/x/vuln v0.0.0-20220613164644-4eb5ba49563c h1:r5bbIROBQtRRgoutV8Q3sFY58VzHW6jMBYl48ANSyS4= +golang.org/x/vuln v0.0.0-20220613164644-4eb5ba49563c/go.mod h1:UZshlUPxXeGUM9I14UOawXQg6yosDE9cr1vKY/DzgWo= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= From 654a14b5274602698564a5e9710c0778be664c7a Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Tue, 14 Jun 2022 14:15:04 -0400 Subject: [PATCH 016/136] internal/lsp/cache: reduce critical sections This change reduces the sizes of the critical sections in traces.ProcessEvent and Generation.Bind, in particular moving allocations ahead of Lock. This reduces the contention according to the trace profiler. See https://go-review.googlesource.com/c/go/+/411909 for another reduction in contention. 
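As a rough illustration of the pattern (not the gopls code itself; the
recorder and traceRecord types below are hypothetical stand-ins), the idea
is to build everything that needs allocation before taking the mutex, so
the lock is held only long enough to publish the result:

package example

import "sync"

// traceRecord stands in for the per-span data assembled in ProcessEvent.
type traceRecord struct {
	name string
	tags []string
}

// recorder stands in for the shared, mutex-guarded state.
type recorder struct {
	mu      sync.Mutex
	records map[string]*traceRecord
}

// add builds and copies the record outside the critical section, then
// holds the lock only long enough to insert it into the shared map.
func (r *recorder) add(id, name string, tags []string) {
	rec := &traceRecord{
		name: name,
		tags: append([]string(nil), tags...), // allocate/copy before locking
	}

	r.mu.Lock()
	if r.records == nil {
		r.records = make(map[string]*traceRecord)
	}
	r.records[id] = rec
	r.mu.Unlock()
}

Concretely, ProcessEvent now builds the traceData and its event slice
before calling t.mu.Lock(), and Bind replaces the deferred store unlock
with explicit unlocks so the store lock is released before the per-handle
lock is taken.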
The largest remaining contention is Handle.Get, which thousands of goroutines wait for because we initiate typechecking top down. Also, add a couple of possible optimization TODO comments, and delete a stale comment re: Bind. Change-Id: I995a0bb46e8c9bf0c23492fb62b56f4539bc32f8 Reviewed-on: https://go-review.googlesource.com/c/tools/+/411910 Run-TryBot: Hyang-Ah Hana Kim gopls-CI: kokoro TryBot-Result: Gopher Robot Reviewed-by: Hyang-Ah Hana Kim --- internal/lsp/cache/check.go | 16 +++++++-------- internal/lsp/cache/parse.go | 2 +- internal/lsp/debug/trace.go | 39 ++++++++++++++++++++++--------------- internal/memoize/memoize.go | 6 +++++- 4 files changed, 37 insertions(+), 26 deletions(-) diff --git a/internal/lsp/cache/check.go b/internal/lsp/cache/check.go index f09fc298a98..51d7d1a7ea1 100644 --- a/internal/lsp/cache/check.go +++ b/internal/lsp/cache/check.go @@ -97,14 +97,6 @@ func (s *snapshot) buildPackageHandle(ctx context.Context, id PackageID, mode so return nil, err } - // Do not close over the packageHandle or the snapshot in the Bind function. - // This creates a cycle, which causes the finalizers to never run on the handles. - // The possible cycles are: - // - // packageHandle.h.function -> packageHandle - // packageHandle.h.function -> snapshot -> packageHandle - // - m := ph.m key := ph.key @@ -121,6 +113,13 @@ func (s *snapshot) buildPackageHandle(ctx context.Context, id PackageID, mode so }(dep) } + // TODO(adonovan): opt: consider moving the Wait here, + // so that dependencies complete before we start to + // read+parse+typecheck this package. Although the + // read+parse can proceed, typechecking will block + // almost immediately until the imports are done. + // The effect is to increase contention. + data := &packageData{} data.pkg, data.err = typeCheck(ctx, snapshot, m.Metadata, mode, deps) // Make sure that the workers above have finished before we return, @@ -448,6 +447,7 @@ func doTypeCheck(ctx context.Context, snapshot *snapshot, m *Metadata, mode sour } typeparams.InitInstanceInfo(pkg.typesInfo) + // TODO(adonovan): opt: execute this loop in parallel. for _, gf := range pkg.m.GoFiles { // In the presence of line directives, we may need to report errors in // non-compiled Go files, so we need to register them on the package. diff --git a/internal/lsp/cache/parse.go b/internal/lsp/cache/parse.go index 668c437f5c9..ab55743ccf0 100644 --- a/internal/lsp/cache/parse.go +++ b/internal/lsp/cache/parse.go @@ -278,7 +278,7 @@ func parseGo(ctx context.Context, fset *token.FileSet, fh source.FileHandle, mod tok := fset.File(file.Pos()) if tok == nil { - // file.Pos is the location of the package declaration. If there was + // file.Pos is the location of the package declaration (issue #53202). If there was // none, we can't find the token.File that ParseFile created, and we // have no choice but to recreate it. 
tok = fset.AddFile(fh.URI().Filename(), -1, len(src)) diff --git a/internal/lsp/debug/trace.go b/internal/lsp/debug/trace.go index ca612867a5d..bb402cfaa8f 100644 --- a/internal/lsp/debug/trace.go +++ b/internal/lsp/debug/trace.go @@ -119,8 +119,6 @@ func formatEvent(ctx context.Context, ev core.Event, lm label.Map) string { } func (t *traces) ProcessEvent(ctx context.Context, ev core.Event, lm label.Map) context.Context { - t.mu.Lock() - defer t.mu.Unlock() span := export.GetSpan(ctx) if span == nil { return ctx @@ -128,11 +126,8 @@ func (t *traces) ProcessEvent(ctx context.Context, ev core.Event, lm label.Map) switch { case event.IsStart(ev): - if t.sets == nil { - t.sets = make(map[string]*traceSet) - t.unfinished = make(map[export.SpanContext]*traceData) - } - // just starting, add it to the unfinished map + // Just starting: add it to the unfinished map. + // Allocate before the critical section. td := &traceData{ TraceID: span.ID.TraceID, SpanID: span.ID.SpanID, @@ -141,6 +136,13 @@ func (t *traces) ProcessEvent(ctx context.Context, ev core.Event, lm label.Map) Start: span.Start().At(), Tags: renderLabels(span.Start()), } + + t.mu.Lock() + defer t.mu.Unlock() + if t.sets == nil { + t.sets = make(map[string]*traceSet) + t.unfinished = make(map[export.SpanContext]*traceData) + } t.unfinished[span.ID] = td // and wire up parents if we have them if !span.ParentID.IsValid() { @@ -155,7 +157,19 @@ func (t *traces) ProcessEvent(ctx context.Context, ev core.Event, lm label.Map) parent.Children = append(parent.Children, td) case event.IsEnd(ev): - // finishing, must be already in the map + // Finishing: must be already in the map. + // Allocate events before the critical section. + events := span.Events() + tdEvents := make([]traceEvent, len(events)) + for i, event := range events { + tdEvents[i] = traceEvent{ + Time: event.At(), + Tags: renderLabels(event), + } + } + + t.mu.Lock() + defer t.mu.Unlock() td, found := t.unfinished[span.ID] if !found { return ctx // if this happens we are in a bad place @@ -164,14 +178,7 @@ func (t *traces) ProcessEvent(ctx context.Context, ev core.Event, lm label.Map) td.Finish = span.Finish().At() td.Duration = span.Finish().At().Sub(span.Start().At()) - events := span.Events() - td.Events = make([]traceEvent, len(events)) - for i, event := range events { - td.Events[i] = traceEvent{ - Time: event.At(), - Tags: renderLabels(event), - } - } + td.Events = tdEvents set, ok := t.sets[span.Name] if !ok { diff --git a/internal/memoize/memoize.go b/internal/memoize/memoize.go index dec2fff6836..28d5e2c5bc8 100644 --- a/internal/memoize/memoize.go +++ b/internal/memoize/memoize.go @@ -181,8 +181,9 @@ func (g *Generation) Bind(key interface{}, function Function, cleanup func(inter if atomic.LoadUint32(&g.destroyed) != 0 { panic("operation on generation " + g.name + " destroyed by " + g.destroyedBy) } + + // Avoid 'defer Unlock' to reduce critical section. 
g.store.mu.Lock() - defer g.store.mu.Unlock() h, ok := g.store.handles[key] if !ok { h := &Handle{ @@ -192,8 +193,11 @@ func (g *Generation) Bind(key interface{}, function Function, cleanup func(inter cleanup: cleanup, } g.store.handles[key] = h + g.store.mu.Unlock() return h } + g.store.mu.Unlock() + h.mu.Lock() defer h.mu.Unlock() if _, ok := h.generations[g]; !ok { From e8b9ff129187b07b15cd001e4c72ae64a21221f1 Mon Sep 17 00:00:00 2001 From: "Hana (Hyang-Ah) Kim" Date: Tue, 14 Jun 2022 19:31:01 -0400 Subject: [PATCH 017/136] gopls/internal/govulncheck: sync x/vuln@4eb5ba4 Change-Id: Idf2147684626368116a5330fefb0a63d8c82f7a9 Reviewed-on: https://go-review.googlesource.com/c/tools/+/412456 Run-TryBot: Hyang-Ah Hana Kim gopls-CI: kokoro Reviewed-by: Jonathan Amsterdam TryBot-Result: Gopher Robot --- gopls/internal/govulncheck/source.go | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/gopls/internal/govulncheck/source.go b/gopls/internal/govulncheck/source.go index 752a8313091..23028b9eb42 100644 --- a/gopls/internal/govulncheck/source.go +++ b/gopls/internal/govulncheck/source.go @@ -59,6 +59,8 @@ func LoadPackages(cfg *packages.Config, patterns ...string) ([]*vulncheck.Packag // Source calls vulncheck.Source on the Go source in pkgs. It returns the result // with Vulns trimmed to those that are actually called. +// +// This function is being used by the Go IDE team. func Source(ctx context.Context, pkgs []*vulncheck.Package, c client.Client) (*vulncheck.Result, error) { r, err := vulncheck.Source(ctx, pkgs, &vulncheck.Config{Client: c}) if err != nil { @@ -77,14 +79,21 @@ func Source(ctx context.Context, pkgs []*vulncheck.Package, c client.Client) (*v // CallInfo is information about calls to vulnerable functions. type CallInfo struct { - CallStacks map[*vulncheck.Vuln][]vulncheck.CallStack // all call stacks - VulnGroups [][]*vulncheck.Vuln // vulns grouped by ID and package - ModuleVersions map[string]string // map from module paths to versions - TopPackages map[string]bool // top-level packages + // CallStacks contains all call stacks to vulnerable functions. + CallStacks map[*vulncheck.Vuln][]vulncheck.CallStack + + // VulnGroups contains vulnerabilities grouped by ID and package. + VulnGroups [][]*vulncheck.Vuln + + // ModuleVersions is a map of module paths to versions. + ModuleVersions map[string]string + + // TopPackages contains the top-level packages in the call info. + TopPackages map[string]bool } // GetCallInfo computes call stacks and related information from a vulncheck.Result. -// I also makes a set of top-level packages from pkgs. +// It also makes a set of top-level packages from pkgs. func GetCallInfo(r *vulncheck.Result, pkgs []*vulncheck.Package) *CallInfo { pset := map[string]bool{} for _, p := range pkgs { From d097bc9f9d0168a89c8d85f5e2110d54bcbab78e Mon Sep 17 00:00:00 2001 From: "Hana (Hyang-Ah) Kim" Date: Wed, 15 Jun 2022 18:51:42 -0400 Subject: [PATCH 018/136] gopls/internal/vulncheck: include nonaffecting vulnerability info This info is still useful to tell users that some required modules have known vulnerabilities, but the analyzed packages/workspaces are not affected. Those vulnerabilities are missing Symbol/PkgPath/CallStacks. 
Change-Id: I94ea0d8f9ebcb1270e05f055caff2a18ebacd034 Reviewed-on: https://go-review.googlesource.com/c/tools/+/412457 Reviewed-by: Jonathan Amsterdam --- gopls/internal/vulncheck/command.go | 88 +++++++++++++++++++++--- gopls/internal/vulncheck/command_test.go | 24 +++++++ internal/lsp/command/interface.go | 2 + 3 files changed, 106 insertions(+), 8 deletions(-) diff --git a/gopls/internal/vulncheck/command.go b/gopls/internal/vulncheck/command.go index a89354f67ee..53bf0f03860 100644 --- a/gopls/internal/vulncheck/command.go +++ b/gopls/internal/vulncheck/command.go @@ -11,12 +11,15 @@ import ( "context" "log" "os" + "sort" "strings" "golang.org/x/tools/go/packages" gvc "golang.org/x/tools/gopls/internal/govulncheck" "golang.org/x/tools/internal/lsp/command" "golang.org/x/vuln/client" + "golang.org/x/vuln/osv" + "golang.org/x/vuln/vulncheck" ) func init() { @@ -79,29 +82,84 @@ func (c *cmd) Run(ctx context.Context, cfg *packages.Config, patterns ...string) } log.Printf("loaded %d packages\n", len(loadedPkgs)) - r, err := gvc.Source(ctx, loadedPkgs, c.Client) + log.Printf("analyzing %d packages...\n", len(loadedPkgs)) + + r, err := vulncheck.Source(ctx, loadedPkgs, &vulncheck.Config{Client: c.Client}) if err != nil { return nil, err } + unaffectedMods := filterUnaffected(r.Vulns) + r.Vulns = filterCalled(r) + callInfo := gvc.GetCallInfo(r, loadedPkgs) - return toVulns(callInfo) + return toVulns(callInfo, unaffectedMods) // TODO: add import graphs. } -func toVulns(ci *gvc.CallInfo) ([]Vuln, error) { +// filterCalled returns vulnerabilities where the symbols are actually called. +func filterCalled(r *vulncheck.Result) []*vulncheck.Vuln { + var vulns []*vulncheck.Vuln + for _, v := range r.Vulns { + if v.CallSink != 0 { + vulns = append(vulns, v) + } + } + return vulns +} + +// filterUnaffected returns vulnerabilities where no symbols are called, +// grouped by module. +func filterUnaffected(vulns []*vulncheck.Vuln) map[string][]*osv.Entry { + // It is possible that the same vuln.OSV.ID has vuln.CallSink != 0 + // for one symbol, but vuln.CallSink == 0 for a different one, so + // we need to filter out ones that have been called. + called := map[string]bool{} + for _, vuln := range vulns { + if vuln.CallSink != 0 { + called[vuln.OSV.ID] = true + } + } + + modToIDs := map[string]map[string]*osv.Entry{} + for _, vuln := range vulns { + if !called[vuln.OSV.ID] { + if _, ok := modToIDs[vuln.ModPath]; !ok { + modToIDs[vuln.ModPath] = map[string]*osv.Entry{} + } + // keep only one vuln.OSV instance for the same ID. 
+ modToIDs[vuln.ModPath][vuln.OSV.ID] = vuln.OSV + } + } + output := map[string][]*osv.Entry{} + for m, vulnSet := range modToIDs { + var vulns []*osv.Entry + for _, vuln := range vulnSet { + vulns = append(vulns, vuln) + } + sort.Slice(vulns, func(i, j int) bool { return vulns[i].ID < vulns[j].ID }) + output[m] = vulns + } + return output +} + +func fixed(v *osv.Entry) string { + lf := gvc.LatestFixed(v.Affected) + if lf != "" && lf[0] != 'v' { + lf = "v" + lf + } + return lf +} + +func toVulns(ci *gvc.CallInfo, unaffectedMods map[string][]*osv.Entry) ([]Vuln, error) { var vulns []Vuln for _, vg := range ci.VulnGroups { v0 := vg[0] - lf := gvc.LatestFixed(v0.OSV.Affected) - if lf != "" && lf[0] != 'v' { - lf = "v" + lf - } vuln := Vuln{ ID: v0.OSV.ID, PkgPath: v0.PkgPath, CurrentVersion: ci.ModuleVersions[v0.ModPath], - FixedVersion: lf, + FixedVersion: fixed(v0.OSV), Details: v0.OSV.Details, Aliases: v0.OSV.Aliases, @@ -119,5 +177,19 @@ func toVulns(ci *gvc.CallInfo) ([]Vuln, error) { } vulns = append(vulns, vuln) } + for m, vg := range unaffectedMods { + for _, v0 := range vg { + vuln := Vuln{ + ID: v0.ID, + Details: v0.Details, + Aliases: v0.Aliases, + ModPath: m, + URL: href(v0), + CurrentVersion: "", + FixedVersion: fixed(v0), + } + vulns = append(vulns, vuln) + } + } return vulns, nil } diff --git a/gopls/internal/vulncheck/command_test.go b/gopls/internal/vulncheck/command_test.go index f689ab96722..f6e2d1b7612 100644 --- a/gopls/internal/vulncheck/command_test.go +++ b/gopls/internal/vulncheck/command_test.go @@ -81,6 +81,15 @@ func TestCmd_Run(t *testing.T) { "golang.org/bmod/bvuln.Vuln (bvuln.go:2)\n", }, }, + { + Vuln: Vuln{ + ID: "GO-2022-03", + Details: "unaffecting vulnerability", + ModPath: "golang.org/amod", + URL: "https://pkg.go.dev/vuln/GO-2022-03", + FixedVersion: "v1.0.4", + }, + }, } // sort reports for stability before comparison. for _, rpts := range [][]report{got, want} { @@ -228,6 +237,21 @@ var testClient1 = &mockClient{ EcosystemSpecific: osv.EcosystemSpecific{Symbols: []string{"VulnData.Vuln1", "VulnData.Vuln2"}}, }}, }, + { + ID: "GO-2022-03", + Details: "unaffecting vulnerability", + References: []osv.Reference{ + { + Type: "href", + URL: "pkg.go.dev/vuln/GO-2022-01", + }, + }, + Affected: []osv.Affected{{ + Package: osv.Package{Name: "golang.org/amod/avuln"}, + Ranges: osv.Affects{{Type: osv.TypeSemver, Events: []osv.RangeEvent{{Introduced: "1.0.0"}, {Fixed: "1.0.4"}, {Introduced: "1.1.2"}}}}, + EcosystemSpecific: osv.EcosystemSpecific{Symbols: []string{"nonExisting"}}, + }}, + }, }, "golang.org/bmod": { { diff --git a/internal/lsp/command/interface.go b/internal/lsp/command/interface.go index 8e4b1056d32..1f3b092faba 100644 --- a/internal/lsp/command/interface.go +++ b/internal/lsp/command/interface.go @@ -359,8 +359,10 @@ type Vuln struct { Aliases []string `json:",omitempty"` // Symbol is the name of the detected vulnerable function or method. + // Can be empty if the vulnerability exists in required modules, but no vulnerable symbols are used. Symbol string `json:",omitempty"` // PkgPath is the package path of the detected Symbol. + // Can be empty if the vulnerability exists in required modules, but no vulnerable packages are used. PkgPath string `json:",omitempty"` // ModPath is the module path corresponding to PkgPath. // TODO: how do we specify standard library's vulnerability? 
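To make the distinction above concrete, here is a minimal sketch of how a
client of these results might separate called vulnerabilities from purely
informational module findings, assuming only the Vuln fields shown in this
CL; the local vuln struct is a hypothetical stand-in, not the real
internal/lsp/command.Vuln type.

package example

import "fmt"

// vuln mirrors only the fields needed for this sketch; the real type
// carries more (Details, Aliases, URL, CurrentVersion, call stacks).
type vuln struct {
	ID           string
	Symbol       string // empty when no vulnerable symbol is called
	PkgPath      string // empty when no vulnerable package is used
	ModPath      string
	FixedVersion string
}

// summarize reports reachable vulnerabilities separately from findings
// that only mean a required module has a known vulnerability.
func summarize(vulns []vuln) {
	for _, v := range vulns {
		if v.Symbol != "" {
			fmt.Printf("%s: %s.%s is called; fixed in %s %s\n",
				v.ID, v.PkgPath, v.Symbol, v.ModPath, v.FixedVersion)
		} else {
			fmt.Printf("%s: %s is required, but no vulnerable symbol is called\n",
				v.ID, v.ModPath)
		}
	}
}

This mirrors the field comments added to interface.go above: Symbol and
PkgPath are documented as possibly empty exactly in the unaffected-module
case.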
From 041035c34a090a6505169f0dbb856b7ea9ce2ffa Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Thu, 16 Jun 2022 13:41:08 +0000 Subject: [PATCH 019/136] Revert "internal/lsp/cache: reduce critical sections" This reverts commit 654a14b5274602698564a5e9710c0778be664c7a. Reason for revert: my flawed understanding of the concurrency Change-Id: I31a35267323bb1ff4dff1d9244d3ce69c36cdda4 Reviewed-on: https://go-review.googlesource.com/c/tools/+/412694 Run-TryBot: Alan Donovan TryBot-Result: Gopher Robot Reviewed-by: Hyang-Ah Hana Kim gopls-CI: kokoro --- internal/lsp/cache/check.go | 16 +++++++-------- internal/lsp/cache/parse.go | 2 +- internal/lsp/debug/trace.go | 39 +++++++++++++++---------------------- internal/memoize/memoize.go | 6 +----- 4 files changed, 26 insertions(+), 37 deletions(-) diff --git a/internal/lsp/cache/check.go b/internal/lsp/cache/check.go index 51d7d1a7ea1..f09fc298a98 100644 --- a/internal/lsp/cache/check.go +++ b/internal/lsp/cache/check.go @@ -97,6 +97,14 @@ func (s *snapshot) buildPackageHandle(ctx context.Context, id PackageID, mode so return nil, err } + // Do not close over the packageHandle or the snapshot in the Bind function. + // This creates a cycle, which causes the finalizers to never run on the handles. + // The possible cycles are: + // + // packageHandle.h.function -> packageHandle + // packageHandle.h.function -> snapshot -> packageHandle + // + m := ph.m key := ph.key @@ -113,13 +121,6 @@ func (s *snapshot) buildPackageHandle(ctx context.Context, id PackageID, mode so }(dep) } - // TODO(adonovan): opt: consider moving the Wait here, - // so that dependencies complete before we start to - // read+parse+typecheck this package. Although the - // read+parse can proceed, typechecking will block - // almost immediately until the imports are done. - // The effect is to increase contention. - data := &packageData{} data.pkg, data.err = typeCheck(ctx, snapshot, m.Metadata, mode, deps) // Make sure that the workers above have finished before we return, @@ -447,7 +448,6 @@ func doTypeCheck(ctx context.Context, snapshot *snapshot, m *Metadata, mode sour } typeparams.InitInstanceInfo(pkg.typesInfo) - // TODO(adonovan): opt: execute this loop in parallel. for _, gf := range pkg.m.GoFiles { // In the presence of line directives, we may need to report errors in // non-compiled Go files, so we need to register them on the package. diff --git a/internal/lsp/cache/parse.go b/internal/lsp/cache/parse.go index ab55743ccf0..668c437f5c9 100644 --- a/internal/lsp/cache/parse.go +++ b/internal/lsp/cache/parse.go @@ -278,7 +278,7 @@ func parseGo(ctx context.Context, fset *token.FileSet, fh source.FileHandle, mod tok := fset.File(file.Pos()) if tok == nil { - // file.Pos is the location of the package declaration (issue #53202). If there was + // file.Pos is the location of the package declaration. If there was // none, we can't find the token.File that ParseFile created, and we // have no choice but to recreate it. 
tok = fset.AddFile(fh.URI().Filename(), -1, len(src)) diff --git a/internal/lsp/debug/trace.go b/internal/lsp/debug/trace.go index bb402cfaa8f..ca612867a5d 100644 --- a/internal/lsp/debug/trace.go +++ b/internal/lsp/debug/trace.go @@ -119,6 +119,8 @@ func formatEvent(ctx context.Context, ev core.Event, lm label.Map) string { } func (t *traces) ProcessEvent(ctx context.Context, ev core.Event, lm label.Map) context.Context { + t.mu.Lock() + defer t.mu.Unlock() span := export.GetSpan(ctx) if span == nil { return ctx @@ -126,8 +128,11 @@ func (t *traces) ProcessEvent(ctx context.Context, ev core.Event, lm label.Map) switch { case event.IsStart(ev): - // Just starting: add it to the unfinished map. - // Allocate before the critical section. + if t.sets == nil { + t.sets = make(map[string]*traceSet) + t.unfinished = make(map[export.SpanContext]*traceData) + } + // just starting, add it to the unfinished map td := &traceData{ TraceID: span.ID.TraceID, SpanID: span.ID.SpanID, @@ -136,13 +141,6 @@ func (t *traces) ProcessEvent(ctx context.Context, ev core.Event, lm label.Map) Start: span.Start().At(), Tags: renderLabels(span.Start()), } - - t.mu.Lock() - defer t.mu.Unlock() - if t.sets == nil { - t.sets = make(map[string]*traceSet) - t.unfinished = make(map[export.SpanContext]*traceData) - } t.unfinished[span.ID] = td // and wire up parents if we have them if !span.ParentID.IsValid() { @@ -157,19 +155,7 @@ func (t *traces) ProcessEvent(ctx context.Context, ev core.Event, lm label.Map) parent.Children = append(parent.Children, td) case event.IsEnd(ev): - // Finishing: must be already in the map. - // Allocate events before the critical section. - events := span.Events() - tdEvents := make([]traceEvent, len(events)) - for i, event := range events { - tdEvents[i] = traceEvent{ - Time: event.At(), - Tags: renderLabels(event), - } - } - - t.mu.Lock() - defer t.mu.Unlock() + // finishing, must be already in the map td, found := t.unfinished[span.ID] if !found { return ctx // if this happens we are in a bad place @@ -178,7 +164,14 @@ func (t *traces) ProcessEvent(ctx context.Context, ev core.Event, lm label.Map) td.Finish = span.Finish().At() td.Duration = span.Finish().At().Sub(span.Start().At()) - td.Events = tdEvents + events := span.Events() + td.Events = make([]traceEvent, len(events)) + for i, event := range events { + td.Events[i] = traceEvent{ + Time: event.At(), + Tags: renderLabels(event), + } + } set, ok := t.sets[span.Name] if !ok { diff --git a/internal/memoize/memoize.go b/internal/memoize/memoize.go index 28d5e2c5bc8..dec2fff6836 100644 --- a/internal/memoize/memoize.go +++ b/internal/memoize/memoize.go @@ -181,9 +181,8 @@ func (g *Generation) Bind(key interface{}, function Function, cleanup func(inter if atomic.LoadUint32(&g.destroyed) != 0 { panic("operation on generation " + g.name + " destroyed by " + g.destroyedBy) } - - // Avoid 'defer Unlock' to reduce critical section. 
g.store.mu.Lock() + defer g.store.mu.Unlock() h, ok := g.store.handles[key] if !ok { h := &Handle{ @@ -193,11 +192,8 @@ func (g *Generation) Bind(key interface{}, function Function, cleanup func(inter cleanup: cleanup, } g.store.handles[key] = h - g.store.mu.Unlock() return h } - g.store.mu.Unlock() - h.mu.Lock() defer h.mu.Unlock() if _, ok := h.generations[g]; !ok { From 9f38ef7f15ed12bf87e5d5e13817f6a0a6ffd9cf Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Fri, 6 Aug 2021 10:50:53 -0400 Subject: [PATCH 020/136] internal/lsp/cache: derive workspace packages from metadata Now that we preserve stale metadata, we can derive workspace packages entirely from known metadata and files. This consolidates the logic to compute workspace packages into a single location, which can be invoked whenever metadata changes (via load or invalidation in clone). Additionally: - Precompute 'HasWorkspaceFiles' when loading metadata. This value should never change for a given Metadata, and our view.contains func is actually quite slow due to evaluating symlinks. - Track 'PkgFilesChanged' on KnownMetadata, since we don't include packages whose package name has changed in our workspace. Also introduce a few debug helpers, so that we can leave some instrumentation in critical functions. For golang/go#45686 Change-Id: I2c994a1e8ca05c3c42f67bd2f4519bea5095c54c Reviewed-on: https://go-review.googlesource.com/c/tools/+/340735 Run-TryBot: Robert Findley TryBot-Result: Gopher Robot Reviewed-by: Alan Donovan gopls-CI: kokoro --- internal/lsp/cache/debug.go | 53 ++++++++++++++++++++ internal/lsp/cache/load.go | 88 ++++++++++++++++++++++++++++------ internal/lsp/cache/metadata.go | 15 ++++++ internal/lsp/cache/snapshot.go | 60 +++++------------------ 4 files changed, 153 insertions(+), 63 deletions(-) create mode 100644 internal/lsp/cache/debug.go diff --git a/internal/lsp/cache/debug.go b/internal/lsp/cache/debug.go new file mode 100644 index 00000000000..b8d207d83dd --- /dev/null +++ b/internal/lsp/cache/debug.go @@ -0,0 +1,53 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cache + +import ( + "fmt" + "os" + "sort" +) + +// This file contains helpers that can be used to instrument code while +// debugging. + +// debugEnabled toggles the helpers below. +const debugEnabled = false + +// If debugEnabled is true, debugf formats its arguments and prints to stderr. +// If debugEnabled is false, it is a no-op. +func debugf(format string, args ...interface{}) { + if !debugEnabled { + return + } + if false { + fmt.Sprintf(format, args...) // encourage vet to validate format strings + } + fmt.Fprintf(os.Stderr, ">>> "+format+"\n", args...) +} + +// If debugEnabled is true, dumpWorkspace prints a summary of workspace +// packages to stderr. If debugEnabled is false, it is a no-op. 
+func (s *snapshot) dumpWorkspace(context string) { + if !debugEnabled { + return + } + + debugf("workspace (after %s):", context) + var ids []PackageID + for id := range s.workspacePackages { + ids = append(ids, id) + } + + sort.Slice(ids, func(i, j int) bool { + return ids[i] < ids[j] + }) + + for _, id := range ids { + pkgPath := s.workspacePackages[id] + _, ok := s.meta.metadata[id] + debugf(" %s:%s (metadata: %t)", id, pkgPath, ok) + } +} diff --git a/internal/lsp/cache/load.go b/internal/lsp/cache/load.go index 96c2a0733a5..7b41d244829 100644 --- a/internal/lsp/cache/load.go +++ b/internal/lsp/cache/load.go @@ -149,6 +149,7 @@ func (s *snapshot) load(ctx context.Context, allowNetwork bool, scopes ...interf } moduleErrs := make(map[string][]packages.Error) // module path -> errors + var newMetadata []*Metadata for _, pkg := range pkgs { // The Go command returns synthetic list results for module queries that // encountered module errors. @@ -195,17 +196,28 @@ func (s *snapshot) load(ctx context.Context, allowNetwork bool, scopes ...interf } // Set the metadata for this package. s.mu.Lock() - m, err := s.setMetadataLocked(ctx, PackagePath(pkg.PkgPath), pkg, cfg, query, map[PackageID]struct{}{}) + seen := make(map[PackageID]struct{}) + m, err := s.setMetadataLocked(ctx, PackagePath(pkg.PkgPath), pkg, cfg, query, seen) s.mu.Unlock() if err != nil { return err } + newMetadata = append(newMetadata, m) + } + + // Rebuild package data when metadata is updated. + s.rebuildPackageData() + s.dumpWorkspace("load") + + // Now that the workspace has been rebuilt, verify that we can build package handles. + // + // TODO(rfindley): what's the point of returning an error here? Probably we + // can simply remove this step: The package handle will be rebuilt as needed. + for _, m := range newMetadata { if _, err := s.buildPackageHandle(ctx, m.ID, s.workspaceParseMode(m.ID)); err != nil { return err } } - // Rebuild the import graph when the metadata is updated. - s.clearAndRebuildImportGraph() if len(moduleErrs) > 0 { return &moduleErrorMap{moduleErrs} @@ -468,6 +480,19 @@ func (s *snapshot) setMetadataLocked(ctx context.Context, pkgPath PackagePath, p m.GoFiles = append(m.GoFiles, uri) uris[uri] = struct{}{} } + + for uri := range uris { + // In order for a package to be considered for the workspace, at least one + // file must be contained in the workspace and not vendored. + + // The package's files are in this view. It may be a workspace package. + // Vendored packages are not likely to be interesting to the user. + if !strings.Contains(string(uri), "/vendor/") && s.view.contains(uri) { + m.HasWorkspaceFiles = true + break + } + } + s.updateIDForURIsLocked(id, uris) // TODO(rstambler): is this still necessary? @@ -517,30 +542,65 @@ func (s *snapshot) setMetadataLocked(ctx context.Context, pkgPath PackagePath, p } } - // Set the workspace packages. If any of the package's files belong to the - // view, then the package may be a workspace package. - for _, uri := range append(m.CompiledGoFiles, m.GoFiles...) { - if !s.view.contains(uri) { + return m, nil +} + +// computeWorkspacePackages computes workspace packages for the given metadata +// graph. +func computeWorkspacePackages(meta *metadataGraph) map[PackageID]PackagePath { + workspacePackages := make(map[PackageID]PackagePath) + for _, m := range meta.metadata { + if !m.HasWorkspaceFiles { continue } - - // The package's files are in this view. It may be a workspace package. 
- if strings.Contains(string(uri), "/vendor/") { - // Vendored packages are not likely to be interesting to the user. + if m.PkgFilesChanged { + // If a package name has changed, it's possible that the package no + // longer exists. Leaving it as a workspace package can result in + // persistent stale diagnostics. + // + // If there are still valid files in the package, it will be reloaded. + // + // There may be more precise heuristics. continue } + if source.IsCommandLineArguments(string(m.ID)) { + // If all the files contained in m have a real package, we don't need to + // keep m as a workspace package. + if allFilesHaveRealPackages(meta, m) { + continue + } + } + switch { case m.ForTest == "": // A normal package. - s.workspacePackages[m.ID] = pkgPath + workspacePackages[m.ID] = m.PkgPath case m.ForTest == m.PkgPath, m.ForTest+"_test" == m.PkgPath: // The test variant of some workspace package or its x_test. // To load it, we need to load the non-test variant with -test. - s.workspacePackages[m.ID] = m.ForTest + workspacePackages[m.ID] = m.ForTest } } - return m, nil + return workspacePackages +} + +// allFilesHaveRealPackages reports whether all files referenced by m are +// contained in a "real" package (not command-line-arguments). +// +// If m is not a command-line-arguments package, this is trivially true. +func allFilesHaveRealPackages(g *metadataGraph, m *KnownMetadata) bool { + n := len(m.CompiledGoFiles) +checkURIs: + for _, uri := range append(m.CompiledGoFiles[0:n:n], m.GoFiles...) { + for _, id := range g.ids[uri] { + if !source.IsCommandLineArguments(string(id)) { + continue checkURIs + } + } + return false + } + return true } func isTestMain(pkg *packages.Package, gocache string) bool { diff --git a/internal/lsp/cache/metadata.go b/internal/lsp/cache/metadata.go index c2a21969d88..525a3e65495 100644 --- a/internal/lsp/cache/metadata.go +++ b/internal/lsp/cache/metadata.go @@ -67,6 +67,13 @@ type Metadata struct { // TODO(rfindley): this can probably just be a method, since it is derived // from other fields. IsIntermediateTestVariant bool + + // HasWorkspaceFiles reports whether m contains any files that are considered + // part of the workspace. + // + // TODO(golang/go#48929): this should be a property of the workspace + // (the go.work file), not a constant. + HasWorkspaceFiles bool } // Name implements the source.Metadata interface. @@ -92,6 +99,14 @@ type KnownMetadata struct { // Invalid metadata can still be used if a metadata reload fails. Valid bool + // PkgFilesChanged reports whether the file set of this metadata has + // potentially changed. + PkgFilesChanged bool + // ShouldLoad is true if the given metadata should be reloaded. + // + // Note that ShouldLoad is different from !Valid: when we try to load a + // package, we mark ShouldLoad = false regardless of whether the load + // succeeded, to prevent endless loads. ShouldLoad bool } diff --git a/internal/lsp/cache/snapshot.go b/internal/lsp/cache/snapshot.go index 7b73f4b2794..961a60f8aa6 100644 --- a/internal/lsp/cache/snapshot.go +++ b/internal/lsp/cache/snapshot.go @@ -716,13 +716,14 @@ func (s *snapshot) getImportedByLocked(id PackageID) []PackageID { return s.meta.importedBy[id] } -func (s *snapshot) clearAndRebuildImportGraph() { +func (s *snapshot) rebuildPackageData() { s.mu.Lock() defer s.mu.Unlock() // Completely invalidate the original map. 
s.meta.importedBy = make(map[PackageID][]PackageID) s.rebuildImportGraph() + s.workspacePackages = computeWorkspacePackages(s.meta) } func (s *snapshot) rebuildImportGraph() { @@ -1337,6 +1338,7 @@ func (s *snapshot) updateIDForURIsLocked(id PackageID, uris map[span.URI]struct{ }) s.meta.ids[uri] = newIDs } + s.dumpWorkspace("updateIDs") } func (s *snapshot) isWorkspacePackage(id PackageID) bool { @@ -1857,7 +1859,7 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC } } - changedPkgFiles := map[PackageID]struct{}{} // packages whose file set may have changed + changedPkgFiles := map[PackageID]bool{} // packages whose file set may have changed anyImportDeleted := false for uri, change := range changes { // Maybe reinitialize the view if we see a change in the vendor @@ -1885,7 +1887,7 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC filePackageIDs := invalidatedPackageIDs(uri, s.meta.ids, pkgFileChanged) if pkgFileChanged { for id := range filePackageIDs { - changedPkgFiles[id] = struct{}{} + changedPkgFiles[id] = true } } for id := range filePackageIDs { @@ -2064,55 +2066,14 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC invalidateMetadata := idsToInvalidate[k] // Mark invalidated metadata rather than deleting it outright. result.meta.metadata[k] = &KnownMetadata{ - Metadata: v.Metadata, - Valid: v.Valid && !invalidateMetadata, - ShouldLoad: v.ShouldLoad || invalidateMetadata, + Metadata: v.Metadata, + Valid: v.Valid && !invalidateMetadata, + PkgFilesChanged: v.PkgFilesChanged || changedPkgFiles[k], + ShouldLoad: v.ShouldLoad || invalidateMetadata, } } - // Copy the set of initially loaded packages. - for id, pkgPath := range s.workspacePackages { - // Packages with the id "command-line-arguments" are generated by the - // go command when the user is outside of GOPATH and outside of a - // module. Do not cache them as workspace packages for longer than - // necessary. - if source.IsCommandLineArguments(string(id)) { - if invalidateMetadata, ok := idsToInvalidate[id]; invalidateMetadata && ok { - continue - } - } - - // If all the files we know about in a package have been deleted, - // the package is gone and we should no longer try to load it. - if m := s.meta.metadata[id]; m != nil { - hasFiles := false - for _, uri := range s.meta.metadata[id].GoFiles { - // For internal tests, we need _test files, not just the normal - // ones. External tests only have _test files, but we can check - // them anyway. - if m.ForTest != "" && !strings.HasSuffix(string(uri), "_test.go") { - continue - } - if _, ok := result.files[uri]; ok { - hasFiles = true - break - } - } - if !hasFiles { - continue - } - } - - // If the package name of a file in the package has changed, it's - // possible that the package ID may no longer exist. Delete it from - // the set of workspace packages, on the assumption that we will add it - // back when the relevant files are reloaded. - if _, ok := changedPkgFiles[id]; ok { - continue - } - - result.workspacePackages[id] = pkgPath - } + result.workspacePackages = computeWorkspacePackages(result.meta) // Inherit all of the go.mod-related handles. 
for _, v := range result.modTidyHandles { @@ -2142,6 +2103,7 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC result.initializeOnce = &sync.Once{} } } + result.dumpWorkspace("clone") return result } From 8a9207816c6df78e430bfeb90c41039dfeed6f34 Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Sun, 8 Aug 2021 14:49:30 -0400 Subject: [PATCH 021/136] internal/lsp/cache: build a new metadata graph on load Introduce a metadataGraph.Clone method that can be used to clone a metadata graph, applying a set of updates. During clone, ids and imports are recomputed from scratch based on the known metadata. Also refine the check for "real" packages when determining whether a command-line-arguments package should be kept as a workspace package: if all other packages are invalid, but the command-line-arguments package is valid, we should keep the command-line-arguments package. Updates golang/go#45686 Change-Id: Iea8d4f19c1d1c5a2b0582b9dda5f9143482a34af Reviewed-on: https://go-review.googlesource.com/c/tools/+/340851 Reviewed-by: Alan Donovan TryBot-Result: Gopher Robot gopls-CI: kokoro Run-TryBot: Robert Findley --- internal/lsp/cache/debug.go | 4 +- internal/lsp/cache/graph.go | 116 +++++++++++++++++++++++++++++++-- internal/lsp/cache/load.go | 105 +++++++++++++++-------------- internal/lsp/cache/snapshot.go | 47 +------------ 4 files changed, 166 insertions(+), 106 deletions(-) diff --git a/internal/lsp/cache/debug.go b/internal/lsp/cache/debug.go index b8d207d83dd..ca8b7c866e4 100644 --- a/internal/lsp/cache/debug.go +++ b/internal/lsp/cache/debug.go @@ -47,7 +47,7 @@ func (s *snapshot) dumpWorkspace(context string) { for _, id := range ids { pkgPath := s.workspacePackages[id] - _, ok := s.meta.metadata[id] - debugf(" %s:%s (metadata: %t)", id, pkgPath, ok) + m, ok := s.meta.metadata[id] + debugf(" %s:%s (metadata: %t; valid: %t)", id, pkgPath, ok, m.Valid) } } diff --git a/internal/lsp/cache/graph.go b/internal/lsp/cache/graph.go index f0f8724d375..f3fe077cf5b 100644 --- a/internal/lsp/cache/graph.go +++ b/internal/lsp/cache/graph.go @@ -4,7 +4,12 @@ package cache -import "golang.org/x/tools/internal/span" +import ( + "sort" + + "golang.org/x/tools/internal/lsp/source" + "golang.org/x/tools/internal/span" +) // A metadataGraph holds information about a transtively closed import graph of // Go packages, as obtained from go/packages. @@ -13,21 +18,122 @@ import "golang.org/x/tools/internal/span" // TODO(rfindley): make this type immutable, so that it may be shared across // snapshots. type metadataGraph struct { - // ids maps file URIs to package IDs. A single file may belong to multiple - // packages due to tests packages. - ids map[span.URI][]PackageID // metadata maps package IDs to their associated metadata. metadata map[PackageID]*KnownMetadata // importedBy maps package IDs to the list of packages that import them. importedBy map[PackageID][]PackageID + + // ids maps file URIs to package IDs. A single file may belong to multiple + // packages due to tests packages. + ids map[span.URI][]PackageID } func NewMetadataGraph() *metadataGraph { return &metadataGraph{ - ids: make(map[span.URI][]PackageID), metadata: make(map[PackageID]*KnownMetadata), importedBy: make(map[PackageID][]PackageID), + ids: make(map[span.URI][]PackageID), + } +} + +// Clone creates a new metadataGraph, applying the given updates to the +// receiver. 
+func (g *metadataGraph) Clone(updates map[PackageID]*KnownMetadata) *metadataGraph { + result := &metadataGraph{metadata: make(map[PackageID]*KnownMetadata, len(g.metadata))} + // Copy metadata. + for id, m := range g.metadata { + result.metadata[id] = m + } + for id, m := range updates { + if m == nil { + delete(result.metadata, id) + } else { + result.metadata[id] = m + } + } + result.build() + return result +} + +// build constructs g.importedBy and g.uris from g.metadata. +func (g *metadataGraph) build() { + // Build the import graph. + g.importedBy = make(map[PackageID][]PackageID) + for id, m := range g.metadata { + for _, importID := range m.Deps { + g.importedBy[importID] = append(g.importedBy[importID], id) + } + } + + // Collect file associations. + g.ids = make(map[span.URI][]PackageID) + for id, m := range g.metadata { + uris := map[span.URI]struct{}{} + for _, uri := range m.CompiledGoFiles { + uris[uri] = struct{}{} + } + for _, uri := range m.GoFiles { + uris[uri] = struct{}{} + } + for uri := range uris { + g.ids[uri] = append(g.ids[uri], id) + } + } + + // Sort and filter file associations. + // + // We choose the first non-empty set of package associations out of the + // following. For simplicity, call a non-command-line-arguments package a + // "real" package. + // + // 1: valid real packages + // 2: a valid command-line-arguments package + // 3: invalid real packages + // 4: an invalid command-line-arguments package + for uri, ids := range g.ids { + sort.Slice(ids, func(i, j int) bool { + // Sort valid packages first. + validi := g.metadata[ids[i]].Valid + validj := g.metadata[ids[j]].Valid + if validi != validj { + return validi + } + + cli := source.IsCommandLineArguments(string(ids[i])) + clj := source.IsCommandLineArguments(string(ids[j])) + if cli && !clj { + return false + } + if !cli && clj { + return true + } + return ids[i] < ids[j] + }) + + // Choose the best IDs for each URI, according to the following rules: + // - If there are any valid real packages, choose them. + // - Else, choose the first valid command-line-argument package, if it exists. + // - Else, keep using all the invalid metadata. + // + // TODO(rfindley): it might be better to track all IDs here, and exclude + // them later in PackagesForFile, but this is the existing behavior. + hasValidMetadata := false + for i, id := range ids { + m := g.metadata[id] + if m.Valid { + hasValidMetadata = true + } else if hasValidMetadata { + g.ids[uri] = ids[:i] + break + } + // If we've seen *anything* prior to command-line arguments package, take + // it. Note that ids[0] may itself be command-line-arguments. + if i > 0 && source.IsCommandLineArguments(string(id)) { + g.ids[uri] = ids[:i] + break + } + } } } diff --git a/internal/lsp/cache/load.go b/internal/lsp/cache/load.go index 7b41d244829..085c7c28001 100644 --- a/internal/lsp/cache/load.go +++ b/internal/lsp/cache/load.go @@ -149,7 +149,7 @@ func (s *snapshot) load(ctx context.Context, allowNetwork bool, scopes ...interf } moduleErrs := make(map[string][]packages.Error) // module path -> errors - var newMetadata []*Metadata + updates := make(map[PackageID]*KnownMetadata) for _, pkg := range pkgs { // The Go command returns synthetic list results for module queries that // encountered module errors. @@ -194,26 +194,36 @@ func (s *snapshot) load(ctx context.Context, allowNetwork bool, scopes ...interf if s.view.allFilesExcluded(pkg) { continue } - // Set the metadata for this package. 
+ // TODO: once metadata is immutable, we shouldn't have to lock here. s.mu.Lock() - seen := make(map[PackageID]struct{}) - m, err := s.setMetadataLocked(ctx, PackagePath(pkg.PkgPath), pkg, cfg, query, seen) + err := s.setMetadataLocked(ctx, PackagePath(pkg.PkgPath), pkg, cfg, query, updates, nil) s.mu.Unlock() if err != nil { return err } - newMetadata = append(newMetadata, m) } - // Rebuild package data when metadata is updated. - s.rebuildPackageData() + s.mu.Lock() + s.meta = s.meta.Clone(updates) + // Invalidate any packages we may have associated with this metadata. + // + // TODO(rfindley): if we didn't already invalidate these in snapshot.clone, + // shouldn't we invalidate the reverse transitive closure? + for _, m := range updates { + for _, mode := range []source.ParseMode{source.ParseHeader, source.ParseExported, source.ParseFull} { + key := packageKey{mode, m.ID} + delete(s.packages, key) + } + } + s.workspacePackages = computeWorkspacePackages(s.meta) s.dumpWorkspace("load") + s.mu.Unlock() - // Now that the workspace has been rebuilt, verify that we can build package handles. + // Rebuild the workspace package handle for any packages we invalidated. // // TODO(rfindley): what's the point of returning an error here? Probably we // can simply remove this step: The package handle will be rebuilt as needed. - for _, m := range newMetadata { + for _, m := range updates { if _, err := s.buildPackageHandle(ctx, m.ID, s.workspaceParseMode(m.ID)); err != nil { return err } @@ -431,27 +441,41 @@ func getWorkspaceDir(ctx context.Context, h *memoize.Handle, g *memoize.Generati // setMetadataLocked extracts metadata from pkg and records it in s. It // recurs through pkg.Imports to ensure that metadata exists for all // dependencies. -func (s *snapshot) setMetadataLocked(ctx context.Context, pkgPath PackagePath, pkg *packages.Package, cfg *packages.Config, query []string, seen map[PackageID]struct{}) (*Metadata, error) { +func (s *snapshot) setMetadataLocked(ctx context.Context, pkgPath PackagePath, pkg *packages.Package, cfg *packages.Config, query []string, updates map[PackageID]*KnownMetadata, path []PackageID) error { id := PackageID(pkg.ID) + if new := updates[id]; new != nil { + return nil + } if source.IsCommandLineArguments(pkg.ID) { suffix := ":" + strings.Join(query, ",") id = PackageID(string(id) + suffix) pkgPath = PackagePath(string(pkgPath) + suffix) } - if _, ok := seen[id]; ok { - return nil, fmt.Errorf("import cycle detected: %q", id) + if _, ok := updates[id]; ok { + // If we've already seen this dependency, there may be an import cycle, or + // we may have reached the same package transitively via distinct paths. + // Check the path to confirm. + for _, prev := range path { + if prev == id { + return fmt.Errorf("import cycle detected: %q", id) + } + } } // Recreate the metadata rather than reusing it to avoid locking. - m := &Metadata{ - ID: id, - PkgPath: pkgPath, - Name: PackageName(pkg.Name), - ForTest: PackagePath(packagesinternal.GetForTest(pkg)), - TypesSizes: pkg.TypesSizes, - Config: cfg, - Module: pkg.Module, - depsErrors: packagesinternal.GetDepsErrors(pkg), - } + m := &KnownMetadata{ + Metadata: &Metadata{ + ID: id, + PkgPath: pkgPath, + Name: PackageName(pkg.Name), + ForTest: PackagePath(packagesinternal.GetForTest(pkg)), + TypesSizes: pkg.TypesSizes, + Config: cfg, + Module: pkg.Module, + depsErrors: packagesinternal.GetDepsErrors(pkg), + }, + Valid: true, + } + updates[id] = m // Identify intermediate test variants for later filtering. 
See the // documentation of IsIntermediateTestVariant for more information. @@ -493,15 +517,6 @@ func (s *snapshot) setMetadataLocked(ctx context.Context, pkgPath PackagePath, p } } - s.updateIDForURIsLocked(id, uris) - - // TODO(rstambler): is this still necessary? - copied := map[PackageID]struct{}{ - id: {}, - } - for k, v := range seen { - copied[k] = v - } for importPath, importPkg := range pkg.Imports { importPkgPath := PackagePath(importPath) importID := PackageID(importPkg.ID) @@ -517,32 +532,13 @@ func (s *snapshot) setMetadataLocked(ctx context.Context, pkgPath PackagePath, p continue } if s.noValidMetadataForIDLocked(importID) { - if _, err := s.setMetadataLocked(ctx, importPkgPath, importPkg, cfg, query, copied); err != nil { + if err := s.setMetadataLocked(ctx, importPkgPath, importPkg, cfg, query, updates, append(path, id)); err != nil { event.Error(ctx, "error in dependency", err) } } } - // Add the metadata to the cache. - - // If we've already set the metadata for this snapshot, reuse it. - if original, ok := s.meta.metadata[m.ID]; ok && original.Valid { - // Since we've just reloaded, clear out shouldLoad. - original.ShouldLoad = false - m = original.Metadata - } else { - s.meta.metadata[m.ID] = &KnownMetadata{ - Metadata: m, - Valid: true, - } - // Invalidate any packages we may have associated with this metadata. - for _, mode := range []source.ParseMode{source.ParseHeader, source.ParseExported, source.ParseFull} { - key := packageKey{mode, m.ID} - delete(s.packages, key) - } - } - - return m, nil + return nil } // computeWorkspacePackages computes workspace packages for the given metadata @@ -588,13 +584,16 @@ func computeWorkspacePackages(meta *metadataGraph) map[PackageID]PackagePath { // allFilesHaveRealPackages reports whether all files referenced by m are // contained in a "real" package (not command-line-arguments). // +// If m is valid but all "real" packages containing any file are invalid, this +// function returns false. +// // If m is not a command-line-arguments package, this is trivially true. func allFilesHaveRealPackages(g *metadataGraph, m *KnownMetadata) bool { n := len(m.CompiledGoFiles) checkURIs: for _, uri := range append(m.CompiledGoFiles[0:n:n], m.GoFiles...) { for _, id := range g.ids[uri] { - if !source.IsCommandLineArguments(string(id)) { + if !source.IsCommandLineArguments(string(id)) && (g.metadata[id].Valid || !m.Valid) { continue checkURIs } } diff --git a/internal/lsp/cache/snapshot.go b/internal/lsp/cache/snapshot.go index 961a60f8aa6..601ed450a19 100644 --- a/internal/lsp/cache/snapshot.go +++ b/internal/lsp/cache/snapshot.go @@ -716,16 +716,6 @@ func (s *snapshot) getImportedByLocked(id PackageID) []PackageID { return s.meta.importedBy[id] } -func (s *snapshot) rebuildPackageData() { - s.mu.Lock() - defer s.mu.Unlock() - - // Completely invalidate the original map. - s.meta.importedBy = make(map[PackageID][]PackageID) - s.rebuildImportGraph() - s.workspacePackages = computeWorkspacePackages(s.meta) -} - func (s *snapshot) rebuildImportGraph() { for id, m := range s.meta.metadata { for _, importID := range m.Deps { @@ -1306,41 +1296,6 @@ func (s *snapshot) noValidMetadataForIDLocked(id PackageID) bool { return m == nil || !m.Valid } -// updateIDForURIsLocked adds the given ID to the set of known IDs for the given URI. -// Any existing invalid IDs are removed from the set of known IDs. 
IDs that are -// not "command-line-arguments" are preferred, so if a new ID comes in for a -// URI that previously only had "command-line-arguments", the new ID will -// replace the "command-line-arguments" ID. -func (s *snapshot) updateIDForURIsLocked(id PackageID, uris map[span.URI]struct{}) { - for uri := range uris { - // Collect the new set of IDs, preserving any valid existing IDs. - newIDs := []PackageID{id} - for _, existingID := range s.meta.ids[uri] { - // Don't set duplicates of the same ID. - if existingID == id { - continue - } - // If the package previously only had a command-line-arguments ID, - // delete the command-line-arguments workspace package. - if source.IsCommandLineArguments(string(existingID)) { - delete(s.workspacePackages, existingID) - continue - } - // If the metadata for an existing ID is invalid, and we are - // setting metadata for a new, valid ID--don't preserve the old ID. - if m, ok := s.meta.metadata[existingID]; !ok || !m.Valid { - continue - } - newIDs = append(newIDs, existingID) - } - sort.Slice(newIDs, func(i, j int) bool { - return newIDs[i] < newIDs[j] - }) - s.meta.ids[uri] = newIDs - } - s.dumpWorkspace("updateIDs") -} - func (s *snapshot) isWorkspacePackage(id PackageID) bool { s.mu.Lock() defer s.mu.Unlock() @@ -1984,7 +1939,7 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC // If the workspace mode has changed, we must delete all metadata, as it // is unusable and may produce confusing or incorrect diagnostics. - // If a file has been deleted, we must delete metadata all packages + // If a file has been deleted, we must delete metadata for all packages // containing that file. workspaceModeChanged := s.workspaceMode() != result.workspaceMode() skipID := map[PackageID]bool{} From 39d3d492601a90d889fee47076f09e3798734942 Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Mon, 9 Aug 2021 10:39:21 -0400 Subject: [PATCH 022/136] internal/lsp/cache: use metadataGraph.Clone in snapshot.clone Rather than updating metadata directly in snapshot.clone, build a set of updates to apply and call metadata.Clone. After this change, metadata is only updated by cloning, so we can eliminate some code that works with mutable metadata. In the next CL we'll only update the metadata if something changed, but this is intentionally left out of this CL to isolate the change. Benchmark (didChange in kubernetes): ~55ms->65ms, because it is now more work to compute uris. For golang/go#45686 Change-Id: I048bed65760b266a209f67111c57fae29bd3e6f0 Reviewed-on: https://go-review.googlesource.com/c/tools/+/340852 gopls-CI: kokoro TryBot-Result: Gopher Robot Run-TryBot: Robert Findley Reviewed-by: Alan Donovan --- internal/lsp/cache/graph.go | 8 ---- internal/lsp/cache/session.go | 2 +- internal/lsp/cache/snapshot.go | 67 ++++++++++++---------------------- 3 files changed, 25 insertions(+), 52 deletions(-) diff --git a/internal/lsp/cache/graph.go b/internal/lsp/cache/graph.go index f3fe077cf5b..36e658b3a86 100644 --- a/internal/lsp/cache/graph.go +++ b/internal/lsp/cache/graph.go @@ -30,14 +30,6 @@ type metadataGraph struct { ids map[span.URI][]PackageID } -func NewMetadataGraph() *metadataGraph { - return &metadataGraph{ - metadata: make(map[PackageID]*KnownMetadata), - importedBy: make(map[PackageID][]PackageID), - ids: make(map[span.URI][]PackageID), - } -} - // Clone creates a new metadataGraph, applying the given updates to the // receiver. 
func (g *metadataGraph) Clone(updates map[PackageID]*KnownMetadata) *metadataGraph { diff --git a/internal/lsp/cache/session.go b/internal/lsp/cache/session.go index cbb58740621..286d8f12c46 100644 --- a/internal/lsp/cache/session.go +++ b/internal/lsp/cache/session.go @@ -232,7 +232,7 @@ func (s *Session) createView(ctx context.Context, name string, folder span.URI, initializeOnce: &sync.Once{}, generation: s.cache.store.Generation(generationName(v, 0)), packages: make(map[packageKey]*packageHandle), - meta: NewMetadataGraph(), + meta: &metadataGraph{}, files: make(map[span.URI]source.VersionedFileHandle), goFiles: newGoFileMap(), symbols: make(map[span.URI]*symbolHandle), diff --git a/internal/lsp/cache/snapshot.go b/internal/lsp/cache/snapshot.go index 601ed450a19..369baca8def 100644 --- a/internal/lsp/cache/snapshot.go +++ b/internal/lsp/cache/snapshot.go @@ -705,25 +705,9 @@ func (s *snapshot) getModTidyHandle(uri span.URI) *modTidyHandle { func (s *snapshot) getImportedBy(id PackageID) []PackageID { s.mu.Lock() defer s.mu.Unlock() - return s.getImportedByLocked(id) -} - -func (s *snapshot) getImportedByLocked(id PackageID) []PackageID { - // If we haven't rebuilt the import graph since creating the snapshot. - if len(s.meta.importedBy) == 0 { - s.rebuildImportGraph() - } return s.meta.importedBy[id] } -func (s *snapshot) rebuildImportGraph() { - for id, m := range s.meta.metadata { - for _, importID := range m.Deps { - s.meta.importedBy[importID] = append(s.meta.importedBy[importID], id) - } - } -} - func (s *snapshot) addPackageHandle(ph *packageHandle) *packageHandle { s.mu.Lock() defer s.mu.Unlock() @@ -1705,7 +1689,6 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC builtin: s.builtin, initializeOnce: s.initializeOnce, initializedErr: s.initializedErr, - meta: NewMetadataGraph(), packages: make(map[packageKey]*packageHandle, len(s.packages)), actions: make(map[actionKey]*actionHandle, len(s.actions)), files: make(map[span.URI]source.VersionedFileHandle, len(s.files)), @@ -1912,7 +1895,7 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC return } idsToInvalidate[id] = newInvalidateMetadata - for _, rid := range s.getImportedByLocked(id) { + for _, rid := range s.meta.importedBy[id] { addRevDeps(rid, invalidateMetadata) } } @@ -1976,58 +1959,56 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC addForwardDeps(id) } - // Copy the URI to package ID mappings, skipping only those URIs whose - // metadata will be reloaded in future calls to load. + // Compute which IDs are in the snapshot. + // + // TODO(rfindley): this step shouldn't be necessary, since we compute skipID + // above based on meta.ids. deleteInvalidMetadata := forceReloadMetadata || workspaceModeChanged idsInSnapshot := map[PackageID]bool{} // track all known IDs - for uri, ids := range s.meta.ids { - // Optimization: ids slices are typically numerous, short (<3), - // and rarely modified by this loop, so don't allocate copies - // until necessary. - var resultIDs []PackageID // nil implies equal to ids[:i:i] - for i, id := range ids { + for _, ids := range s.meta.ids { + for _, id := range ids { if skipID[id] || deleteInvalidMetadata && idsToInvalidate[id] { - resultIDs = ids[:i:i] // unshare continue } // The ID is not reachable from any workspace package, so it should // be deleted. 
if !reachableID[id] { - resultIDs = ids[:i:i] // unshare continue } idsInSnapshot[id] = true - if resultIDs != nil { - resultIDs = append(resultIDs, id) - } - } - if resultIDs == nil { - resultIDs = ids } - result.meta.ids[uri] = resultIDs } // TODO(adonovan): opt: represent PackageID as an index into a process-global // dup-free list of all package names ever seen, then use a bitmap instead of // a hash table for "PackageSet" (e.g. idsInSnapshot). - // Copy the package metadata. We only need to invalidate packages directly - // containing the affected file, and only if it changed in a relevant way. + // Compute which metadata updates are required. We only need to invalidate + // packages directly containing the affected file, and only if it changed in + // a relevant way. + metadataUpdates := make(map[PackageID]*KnownMetadata) for k, v := range s.meta.metadata { if !idsInSnapshot[k] { // Delete metadata for IDs that are no longer reachable from files // in the snapshot. + metadataUpdates[k] = nil continue } invalidateMetadata := idsToInvalidate[k] - // Mark invalidated metadata rather than deleting it outright. - result.meta.metadata[k] = &KnownMetadata{ - Metadata: v.Metadata, - Valid: v.Valid && !invalidateMetadata, - PkgFilesChanged: v.PkgFilesChanged || changedPkgFiles[k], - ShouldLoad: v.ShouldLoad || invalidateMetadata, + valid := v.Valid && !invalidateMetadata + pkgFilesChanged := v.PkgFilesChanged || changedPkgFiles[k] + shouldLoad := v.ShouldLoad || invalidateMetadata + if valid != v.Valid || pkgFilesChanged != v.PkgFilesChanged || shouldLoad != v.ShouldLoad { + // Mark invalidated metadata rather than deleting it outright. + metadataUpdates[k] = &KnownMetadata{ + Metadata: v.Metadata, + Valid: valid, + PkgFilesChanged: pkgFilesChanged, + ShouldLoad: shouldLoad, + } } } + result.meta = s.meta.Clone(metadataUpdates) result.workspacePackages = computeWorkspacePackages(result.meta) // Inherit all of the go.mod-related handles. From 4ba3d2217f15fbd2daf23f631a1c402900512912 Mon Sep 17 00:00:00 2001 From: Robert Findley Date: Fri, 18 Mar 2022 15:45:23 -0400 Subject: [PATCH 023/136] internal/lsp/cache: clone the metadata graph when clearing ShouldLoad The Metadata type was mutated in exactly one place: when setting the ShouldLoad bit after package loading completes. Address this by cloning the metadata graph when clearing ShouldLoad. After this change, metadata graphs and the data within them are immutable. This also fixes a range-variable capture bug in load.go: previously we were deferring a call to clearShouldLoad for the range variable scope. After this change, we properly clear the ShouldLoad bit for all scopes. Change-Id: I8f9140a490f81fbabacfc9e0102d9c638c7fbb37 Reviewed-on: https://go-review.googlesource.com/c/tools/+/400821 Run-TryBot: Robert Findley Reviewed-by: Alan Donovan TryBot-Result: Gopher Robot gopls-CI: kokoro --- internal/lsp/cache/load.go | 29 +++++++++--------- internal/lsp/cache/snapshot.go | 56 +++++++++++++++++++--------------- 2 files changed, 47 insertions(+), 38 deletions(-) diff --git a/internal/lsp/cache/load.go b/internal/lsp/cache/load.go index 085c7c28001..5f24d0f08ef 100644 --- a/internal/lsp/cache/load.go +++ b/internal/lsp/cache/load.go @@ -37,6 +37,15 @@ func (s *snapshot) load(ctx context.Context, allowNetwork bool, scopes ...interf var query []string var containsDir bool // for logging + // Unless the context was canceled, set "shouldLoad" to false for all + // of the metadata we attempted to load. 
+ defer func() { + if errors.Is(err, context.Canceled) { + return + } + s.clearShouldLoad(scopes...) + }() + // Keep track of module query -> module path so that we can later correlate query // errors with errors. moduleQueries := make(map[string]string) @@ -44,14 +53,6 @@ func (s *snapshot) load(ctx context.Context, allowNetwork bool, scopes ...interf if !s.shouldLoad(scope) { continue } - // Unless the context was canceled, set "shouldLoad" to false for all - // of the metadata we attempted to load. - defer func() { - if errors.Is(err, context.Canceled) { - return - } - s.clearShouldLoad(scope) - }() switch scope := scope.(type) { case PackagePath: if source.IsCommandLineArguments(string(scope)) { @@ -196,7 +197,7 @@ func (s *snapshot) load(ctx context.Context, allowNetwork bool, scopes ...interf } // TODO: once metadata is immutable, we shouldn't have to lock here. s.mu.Lock() - err := s.setMetadataLocked(ctx, PackagePath(pkg.PkgPath), pkg, cfg, query, updates, nil) + err := s.computeMetadataUpdates(ctx, PackagePath(pkg.PkgPath), pkg, cfg, query, updates, nil) s.mu.Unlock() if err != nil { return err @@ -438,10 +439,10 @@ func getWorkspaceDir(ctx context.Context, h *memoize.Handle, g *memoize.Generati return span.URIFromPath(v.(*workspaceDirData).dir), nil } -// setMetadataLocked extracts metadata from pkg and records it in s. It -// recurs through pkg.Imports to ensure that metadata exists for all -// dependencies. -func (s *snapshot) setMetadataLocked(ctx context.Context, pkgPath PackagePath, pkg *packages.Package, cfg *packages.Config, query []string, updates map[PackageID]*KnownMetadata, path []PackageID) error { +// computeMetadataUpdates populates the updates map with metadata updates to +// apply, based on the given pkg. It recurs through pkg.Imports to ensure that +// metadata exists for all dependencies. 
+func (s *snapshot) computeMetadataUpdates(ctx context.Context, pkgPath PackagePath, pkg *packages.Package, cfg *packages.Config, query []string, updates map[PackageID]*KnownMetadata, path []PackageID) error { id := PackageID(pkg.ID) if new := updates[id]; new != nil { return nil @@ -532,7 +533,7 @@ func (s *snapshot) setMetadataLocked(ctx context.Context, pkgPath PackagePath, p continue } if s.noValidMetadataForIDLocked(importID) { - if err := s.setMetadataLocked(ctx, importPkgPath, importPkg, cfg, query, updates, append(path, id)); err != nil { + if err := s.computeMetadataUpdates(ctx, importPkgPath, importPkg, cfg, query, updates, append(path, id)); err != nil { event.Error(ctx, "error in dependency", err) } } diff --git a/internal/lsp/cache/snapshot.go b/internal/lsp/cache/snapshot.go index 369baca8def..ef06a10f6b4 100644 --- a/internal/lsp/cache/snapshot.go +++ b/internal/lsp/cache/snapshot.go @@ -1191,10 +1191,12 @@ func (s *snapshot) shouldLoad(scope interface{}) bool { s.mu.Lock() defer s.mu.Unlock() + g := s.meta + switch scope := scope.(type) { case PackagePath: var meta *KnownMetadata - for _, m := range s.meta.metadata { + for _, m := range g.metadata { if m.PkgPath != scope { continue } @@ -1206,12 +1208,12 @@ func (s *snapshot) shouldLoad(scope interface{}) bool { return false case fileURI: uri := span.URI(scope) - ids := s.meta.ids[uri] + ids := g.ids[uri] if len(ids) == 0 { return true } for _, id := range ids { - m, ok := s.meta.metadata[id] + m, ok := g.metadata[id] if !ok || m.ShouldLoad { return true } @@ -1222,34 +1224,40 @@ func (s *snapshot) shouldLoad(scope interface{}) bool { } } -func (s *snapshot) clearShouldLoad(scope interface{}) { +func (s *snapshot) clearShouldLoad(scopes ...interface{}) { s.mu.Lock() defer s.mu.Unlock() - switch scope := scope.(type) { - case PackagePath: - var meta *KnownMetadata - for _, m := range s.meta.metadata { - if m.PkgPath == scope { - meta = m - } - } - if meta == nil { - return - } - meta.ShouldLoad = false - case fileURI: - uri := span.URI(scope) - ids := s.meta.ids[uri] - if len(ids) == 0 { - return + g := s.meta + + var updates map[PackageID]*KnownMetadata + markLoaded := func(m *KnownMetadata) { + if updates == nil { + updates = make(map[PackageID]*KnownMetadata) } - for _, id := range ids { - if m, ok := s.meta.metadata[id]; ok { - m.ShouldLoad = false + next := *m + next.ShouldLoad = false + updates[next.ID] = &next + } + for _, scope := range scopes { + switch scope := scope.(type) { + case PackagePath: + for _, m := range g.metadata { + if m.PkgPath == scope { + markLoaded(m) + } + } + case fileURI: + uri := span.URI(scope) + ids := g.ids[uri] + for _, id := range ids { + if m, ok := g.metadata[id]; ok { + markLoaded(m) + } } } } + s.meta = g.Clone(updates) } // noValidMetadataForURILocked reports whether there is any valid metadata for From dffd6452c0f18fe993650d0aa76747dbe9d21052 Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Mon, 9 Aug 2021 10:54:27 -0400 Subject: [PATCH 024/136] internal/lsp/cache: only clone metadata if something changed We with immutable metadata, we don't need to clone if nothing was invalidated. 
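In other words, the metadata graph behaves as a copy-on-write value: cloning applies a batch of updates to a copy, and an empty batch simply returns the receiver. A minimal, self-contained sketch of that shape (the graph and metadata names below are illustrative stand-ins, not the gopls types):

	package main

	import "fmt"

	type graph struct {
		metadata map[string]string // treated as immutable after construction
	}

	// clone copies the metadata map and applies updates; an empty value
	// stands in for deletion. When there is nothing to apply, the receiver
	// is returned unchanged, which is safe only because graphs are never
	// mutated in place.
	func (g *graph) clone(updates map[string]string) *graph {
		if len(updates) == 0 {
			return g // fast path: no copying, no allocation
		}
		next := &graph{metadata: make(map[string]string, len(g.metadata))}
		for k, v := range g.metadata {
			next.metadata[k] = v
		}
		for k, v := range updates {
			if v == "" {
				delete(next.metadata, k)
			} else {
				next.metadata[k] = v
			}
		}
		return next
	}

	func main() {
		g := &graph{metadata: map[string]string{"a": "package a"}}
		fmt.Println(g.clone(nil) == g) // true: unchanged graphs are shared
	}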
Benchmark (didChange in k8s): 65ms->45ms For golang/go#45686 Change-Id: I6b5e764c53a35784fd8c7b43bc26361f4ee8d928 Reviewed-on: https://go-review.googlesource.com/c/tools/+/340853 Run-TryBot: Robert Findley Reviewed-by: Alan Donovan gopls-CI: kokoro TryBot-Result: Gopher Robot --- internal/lsp/cache/snapshot.go | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/internal/lsp/cache/snapshot.go b/internal/lsp/cache/snapshot.go index ef06a10f6b4..56ffdf88126 100644 --- a/internal/lsp/cache/snapshot.go +++ b/internal/lsp/cache/snapshot.go @@ -2016,8 +2016,15 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC } } - result.meta = s.meta.Clone(metadataUpdates) - result.workspacePackages = computeWorkspacePackages(result.meta) + if len(metadataUpdates) > 0 { + result.meta = s.meta.Clone(metadataUpdates) + result.workspacePackages = computeWorkspacePackages(result.meta) + } else { + // No metadata changes. Since metadata is only updated by cloning, it is + // safe to re-use the existing metadata here. + result.meta = s.meta + result.workspacePackages = s.workspacePackages + } // Inherit all of the go.mod-related handles. for _, v := range result.modTidyHandles { From 567c98ba1a680254947db41413f78fe8e4451086 Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Mon, 9 Aug 2021 11:04:34 -0400 Subject: [PATCH 025/136] internal/lsp/cache: don't walk URIs to invalidate metadata Since ids is derived from metadata, we should not have to walk ids to see which metadata is still active. Just compute metadata updates directly. Benchmark (didChange in k8s): ~45ms->41ms For golang/go#45686 Change-Id: Id557ed3f2e05c903e4bb3f3f6a4af864751c4546 Reviewed-on: https://go-review.googlesource.com/c/tools/+/340854 TryBot-Result: Gopher Robot Run-TryBot: Robert Findley Reviewed-by: Alan Donovan gopls-CI: kokoro --- internal/lsp/cache/snapshot.go | 42 ++++++++++++---------------------- 1 file changed, 15 insertions(+), 27 deletions(-) diff --git a/internal/lsp/cache/snapshot.go b/internal/lsp/cache/snapshot.go index 56ffdf88126..72403571972 100644 --- a/internal/lsp/cache/snapshot.go +++ b/internal/lsp/cache/snapshot.go @@ -1933,6 +1933,12 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC // If a file has been deleted, we must delete metadata for all packages // containing that file. workspaceModeChanged := s.workspaceMode() != result.workspaceMode() + + // Don't keep package metadata for packages that have lost files. + // + // TODO(rfindley): why not keep invalid metadata in this case? If we + // otherwise allow operate on invalid metadata, why not continue to do so, + // skipping the missing file? skipID := map[PackageID]bool{} for _, c := range changes { if c.exists { @@ -1967,41 +1973,23 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC addForwardDeps(id) } - // Compute which IDs are in the snapshot. - // - // TODO(rfindley): this step shouldn't be necessary, since we compute skipID - // above based on meta.ids. - deleteInvalidMetadata := forceReloadMetadata || workspaceModeChanged - idsInSnapshot := map[PackageID]bool{} // track all known IDs - for _, ids := range s.meta.ids { - for _, id := range ids { - if skipID[id] || deleteInvalidMetadata && idsToInvalidate[id] { - continue - } - // The ID is not reachable from any workspace package, so it should - // be deleted. 
- if !reachableID[id] { - continue - } - idsInSnapshot[id] = true - } - } - // TODO(adonovan): opt: represent PackageID as an index into a process-global - // dup-free list of all package names ever seen, then use a bitmap instead of - // a hash table for "PackageSet" (e.g. idsInSnapshot). - // Compute which metadata updates are required. We only need to invalidate // packages directly containing the affected file, and only if it changed in // a relevant way. metadataUpdates := make(map[PackageID]*KnownMetadata) + deleteInvalidMetadata := forceReloadMetadata || workspaceModeChanged for k, v := range s.meta.metadata { - if !idsInSnapshot[k] { - // Delete metadata for IDs that are no longer reachable from files - // in the snapshot. + invalidateMetadata := idsToInvalidate[k] + if skipID[k] || (invalidateMetadata && deleteInvalidMetadata) { + metadataUpdates[k] = nil + continue + } + // The ID is not reachable from any workspace package, so it should + // be deleted. + if !reachableID[k] { metadataUpdates[k] = nil continue } - invalidateMetadata := idsToInvalidate[k] valid := v.Valid && !invalidateMetadata pkgFilesChanged := v.PkgFilesChanged || changedPkgFiles[k] shouldLoad := v.ShouldLoad || invalidateMetadata From c353b054c48e6ce082f2cde31c5d6815c4b6126b Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Mon, 9 Aug 2021 11:06:54 -0400 Subject: [PATCH 026/136] internal/lsp/cache: delete checkSnapshotLocked Now that the entire metadata graph and workspace packages are derived from metadata, there should be no need to validate coherency. This results in a small improvement to didChange benchmarking (within statistical noise). For golang/go#45686 Change-Id: I32683e025f42d768d62864683e55d4c00146a31c Reviewed-on: https://go-review.googlesource.com/c/tools/+/340855 TryBot-Result: Gopher Robot gopls-CI: kokoro Reviewed-by: Alan Donovan Run-TryBot: Robert Findley --- internal/lsp/cache/snapshot.go | 26 -------------------------- 1 file changed, 26 deletions(-) diff --git a/internal/lsp/cache/snapshot.go b/internal/lsp/cache/snapshot.go index 72403571972..bdb73e31dc0 100644 --- a/internal/lsp/cache/snapshot.go +++ b/internal/lsp/cache/snapshot.go @@ -30,7 +30,6 @@ import ( "golang.org/x/tools/internal/event" "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/lsp/bug" - "golang.org/x/tools/internal/lsp/debug/log" "golang.org/x/tools/internal/lsp/debug/tag" "golang.org/x/tools/internal/lsp/source" "golang.org/x/tools/internal/memoize" @@ -1638,29 +1637,6 @@ func generationName(v *View, snapshotID uint64) string { return fmt.Sprintf("v%v/%v", v.id, snapshotID) } -// checkSnapshotLocked verifies that some invariants are preserved on the -// snapshot. -func checkSnapshotLocked(ctx context.Context, s *snapshot) { - // Check that every go file for a workspace package is identified as - // belonging to that workspace package. - for wsID := range s.workspacePackages { - if m, ok := s.meta.metadata[wsID]; ok { - for _, uri := range m.GoFiles { - found := false - for _, id := range s.meta.ids[uri] { - if id == wsID { - found = true - break - } - } - if !found { - log.Error.Logf(ctx, "workspace package %v not associated with %v", wsID, uri) - } - } - } - } -} - // unappliedChanges is a file source that handles an uncloned snapshot. 
type unappliedChanges struct { originalSnapshot *snapshot @@ -1684,8 +1660,6 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC s.mu.Lock() defer s.mu.Unlock() - checkSnapshotLocked(ctx, s) - newGen := s.view.session.cache.store.Generation(generationName(s.view, s.id+1)) bgCtx, cancel := context.WithCancel(bgCtx) result := &snapshot{ From 88325aa063540896255ba32a031e726c1fb0e545 Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Thu, 16 Jun 2022 15:41:24 -0400 Subject: [PATCH 027/136] internal/memoize: add trace region for Handle.run The region name includes the type of the key, such as packageHandleKey or actionHandleKey, so we can separate these deferred computations into their own buckets. Change-Id: I0359127ccf47b158f353fae2bf74ba000668a40b Reviewed-on: https://go-review.googlesource.com/c/tools/+/412817 Run-TryBot: Alan Donovan gopls-CI: kokoro TryBot-Result: Gopher Robot Reviewed-by: Robert Findley --- internal/memoize/memoize.go | 65 +++++++++++++++++++------------------ 1 file changed, 34 insertions(+), 31 deletions(-) diff --git a/internal/memoize/memoize.go b/internal/memoize/memoize.go index dec2fff6836..480b87f5ce9 100644 --- a/internal/memoize/memoize.go +++ b/internal/memoize/memoize.go @@ -14,6 +14,7 @@ import ( "flag" "fmt" "reflect" + "runtime/trace" "sync" "sync/atomic" @@ -316,40 +317,42 @@ func (h *Handle) run(ctx context.Context, g *Generation, arg Arg) (interface{}, // Make sure that the generation isn't destroyed while we're running in it. release := g.Acquire() go func() { - defer release() - // Just in case the function does something expensive without checking - // the context, double-check we're still alive. - if childCtx.Err() != nil { - return - } - v := function(childCtx, arg) - if childCtx.Err() != nil { - // It's possible that v was computed despite the context cancellation. In - // this case we should ensure that it is cleaned up. - if h.cleanup != nil && v != nil { - h.cleanup(v) + trace.WithRegion(childCtx, fmt.Sprintf("Handle.run %T", h.key), func() { + defer release() + // Just in case the function does something expensive without checking + // the context, double-check we're still alive. + if childCtx.Err() != nil { + return + } + v := function(childCtx, arg) + if childCtx.Err() != nil { + // It's possible that v was computed despite the context cancellation. In + // this case we should ensure that it is cleaned up. + if h.cleanup != nil && v != nil { + h.cleanup(v) + } + return } - return - } - h.mu.Lock() - defer h.mu.Unlock() - // It's theoretically possible that the handle has been cancelled out - // of the run that started us, and then started running again since we - // checked childCtx above. Even so, that should be harmless, since each - // run should produce the same results. - if h.state != stateRunning { - // v will never be used, so ensure that it is cleaned up. - if h.cleanup != nil && v != nil { - h.cleanup(v) + h.mu.Lock() + defer h.mu.Unlock() + // It's theoretically possible that the handle has been cancelled out + // of the run that started us, and then started running again since we + // checked childCtx above. Even so, that should be harmless, since each + // run should produce the same results. + if h.state != stateRunning { + // v will never be used, so ensure that it is cleaned up. + if h.cleanup != nil && v != nil { + h.cleanup(v) + } + return } - return - } - // At this point v will be cleaned up whenever h is destroyed. 
- h.value = v - h.function = nil - h.state = stateCompleted - close(h.done) + // At this point v will be cleaned up whenever h is destroyed. + h.value = v + h.function = nil + h.state = stateCompleted + close(h.done) + }) }() return h.wait(ctx) From e9870152b0e8539a2ef361f87257bc7db693a30b Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Thu, 16 Jun 2022 14:03:39 -0400 Subject: [PATCH 028/136] internal/lsp/cache: symbolize in parallel This change parallelizes the buildSymbolHandle().Get computation for each file, with 2xGOMAXPROCS goroutines, since it is a mix of I/O (read) and CPU (parse). (The symbolize AST walk happens in other goroutines.) This reduces the time for the source.WorkspaceSymbols trace task applied to kubernetes from 3981ms to 630ms (6x faster). Change-Id: I5f1ee4afc2f6b2dd752791a30d33a21f50180a9c Reviewed-on: https://go-review.googlesource.com/c/tools/+/412818 Reviewed-by: Robert Findley --- internal/lsp/cache/snapshot.go | 43 +++++++++++++++++++++------------- 1 file changed, 27 insertions(+), 16 deletions(-) diff --git a/internal/lsp/cache/snapshot.go b/internal/lsp/cache/snapshot.go index bdb73e31dc0..9875ae4bd70 100644 --- a/internal/lsp/cache/snapshot.go +++ b/internal/lsp/cache/snapshot.go @@ -17,6 +17,7 @@ import ( "os" "path/filepath" "regexp" + "runtime" "sort" "strconv" "strings" @@ -25,6 +26,7 @@ import ( "golang.org/x/mod/modfile" "golang.org/x/mod/module" "golang.org/x/mod/semver" + "golang.org/x/sync/errgroup" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/packages" "golang.org/x/tools/internal/event" @@ -994,26 +996,35 @@ func (s *snapshot) activePackageHandles(ctx context.Context) ([]*packageHandle, return phs, nil } +// Symbols extracts and returns the symbols for each file in all the snapshot's views. func (s *snapshot) Symbols(ctx context.Context) (map[span.URI][]source.Symbol, error) { - result := make(map[span.URI][]source.Symbol) - - // Keep going on errors, but log the first failure. Partial symbol results - // are better than no symbol results. - var firstErr error + // Keep going on errors, but log the first failure. + // Partial results are better than no symbol results. + var ( + group errgroup.Group + nprocs = 2 * runtime.GOMAXPROCS(-1) // symbolize is a mix of I/O and CPU + iolimit = make(chan struct{}, nprocs) // I/O limiting counting semaphore + resultMu sync.Mutex + result = make(map[span.URI][]source.Symbol) + ) for uri, f := range s.files { - sh := s.buildSymbolHandle(ctx, f) - v, err := sh.handle.Get(ctx, s.generation, s) - if err != nil { - if firstErr == nil { - firstErr = err + uri, f := uri, f + // TODO(adonovan): upgrade errgroup and use group.SetLimit(nprocs). + iolimit <- struct{}{} // acquire token + group.Go(func() error { + defer func() { <-iolimit }() // release token + v, err := s.buildSymbolHandle(ctx, f).handle.Get(ctx, s.generation, s) + if err != nil { + return err } - continue - } - data := v.(*symbolData) - result[uri] = data.symbols + resultMu.Lock() + result[uri] = v.(*symbolData).symbols + resultMu.Unlock() + return nil + }) } - if firstErr != nil { - event.Error(ctx, "getting snapshot symbols", firstErr) + if err := group.Wait(); err != nil { + event.Error(ctx, "getting snapshot symbols", err) } return result, nil } From 1e14d994d8902691ecce91c8f344cd228f8c7fb7 Mon Sep 17 00:00:00 2001 From: Suzy Mueller Date: Mon, 13 Jun 2022 18:16:50 -0400 Subject: [PATCH 029/136] internal/lsp: add inlay hints for composite literal types Add inlay hints for composite literal types. 
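A typical trigger is a slice of structs whose element literals omit the element type, leaving it implied by the enclosing literal. A small, self-contained illustration (the struct shape here is chosen only to mirror the testdata below):

	package main

	import "fmt"

	func main() {
		// Each inner literal is written with bare braces; its type,
		// struct{ in, want string }, comes from the slice type, and the
		// inlay hint spells that type out at the opening brace.
		cases := []struct{ in, want string }{
			{"Hello, world", "dlrow ,olleH"},
			{"", ""},
		}
		fmt.Println(len(cases))
	}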
This will show type information for composite literals with no explicit types. Example: {"hello", "goodbye"} For golang/go#52343 Change-Id: Ia1f03b82669387c864353b8033940759fa1128e7 Reviewed-on: https://go-review.googlesource.com/c/tools/+/411905 gopls-CI: kokoro Run-TryBot: Suzy Mueller Reviewed-by: Jamal Carvalho TryBot-Result: Gopher Robot --- internal/lsp/source/inlay_hint.go | 24 ++++++++++++++++--- .../testdata/inlay_hint/composite_literals.go | 14 ++++++++++- .../inlay_hint/composite_literals.go.golden | 20 ++++++++++++---- 3 files changed, 50 insertions(+), 8 deletions(-) diff --git a/internal/lsp/source/inlay_hint.go b/internal/lsp/source/inlay_hint.go index 406e4ae80e8..af9e715c91a 100644 --- a/internal/lsp/source/inlay_hint.go +++ b/internal/lsp/source/inlay_hint.go @@ -47,7 +47,7 @@ func InlayHint(ctx context.Context, snapshot Snapshot, fh FileHandle, _ protocol case *ast.GenDecl: hints = append(hints, constantValues(n, tmap, info)...) case *ast.CompositeLit: - hints = append(hints, compositeLiterals(n, tmap, info)...) + hints = append(hints, compositeLiterals(n, tmap, info, &q)...) } return true }) @@ -181,17 +181,35 @@ func constantValues(node *ast.GenDecl, tmap *lsppos.TokenMapper, info *types.Inf return hints } -func compositeLiterals(node *ast.CompositeLit, tmap *lsppos.TokenMapper, info *types.Info) []protocol.InlayHint { +func compositeLiterals(node *ast.CompositeLit, tmap *lsppos.TokenMapper, info *types.Info, q *types.Qualifier) []protocol.InlayHint { typ := info.TypeOf(node) if typ == nil { return nil } + + prefix := "" + if t, ok := typ.(*types.Pointer); ok { + typ = t.Elem() + prefix = "&" + } + strct, ok := typ.Underlying().(*types.Struct) if !ok { return nil } var hints []protocol.InlayHint + if node.Type == nil { + // The type for this struct is implicit, add an inlay hint. + if start, ok := tmap.Position(node.Lbrace); ok { + hints = append(hints, protocol.InlayHint{ + Position: &start, + Label: buildLabel(fmt.Sprintf("%s%s", prefix, types.TypeString(typ, *q))), + Kind: protocol.Type, + }) + } + } + for i, v := range node.Elts { if _, ok := v.(*ast.KeyValueExpr); !ok { start, ok := tmap.Position(v.Pos()) @@ -216,7 +234,7 @@ func buildLabel(s string) []protocol.InlayHintLabelPart { label := protocol.InlayHintLabelPart{ Value: s, } - if len(s) > maxLabelLength { + if len(s) > maxLabelLength+len("...") { label.Value = s[:maxLabelLength] + "..." 
label.Tooltip = s } diff --git a/internal/lsp/testdata/inlay_hint/composite_literals.go b/internal/lsp/testdata/inlay_hint/composite_literals.go index 7eeed03e81a..b05c95ec800 100644 --- a/internal/lsp/testdata/inlay_hint/composite_literals.go +++ b/internal/lsp/testdata/inlay_hint/composite_literals.go @@ -6,7 +6,19 @@ func fieldNames() { for _, c := range []struct { in, want string }{ - {"Hello, world", "dlrow ,olleH"}, + struct{ in, want string }{"Hello, world", "dlrow ,olleH"}, + {"Hello, 世界", "界世 ,olleH"}, + {"", ""}, + } { + fmt.Println(c.in == c.want) + } +} + +func fieldNamesPointers() { + for _, c := range []*struct { + in, want string + }{ + &struct{ in, want string }{"Hello, world", "dlrow ,olleH"}, {"Hello, 世界", "界世 ,olleH"}, {"", ""}, } { diff --git a/internal/lsp/testdata/inlay_hint/composite_literals.go.golden b/internal/lsp/testdata/inlay_hint/composite_literals.go.golden index ecff7800387..eb2febdb6a3 100644 --- a/internal/lsp/testdata/inlay_hint/composite_literals.go.golden +++ b/internal/lsp/testdata/inlay_hint/composite_literals.go.golden @@ -4,12 +4,24 @@ package inlayHint //@inlayHint("package") import "fmt" func fieldNames() { - for _< int>, c< struct{in string; want strin...> := range []struct { + for _< int>, c< struct{in string; want string}> := range []struct { in, want string }{ - {"Hello, world", "dlrow ,olleH"}, - {"Hello, 世界", "界世 ,olleH"}, - {"", ""}, + struct{ in, want string }{"Hello, world", "dlrow ,olleH"}, + {"Hello, 世界", "界世 ,olleH"}, + {"", ""}, + } { + fmt.Println(c.in == c.want) + } +} + +func fieldNamesPointers() { + for _< int>, c< *struct{in string; want string}> := range []*struct { + in, want string + }{ + &struct{ in, want string }{"Hello, world", "dlrow ,olleH"}, + <&struct{in string; want string}>{"Hello, 世界", "界世 ,olleH"}, + <&struct{in string; want string}>{"", ""}, } { fmt.Println(c.in == c.want) } From 381ac87aae563aca1b0fcf5ced49d661e1dba9f4 Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Thu, 16 Jun 2022 16:22:28 -0400 Subject: [PATCH 030/136] internal/lsp/debug: reduce critical sections in trace This change reduces the sizes of the critical section in traces.ProcessEvent, in particular moving allocations ahead of Lock. This reduces the contention according to the trace profiler. See https://go-review.googlesource.com/c/go/+/411909 for another reduction in contention. The largest remaining contention is Handle.Get, which thousands of goroutines wait for because we initiate typechecking top down. (Second attempt at https://go-review.googlesource.com/c/tools/+/411910, reverted in https://go-review.googlesource.com/c/tools/+/412694. The changes to Generation.Bind have been dropped.) Change-Id: Ia9050c97bd12d2d75055f8d1dfcda3ef1f5ad334 Reviewed-on: https://go-review.googlesource.com/c/tools/+/412820 Run-TryBot: Alan Donovan TryBot-Result: Gopher Robot gopls-CI: kokoro Reviewed-by: Hyang-Ah Hana Kim --- internal/lsp/cache/parse.go | 2 +- internal/lsp/debug/trace.go | 39 ++++++++++++++++++++++--------------- 2 files changed, 24 insertions(+), 17 deletions(-) diff --git a/internal/lsp/cache/parse.go b/internal/lsp/cache/parse.go index 668c437f5c9..ab55743ccf0 100644 --- a/internal/lsp/cache/parse.go +++ b/internal/lsp/cache/parse.go @@ -278,7 +278,7 @@ func parseGo(ctx context.Context, fset *token.FileSet, fh source.FileHandle, mod tok := fset.File(file.Pos()) if tok == nil { - // file.Pos is the location of the package declaration. If there was + // file.Pos is the location of the package declaration (issue #53202). 
If there was // none, we can't find the token.File that ParseFile created, and we // have no choice but to recreate it. tok = fset.AddFile(fh.URI().Filename(), -1, len(src)) diff --git a/internal/lsp/debug/trace.go b/internal/lsp/debug/trace.go index ca612867a5d..bb402cfaa8f 100644 --- a/internal/lsp/debug/trace.go +++ b/internal/lsp/debug/trace.go @@ -119,8 +119,6 @@ func formatEvent(ctx context.Context, ev core.Event, lm label.Map) string { } func (t *traces) ProcessEvent(ctx context.Context, ev core.Event, lm label.Map) context.Context { - t.mu.Lock() - defer t.mu.Unlock() span := export.GetSpan(ctx) if span == nil { return ctx @@ -128,11 +126,8 @@ func (t *traces) ProcessEvent(ctx context.Context, ev core.Event, lm label.Map) switch { case event.IsStart(ev): - if t.sets == nil { - t.sets = make(map[string]*traceSet) - t.unfinished = make(map[export.SpanContext]*traceData) - } - // just starting, add it to the unfinished map + // Just starting: add it to the unfinished map. + // Allocate before the critical section. td := &traceData{ TraceID: span.ID.TraceID, SpanID: span.ID.SpanID, @@ -141,6 +136,13 @@ func (t *traces) ProcessEvent(ctx context.Context, ev core.Event, lm label.Map) Start: span.Start().At(), Tags: renderLabels(span.Start()), } + + t.mu.Lock() + defer t.mu.Unlock() + if t.sets == nil { + t.sets = make(map[string]*traceSet) + t.unfinished = make(map[export.SpanContext]*traceData) + } t.unfinished[span.ID] = td // and wire up parents if we have them if !span.ParentID.IsValid() { @@ -155,7 +157,19 @@ func (t *traces) ProcessEvent(ctx context.Context, ev core.Event, lm label.Map) parent.Children = append(parent.Children, td) case event.IsEnd(ev): - // finishing, must be already in the map + // Finishing: must be already in the map. + // Allocate events before the critical section. + events := span.Events() + tdEvents := make([]traceEvent, len(events)) + for i, event := range events { + tdEvents[i] = traceEvent{ + Time: event.At(), + Tags: renderLabels(event), + } + } + + t.mu.Lock() + defer t.mu.Unlock() td, found := t.unfinished[span.ID] if !found { return ctx // if this happens we are in a bad place @@ -164,14 +178,7 @@ func (t *traces) ProcessEvent(ctx context.Context, ev core.Event, lm label.Map) td.Finish = span.Finish().At() td.Duration = span.Finish().At().Sub(span.Start().At()) - events := span.Events() - td.Events = make([]traceEvent, len(events)) - for i, event := range events { - td.Events[i] = traceEvent{ - Time: event.At(), - Tags: renderLabels(event), - } - } + td.Events = tdEvents set, ok := t.sets[span.Name] if !ok { From 70ccf57e4b75514eaa0d0b35525b2b5fd63e489a Mon Sep 17 00:00:00 2001 From: aarzilli Date: Fri, 3 Dec 2021 10:51:49 +0100 Subject: [PATCH 031/136] go/packages: fix loading single file when outside of GOPATH, module Allows a 'file=' query to load a single file even when it is outside of GOPATH or a module. 
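For illustration, this is the shape of call that now succeeds (a sketch only: the directory and file path are hypothetical, and the new TestPackageLoadSingleFile below exercises the same case against a real temp dir):

	package main

	import (
		"fmt"
		"log"

		"golang.org/x/tools/go/packages"
	)

	func main() {
		// a.go sits in a bare scratch directory: no go.mod, not under GOPATH.
		cfg := &packages.Config{Mode: packages.LoadSyntax, Dir: "/tmp/scratch"}
		pkgs, err := packages.Load(cfg, "file=/tmp/scratch/a.go")
		if err != nil {
			log.Fatal(err)
		}
		if len(pkgs) != 1 {
			log.Fatalf("got %d packages, want 1", len(pkgs))
		}
		fmt.Println(pkgs[0].ID, pkgs[0].CompiledGoFiles)
	}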
Fixes golang/go#49949 Change-Id: I519f1412923dfc1d2504ec49620d10c823e5c0dc Reviewed-on: https://go-review.googlesource.com/c/tools/+/369014 TryBot-Result: Gopher Robot gopls-CI: kokoro Reviewed-by: Robert Findley Reviewed-by: Hyang-Ah Hana Kim --- go/packages/golist.go | 7 ++++--- go/packages/packages_test.go | 25 +++++++++++++++++++++++++ 2 files changed, 29 insertions(+), 3 deletions(-) diff --git a/go/packages/golist.go b/go/packages/golist.go index 50533995a65..de881562de1 100644 --- a/go/packages/golist.go +++ b/go/packages/golist.go @@ -302,11 +302,12 @@ func (state *golistState) runContainsQueries(response *responseDeduper, queries } dirResponse, err := state.createDriverResponse(pattern) - // If there was an error loading the package, or the package is returned - // with errors, try to load the file as an ad-hoc package. + // If there was an error loading the package, or no packages are returned, + // or the package is returned with errors, try to load the file as an + // ad-hoc package. // Usually the error will appear in a returned package, but may not if we're // in module mode and the ad-hoc is located outside a module. - if err != nil || len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].GoFiles) == 0 && + if err != nil || len(dirResponse.Packages) == 0 || len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].GoFiles) == 0 && len(dirResponse.Packages[0].Errors) == 1 { var queryErr error if dirResponse, queryErr = state.adhocPackage(pattern, query); queryErr != nil { diff --git a/go/packages/packages_test.go b/go/packages/packages_test.go index 796edb6b7b4..647f3a366df 100644 --- a/go/packages/packages_test.go +++ b/go/packages/packages_test.go @@ -2709,6 +2709,31 @@ func TestEmptyEnvironment(t *testing.T) { } } +func TestPackageLoadSingleFile(t *testing.T) { + tmp, err := ioutil.TempDir("", "a") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + + filename := filepath.Join(tmp, "a.go") + + if err := ioutil.WriteFile(filename, []byte(`package main; func main() { println("hello world") }`), 0775); err != nil { + t.Fatal(err) + } + + pkgs, err := packages.Load(&packages.Config{Mode: packages.LoadSyntax, Dir: tmp}, "file="+filename) + if err != nil { + t.Fatalf("could not load package: %v", err) + } + if len(pkgs) != 1 { + t.Fatalf("expected one package to be loaded, got %d", len(pkgs)) + } + if len(pkgs[0].CompiledGoFiles) != 1 || pkgs[0].CompiledGoFiles[0] != filename { + t.Fatalf("expected one compiled go file (%q), got %v", filename, pkgs[0].CompiledGoFiles) + } +} + func errorMessages(errors []packages.Error) []string { var msgs []string for _, err := range errors { From 641b30b3f4033251a94b65a87e8baca26ab1a7a4 Mon Sep 17 00:00:00 2001 From: Jamal Carvalho Date: Fri, 17 Jun 2022 20:11:18 +0000 Subject: [PATCH 032/136] internal/lsp: add inlay hints for inferred type params This will show inferred type information for generic function call expressions. 
Example: SumNumbers<[string, int64]>(ints) For golang/go#52343 Change-Id: I05595f236626e8fb3666af5160611e074e8265a4 Reviewed-on: https://go-review.googlesource.com/c/tools/+/412994 Reviewed-by: Suzy Mueller TryBot-Result: Gopher Robot gopls-CI: kokoro Run-TryBot: Jamal Carvalho --- internal/lsp/source/inlay_hint.go | 29 ++++++++++++ .../lsp/testdata/inlay_hint/type_params.go | 45 ++++++++++++++++++ .../testdata/inlay_hint/type_params.go.golden | 47 +++++++++++++++++++ .../lsp/testdata/summary_go1.18.txt.golden | 2 +- 4 files changed, 122 insertions(+), 1 deletion(-) create mode 100644 internal/lsp/testdata/inlay_hint/type_params.go create mode 100644 internal/lsp/testdata/inlay_hint/type_params.go.golden diff --git a/internal/lsp/source/inlay_hint.go b/internal/lsp/source/inlay_hint.go index af9e715c91a..8fe46b27a89 100644 --- a/internal/lsp/source/inlay_hint.go +++ b/internal/lsp/source/inlay_hint.go @@ -16,6 +16,7 @@ import ( "golang.org/x/tools/internal/event" "golang.org/x/tools/internal/lsp/lsppos" "golang.org/x/tools/internal/lsp/protocol" + "golang.org/x/tools/internal/typeparams" ) const ( @@ -40,6 +41,7 @@ func InlayHint(ctx context.Context, snapshot Snapshot, fh FileHandle, _ protocol switch n := node.(type) { case *ast.CallExpr: hints = append(hints, parameterNames(n, tmap, info)...) + hints = append(hints, funcTypeParams(n, tmap, info)...) case *ast.AssignStmt: hints = append(hints, assignVariableTypes(n, tmap, info, &q)...) case *ast.RangeStmt: @@ -90,6 +92,33 @@ func parameterNames(node *ast.CallExpr, tmap *lsppos.TokenMapper, info *types.In return hints } +func funcTypeParams(node *ast.CallExpr, tmap *lsppos.TokenMapper, info *types.Info) []protocol.InlayHint { + id, ok := node.Fun.(*ast.Ident) + if !ok { + return nil + } + inst := typeparams.GetInstances(info)[id] + if inst.TypeArgs == nil { + return nil + } + start, ok := tmap.Position(id.End()) + if !ok { + return nil + } + var args []string + for i := 0; i < inst.TypeArgs.Len(); i++ { + args = append(args, inst.TypeArgs.At(i).String()) + } + if len(args) == 0 { + return nil + } + return []protocol.InlayHint{{ + Position: &start, + Label: buildLabel("[" + strings.Join(args, ", ") + "]"), + Kind: protocol.Type, + }} +} + func assignVariableTypes(node *ast.AssignStmt, tmap *lsppos.TokenMapper, info *types.Info, q *types.Qualifier) []protocol.InlayHint { if node.Tok != token.DEFINE { return nil diff --git a/internal/lsp/testdata/inlay_hint/type_params.go b/internal/lsp/testdata/inlay_hint/type_params.go new file mode 100644 index 00000000000..3a3c7e53734 --- /dev/null +++ b/internal/lsp/testdata/inlay_hint/type_params.go @@ -0,0 +1,45 @@ +//go:build go1.18 +// +build go1.18 + +package inlayHint //@inlayHint("package") + +func main() { + ints := map[string]int64{ + "first": 34, + "second": 12, + } + + floats := map[string]float64{ + "first": 35.98, + "second": 26.99, + } + + SumIntsOrFloats[string, int64](ints) + SumIntsOrFloats[string, float64](floats) + + SumIntsOrFloats(ints) + SumIntsOrFloats(floats) + + SumNumbers(ints) + SumNumbers(floats) +} + +type Number interface { + int64 | float64 +} + +func SumIntsOrFloats[K comparable, V int64 | float64](m map[K]V) V { + var s V + for _, v := range m { + s += v + } + return s +} + +func SumNumbers[K comparable, V Number](m map[K]V) V { + var s V + for _, v := range m { + s += v + } + return s +} diff --git a/internal/lsp/testdata/inlay_hint/type_params.go.golden b/internal/lsp/testdata/inlay_hint/type_params.go.golden new file mode 100644 index 00000000000..4819963b7a4 --- 
/dev/null +++ b/internal/lsp/testdata/inlay_hint/type_params.go.golden @@ -0,0 +1,47 @@ +-- inlayHint -- +//go:build go1.18 +// +build go1.18 + +package inlayHint //@inlayHint("package") + +func main() { + ints< map[string]int64> := map[string]int64{ + "first": 34, + "second": 12, + } + + floats< map[string]float64> := map[string]float64{ + "first": 35.98, + "second": 26.99, + } + + SumIntsOrFloats[string, int64](ints) + SumIntsOrFloats[string, float64](floats) + + SumIntsOrFloats<[string, int64]>(ints) + SumIntsOrFloats<[string, float64]>(floats) + + SumNumbers<[string, int64]>(ints) + SumNumbers<[string, float64]>(floats) +} + +type Number interface { + int64 | float64 +} + +func SumIntsOrFloats[K comparable, V int64 | float64](m map[K]V) V { + var s V + for _< K>, v< V> := range m { + s += v + } + return s +} + +func SumNumbers[K comparable, V Number](m map[K]V) V { + var s V + for _< K>, v< V> := range m { + s += v + } + return s +} + diff --git a/internal/lsp/testdata/summary_go1.18.txt.golden b/internal/lsp/testdata/summary_go1.18.txt.golden index 28a2672db50..7e8da12d764 100644 --- a/internal/lsp/testdata/summary_go1.18.txt.golden +++ b/internal/lsp/testdata/summary_go1.18.txt.golden @@ -19,7 +19,7 @@ MethodExtractionCount = 6 DefinitionsCount = 108 TypeDefinitionsCount = 18 HighlightsCount = 69 -InlayHintsCount = 4 +InlayHintsCount = 5 ReferencesCount = 27 RenamesCount = 48 PrepareRenamesCount = 7 From a1303c83f37e72ec45811934376e94ab7fbfd3dd Mon Sep 17 00:00:00 2001 From: Jamal Carvalho Date: Tue, 21 Jun 2022 14:44:14 +0000 Subject: [PATCH 033/136] internal/lsp: remove tooltip from inlay hints The tooltip for truncated inlay hint labels is redundant with the hover state of the target identifier. This matches the behavior of inlay hint implementations in other languages. Change-Id: I209054f8c65df504cae67121e3cbc3eacaf02710 Reviewed-on: https://go-review.googlesource.com/c/tools/+/413417 Run-TryBot: Jamal Carvalho Reviewed-by: Suzy Mueller gopls-CI: kokoro TryBot-Result: Gopher Robot --- internal/lsp/source/inlay_hint.go | 1 - 1 file changed, 1 deletion(-) diff --git a/internal/lsp/source/inlay_hint.go b/internal/lsp/source/inlay_hint.go index 8fe46b27a89..8369681003a 100644 --- a/internal/lsp/source/inlay_hint.go +++ b/internal/lsp/source/inlay_hint.go @@ -265,7 +265,6 @@ func buildLabel(s string) []protocol.InlayHintLabelPart { } if len(s) > maxLabelLength+len("...") { label.Value = s[:maxLabelLength] + "..." - label.Tooltip = s } return []protocol.InlayHintLabelPart{label} } From 59bd4faed9b3724797fdffa7c1476ff8fb8a4878 Mon Sep 17 00:00:00 2001 From: Dylan Le Date: Wed, 25 May 2022 15:59:38 -0400 Subject: [PATCH 034/136] internal/lsp: find references to package Update References to detect if the package is referenced and a regtest to test within and external package references. Updates golang/go#41567 Change-Id: I607a47bf15f1c9f8236336f795fcef081db49d6a Reviewed-on: https://go-review.googlesource.com/c/tools/+/408714 Reviewed-by: Robert Findley Run-TryBot: Dylan Le gopls-CI: kokoro --- .../internal/regtest/misc/references_test.go | 88 +++++++++++++++++++ internal/lsp/source/references.go | 60 +++++++++++++ 2 files changed, 148 insertions(+) diff --git a/gopls/internal/regtest/misc/references_test.go b/gopls/internal/regtest/misc/references_test.go index 768251680f9..de2e9b97fd8 100644 --- a/gopls/internal/regtest/misc/references_test.go +++ b/gopls/internal/regtest/misc/references_test.go @@ -5,6 +5,8 @@ package misc import ( + "fmt" + "strings" "testing" . 
"golang.org/x/tools/internal/lsp/regtest" @@ -81,3 +83,89 @@ func _() { } }) } + +func TestPackageReferences(t *testing.T) { + tests := []struct { + packageName string + wantRefCount int + wantFiles []string + }{ + { + "lib1", + 3, + []string{ + "main.go", + "lib1/a.go", + "lib1/b.go", + }, + }, + { + "lib2", + 2, + []string{ + "main.go", + "lib2/a.go", + }, + }, + } + + const files = ` +-- go.mod -- +module mod.com + +go 1.18 +-- lib1/a.go -- +package lib1 + +const A = 1 + +-- lib1/b.go -- +package lib1 + +const B = 1 + +-- lib2/a.go -- +package lib2 + +const C = 1 + +-- main.go -- +package main + +import ( + "mod.com/lib1" + "mod.com/lib2" +) + +func main() { + println("Hello") +} +` + Run(t, files, func(t *testing.T, env *Env) { + for _, test := range tests { + f := fmt.Sprintf("%s/a.go", test.packageName) + env.OpenFile(f) + pos := env.RegexpSearch(f, test.packageName) + refs := env.References(fmt.Sprintf("%s/a.go", test.packageName), pos) + if len(refs) != test.wantRefCount { + t.Fatalf("got %v reference(s), want %d", len(refs), test.wantRefCount) + } + var refURIs []string + for _, ref := range refs { + refURIs = append(refURIs, string(ref.URI)) + } + for _, base := range test.wantFiles { + hasBase := false + for _, ref := range refURIs { + if strings.HasSuffix(ref, base) { + hasBase = true + break + } + } + if !hasBase { + t.Fatalf("got [%v], want reference ends with \"%v\"", strings.Join(refURIs, ","), base) + } + } + } + }) +} diff --git a/internal/lsp/source/references.go b/internal/lsp/source/references.go index 3541600b207..85bf41a21b0 100644 --- a/internal/lsp/source/references.go +++ b/internal/lsp/source/references.go @@ -9,12 +9,15 @@ import ( "errors" "fmt" "go/ast" + "go/token" "go/types" "sort" + "strconv" "golang.org/x/tools/internal/event" "golang.org/x/tools/internal/lsp/protocol" + "golang.org/x/tools/internal/lsp/safetoken" "golang.org/x/tools/internal/span" ) @@ -34,6 +37,63 @@ func References(ctx context.Context, s Snapshot, f FileHandle, pp protocol.Posit ctx, done := event.Start(ctx, "source.References") defer done() + // Find position of the package name declaration + pgf, err := s.ParseGo(ctx, f, ParseFull) + if err != nil { + return nil, err + } + + cursorOffset, err := pgf.Mapper.Offset(pp) + if err != nil { + return nil, err + } + + packageNameStart, err := safetoken.Offset(pgf.Tok, pgf.File.Name.Pos()) + if err != nil { + return nil, err + } + + packageNameEnd, err := safetoken.Offset(pgf.Tok, pgf.File.Name.End()) + if err != nil { + return nil, err + } + + if packageNameStart <= cursorOffset && cursorOffset < packageNameEnd { + renamingPkg, err := s.PackageForFile(ctx, f.URI(), TypecheckAll, NarrowestPackage) + if err != nil { + return nil, err + } + + // Find external references to the package. 
+ rdeps, err := s.GetReverseDependencies(ctx, renamingPkg.ID()) + if err != nil { + return nil, err + } + var refs []*ReferenceInfo + for _, dep := range rdeps { + for _, f := range dep.CompiledGoFiles() { + for _, imp := range f.File.Imports { + if path, err := strconv.Unquote(imp.Path.Value); err == nil && path == renamingPkg.PkgPath() { + refs = append(refs, &ReferenceInfo{ + Name: pgf.File.Name.Name, + MappedRange: NewMappedRange(s.FileSet(), f.Mapper, imp.Pos(), imp.End()), + }) + } + } + } + } + + // Find internal references to the package within the package itself + for _, f := range renamingPkg.CompiledGoFiles() { + refs = append(refs, &ReferenceInfo{ + Name: pgf.File.Name.Name, + MappedRange: NewMappedRange(s.FileSet(), f.Mapper, f.File.Name.Pos(), f.File.Name.End()), + }) + } + + return refs, nil + } + qualifiedObjs, err := qualifiedObjsAtProtocolPos(ctx, s, f.URI(), pp) // Don't return references for builtin types. if errors.Is(err, errBuiltin) { From 63d8015eb823e14eb3184232334cf911e5625815 Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Thu, 16 Jun 2022 17:58:27 -0400 Subject: [PATCH 035/136] internal/lsp/cache: minor simplifications to Symbols Minor cleanups based on studying the code in preparation for saving a persistent index: - Remove unused error result from Symbols method. - Remove unnecessary fields from symbolHandle. - Add various explanatory comments. - In workspace_symbols.go: - separate extract and match phases of collectSymbols clearly - replace symbolCollector and matchWorker types by simple parameters - combine loops (roots, buildMatcher) - move buildMatcher creation down to where it is needed. Change-Id: Ifcad61a9a8c7d70f573024bcfa76d476552ee428 Reviewed-on: https://go-review.googlesource.com/c/tools/+/412822 Reviewed-by: Robert Findley --- internal/lsp/cache/cache.go | 1 + internal/lsp/cache/snapshot.go | 13 +- internal/lsp/cache/symbols.go | 30 ++--- internal/lsp/source/view.go | 2 +- internal/lsp/source/workspace_symbol.go | 162 ++++++++++-------------- 5 files changed, 88 insertions(+), 120 deletions(-) diff --git a/internal/lsp/cache/cache.go b/internal/lsp/cache/cache.go index 2a8a169d510..3640272688f 100644 --- a/internal/lsp/cache/cache.go +++ b/internal/lsp/cache/cache.go @@ -68,6 +68,7 @@ func (h *fileHandle) Saved() bool { return true } +// GetFile stats and (maybe) reads the file, updates the cache, and returns it. func (c *Cache) GetFile(ctx context.Context, uri span.URI) (source.FileHandle, error) { return c.getFile(ctx, uri) } diff --git a/internal/lsp/cache/snapshot.go b/internal/lsp/cache/snapshot.go index 9875ae4bd70..32681735b28 100644 --- a/internal/lsp/cache/snapshot.go +++ b/internal/lsp/cache/snapshot.go @@ -997,9 +997,7 @@ func (s *snapshot) activePackageHandles(ctx context.Context) ([]*packageHandle, } // Symbols extracts and returns the symbols for each file in all the snapshot's views. -func (s *snapshot) Symbols(ctx context.Context) (map[span.URI][]source.Symbol, error) { - // Keep going on errors, but log the first failure. - // Partial results are better than no symbol results. +func (s *snapshot) Symbols(ctx context.Context) map[span.URI][]source.Symbol { var ( group errgroup.Group nprocs = 2 * runtime.GOMAXPROCS(-1) // symbolize is a mix of I/O and CPU @@ -1023,10 +1021,12 @@ func (s *snapshot) Symbols(ctx context.Context) (map[span.URI][]source.Symbol, e return nil }) } + // Keep going on errors, but log the first failure. + // Partial results are better than no symbol results. 
if err := group.Wait(); err != nil { event.Error(ctx, "getting snapshot symbols", err) } - return result, nil + return result } func (s *snapshot) MetadataForFile(ctx context.Context, uri span.URI) ([]source.Metadata, error) { @@ -1137,11 +1137,10 @@ func (s *snapshot) getSymbolHandle(uri span.URI) *symbolHandle { return s.symbols[uri] } -func (s *snapshot) addSymbolHandle(sh *symbolHandle) *symbolHandle { +func (s *snapshot) addSymbolHandle(uri span.URI, sh *symbolHandle) *symbolHandle { s.mu.Lock() defer s.mu.Unlock() - uri := sh.fh.URI() // If the package handle has already been cached, // return the cached handle instead of overriding it. if sh, ok := s.symbols[uri]; ok { @@ -1338,7 +1337,7 @@ func (s *snapshot) getFileLocked(ctx context.Context, f *fileBase) (source.Versi return fh, nil } - fh, err := s.view.session.cache.getFile(ctx, f.URI()) + fh, err := s.view.session.cache.getFile(ctx, f.URI()) // read the file if err != nil { return nil, err } diff --git a/internal/lsp/cache/symbols.go b/internal/lsp/cache/symbols.go index bf5e00b1648..d56a036ff6e 100644 --- a/internal/lsp/cache/symbols.go +++ b/internal/lsp/cache/symbols.go @@ -18,13 +18,9 @@ import ( "golang.org/x/tools/internal/memoize" ) +// A symbolHandle contains a handle to the result of symbolizing a file. type symbolHandle struct { handle *memoize.Handle - - fh source.FileHandle - - // key is the hashed key for the package. - key symbolHandleKey } // symbolData contains the data produced by extracting symbols from a file. @@ -33,30 +29,30 @@ type symbolData struct { err error } -type symbolHandleKey source.Hash - +// buildSymbolHandle returns a handle to the result of symbolizing a file, +// if necessary creating it and saving it in the snapshot. func (s *snapshot) buildSymbolHandle(ctx context.Context, fh source.FileHandle) *symbolHandle { if h := s.getSymbolHandle(fh.URI()); h != nil { return h } + type symbolHandleKey source.Hash key := symbolHandleKey(fh.FileIdentity().Hash) - h := s.generation.Bind(key, func(_ context.Context, arg memoize.Arg) interface{} { + handle := s.generation.Bind(key, func(_ context.Context, arg memoize.Arg) interface{} { snapshot := arg.(*snapshot) - data := &symbolData{} - data.symbols, data.err = symbolize(snapshot, fh) - return data + symbols, err := symbolize(snapshot, fh) + return &symbolData{symbols, err} }, nil) sh := &symbolHandle{ - handle: h, - fh: fh, - key: key, + handle: handle, } - return s.addSymbolHandle(sh) + + return s.addSymbolHandle(fh.URI(), sh) } -// symbolize extracts symbols from a file. It uses a parsed file already -// present in the cache but otherwise does not populate the cache. +// symbolize reads and parses a file and extracts symbols from it. +// It may use a parsed file already present in the cache but +// otherwise does not populate the cache. func symbolize(snapshot *snapshot, fh source.FileHandle) ([]source.Symbol, error) { src, err := fh.Read() if err != nil { diff --git a/internal/lsp/source/view.go b/internal/lsp/source/view.go index 7960b0c0368..0d8d661d60c 100644 --- a/internal/lsp/source/view.go +++ b/internal/lsp/source/view.go @@ -175,7 +175,7 @@ type Snapshot interface { ActivePackages(ctx context.Context) ([]Package, error) // Symbols returns all symbols in the snapshot. - Symbols(ctx context.Context) (map[span.URI][]Symbol, error) + Symbols(ctx context.Context) map[span.URI][]Symbol // Metadata returns package metadata associated with the given file URI. 
MetadataForFile(ctx context.Context, uri span.URI) ([]Metadata, error) diff --git a/internal/lsp/source/workspace_symbol.go b/internal/lsp/source/workspace_symbol.go index 11e22d17bea..c7cfe5c9ef8 100644 --- a/internal/lsp/source/workspace_symbol.go +++ b/internal/lsp/source/workspace_symbol.go @@ -50,14 +50,26 @@ const maxSymbols = 100 // with a different configured SymbolMatcher per View. Therefore we assume that // Session level configuration will define the SymbolMatcher to be used for the // WorkspaceSymbols method. -func WorkspaceSymbols(ctx context.Context, matcherType SymbolMatcher, style SymbolStyle, views []View, query string) ([]protocol.SymbolInformation, error) { +func WorkspaceSymbols(ctx context.Context, matcher SymbolMatcher, style SymbolStyle, views []View, query string) ([]protocol.SymbolInformation, error) { ctx, done := event.Start(ctx, "source.WorkspaceSymbols") defer done() if query == "" { return nil, nil } - sc := newSymbolCollector(matcherType, style, query) - return sc.walk(ctx, views) + + var s symbolizer + switch style { + case DynamicSymbols: + s = dynamicSymbolMatch + case FullyQualifiedSymbols: + s = fullyQualifiedSymbolMatch + case PackageQualifiedSymbols: + s = packageSymbolMatch + default: + panic(fmt.Errorf("unknown symbol style: %v", style)) + } + + return collectSymbols(ctx, views, matcher, s, query) } // A matcherFunc returns the index and score of a symbol match. @@ -136,43 +148,6 @@ func packageSymbolMatch(name string, pkg Metadata, matcher matcherFunc) ([]strin return nil, 0 } -// symbolCollector holds context as we walk Packages, gathering symbols that -// match a given query. -// -// How we match symbols is parameterized by two interfaces: -// - A matcherFunc determines how well a string symbol matches a query. It -// returns a non-negative score indicating the quality of the match. A score -// of zero indicates no match. -// - A symbolizer determines how we extract the symbol for an object. This -// enables the 'symbolStyle' configuration option. -type symbolCollector struct { - // These types parameterize the symbol-matching pass. - matchers []matcherFunc - symbolizer symbolizer - - symbolStore -} - -func newSymbolCollector(matcher SymbolMatcher, style SymbolStyle, query string) *symbolCollector { - var s symbolizer - switch style { - case DynamicSymbols: - s = dynamicSymbolMatch - case FullyQualifiedSymbols: - s = fullyQualifiedSymbolMatch - case PackageQualifiedSymbols: - s = packageSymbolMatch - default: - panic(fmt.Errorf("unknown symbol style: %v", style)) - } - sc := &symbolCollector{symbolizer: s} - sc.matchers = make([]matcherFunc, runtime.GOMAXPROCS(-1)) - for i := range sc.matchers { - sc.matchers[i] = buildMatcher(matcher, query) - } - return sc -} - func buildMatcher(matcher SymbolMatcher, query string) matcherFunc { switch matcher { case SymbolFuzzy: @@ -302,36 +277,42 @@ func (c comboMatcher) match(chunks []string) (int, float64) { return first, score } -func (sc *symbolCollector) walk(ctx context.Context, views []View) ([]protocol.SymbolInformation, error) { - // Use the root view URIs for determining (lexically) whether a uri is in any - // open workspace. 
- var roots []string - for _, v := range views { - roots = append(roots, strings.TrimRight(string(v.Folder()), "/")) - } - - results := make(chan *symbolStore) - matcherlen := len(sc.matchers) - files := make(map[span.URI]symbolFile) +// collectSymbols calls snapshot.Symbols to walk the syntax trees of +// all files in the views' current snapshots, and returns a sorted, +// scored list of symbols that best match the parameters. +// +// How it matches symbols is parameterized by two interfaces: +// - A matcherFunc determines how well a string symbol matches a query. It +// returns a non-negative score indicating the quality of the match. A score +// of zero indicates no match. +// - A symbolizer determines how we extract the symbol for an object. This +// enables the 'symbolStyle' configuration option. +// +func collectSymbols(ctx context.Context, views []View, matcherType SymbolMatcher, symbolizer symbolizer, query string) ([]protocol.SymbolInformation, error) { + // Extract symbols from all files. + var work []symbolFile + var roots []string + seen := make(map[span.URI]bool) + // TODO(adonovan): opt: parallelize this loop? How often is len > 1? for _, v := range views { snapshot, release := v.Snapshot(ctx) defer release() - psyms, err := snapshot.Symbols(ctx) - if err != nil { - return nil, err - } + + // Use the root view URIs for determining (lexically) + // whether a URI is in any open workspace. + roots = append(roots, strings.TrimRight(string(v.Folder()), "/")) filters := v.Options().DirectoryFilters folder := filepath.ToSlash(v.Folder().Filename()) - for uri, syms := range psyms { + for uri, syms := range snapshot.Symbols(ctx) { norm := filepath.ToSlash(uri.Filename()) nm := strings.TrimPrefix(norm, folder) if FiltersDisallow(nm, filters) { continue } // Only scan each file once. - if _, ok := files[uri]; ok { + if seen[uri] { continue } mds, err := snapshot.MetadataForFile(ctx, uri) @@ -343,39 +324,37 @@ func (sc *symbolCollector) walk(ctx context.Context, views []View) ([]protocol.S // TODO: should use the bug reporting API continue } - files[uri] = symbolFile{uri, mds[0], syms} + seen[uri] = true + work = append(work, symbolFile{uri, mds[0], syms}) } } - var work []symbolFile - for _, f := range files { - work = append(work, f) - } - - // Compute matches concurrently. Each symbolWorker has its own symbolStore, + // Match symbols in parallel. + // Each worker has its own symbolStore, // which we merge at the end. - for i, matcher := range sc.matchers { - go func(i int, matcher matcherFunc) { - w := &symbolWorker{ - symbolizer: sc.symbolizer, - matcher: matcher, - ss: &symbolStore{}, - roots: roots, - } - for j := i; j < len(work); j += matcherlen { - w.matchFile(work[j]) + nmatchers := runtime.GOMAXPROCS(-1) // matching is CPU bound + results := make(chan *symbolStore) + for i := 0; i < nmatchers; i++ { + go func(i int) { + matcher := buildMatcher(matcherType, query) + store := new(symbolStore) + // Assign files to workers in round-robin fashion. + for j := i; j < len(work); j += nmatchers { + matchFile(store, symbolizer, matcher, roots, work[j]) } - results <- w.ss - }(i, matcher) + results <- store + }(i) } - for i := 0; i < matcherlen; i++ { - ss := <-results - for _, si := range ss.res { - sc.store(si) + // Gather and merge results as they arrive. 
+ var unified symbolStore + for i := 0; i < nmatchers; i++ { + store := <-results + for _, syms := range store.res { + unified.store(syms) } } - return sc.results(), nil + return unified.results(), nil } // FilterDisallow is code from the body of cache.pathExcludedByFilter in cache/view.go @@ -407,20 +386,13 @@ type symbolFile struct { syms []Symbol } -// symbolWorker matches symbols and captures the highest scoring results. -type symbolWorker struct { - symbolizer symbolizer - matcher matcherFunc - ss *symbolStore - roots []string -} - -func (w *symbolWorker) matchFile(i symbolFile) { +// matchFile scans a symbol file and adds matching symbols to the store. +func matchFile(store *symbolStore, symbolizer symbolizer, matcher matcherFunc, roots []string, i symbolFile) { for _, sym := range i.syms { - symbolParts, score := w.symbolizer(sym.Name, i.md, w.matcher) + symbolParts, score := symbolizer(sym.Name, i.md, matcher) // Check if the score is too low before applying any downranking. - if w.ss.tooLow(score) { + if store.tooLow(score) { continue } @@ -463,7 +435,7 @@ func (w *symbolWorker) matchFile(i symbolFile) { } inWorkspace := false - for _, root := range w.roots { + for _, root := range roots { if strings.HasPrefix(string(i.uri), root) { inWorkspace = true break @@ -484,7 +456,7 @@ func (w *symbolWorker) matchFile(i symbolFile) { } score *= 1.0 - depth*depthFactor - if w.ss.tooLow(score) { + if store.tooLow(score) { continue } @@ -496,7 +468,7 @@ func (w *symbolWorker) matchFile(i symbolFile) { rng: sym.Range, container: i.md.PackagePath(), } - w.ss.store(si) + store.store(si) } } From cbb8e8e9232d6ad09d1b469391c0161db87be765 Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Mon, 13 Jun 2022 15:09:32 -0400 Subject: [PATCH 036/136] internal/span: optimise URI.Filename to avoid allocation This change adds a fast-path check for the common case: "file:", lowercase, followed by a simple POSIX absolute file name without special characters. This function used to account for 1% of CPU on the DidChange benchmark (and I'm sure I've seen higher fractions on other tests--but perhaps that was before the clone optimizations?). It was tested by adding an assertion that it agrees with the slow path and running all our tests. Change-Id: I15492b8a317715468870b00041bf8f6b0bb53bb2 Reviewed-on: https://go-review.googlesource.com/c/tools/+/411900 TryBot-Result: Gopher Robot Reviewed-by: Robert Findley Run-TryBot: Alan Donovan gopls-CI: kokoro --- internal/span/uri.go | 25 +++++++++++++++++++++---- 1 file changed, 21 insertions(+), 4 deletions(-) diff --git a/internal/span/uri.go b/internal/span/uri.go index f2b39ca424e..8132665d75f 100644 --- a/internal/span/uri.go +++ b/internal/span/uri.go @@ -35,13 +35,29 @@ func (uri URI) Filename() string { } func filename(uri URI) (string, error) { - // This function is frequently called and its cost is - // dominated by the allocation of a net.URL. - // TODO(adonovan): opt: replace by a bespoke parseFileURI - // function that doesn't allocate. if uri == "" { return "", nil } + + // This conservative check for the common case + // of a simple non-empty absolute POSIX filename + // avoids the allocation of a net.URL. + if strings.HasPrefix(string(uri), "file:///") { + rest := string(uri)[len("file://"):] // leave one slash + for i := 0; i < len(rest); i++ { + b := rest[i] + // Reject these cases: + if b < ' ' || b == 0x7f || // control character + b == '%' || b == '+' || // URI escape + b == ':' || // Windows drive letter + b == '@' || b == '&' || b == '?' 
{ // authority or query + goto slow + } + } + return rest, nil + } +slow: + u, err := url.ParseRequestURI(string(uri)) if err != nil { return "", err @@ -54,6 +70,7 @@ func filename(uri URI) (string, error) { if isWindowsDriveURIPath(u.Path) { u.Path = strings.ToUpper(string(u.Path[1])) + u.Path[2:] } + return u.Path, nil } From a2de63544e7a13883025fb60af623a1e787df32e Mon Sep 17 00:00:00 2001 From: Robert Findley Date: Tue, 12 Apr 2022 09:45:50 -0400 Subject: [PATCH 037/136] internal/lsp/cache: honor the go.work for computing workspace packages When using Go workspaces, the go.work file should be used to determine which packages are workspace packages. For golang/go#48929 Change-Id: I1a8753ab7887daf193e093fca5070b4cc250a245 Reviewed-on: https://go-review.googlesource.com/c/tools/+/400822 Run-TryBot: Robert Findley gopls-CI: kokoro TryBot-Result: Gopher Robot Reviewed-by: Alan Donovan --- .../regtest/workspace/workspace_test.go | 64 +++++++++ internal/lsp/cache/load.go | 128 ++++++++++++++---- internal/lsp/cache/metadata.go | 7 - internal/lsp/cache/session.go | 2 + internal/lsp/cache/snapshot.go | 23 +++- internal/lsp/cache/view.go | 19 ++- internal/lsp/diagnostics.go | 2 + internal/lsp/regtest/expectation.go | 2 +- internal/lsp/source/util.go | 12 +- 9 files changed, 213 insertions(+), 46 deletions(-) diff --git a/gopls/internal/regtest/workspace/workspace_test.go b/gopls/internal/regtest/workspace/workspace_test.go index 5e5bcd13b5d..9e4b85fced8 100644 --- a/gopls/internal/regtest/workspace/workspace_test.go +++ b/gopls/internal/regtest/workspace/workspace_test.go @@ -1305,3 +1305,67 @@ func (Server) Foo() {} _, _ = env.GoToDefinition("other_test.go", env.RegexpSearch("other_test.go", "Server")) }) } + +// Test for golang/go#48929. +func TestClearNonWorkspaceDiagnostics(t *testing.T) { + testenv.NeedsGo1Point(t, 18) // uses go.work + + const ws = ` +-- go.work -- +go 1.18 + +use ( + ./b +) +-- a/go.mod -- +module a + +go 1.17 +-- a/main.go -- +package main + +func main() { + var V string +} +-- b/go.mod -- +module b + +go 1.17 +-- b/main.go -- +package b + +import ( + _ "fmt" +) +` + Run(t, ws, func(t *testing.T, env *Env) { + env.OpenFile("b/main.go") + env.Await( + OnceMet( + env.DoneWithOpen(), + NoDiagnostics("a/main.go"), + ), + ) + env.OpenFile("a/main.go") + env.Await( + OnceMet( + env.DoneWithOpen(), + env.DiagnosticAtRegexpWithMessage("a/main.go", "V", "declared but not used"), + ), + ) + env.CloseBuffer("a/main.go") + + // Make an arbitrary edit because gopls explicitly diagnoses a/main.go + // whenever it is "changed". + // + // TODO(rfindley): it should not be necessary to make another edit here. + // Gopls should be smart enough to avoid diagnosing a. + env.RegexpReplace("b/main.go", "package b", "package b // a package") + env.Await( + OnceMet( + env.DoneWithChange(), + EmptyDiagnostics("a/main.go"), + ), + ) + }) +} diff --git a/internal/lsp/cache/load.go b/internal/lsp/cache/load.go index 5f24d0f08ef..5ce49f00d43 100644 --- a/internal/lsp/cache/load.go +++ b/internal/lsp/cache/load.go @@ -197,7 +197,7 @@ func (s *snapshot) load(ctx context.Context, allowNetwork bool, scopes ...interf } // TODO: once metadata is immutable, we shouldn't have to lock here. 
s.mu.Lock() - err := s.computeMetadataUpdates(ctx, PackagePath(pkg.PkgPath), pkg, cfg, query, updates, nil) + err := computeMetadataUpdates(ctx, s.meta, PackagePath(pkg.PkgPath), pkg, cfg, query, updates, nil) s.mu.Unlock() if err != nil { return err @@ -216,7 +216,7 @@ func (s *snapshot) load(ctx context.Context, allowNetwork bool, scopes ...interf delete(s.packages, key) } } - s.workspacePackages = computeWorkspacePackages(s.meta) + s.workspacePackages = computeWorkspacePackagesLocked(s, s.meta) s.dumpWorkspace("load") s.mu.Unlock() @@ -442,7 +442,7 @@ func getWorkspaceDir(ctx context.Context, h *memoize.Handle, g *memoize.Generati // computeMetadataUpdates populates the updates map with metadata updates to // apply, based on the given pkg. It recurs through pkg.Imports to ensure that // metadata exists for all dependencies. -func (s *snapshot) computeMetadataUpdates(ctx context.Context, pkgPath PackagePath, pkg *packages.Package, cfg *packages.Config, query []string, updates map[PackageID]*KnownMetadata, path []PackageID) error { +func computeMetadataUpdates(ctx context.Context, g *metadataGraph, pkgPath PackagePath, pkg *packages.Package, cfg *packages.Config, query []string, updates map[PackageID]*KnownMetadata, path []PackageID) error { id := PackageID(pkg.ID) if new := updates[id]; new != nil { return nil @@ -494,28 +494,13 @@ func (s *snapshot) computeMetadataUpdates(ctx context.Context, pkgPath PackagePa m.Errors = append(m.Errors, err) } - uris := map[span.URI]struct{}{} for _, filename := range pkg.CompiledGoFiles { uri := span.URIFromPath(filename) m.CompiledGoFiles = append(m.CompiledGoFiles, uri) - uris[uri] = struct{}{} } for _, filename := range pkg.GoFiles { uri := span.URIFromPath(filename) m.GoFiles = append(m.GoFiles, uri) - uris[uri] = struct{}{} - } - - for uri := range uris { - // In order for a package to be considered for the workspace, at least one - // file must be contained in the workspace and not vendored. - - // The package's files are in this view. It may be a workspace package. - // Vendored packages are not likely to be interesting to the user. - if !strings.Contains(string(uri), "/vendor/") && s.view.contains(uri) { - m.HasWorkspaceFiles = true - break - } } for importPath, importPkg := range pkg.Imports { @@ -532,8 +517,8 @@ func (s *snapshot) computeMetadataUpdates(ctx context.Context, pkgPath PackagePa m.MissingDeps[importPkgPath] = struct{}{} continue } - if s.noValidMetadataForIDLocked(importID) { - if err := s.computeMetadataUpdates(ctx, importPkgPath, importPkg, cfg, query, updates, append(path, id)); err != nil { + if noValidMetadataForID(g, importID) { + if err := computeMetadataUpdates(ctx, g, importPkgPath, importPkg, cfg, query, updates, append(path, id)); err != nil { event.Error(ctx, "error in dependency", err) } } @@ -542,12 +527,101 @@ func (s *snapshot) computeMetadataUpdates(ctx context.Context, pkgPath PackagePa return nil } -// computeWorkspacePackages computes workspace packages for the given metadata -// graph. -func computeWorkspacePackages(meta *metadataGraph) map[PackageID]PackagePath { +// containsPackageLocked reports whether p is a workspace package for the +// snapshot s. +// +// s.mu must be held while calling this function. +func containsPackageLocked(s *snapshot, m *Metadata) bool { + // In legacy workspace mode, or if a package does not have an associated + // module, a package is considered inside the workspace if any of its files + // are under the workspace root (and not excluded). 
+ // + // Otherwise if the package has a module it must be an active module (as + // defined by the module root or go.work file) and at least one file must not + // be filtered out by directoryFilters. + if m.Module != nil && s.workspace.moduleSource != legacyWorkspace { + modURI := span.URIFromPath(m.Module.GoMod) + _, ok := s.workspace.activeModFiles[modURI] + if !ok { + return false + } + + uris := map[span.URI]struct{}{} + for _, uri := range m.CompiledGoFiles { + uris[uri] = struct{}{} + } + for _, uri := range m.GoFiles { + uris[uri] = struct{}{} + } + + for uri := range uris { + // Don't use view.contains here. go.work files may include modules + // outside of the workspace folder. + if !strings.Contains(string(uri), "/vendor/") && !s.view.filters(uri) { + return true + } + } + return false + } + + return containsFileInWorkspaceLocked(s, m) +} + +// containsOpenFileLocked reports whether any file referenced by m is open in +// the snapshot s. +// +// s.mu must be held while calling this function. +func containsOpenFileLocked(s *snapshot, m *KnownMetadata) bool { + uris := map[span.URI]struct{}{} + for _, uri := range m.CompiledGoFiles { + uris[uri] = struct{}{} + } + for _, uri := range m.GoFiles { + uris[uri] = struct{}{} + } + + for uri := range uris { + if s.isOpenLocked(uri) { + return true + } + } + return false +} + +// containsFileInWorkspace reports whether m contains any file inside the +// workspace of the snapshot s. +// +// s.mu must be held while calling this function. +func containsFileInWorkspaceLocked(s *snapshot, m *Metadata) bool { + uris := map[span.URI]struct{}{} + for _, uri := range m.CompiledGoFiles { + uris[uri] = struct{}{} + } + for _, uri := range m.GoFiles { + uris[uri] = struct{}{} + } + + for uri := range uris { + // In order for a package to be considered for the workspace, at least one + // file must be contained in the workspace and not vendored. + + // The package's files are in this view. It may be a workspace package. + // Vendored packages are not likely to be interesting to the user. + if !strings.Contains(string(uri), "/vendor/") && s.view.contains(uri) { + return true + } + } + return false +} + +// computeWorkspacePackagesLocked computes workspace packages in the snapshot s +// for the given metadata graph. +// +// s.mu must be held while calling this function. +func computeWorkspacePackagesLocked(s *snapshot, meta *metadataGraph) map[PackageID]PackagePath { workspacePackages := make(map[PackageID]PackagePath) for _, m := range meta.metadata { - if !m.HasWorkspaceFiles { + if !containsPackageLocked(s, m.Metadata) { continue } if m.PkgFilesChanged { @@ -567,6 +641,12 @@ func computeWorkspacePackages(meta *metadataGraph) map[PackageID]PackagePath { if allFilesHaveRealPackages(meta, m) { continue } + + // We only care about command-line-arguments packages if they are still + // open. + if !containsOpenFileLocked(s, m) { + continue + } } switch { diff --git a/internal/lsp/cache/metadata.go b/internal/lsp/cache/metadata.go index 525a3e65495..b4da7130c23 100644 --- a/internal/lsp/cache/metadata.go +++ b/internal/lsp/cache/metadata.go @@ -67,13 +67,6 @@ type Metadata struct { // TODO(rfindley): this can probably just be a method, since it is derived // from other fields. IsIntermediateTestVariant bool - - // HasWorkspaceFiles reports whether m contains any files that are considered - // part of the workspace. - // - // TODO(golang/go#48929): this should be a property of the workspace - // (the go.work file), not a constant. 
- HasWorkspaceFiles bool } // Name implements the source.Metadata interface. diff --git a/internal/lsp/cache/session.go b/internal/lsp/cache/session.go index 286d8f12c46..0d3e944b980 100644 --- a/internal/lsp/cache/session.go +++ b/internal/lsp/cache/session.go @@ -323,6 +323,8 @@ func bestViewForURI(uri span.URI, views []*View) *View { if longest != nil && len(longest.Folder()) > len(view.Folder()) { continue } + // TODO(rfindley): this should consider the workspace layout (i.e. + // go.work). if view.contains(uri) { longest = view } diff --git a/internal/lsp/cache/snapshot.go b/internal/lsp/cache/snapshot.go index 32681735b28..b85b46c64ce 100644 --- a/internal/lsp/cache/snapshot.go +++ b/internal/lsp/cache/snapshot.go @@ -768,6 +768,8 @@ func (s *snapshot) isActiveLocked(id PackageID, seen map[PackageID]bool) (active return true } } + // TODO(rfindley): it looks incorrect that we don't also check GoFiles here. + // If a CGo file is open, we want to consider the package active. for _, dep := range m.Deps { if s.isActiveLocked(dep, seen) { return true @@ -1289,11 +1291,11 @@ func (s *snapshot) noValidMetadataForURILocked(uri span.URI) bool { func (s *snapshot) noValidMetadataForID(id PackageID) bool { s.mu.Lock() defer s.mu.Unlock() - return s.noValidMetadataForIDLocked(id) + return noValidMetadataForID(s.meta, id) } -func (s *snapshot) noValidMetadataForIDLocked(id PackageID) bool { - m := s.meta.metadata[id] +func noValidMetadataForID(g *metadataGraph, id PackageID) bool { + m := g.metadata[id] return m == nil || !m.Valid } @@ -1789,8 +1791,10 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC } } + // Compute invalidations based on file changes. changedPkgFiles := map[PackageID]bool{} // packages whose file set may have changed anyImportDeleted := false + anyFileOpenedOrClosed := false for uri, change := range changes { // Maybe reinitialize the view if we see a change in the vendor // directory. @@ -1800,6 +1804,10 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC // The original FileHandle for this URI is cached on the snapshot. originalFH := s.files[uri] + var originalOpen, newOpen bool + _, originalOpen = originalFH.(*overlay) + _, newOpen = change.fileHandle.(*overlay) + anyFileOpenedOrClosed = originalOpen != newOpen // If uri is a Go file, check if it has changed in a way that would // invalidate metadata. Note that we can't use s.view.FileKind here, @@ -1903,6 +1911,7 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC newGen.Inherit(v.handle) result.packages[k] = v } + // Copy the package analysis information. for k, v := range s.actions { if _, ok := idsToInvalidate[k.pkg.id]; ok { @@ -1988,13 +1997,19 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC } } + // Update metadata, if necessary. if len(metadataUpdates) > 0 { result.meta = s.meta.Clone(metadataUpdates) - result.workspacePackages = computeWorkspacePackages(result.meta) } else { // No metadata changes. Since metadata is only updated by cloning, it is // safe to re-use the existing metadata here. result.meta = s.meta + } + + // Update workspace packages, if necessary. 
+ if result.meta != s.meta || anyFileOpenedOrClosed { + result.workspacePackages = computeWorkspacePackagesLocked(result, result.meta) + } else { result.workspacePackages = s.workspacePackages } diff --git a/internal/lsp/cache/view.go b/internal/lsp/cache/view.go index 0ed9883451b..620efd8b965 100644 --- a/internal/lsp/cache/view.go +++ b/internal/lsp/cache/view.go @@ -397,16 +397,27 @@ func (s *snapshot) locateTemplateFiles(ctx context.Context) { } func (v *View) contains(uri span.URI) bool { + // TODO(rfindley): should we ignore the root here? It is not provided by the + // user, and is undefined when go.work is outside the workspace. It would be + // better to explicitly consider the set of active modules wherever relevant. inRoot := source.InDir(v.rootURI.Filename(), uri.Filename()) inFolder := source.InDir(v.folder.Filename(), uri.Filename()) + if !inRoot && !inFolder { return false } - // Filters are applied relative to the workspace folder. - if inFolder { - return !pathExcludedByFilter(strings.TrimPrefix(uri.Filename(), v.folder.Filename()), v.rootURI.Filename(), v.gomodcache, v.Options()) + + return !v.filters(uri) +} + +// filters reports whether uri is filtered by the currently configured +// directoryFilters. +func (v *View) filters(uri span.URI) bool { + // Only filter relative to the configured root directory. + if source.InDirLex(v.folder.Filename(), uri.Filename()) { + return pathExcludedByFilter(strings.TrimPrefix(uri.Filename(), v.folder.Filename()), v.rootURI.Filename(), v.gomodcache, v.Options()) } - return true + return false } func (v *View) mapFile(uri span.URI, f *fileBase) { diff --git a/internal/lsp/diagnostics.go b/internal/lsp/diagnostics.go index 0837b22cc22..9648921ef5d 100644 --- a/internal/lsp/diagnostics.go +++ b/internal/lsp/diagnostics.go @@ -66,6 +66,8 @@ func (d diagnosticSource) String() string { return "FromTypeChecking" case orphanedSource: return "FromOrphans" + case workSource: + return "FromGoWork" default: return fmt.Sprintf("From?%d?", d) } diff --git a/internal/lsp/regtest/expectation.go b/internal/lsp/regtest/expectation.go index ab808f9e8cf..737f83da89c 100644 --- a/internal/lsp/regtest/expectation.go +++ b/internal/lsp/regtest/expectation.go @@ -613,7 +613,7 @@ func NoDiagnostics(name string) Expectation { } return SimpleExpectation{ check: check, - description: "no diagnostics", + description: fmt.Sprintf("no diagnostics for %q", name), } } diff --git a/internal/lsp/source/util.go b/internal/lsp/source/util.go index 9cb2ee69482..b8a7fc9135f 100644 --- a/internal/lsp/source/util.go +++ b/internal/lsp/source/util.go @@ -471,7 +471,7 @@ func CompareURI(left, right span.URI) int { // // Copied and slightly adjusted from go/src/cmd/go/internal/search/search.go. func InDir(dir, path string) bool { - if inDirLex(dir, path) { + if InDirLex(dir, path) { return true } if !honorSymlinks { @@ -481,18 +481,18 @@ func InDir(dir, path string) bool { if err != nil || xpath == path { xpath = "" } else { - if inDirLex(dir, xpath) { + if InDirLex(dir, xpath) { return true } } xdir, err := filepath.EvalSymlinks(dir) if err == nil && xdir != dir { - if inDirLex(xdir, path) { + if InDirLex(xdir, path) { return true } if xpath != "" { - if inDirLex(xdir, xpath) { + if InDirLex(xdir, xpath) { return true } } @@ -500,11 +500,11 @@ func InDir(dir, path string) bool { return false } -// inDirLex is like inDir but only checks the lexical form of the file names. +// InDirLex is like inDir but only checks the lexical form of the file names. 
// It does not consider symbolic links. // // Copied from go/src/cmd/go/internal/search/search.go. -func inDirLex(dir, path string) bool { +func InDirLex(dir, path string) bool { pv := strings.ToUpper(filepath.VolumeName(path)) dv := strings.ToUpper(filepath.VolumeName(dir)) path = path[len(pv):] From 4e231cb6f8b5a782a7973e80dbc8ef7183799dad Mon Sep 17 00:00:00 2001 From: Robert Findley Date: Sun, 15 Aug 2021 12:02:50 -0400 Subject: [PATCH 038/136] internal/lsp/cache: don't prune unreachable metadata on clone Package metadata is small; there is no reason not to keep it around, and pruning it on every clone is needless work. Change-Id: I9ea73315cc6b673625f0f7defe1fd61c2e1eb123 Reviewed-on: https://go-review.googlesource.com/c/tools/+/373695 Reviewed-by: Alan Donovan Run-TryBot: Robert Findley gopls-CI: kokoro TryBot-Result: Gopher Robot --- internal/lsp/cache/snapshot.go | 27 --------------------------- 1 file changed, 27 deletions(-) diff --git a/internal/lsp/cache/snapshot.go b/internal/lsp/cache/snapshot.go index b85b46c64ce..3c46648bff9 100644 --- a/internal/lsp/cache/snapshot.go +++ b/internal/lsp/cache/snapshot.go @@ -1945,27 +1945,6 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC } } - // Collect all of the IDs that are reachable from the workspace packages. - // Any unreachable IDs will have their metadata deleted outright. - reachableID := map[PackageID]bool{} - var addForwardDeps func(PackageID) - addForwardDeps = func(id PackageID) { - if reachableID[id] { - return - } - reachableID[id] = true - m, ok := s.meta.metadata[id] - if !ok { - return - } - for _, depID := range m.Deps { - addForwardDeps(depID) - } - } - for id := range s.workspacePackages { - addForwardDeps(id) - } - // Compute which metadata updates are required. We only need to invalidate // packages directly containing the affected file, and only if it changed in // a relevant way. @@ -1977,12 +1956,6 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC metadataUpdates[k] = nil continue } - // The ID is not reachable from any workspace package, so it should - // be deleted. - if !reachableID[k] { - metadataUpdates[k] = nil - continue - } valid := v.Valid && !invalidateMetadata pkgFilesChanged := v.PkgFilesChanged || changedPkgFiles[k] shouldLoad := v.ShouldLoad || invalidateMetadata From a44cc76dc11c26d2f2d8d04f05fe0094facfdce0 Mon Sep 17 00:00:00 2001 From: Tormod Erevik Lea Date: Fri, 17 Jun 2022 10:55:46 +0200 Subject: [PATCH 039/136] cmd/stringer: use explicit NeedX values instead of deprecated LoadSyntax MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Change-Id: Ie25f67fd98d34b0a907bd13357e2643b1b79443b Reviewed-on: https://go-review.googlesource.com/c/tools/+/412914 Reviewed-by: Alex Rakoczy Auto-Submit: Daniel Martí gopls-CI: kokoro Reviewed-by: Ian Lance Taylor Reviewed-by: Daniel Martí Run-TryBot: Daniel Martí TryBot-Result: Gopher Robot Run-TryBot: Ian Lance Taylor Auto-Submit: Ian Lance Taylor --- cmd/stringer/stringer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/stringer/stringer.go b/cmd/stringer/stringer.go index 9f9c85a0370..b079985b36c 100644 --- a/cmd/stringer/stringer.go +++ b/cmd/stringer/stringer.go @@ -217,7 +217,7 @@ type Package struct { // parsePackage exits if there is an error. 
func (g *Generator) parsePackage(patterns []string, tags []string) { cfg := &packages.Config{ - Mode: packages.LoadSyntax, + Mode: packages.NeedName | packages.NeedTypes | packages.NeedTypesInfo | packages.NeedSyntax, // TODO: Need to think about constants in test files. Maybe write type_string_test.go // in a separate pass? For later. Tests: false, From 871637b6476ec258626a649fd4c4e5bc871f9535 Mon Sep 17 00:00:00 2001 From: Suzy Mueller Date: Mon, 13 Jun 2022 19:25:46 -0400 Subject: [PATCH 040/136] internal/lsp: add settings for inlay hints and enable This change adds user settings for enabling inlay hints, modeled roughly after analyzers. This will allow users to turn on specific inlay hints that they like and leave others off. With all of the inlay hints turned off by default, we can now enable inlay hints. Change-Id: Ie5dfcbbab1e0b7312eafcc4aa08cb4fe8a83fc31 Reviewed-on: https://go-review.googlesource.com/c/tools/+/411906 Run-TryBot: Suzy Mueller Reviewed-by: Jamal Carvalho gopls-CI: kokoro Reviewed-by: Robert Findley --- gopls/doc/generate.go | 44 +++++++ gopls/doc/inlayHints.md | 73 +++++++++++ gopls/doc/settings.md | 13 ++ internal/lsp/general.go | 1 + internal/lsp/lsp_test.go | 3 + internal/lsp/source/api_json.go | 75 +++++++++++ internal/lsp/source/inlay_hint.go | 206 +++++++++++++++++++++++------- internal/lsp/source/options.go | 35 ++++- internal/lsp/tests/util.go | 9 ++ 9 files changed, 410 insertions(+), 49 deletions(-) create mode 100644 gopls/doc/inlayHints.md diff --git a/gopls/doc/generate.go b/gopls/doc/generate.go index e63653de6bc..c7e0e0ffcc0 100644 --- a/gopls/doc/generate.go +++ b/gopls/doc/generate.go @@ -63,6 +63,9 @@ func doMain(baseDir string, write bool) (bool, error) { if ok, err := rewriteFile(filepath.Join(baseDir, "gopls/doc/analyzers.md"), api, write, rewriteAnalyzers); !ok || err != nil { return ok, err } + if ok, err := rewriteFile(filepath.Join(baseDir, "gopls/doc/inlayHints.md"), api, write, rewriteInlayHints); !ok || err != nil { + return ok, err + } return true, nil } @@ -102,6 +105,7 @@ func loadAPI() (*source.APIJSON, error) { } { api.Analyzers = append(api.Analyzers, loadAnalyzers(m)...) 
} + api.Hints = loadHints(source.AllInlayHints) for _, category := range []reflect.Value{ reflect.ValueOf(defaults.UserOptions), } { @@ -146,6 +150,14 @@ func loadAPI() (*source.APIJSON, error) { Default: def, }) } + case "hints": + for _, a := range api.Hints { + opt.EnumKeys.Keys = append(opt.EnumKeys.Keys, source.EnumKey{ + Name: fmt.Sprintf("%q", a.Name), + Doc: a.Doc, + Default: strconv.FormatBool(a.Default), + }) + } } } } @@ -488,6 +500,23 @@ func loadAnalyzers(m map[string]*source.Analyzer) []*source.AnalyzerJSON { return json } +func loadHints(m map[string]*source.Hint) []*source.HintJSON { + var sorted []string + for _, h := range m { + sorted = append(sorted, h.Name) + } + sort.Strings(sorted) + var json []*source.HintJSON + for _, name := range sorted { + h := m[name] + json = append(json, &source.HintJSON{ + Name: h.Name, + Doc: h.Doc, + }) + } + return json +} + func lowerFirst(x string) string { if x == "" { return x @@ -699,6 +728,21 @@ func rewriteAnalyzers(doc []byte, api *source.APIJSON) ([]byte, error) { return replaceSection(doc, "Analyzers", section.Bytes()) } +func rewriteInlayHints(doc []byte, api *source.APIJSON) ([]byte, error) { + section := bytes.NewBuffer(nil) + for _, hint := range api.Hints { + fmt.Fprintf(section, "## **%v**\n\n", hint.Name) + fmt.Fprintf(section, "%s\n\n", hint.Doc) + switch hint.Default { + case true: + fmt.Fprintf(section, "**Enabled by default.**\n\n") + case false: + fmt.Fprintf(section, "**Disabled by default. Enable it by setting `\"hints\": {\"%s\": true}`.**\n\n", hint.Name) + } + } + return replaceSection(doc, "Hints", section.Bytes()) +} + func replaceSection(doc []byte, sectionName string, replacement []byte) ([]byte, error) { re := regexp.MustCompile(fmt.Sprintf(`(?s)\n(.*?)`, sectionName, sectionName)) idx := re.FindSubmatchIndex(doc) diff --git a/gopls/doc/inlayHints.md b/gopls/doc/inlayHints.md new file mode 100644 index 00000000000..a4fd3e51554 --- /dev/null +++ b/gopls/doc/inlayHints.md @@ -0,0 +1,73 @@ +# Hints + +This document describes the inlay hints that `gopls` uses inside the editor. + + +## **assign_variable_types** + +Enable/disable inlay hints for variable types in assign statements: + + i/* int/*, j/* int/* := 0, len(r)-1 + +**Disabled by default. Enable it by setting `"hints": {"assign_variable_types": true}`.** + +## **composite_literal_fields** + +Enable/disable inlay hints for composite literal field names: + + {in: "Hello, world", want: "dlrow ,olleH"} + +**Disabled by default. Enable it by setting `"hints": {"composite_literal_fields": true}`.** + +## **composite_literal_types** + +Enable/disable inlay hints for composite literal types: + + for _, c := range []struct { + in, want string + }{ + /*struct{ in string; want string }*/{"Hello, world", "dlrow ,olleH"}, + } + +**Disabled by default. Enable it by setting `"hints": {"composite_literal_types": true}`.** + +## **constant_values** + +Enable/disable inlay hints for constant values: + + const ( + KindNone Kind = iota/* = 0*/ + KindPrint/* = 1*/ + KindPrintf/* = 2*/ + KindErrorf/* = 3*/ + ) + +**Disabled by default. Enable it by setting `"hints": {"constant_values": true}`.** + +## **function_type_parameters** + +Enable/disable inlay hints for implicit type parameters on generic functions: + + myFoo/*[int, string]*/(1, "hello") + +**Disabled by default. 
Enable it by setting `"hints": {"function_type_parameters": true}`.** + +## **parameter_names** + +Enable/disable inlay hints for parameter names: + + parseInt(/* str: */ "123", /* radix: */ 8) + +**Disabled by default. Enable it by setting `"hints": {"parameter_names": true}`.** + +## **range_variable_types** + +Enable/disable inlay hints for variable types in range statements: + + for k/* int*/, v/* string/* := range []string{} { + fmt.Println(k, v) + } + +**Disabled by default. Enable it by setting `"hints": {"range_variable_types": true}`.** + + diff --git a/gopls/doc/settings.md b/gopls/doc/settings.md index 092a3c7cfaf..0ed0e19bb02 100644 --- a/gopls/doc/settings.md +++ b/gopls/doc/settings.md @@ -35,6 +35,7 @@ still be able to independently override specific experimental features. * [Completion](#completion) * [Diagnostic](#diagnostic) * [Documentation](#documentation) + * [Inlayhint](#inlayhint) * [Navigation](#navigation) ### Build @@ -370,6 +371,18 @@ linksInHover toggles the presence of links to documentation in hover. Default: `true`. +#### Inlayhint + +##### **hints** *map[string]bool* + +**This setting is experimental and may be deleted.** + +hints specify inlay hints that users want to see. +A full list of hints that gopls uses can be found +[here](https://github.com/golang/tools/blob/master/gopls/doc/inlayHints.md). + +Default: `{}`. + #### Navigation ##### **importShortcut** *enum* diff --git a/internal/lsp/general.go b/internal/lsp/general.go index 478152bdf9a..385a04a25fd 100644 --- a/internal/lsp/general.go +++ b/internal/lsp/general.go @@ -153,6 +153,7 @@ See https://github.com/golang/go/issues/45732 for more information.`, HoverProvider: true, DocumentHighlightProvider: true, DocumentLinkProvider: protocol.DocumentLinkOptions{}, + InlayHintProvider: protocol.InlayHintOptions{}, ReferencesProvider: true, RenameProvider: renameOpts, SignatureHelpProvider: protocol.SignatureHelpOptions{ diff --git a/internal/lsp/lsp_test.go b/internal/lsp/lsp_test.go index 56356e9b5a4..2ec833b860e 100644 --- a/internal/lsp/lsp_test.go +++ b/internal/lsp/lsp_test.go @@ -67,6 +67,9 @@ func testLSP(t *testing.T, datum *tests.Data) { tests.EnableAllAnalyzers(view, options) view.SetOptions(ctx, options) + // Enable all inlay hints for tests. + tests.EnableAllInlayHints(view, options) + // Only run the -modfile specific tests in module mode with Go 1.14 or above. 
datum.ModfileFlagAvailable = len(snapshot.ModFiles()) > 0 && testenv.Go1Point() >= 14 release() diff --git a/internal/lsp/source/api_json.go b/internal/lsp/source/api_json.go index 0695efc2fa5..4188d9d06fe 100755 --- a/internal/lsp/source/api_json.go +++ b/internal/lsp/source/api_json.go @@ -505,6 +505,51 @@ var GeneratedAPIJSON = &APIJSON{ Status: "experimental", Hierarchy: "ui.diagnostic", }, + { + Name: "hints", + Type: "map[string]bool", + Doc: "hints specify inlay hints that users want to see.\nA full list of hints that gopls uses can be found\n[here](https://github.com/golang/tools/blob/master/gopls/doc/inlayHints.md).\n", + EnumKeys: EnumKeys{Keys: []EnumKey{ + { + Name: "\"assign_variable_types\"", + Doc: "Enable/disable inlay hints for variable types in assign statements:\n\n\ti/* int/*, j/* int/* := 0, len(r)-1", + Default: "false", + }, + { + Name: "\"composite_literal_fields\"", + Doc: "Enable/disable inlay hints for composite literal field names:\n\n\t{in: \"Hello, world\", want: \"dlrow ,olleH\"}", + Default: "false", + }, + { + Name: "\"composite_literal_types\"", + Doc: "Enable/disable inlay hints for composite literal types:\n\n\tfor _, c := range []struct {\n\t\tin, want string\n\t}{\n\t\t/*struct{ in string; want string }*/{\"Hello, world\", \"dlrow ,olleH\"},\n\t}", + Default: "false", + }, + { + Name: "\"constant_values\"", + Doc: "Enable/disable inlay hints for constant values:\n\n\tconst (\n\t\tKindNone Kind = iota/* = 0*/\n\t\tKindPrint/* = 1*/\n\t\tKindPrintf/* = 2*/\n\t\tKindErrorf/* = 3*/\n\t)", + Default: "false", + }, + { + Name: "\"function_type_parameters\"", + Doc: "Enable/disable inlay hints for implicit type parameters on generic functions:\n\n\tmyFoo/*[int, string]*/(1, \"hello\")", + Default: "false", + }, + { + Name: "\"parameter_names\"", + Doc: "Enable/disable inlay hints for parameter names:\n\n\tparseInt(/* str: */ \"123\", /* radix: */ 8)", + Default: "false", + }, + { + Name: "\"range_variable_types\"", + Doc: "Enable/disable inlay hints for variable types in range statements:\n\n\tfor k/* int*/, v/* string/* := range []string{} {\n\t\tfmt.Println(k, v)\n\t}", + Default: "false", + }, + }}, + Default: "{}", + Status: "experimental", + Hierarchy: "ui.inlayhint", + }, { Name: "codelenses", Type: "map[string]bool", @@ -979,4 +1024,34 @@ var GeneratedAPIJSON = &APIJSON{ Default: true, }, }, + Hints: []*HintJSON{ + { + Name: "assign_variable_types", + Doc: "Enable/disable inlay hints for variable types in assign statements:\n\n\ti/* int/*, j/* int/* := 0, len(r)-1", + }, + { + Name: "composite_literal_fields", + Doc: "Enable/disable inlay hints for composite literal field names:\n\n\t{in: \"Hello, world\", want: \"dlrow ,olleH\"}", + }, + { + Name: "composite_literal_types", + Doc: "Enable/disable inlay hints for composite literal types:\n\n\tfor _, c := range []struct {\n\t\tin, want string\n\t}{\n\t\t/*struct{ in string; want string }*/{\"Hello, world\", \"dlrow ,olleH\"},\n\t}", + }, + { + Name: "constant_values", + Doc: "Enable/disable inlay hints for constant values:\n\n\tconst (\n\t\tKindNone Kind = iota/* = 0*/\n\t\tKindPrint/* = 1*/\n\t\tKindPrintf/* = 2*/\n\t\tKindErrorf/* = 3*/\n\t)", + }, + { + Name: "function_type_parameters", + Doc: "Enable/disable inlay hints for implicit type parameters on generic functions:\n\n\tmyFoo/*[int, string]*/(1, \"hello\")", + }, + { + Name: "parameter_names", + Doc: "Enable/disable inlay hints for parameter names:\n\n\tparseInt(/* str: */ \"123\", /* radix: */ 8)", + }, + { + Name: "range_variable_types", + 
Doc: "Enable/disable inlay hints for variable types in range statements:\n\n\tfor k/* int*/, v/* string/* := range []string{} {\n\t\tfmt.Println(k, v)\n\t}", + }, + }, } diff --git a/internal/lsp/source/inlay_hint.go b/internal/lsp/source/inlay_hint.go index 8369681003a..99e1ad09d82 100644 --- a/internal/lsp/source/inlay_hint.go +++ b/internal/lsp/source/inlay_hint.go @@ -23,6 +23,87 @@ const ( maxLabelLength = 28 ) +type InlayHintFunc func(node ast.Node, tmap *lsppos.TokenMapper, info *types.Info, q *types.Qualifier) []protocol.InlayHint + +type Hint struct { + Name string + Doc string + Run InlayHintFunc +} + +const ( + ParameterNames = "parameter_names" + AssignVariableTypes = "assign_variable_types" + ConstantValues = "constant_values" + RangeVariableTypes = "range_variable_types" + CompositeLiteralTypes = "composite_literal_types" + CompositeLiteralFieldNames = "composite_literal_fields" + FunctionTypeParameters = "function_type_parameters" +) + +var AllInlayHints = map[string]*Hint{ + AssignVariableTypes: { + Name: AssignVariableTypes, + Doc: `Enable/disable inlay hints for variable types in assign statements: + + i/* int/*, j/* int/* := 0, len(r)-1`, + Run: assignVariableTypes, + }, + ParameterNames: { + Name: ParameterNames, + Doc: `Enable/disable inlay hints for parameter names: + + parseInt(/* str: */ "123", /* radix: */ 8)`, + Run: parameterNames, + }, + ConstantValues: { + Name: ConstantValues, + Doc: `Enable/disable inlay hints for constant values: + + const ( + KindNone Kind = iota/* = 0*/ + KindPrint/* = 1*/ + KindPrintf/* = 2*/ + KindErrorf/* = 3*/ + )`, + Run: constantValues, + }, + RangeVariableTypes: { + Name: RangeVariableTypes, + Doc: `Enable/disable inlay hints for variable types in range statements: + + for k/* int*/, v/* string/* := range []string{} { + fmt.Println(k, v) + }`, + Run: rangeVariableTypes, + }, + CompositeLiteralTypes: { + Name: CompositeLiteralTypes, + Doc: `Enable/disable inlay hints for composite literal types: + + for _, c := range []struct { + in, want string + }{ + /*struct{ in string; want string }*/{"Hello, world", "dlrow ,olleH"}, + }`, + Run: compositeLiteralTypes, + }, + CompositeLiteralFieldNames: { + Name: CompositeLiteralFieldNames, + Doc: `Enable/disable inlay hints for composite literal field names: + + {in: "Hello, world", want: "dlrow ,olleH"}`, + Run: compositeLiteralFields, + }, + FunctionTypeParameters: { + Name: FunctionTypeParameters, + Doc: `Enable/disable inlay hints for implicit type parameters on generic functions: + + myFoo/*[int, string]*/(1, "hello")`, + Run: funcTypeParams, + }, +} + func InlayHint(ctx context.Context, snapshot Snapshot, fh FileHandle, _ protocol.Range) ([]protocol.InlayHint, error) { ctx, done := event.Start(ctx, "source.InlayHint") defer done() @@ -32,38 +113,47 @@ func InlayHint(ctx context.Context, snapshot Snapshot, fh FileHandle, _ protocol return nil, fmt.Errorf("getting file for InlayHint: %w", err) } + // Collect a list of the inlay hints that are enabled. 
+ inlayHintOptions := snapshot.View().Options().InlayHintOptions + var enabledHints []InlayHintFunc + for hint, enabled := range inlayHintOptions.Hints { + if !enabled { + continue + } + if h, ok := AllInlayHints[hint]; ok { + enabledHints = append(enabledHints, h.Run) + } + } + if len(enabledHints) == 0 { + return nil, nil + } + tmap := lsppos.NewTokenMapper(pgf.Src, pgf.Tok) info := pkg.GetTypesInfo() q := Qualifier(pgf.File, pkg.GetTypes(), info) var hints []protocol.InlayHint ast.Inspect(pgf.File, func(node ast.Node) bool { - switch n := node.(type) { - case *ast.CallExpr: - hints = append(hints, parameterNames(n, tmap, info)...) - hints = append(hints, funcTypeParams(n, tmap, info)...) - case *ast.AssignStmt: - hints = append(hints, assignVariableTypes(n, tmap, info, &q)...) - case *ast.RangeStmt: - hints = append(hints, rangeVariableTypes(n, tmap, info, &q)...) - case *ast.GenDecl: - hints = append(hints, constantValues(n, tmap, info)...) - case *ast.CompositeLit: - hints = append(hints, compositeLiterals(n, tmap, info, &q)...) + for _, fn := range enabledHints { + hints = append(hints, fn(node, tmap, info, &q)...) } return true }) return hints, nil } -func parameterNames(node *ast.CallExpr, tmap *lsppos.TokenMapper, info *types.Info) []protocol.InlayHint { - signature, ok := info.TypeOf(node.Fun).(*types.Signature) +func parameterNames(node ast.Node, tmap *lsppos.TokenMapper, info *types.Info, _ *types.Qualifier) []protocol.InlayHint { + callExpr, ok := node.(*ast.CallExpr) + if !ok { + return nil + } + signature, ok := info.TypeOf(callExpr.Fun).(*types.Signature) if !ok { return nil } var hints []protocol.InlayHint - for i, v := range node.Args { + for i, v := range callExpr.Args { start, ok := tmap.Position(v.Pos()) if !ok { continue @@ -92,8 +182,12 @@ func parameterNames(node *ast.CallExpr, tmap *lsppos.TokenMapper, info *types.In return hints } -func funcTypeParams(node *ast.CallExpr, tmap *lsppos.TokenMapper, info *types.Info) []protocol.InlayHint { - id, ok := node.Fun.(*ast.Ident) +func funcTypeParams(node ast.Node, tmap *lsppos.TokenMapper, info *types.Info, _ *types.Qualifier) []protocol.InlayHint { + ce, ok := node.(*ast.CallExpr) + if !ok { + return nil + } + id, ok := ce.Fun.(*ast.Ident) if !ok { return nil } @@ -119,12 +213,14 @@ func funcTypeParams(node *ast.CallExpr, tmap *lsppos.TokenMapper, info *types.In }} } -func assignVariableTypes(node *ast.AssignStmt, tmap *lsppos.TokenMapper, info *types.Info, q *types.Qualifier) []protocol.InlayHint { - if node.Tok != token.DEFINE { +func assignVariableTypes(node ast.Node, tmap *lsppos.TokenMapper, info *types.Info, q *types.Qualifier) []protocol.InlayHint { + stmt, ok := node.(*ast.AssignStmt) + if !ok || stmt.Tok != token.DEFINE { return nil } + var hints []protocol.InlayHint - for _, v := range node.Lhs { + for _, v := range stmt.Lhs { if h := variableType(v, tmap, info, q); h != nil { hints = append(hints, *h) } @@ -132,12 +228,16 @@ func assignVariableTypes(node *ast.AssignStmt, tmap *lsppos.TokenMapper, info *t return hints } -func rangeVariableTypes(node *ast.RangeStmt, tmap *lsppos.TokenMapper, info *types.Info, q *types.Qualifier) []protocol.InlayHint { +func rangeVariableTypes(node ast.Node, tmap *lsppos.TokenMapper, info *types.Info, q *types.Qualifier) []protocol.InlayHint { + rStmt, ok := node.(*ast.RangeStmt) + if !ok { + return nil + } var hints []protocol.InlayHint - if h := variableType(node.Key, tmap, info, q); h != nil { + if h := variableType(rStmt.Key, tmap, info, q); h != nil { hints = append(hints, 
*h) } - if h := variableType(node.Value, tmap, info, q); h != nil { + if h := variableType(rStmt.Value, tmap, info, q); h != nil { hints = append(hints, *h) } return hints @@ -160,13 +260,14 @@ func variableType(e ast.Expr, tmap *lsppos.TokenMapper, info *types.Info, q *typ } } -func constantValues(node *ast.GenDecl, tmap *lsppos.TokenMapper, info *types.Info) []protocol.InlayHint { - if node.Tok != token.CONST { +func constantValues(node ast.Node, tmap *lsppos.TokenMapper, info *types.Info, _ *types.Qualifier) []protocol.InlayHint { + genDecl, ok := node.(*ast.GenDecl) + if !ok || genDecl.Tok != token.CONST { return nil } var hints []protocol.InlayHint - for _, v := range node.Specs { + for _, v := range genDecl.Specs { spec, ok := v.(*ast.ValueSpec) if !ok { continue @@ -210,36 +311,26 @@ func constantValues(node *ast.GenDecl, tmap *lsppos.TokenMapper, info *types.Inf return hints } -func compositeLiterals(node *ast.CompositeLit, tmap *lsppos.TokenMapper, info *types.Info, q *types.Qualifier) []protocol.InlayHint { - typ := info.TypeOf(node) +func compositeLiteralFields(node ast.Node, tmap *lsppos.TokenMapper, info *types.Info, q *types.Qualifier) []protocol.InlayHint { + compLit, ok := node.(*ast.CompositeLit) + if !ok { + return nil + } + typ := info.TypeOf(compLit) if typ == nil { return nil } - - prefix := "" if t, ok := typ.(*types.Pointer); ok { typ = t.Elem() - prefix = "&" } - strct, ok := typ.Underlying().(*types.Struct) if !ok { return nil } var hints []protocol.InlayHint - if node.Type == nil { - // The type for this struct is implicit, add an inlay hint. - if start, ok := tmap.Position(node.Lbrace); ok { - hints = append(hints, protocol.InlayHint{ - Position: &start, - Label: buildLabel(fmt.Sprintf("%s%s", prefix, types.TypeString(typ, *q))), - Kind: protocol.Type, - }) - } - } - for i, v := range node.Elts { + for i, v := range compLit.Elts { if _, ok := v.(*ast.KeyValueExpr); !ok { start, ok := tmap.Position(v.Pos()) if !ok { @@ -259,6 +350,35 @@ func compositeLiterals(node *ast.CompositeLit, tmap *lsppos.TokenMapper, info *t return hints } +func compositeLiteralTypes(node ast.Node, tmap *lsppos.TokenMapper, info *types.Info, q *types.Qualifier) []protocol.InlayHint { + compLit, ok := node.(*ast.CompositeLit) + if !ok { + return nil + } + typ := info.TypeOf(compLit) + if typ == nil { + return nil + } + if compLit.Type != nil { + return nil + } + prefix := "" + if t, ok := typ.(*types.Pointer); ok { + typ = t.Elem() + prefix = "&" + } + // The type for this composite literal is implicit, add an inlay hint. + start, ok := tmap.Position(compLit.Lbrace) + if !ok { + return nil + } + return []protocol.InlayHint{{ + Position: &start, + Label: buildLabel(fmt.Sprintf("%s%s", prefix, types.TypeString(typ, *q))), + Kind: protocol.Type, + }} +} + func buildLabel(s string) []protocol.InlayHintLabelPart { label := protocol.InlayHintLabelPart{ Value: s, diff --git a/internal/lsp/source/options.go b/internal/lsp/source/options.go index d1d34efe787..5da14ebfe92 100644 --- a/internal/lsp/source/options.go +++ b/internal/lsp/source/options.go @@ -130,6 +130,7 @@ func DefaultOptions() *Options { Nil: true, }, }, + InlayHintOptions: InlayHintOptions{}, DocumentationOptions: DocumentationOptions{ HoverKind: FullDocumentation, LinkTarget: "pkg.go.dev", @@ -289,6 +290,7 @@ type UIOptions struct { CompletionOptions NavigationOptions DiagnosticOptions + InlayHintOptions // Codelenses overrides the enabled/disabled state of code lenses. 
See the // "Code Lenses" section of the @@ -407,6 +409,13 @@ type DiagnosticOptions struct { ExperimentalWatchedFileDelay time.Duration `status:"experimental"` } +type InlayHintOptions struct { + // Hints specify inlay hints that users want to see. + // A full list of hints that gopls uses can be found + // [here](https://github.com/golang/tools/blob/master/gopls/doc/inlayHints.md). + Hints map[string]bool `status:"experimental"` +} + type NavigationOptions struct { // ImportShortcut specifies whether import statements should link to // documentation or go to definitions. @@ -915,6 +924,9 @@ func (o *Options) set(name string, value interface{}, seen map[string]struct{}) case "analyses": result.setBoolMap(&o.Analyses) + case "hints": + result.setBoolMap(&o.Hints) + case "annotations": result.setAnnotationMap(&o.Annotations) @@ -1351,6 +1363,7 @@ type APIJSON struct { Commands []*CommandJSON Lenses []*LensJSON Analyzers []*AnalyzerJSON + Hints []*HintJSON } type OptionJSON struct { @@ -1416,12 +1429,8 @@ func collectEnums(opt *OptionJSON) string { } func shouldShowEnumKeysInSettings(name string) bool { - // Both of these fields have too many possible options to print. - return !hardcodedEnumKeys(name) -} - -func hardcodedEnumKeys(name string) bool { - return name == "analyses" || name == "codelenses" + // These fields have too many possible options to print. + return !(name == "analyses" || name == "codelenses" || name == "hints") } type EnumKeys struct { @@ -1489,3 +1498,17 @@ func (a *AnalyzerJSON) String() string { func (a *AnalyzerJSON) Write(w io.Writer) { fmt.Fprintf(w, "%s (%s): %v", a.Name, a.Doc, a.Default) } + +type HintJSON struct { + Name string + Doc string + Default bool +} + +func (h *HintJSON) String() string { + return h.Name +} + +func (h *HintJSON) Write(w io.Writer) { + fmt.Fprintf(w, "%s (%s): %v", h.Name, h.Doc, h.Default) +} diff --git a/internal/lsp/tests/util.go b/internal/lsp/tests/util.go index 11dda1f8edd..98562d63657 100644 --- a/internal/lsp/tests/util.go +++ b/internal/lsp/tests/util.go @@ -512,6 +512,15 @@ func EnableAllAnalyzers(view source.View, opts *source.Options) { } } +func EnableAllInlayHints(view source.View, opts *source.Options) { + if opts.Hints == nil { + opts.Hints = make(map[string]bool) + } + for name := range source.AllInlayHints { + opts.Hints[name] = true + } +} + func WorkspaceSymbolsString(ctx context.Context, data *Data, queryURI span.URI, symbols []protocol.SymbolInformation) (string, error) { queryDir := filepath.Dir(queryURI.Filename()) var filtered []string From f60e9bc48f8c1667440e2c0176c5e8931e775886 Mon Sep 17 00:00:00 2001 From: Ruslan Nigmatullin Date: Wed, 22 Jun 2022 00:22:43 +0000 Subject: [PATCH 041/136] internal/lsp/cache: use persistent map for storing gofiles in the snapshot Use treap (https://en.wikipedia.org/wiki/Treap) as a persistent map to avoid copying s.goFiles across generations. Maintain an additional s.parseKeysByURIMap to avoid scanning s.goFiles on individual file's content invalidation. This on average reduces didChange latency on internal codebase from 160ms to 150ms. In a followup the same approach can be used to avoid copying s.files, s.packages, and s.knownSubdirs. 
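For readers unfamiliar with the persistent map this change introduces (internal/persistent/map.go, added later in this patch), the following is a minimal usage sketch, not part of the patch itself. It assumes the snippet is built inside x/tools, since persistent is an internal package, and it uses placeholder URIs and string values rather than real parse handles.

    package main

    import (
        "fmt"

        "golang.org/x/tools/internal/persistent"
        "golang.org/x/tools/internal/span"
    )

    func main() {
        // Keys are ordered by a caller-supplied less function, as in maps.go.
        m := persistent.NewMap(func(a, b interface{}) bool {
            return a.(span.URI) < b.(span.URI)
        })
        defer m.Destroy()

        m.Store(span.URI("file:///a.go"), "handle A", nil) // nil: no release callback

        clone := m.Clone() // constant-time copy; the treap nodes are shared
        defer clone.Destroy()

        m.Store(span.URI("file:///b.go"), "handle B", nil)

        // Stores made after Clone are visible only in the original map.
        if _, ok := clone.Load(span.URI("file:///b.go")); !ok {
            fmt.Println("clone unchanged by later Store")
        }
    }

The constant-time Clone is what allows snapshot.clone to copy s.goFiles without iterating over all of its entries.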
Updates golang/go#45686 Change-Id: Ic4a9b3c8fb2b66256f224adf9896ddcaaa6865b1 GitHub-Last-Rev: 0abd2570ae9b20ea7126ff31bee69aa0dc3f40aa GitHub-Pull-Request: golang/tools#382 Reviewed-on: https://go-review.googlesource.com/c/tools/+/411554 Reviewed-by: Robert Findley gopls-CI: kokoro Reviewed-by: Alan Donovan Run-TryBot: Robert Findley TryBot-Result: Gopher Robot --- internal/lsp/cache/maps.go | 112 +++++++++++ internal/lsp/cache/parse.go | 4 +- internal/lsp/cache/session.go | 3 +- internal/lsp/cache/snapshot.go | 150 ++++----------- internal/lsp/cache/view.go | 4 +- internal/lsp/source/view.go | 5 + internal/memoize/memoize.go | 104 ++++++++-- internal/memoize/memoize_test.go | 55 ++++++ internal/persistent/map.go | 268 ++++++++++++++++++++++++++ internal/persistent/map_test.go | 316 +++++++++++++++++++++++++++++++ 10 files changed, 879 insertions(+), 142 deletions(-) create mode 100644 internal/lsp/cache/maps.go create mode 100644 internal/persistent/map.go create mode 100644 internal/persistent/map_test.go diff --git a/internal/lsp/cache/maps.go b/internal/lsp/cache/maps.go new file mode 100644 index 00000000000..70f8039bdac --- /dev/null +++ b/internal/lsp/cache/maps.go @@ -0,0 +1,112 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cache + +import ( + "golang.org/x/tools/internal/persistent" + "golang.org/x/tools/internal/span" +) + +// TODO(euroelessar): Use generics once support for go1.17 is dropped. + +type goFilesMap struct { + impl *persistent.Map +} + +func newGoFilesMap() *goFilesMap { + return &goFilesMap{ + impl: persistent.NewMap(func(a, b interface{}) bool { + return parseKeyLess(a.(parseKey), b.(parseKey)) + }), + } +} + +func parseKeyLess(a, b parseKey) bool { + if a.mode != b.mode { + return a.mode < b.mode + } + if a.file.Hash != b.file.Hash { + return a.file.Hash.Less(b.file.Hash) + } + return a.file.URI < b.file.URI +} + +func (m *goFilesMap) Clone() *goFilesMap { + return &goFilesMap{ + impl: m.impl.Clone(), + } +} + +func (m *goFilesMap) Destroy() { + m.impl.Destroy() +} + +func (m *goFilesMap) Load(key parseKey) (*parseGoHandle, bool) { + value, ok := m.impl.Load(key) + if !ok { + return nil, false + } + return value.(*parseGoHandle), true +} + +func (m *goFilesMap) Range(do func(key parseKey, value *parseGoHandle)) { + m.impl.Range(func(key, value interface{}) { + do(key.(parseKey), value.(*parseGoHandle)) + }) +} + +func (m *goFilesMap) Store(key parseKey, value *parseGoHandle, release func()) { + m.impl.Store(key, value, func(key, value interface{}) { + release() + }) +} + +func (m *goFilesMap) Delete(key parseKey) { + m.impl.Delete(key) +} + +type parseKeysByURIMap struct { + impl *persistent.Map +} + +func newParseKeysByURIMap() *parseKeysByURIMap { + return &parseKeysByURIMap{ + impl: persistent.NewMap(func(a, b interface{}) bool { + return a.(span.URI) < b.(span.URI) + }), + } +} + +func (m *parseKeysByURIMap) Clone() *parseKeysByURIMap { + return &parseKeysByURIMap{ + impl: m.impl.Clone(), + } +} + +func (m *parseKeysByURIMap) Destroy() { + m.impl.Destroy() +} + +func (m *parseKeysByURIMap) Load(key span.URI) ([]parseKey, bool) { + value, ok := m.impl.Load(key) + if !ok { + return nil, false + } + return value.([]parseKey), true +} + +func (m *parseKeysByURIMap) Range(do func(key span.URI, value []parseKey)) { + m.impl.Range(func(key, value interface{}) { + do(key.(span.URI), value.([]parseKey)) + }) +} + +func (m *parseKeysByURIMap) 
Store(key span.URI, value []parseKey) { + m.impl.Store(key, value, nil) +} + +func (m *parseKeysByURIMap) Delete(key span.URI) { + m.impl.Delete(key) +} diff --git a/internal/lsp/cache/parse.go b/internal/lsp/cache/parse.go index ab55743ccf0..376524bd324 100644 --- a/internal/lsp/cache/parse.go +++ b/internal/lsp/cache/parse.go @@ -58,7 +58,7 @@ func (s *snapshot) parseGoHandle(ctx context.Context, fh source.FileHandle, mode if pgh := s.getGoFile(key); pgh != nil { return pgh } - parseHandle := s.generation.Bind(key, func(ctx context.Context, arg memoize.Arg) interface{} { + parseHandle, release := s.generation.GetHandle(key, func(ctx context.Context, arg memoize.Arg) interface{} { snapshot := arg.(*snapshot) return parseGo(ctx, snapshot.FileSet(), fh, mode) }, nil) @@ -68,7 +68,7 @@ func (s *snapshot) parseGoHandle(ctx context.Context, fh source.FileHandle, mode file: fh, mode: mode, } - return s.addGoFile(key, pgh) + return s.addGoFile(key, pgh, release) } func (pgh *parseGoHandle) String() string { diff --git a/internal/lsp/cache/session.go b/internal/lsp/cache/session.go index 0d3e944b980..7dbccf7f6ed 100644 --- a/internal/lsp/cache/session.go +++ b/internal/lsp/cache/session.go @@ -234,7 +234,8 @@ func (s *Session) createView(ctx context.Context, name string, folder span.URI, packages: make(map[packageKey]*packageHandle), meta: &metadataGraph{}, files: make(map[span.URI]source.VersionedFileHandle), - goFiles: newGoFileMap(), + goFiles: newGoFilesMap(), + parseKeysByURI: newParseKeysByURIMap(), symbols: make(map[span.URI]*symbolHandle), actions: make(map[actionKey]*actionHandle), workspacePackages: make(map[PackageID]PackagePath), diff --git a/internal/lsp/cache/snapshot.go b/internal/lsp/cache/snapshot.go index 3c46648bff9..b2ac78208d7 100644 --- a/internal/lsp/cache/snapshot.go +++ b/internal/lsp/cache/snapshot.go @@ -77,7 +77,8 @@ type snapshot struct { files map[span.URI]source.VersionedFileHandle // goFiles maps a parseKey to its parseGoHandle. - goFiles *goFileMap + goFiles *goFilesMap + parseKeysByURI *parseKeysByURIMap // TODO(rfindley): consider merging this with files to reduce burden on clone. symbols map[span.URI]*symbolHandle @@ -133,6 +134,12 @@ type actionKey struct { analyzer *analysis.Analyzer } +func (s *snapshot) Destroy(destroyedBy string) { + s.generation.Destroy(destroyedBy) + s.goFiles.Destroy() + s.parseKeysByURI.Destroy() +} + func (s *snapshot) ID() uint64 { return s.id } @@ -665,17 +672,23 @@ func (s *snapshot) transitiveReverseDependencies(id PackageID, ids map[PackageID func (s *snapshot) getGoFile(key parseKey) *parseGoHandle { s.mu.Lock() defer s.mu.Unlock() - return s.goFiles.get(key) + if result, ok := s.goFiles.Load(key); ok { + return result + } + return nil } -func (s *snapshot) addGoFile(key parseKey, pgh *parseGoHandle) *parseGoHandle { +func (s *snapshot) addGoFile(key parseKey, pgh *parseGoHandle, release func()) *parseGoHandle { s.mu.Lock() defer s.mu.Unlock() - - if prev := s.goFiles.get(key); prev != nil { - return prev - } - s.goFiles.set(key, pgh) + if result, ok := s.goFiles.Load(key); ok { + release() + return result + } + s.goFiles.Store(key, pgh, release) + keys, _ := s.parseKeysByURI.Load(key.file.URI) + keys = append([]parseKey{key}, keys...) 
+ s.parseKeysByURI.Store(key.file.URI, keys) return pgh } @@ -1663,6 +1676,9 @@ func (ac *unappliedChanges) GetFile(ctx context.Context, uri span.URI) (source.F } func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileChange, forceReloadMetadata bool) *snapshot { + ctx, done := event.Start(ctx, "snapshot.clone") + defer done() + var vendorChanged bool newWorkspace, workspaceChanged, workspaceReload := s.workspace.invalidate(ctx, changes, &unappliedChanges{ originalSnapshot: s, @@ -1686,7 +1702,8 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC packages: make(map[packageKey]*packageHandle, len(s.packages)), actions: make(map[actionKey]*actionHandle, len(s.actions)), files: make(map[span.URI]source.VersionedFileHandle, len(s.files)), - goFiles: s.goFiles.clone(), + goFiles: s.goFiles.Clone(), + parseKeysByURI: s.parseKeysByURI.Clone(), symbols: make(map[span.URI]*symbolHandle, len(s.symbols)), workspacePackages: make(map[PackageID]PackagePath, len(s.workspacePackages)), unloadableFiles: make(map[span.URI]struct{}, len(s.unloadableFiles)), @@ -1731,27 +1748,14 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC result.parseWorkHandles[k] = v } - // Copy the handles of all Go source files. - // There may be tens of thousands of files, - // but changes are typically few, so we - // use a striped map optimized for this case - // and visit its stripes in parallel. - var ( - toDeleteMu sync.Mutex - toDelete []parseKey - ) - s.goFiles.forEachConcurrent(func(k parseKey, v *parseGoHandle) { - if changes[k.file.URI] == nil { - // no change (common case) - newGen.Inherit(v.handle) - } else { - toDeleteMu.Lock() - toDelete = append(toDelete, k) - toDeleteMu.Unlock() + for uri := range changes { + keys, ok := result.parseKeysByURI.Load(uri) + if ok { + for _, key := range keys { + result.goFiles.Delete(key) + } + result.parseKeysByURI.Delete(uri) } - }) - for _, k := range toDelete { - result.goFiles.delete(k) } // Copy all of the go.mod-related handles. They may be invalidated later, @@ -2194,7 +2198,7 @@ func metadataChanges(ctx context.Context, lockedSnapshot *snapshot, oldFH, newFH // lockedSnapshot must be locked. func peekOrParse(ctx context.Context, lockedSnapshot *snapshot, fh source.FileHandle, mode source.ParseMode) (*source.ParsedGoFile, error) { key := parseKey{file: fh.FileIdentity(), mode: mode} - if pgh := lockedSnapshot.goFiles.get(key); pgh != nil { + if pgh, ok := lockedSnapshot.goFiles.Load(key); ok { cached := pgh.handle.Cached(lockedSnapshot.generation) if cached != nil { cached := cached.(*parseGoData) @@ -2482,89 +2486,3 @@ func readGoSum(dst map[module.Version][]string, file string, data []byte) error } return nil } - -// -- goFileMap -- - -// A goFileMap is conceptually a map[parseKey]*parseGoHandle, -// optimized for cloning all or nearly all entries. -type goFileMap struct { - // The map is represented as a map of 256 stripes, one per - // distinct value of the top 8 bits of key.file.Hash. - // Each stripe has an associated boolean indicating whether it - // is shared, and thus immutable, and thus must be copied before any update. - // (The bits could be packed but it hasn't been worth it yet.) - stripes [256]map[parseKey]*parseGoHandle - exclusive [256]bool // exclusive[i] means stripe[i] is not shared and may be safely mutated -} - -// newGoFileMap returns a new empty goFileMap. 
-func newGoFileMap() *goFileMap { - return new(goFileMap) // all stripes are shared (non-exclusive) nil maps -} - -// clone returns a copy of m. -// For concurrency, it counts as an update to m. -func (m *goFileMap) clone() *goFileMap { - m.exclusive = [256]bool{} // original and copy are now nonexclusive - copy := *m - return © -} - -// get returns the value for key k. -func (m *goFileMap) get(k parseKey) *parseGoHandle { - return m.stripes[m.hash(k)][k] -} - -// set updates the value for key k to v. -func (m *goFileMap) set(k parseKey, v *parseGoHandle) { - m.unshare(k)[k] = v -} - -// delete deletes the value for key k, if any. -func (m *goFileMap) delete(k parseKey) { - // TODO(adonovan): opt?: skip unshare if k isn't present. - delete(m.unshare(k), k) -} - -// forEachConcurrent calls f for each entry in the map. -// Calls may be concurrent. -// f must not modify m. -func (m *goFileMap) forEachConcurrent(f func(parseKey, *parseGoHandle)) { - // Visit stripes in parallel chunks. - const p = 16 // concurrency level - var wg sync.WaitGroup - wg.Add(p) - for i := 0; i < p; i++ { - chunk := m.stripes[i*p : (i+1)*p] - go func() { - for _, stripe := range chunk { - for k, v := range stripe { - f(k, v) - } - } - wg.Done() - }() - } - wg.Wait() -} - -// -- internal-- - -// hash returns 8 bits from the key's file digest. -func (*goFileMap) hash(k parseKey) byte { return k.file.Hash[0] } - -// unshare makes k's stripe exclusive, allocating a copy if needed, and returns it. -func (m *goFileMap) unshare(k parseKey) map[parseKey]*parseGoHandle { - i := m.hash(k) - if !m.exclusive[i] { - m.exclusive[i] = true - - // Copy the map. - copy := make(map[parseKey]*parseGoHandle, len(m.stripes[i])) - for k, v := range m.stripes[i] { - copy[k] = v - } - m.stripes[i] = copy - } - return m.stripes[i] -} diff --git a/internal/lsp/cache/view.go b/internal/lsp/cache/view.go index 620efd8b965..1810f6e641d 100644 --- a/internal/lsp/cache/view.go +++ b/internal/lsp/cache/view.go @@ -535,7 +535,7 @@ func (v *View) shutdown(ctx context.Context) { v.mu.Unlock() v.snapshotMu.Lock() if v.snapshot != nil { - go v.snapshot.generation.Destroy("View.shutdown") + go v.snapshot.Destroy("View.shutdown") v.snapshot = nil } v.snapshotMu.Unlock() @@ -732,7 +732,7 @@ func (v *View) invalidateContent(ctx context.Context, changes map[span.URI]*file oldSnapshot := v.snapshot v.snapshot = oldSnapshot.clone(ctx, v.baseCtx, changes, forceReloadMetadata) - go oldSnapshot.generation.Destroy("View.invalidateContent") + go oldSnapshot.Destroy("View.invalidateContent") return v.snapshot, v.snapshot.generation.Acquire() } diff --git a/internal/lsp/source/view.go b/internal/lsp/source/view.go index 0d8d661d60c..73e1b7f89ed 100644 --- a/internal/lsp/source/view.go +++ b/internal/lsp/source/view.go @@ -551,6 +551,11 @@ func (h Hash) String() string { return fmt.Sprintf("%64x", [sha256.Size]byte(h)) } +// Less returns true if the given hash is less than the other. +func (h Hash) Less(other Hash) bool { + return bytes.Compare(h[:], other[:]) < 0 +} + // FileIdentity uniquely identifies a file at a version from a FileSystem. 
type FileIdentity struct { URI span.URI diff --git a/internal/memoize/memoize.go b/internal/memoize/memoize.go index 480b87f5ce9..48a642c990e 100644 --- a/internal/memoize/memoize.go +++ b/internal/memoize/memoize.go @@ -83,16 +83,15 @@ func (g *Generation) Destroy(destroyedBy string) { g.store.mu.Lock() defer g.store.mu.Unlock() - for k, e := range g.store.handles { + for _, e := range g.store.handles { + if !e.trackGenerations { + continue + } e.mu.Lock() if _, ok := e.generations[g]; ok { delete(e.generations, g) // delete even if it's dead, in case of dangling references to the entry. if len(e.generations) == 0 { - delete(g.store.handles, k) - e.state = stateDestroyed - if e.cleanup != nil && e.value != nil { - e.cleanup(e.value) - } + e.destroy(g.store) } } e.mu.Unlock() @@ -161,6 +160,12 @@ type Handle struct { // cleanup, if non-nil, is used to perform any necessary clean-up on values // produced by function. cleanup func(interface{}) + + // If trackGenerations is set, this handle tracks generations in which it + // is valid, via the generations field. Otherwise, it is explicitly reference + // counted via the refCounter field. + trackGenerations bool + refCounter int32 } // Bind returns a handle for the given key and function. @@ -173,7 +178,34 @@ type Handle struct { // // If cleanup is non-nil, it will be called on any non-nil values produced by // function when they are no longer referenced. +// +// It is responsibility of the caller to call Inherit on the handler whenever +// it should still be accessible by a next generation. func (g *Generation) Bind(key interface{}, function Function, cleanup func(interface{})) *Handle { + return g.getHandle(key, function, cleanup, true) +} + +// GetHandle returns a handle for the given key and function with similar +// properties and behavior as Bind. +// +// As in opposite to Bind it returns a release callback which has to be called +// once this reference to handle is not needed anymore. 
+func (g *Generation) GetHandle(key interface{}, function Function, cleanup func(interface{})) (*Handle, func()) { + handle := g.getHandle(key, function, cleanup, false) + store := g.store + release := func() { + store.mu.Lock() + defer store.mu.Unlock() + + handle.refCounter-- + if handle.refCounter == 0 { + handle.destroy(store) + } + } + return handle, release +} + +func (g *Generation) getHandle(key interface{}, function Function, cleanup func(interface{}), trackGenerations bool) *Handle { // panic early if the function is nil // it would panic later anyway, but in a way that was much harder to debug if function == nil { @@ -186,20 +218,19 @@ func (g *Generation) Bind(key interface{}, function Function, cleanup func(inter defer g.store.mu.Unlock() h, ok := g.store.handles[key] if !ok { - h := &Handle{ - key: key, - function: function, - generations: map[*Generation]struct{}{g: {}}, - cleanup: cleanup, + h = &Handle{ + key: key, + function: function, + cleanup: cleanup, + trackGenerations: trackGenerations, + } + if trackGenerations { + h.generations = make(map[*Generation]struct{}, 1) } g.store.handles[key] = h - return h - } - h.mu.Lock() - defer h.mu.Unlock() - if _, ok := h.generations[g]; !ok { - h.generations[g] = struct{}{} } + + h.incrementRef(g) return h } @@ -240,13 +271,44 @@ func (g *Generation) Inherit(h *Handle) { if atomic.LoadUint32(&g.destroyed) != 0 { panic("inherit on generation " + g.name + " destroyed by " + g.destroyedBy) } + if !h.trackGenerations { + panic("called Inherit on handle not created by Generation.Bind") + } + h.incrementRef(g) +} + +func (h *Handle) destroy(store *Store) { + h.state = stateDestroyed + if h.cleanup != nil && h.value != nil { + h.cleanup(h.value) + } + delete(store.handles, h.key) +} + +func (h *Handle) incrementRef(g *Generation) { h.mu.Lock() + defer h.mu.Unlock() + if h.state == stateDestroyed { panic(fmt.Sprintf("inheriting destroyed handle %#v (type %T) into generation %v", h.key, h.key, g.name)) } - h.generations[g] = struct{}{} - h.mu.Unlock() + + if h.trackGenerations { + h.generations[g] = struct{}{} + } else { + h.refCounter++ + } +} + +// hasRefLocked reports whether h is valid in generation g. h.mu must be held. +func (h *Handle) hasRefLocked(g *Generation) bool { + if !h.trackGenerations { + return true + } + + _, ok := h.generations[g] + return ok } // Cached returns the value associated with a handle. 
@@ -256,7 +318,7 @@ func (g *Generation) Inherit(h *Handle) { func (h *Handle) Cached(g *Generation) interface{} { h.mu.Lock() defer h.mu.Unlock() - if _, ok := h.generations[g]; !ok { + if !h.hasRefLocked(g) { return nil } if h.state == stateCompleted { @@ -277,7 +339,7 @@ func (h *Handle) Get(ctx context.Context, g *Generation, arg Arg) (interface{}, return nil, ctx.Err() } h.mu.Lock() - if _, ok := h.generations[g]; !ok { + if !h.hasRefLocked(g) { h.mu.Unlock() err := fmt.Errorf("reading key %#v: generation %v is not known", h.key, g.name) diff --git a/internal/memoize/memoize_test.go b/internal/memoize/memoize_test.go index ee0fd23ea1d..bffbfc2f6b3 100644 --- a/internal/memoize/memoize_test.go +++ b/internal/memoize/memoize_test.go @@ -106,3 +106,58 @@ func TestCleanup(t *testing.T) { t.Error("after destroying g2, v2 is not cleaned up") } } + +func TestHandleRefCounting(t *testing.T) { + s := &memoize.Store{} + g1 := s.Generation("g1") + v1 := false + v2 := false + cleanup := func(v interface{}) { + *(v.(*bool)) = true + } + h1, release1 := g1.GetHandle("key1", func(context.Context, memoize.Arg) interface{} { + return &v1 + }, nil) + h2, release2 := g1.GetHandle("key2", func(context.Context, memoize.Arg) interface{} { + return &v2 + }, cleanup) + expectGet(t, h1, g1, &v1) + expectGet(t, h2, g1, &v2) + + g2 := s.Generation("g2") + expectGet(t, h1, g2, &v1) + g1.Destroy("by test") + expectGet(t, h2, g2, &v2) + + h2Copy, release2Copy := g2.GetHandle("key2", func(context.Context, memoize.Arg) interface{} { + return &v1 + }, nil) + if h2 != h2Copy { + t.Error("NewHandle returned a new value while old is not destroyed yet") + } + expectGet(t, h2Copy, g2, &v2) + g2.Destroy("by test") + + release2() + if got, want := v2, false; got != want { + t.Error("after destroying first v2 ref, v2 is cleaned up") + } + release2Copy() + if got, want := v2, true; got != want { + t.Error("after destroying second v2 ref, v2 is not cleaned up") + } + if got, want := v1, false; got != want { + t.Error("after destroying v2, v1 is cleaned up") + } + release1() + + g3 := s.Generation("g3") + h2Copy, release2Copy = g3.GetHandle("key2", func(context.Context, memoize.Arg) interface{} { + return &v2 + }, cleanup) + if h2 == h2Copy { + t.Error("NewHandle returned previously destroyed value") + } + release2Copy() + g3.Destroy("by test") +} diff --git a/internal/persistent/map.go b/internal/persistent/map.go new file mode 100644 index 00000000000..bbcb72b6ee9 --- /dev/null +++ b/internal/persistent/map.go @@ -0,0 +1,268 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The persistent package defines various persistent data structures; +// that is, data structures that can be efficiently copied and modified +// in sublinear time. +package persistent + +import ( + "math/rand" + "sync/atomic" +) + +// Implementation details: +// * Each value is reference counted by nodes which hold it. +// * Each node is reference counted by its parent nodes. +// * Each map is considered a top-level parent node from reference counting perspective. +// * Each change does always effectivelly produce a new top level node. +// +// Functions which operate directly with nodes do have a notation in form of +// `foo(arg1:+n1, arg2:+n2) (ret1:+n3)`. +// Each argument is followed by a delta change to its reference counter. +// In case if no change is expected, the delta will be `-0`. 
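To make the reference counting described above concrete, here is a short sketch, assumed rather than taken from the patch, showing when a client-supplied release callback runs. The key, value, and print statement are placeholders for illustration only.

    package main

    import (
        "fmt"

        "golang.org/x/tools/internal/persistent"
    )

    func main() {
        m := persistent.NewMap(func(a, b interface{}) bool { return a.(int) < b.(int) })
        m.Store(1, "one", func(key, value interface{}) {
            // Runs only once no map or clone still references the node.
            fmt.Println("released", key, value)
        })

        c := m.Clone() // the clone shares the node holding "one"
        m.Delete(1)    // not released yet: c still references the node
        c.Destroy()    // last reference dropped, the callback runs here
        m.Destroy()
    }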
+ +// Map is an associative mapping from keys to values, both represented as +// interface{}. Key comparison and iteration order is defined by a +// client-provided function that implements a strict weak order. +// +// Maps can be Cloned in constant time. +// +// Values are reference counted, and a client-supplied release function +// is called when a value is no longer referenced by a map or any clone. +// +// Internally the implementation is based on a randomized persistent treap: +// https://en.wikipedia.org/wiki/Treap. +type Map struct { + less func(a, b interface{}) bool + root *mapNode +} + +type mapNode struct { + key interface{} + value *refValue + weight uint64 + refCount int32 + left, right *mapNode +} + +type refValue struct { + refCount int32 + value interface{} + release func(key, value interface{}) +} + +func newNodeWithRef(key, value interface{}, release func(key, value interface{})) *mapNode { + return &mapNode{ + key: key, + value: &refValue{ + value: value, + release: release, + refCount: 1, + }, + refCount: 1, + weight: rand.Uint64(), + } +} + +func (node *mapNode) shallowCloneWithRef() *mapNode { + atomic.AddInt32(&node.value.refCount, 1) + return &mapNode{ + key: node.key, + value: node.value, + weight: node.weight, + refCount: 1, + } +} + +func (node *mapNode) incref() *mapNode { + if node != nil { + atomic.AddInt32(&node.refCount, 1) + } + return node +} + +func (node *mapNode) decref() { + if node == nil { + return + } + if atomic.AddInt32(&node.refCount, -1) == 0 { + if atomic.AddInt32(&node.value.refCount, -1) == 0 { + if node.value.release != nil { + node.value.release(node.key, node.value.value) + } + node.value.value = nil + node.value.release = nil + } + node.left.decref() + node.right.decref() + } +} + +// NewMap returns a new map whose keys are ordered by the given comparison +// function (a strict weak order). It is the responsibility of the caller to +// Destroy it at later time. +func NewMap(less func(a, b interface{}) bool) *Map { + return &Map{ + less: less, + } +} + +// Clone returns a copy of the given map. It is a responsibility of the caller +// to Destroy it at later time. +func (pm *Map) Clone() *Map { + return &Map{ + less: pm.less, + root: pm.root.incref(), + } +} + +// Destroy the persistent map. +// +// After Destroy, the Map should not be used again. +func (pm *Map) Destroy() { + pm.root.decref() + pm.root = nil +} + +// Range calls f sequentially in ascending key order for all entries in the map. +func (pm *Map) Range(f func(key, value interface{})) { + pm.root.forEach(f) +} + +func (node *mapNode) forEach(f func(key, value interface{})) { + if node == nil { + return + } + node.left.forEach(f) + f(node.key, node.value.value) + node.right.forEach(f) +} + +// Load returns the value stored in the map for a key, or nil if no entry is +// present. The ok result indicates whether an entry was found in the map. +func (pm *Map) Load(key interface{}) (interface{}, bool) { + node := pm.root + for node != nil { + if pm.less(key, node.key) { + node = node.left + } else if pm.less(node.key, key) { + node = node.right + } else { + return node.value.value, true + } + } + return nil, false +} + +// Store sets the value for a key. +// If release is non-nil, it will be called with entry's key and value once the +// key is no longer contained in the map or any clone. 
+func (pm *Map) Store(key, value interface{}, release func(key, value interface{})) { + first := pm.root + second := newNodeWithRef(key, value, release) + pm.root = union(first, second, pm.less, true) + first.decref() + second.decref() +} + +// union returns a new tree which is a union of first and second one. +// If overwrite is set to true, second one would override a value for any duplicate keys. +// +// union(first:-0, second:-0) (result:+1) +// Union borrows both subtrees without affecting their refcount and returns a +// new reference that the caller is expected to call decref. +func union(first, second *mapNode, less func(a, b interface{}) bool, overwrite bool) *mapNode { + if first == nil { + return second.incref() + } + if second == nil { + return first.incref() + } + + if first.weight < second.weight { + second, first, overwrite = first, second, !overwrite + } + + left, mid, right := split(second, first.key, less) + var result *mapNode + if overwrite && mid != nil { + result = mid.shallowCloneWithRef() + } else { + result = first.shallowCloneWithRef() + } + result.weight = first.weight + result.left = union(first.left, left, less, overwrite) + result.right = union(first.right, right, less, overwrite) + left.decref() + mid.decref() + right.decref() + return result +} + +// split the tree midway by the key into three different ones. +// Return three new trees: left with all nodes with smaller than key, mid with +// the node matching the key, right with all nodes larger than key. +// If there are no nodes in one of trees, return nil instead of it. +// +// split(n:-0) (left:+1, mid:+1, right:+1) +// Split borrows n without affecting its refcount, and returns three +// new references that that caller is expected to call decref. +func split(n *mapNode, key interface{}, less func(a, b interface{}) bool) (left, mid, right *mapNode) { + if n == nil { + return nil, nil, nil + } + + if less(n.key, key) { + left, mid, right := split(n.right, key, less) + newN := n.shallowCloneWithRef() + newN.left = n.left.incref() + newN.right = left + return newN, mid, right + } else if less(key, n.key) { + left, mid, right := split(n.left, key, less) + newN := n.shallowCloneWithRef() + newN.left = right + newN.right = n.right.incref() + return left, mid, newN + } + mid = n.shallowCloneWithRef() + return n.left.incref(), mid, n.right.incref() +} + +// Delete deletes the value for a key. +func (pm *Map) Delete(key interface{}) { + root := pm.root + left, mid, right := split(root, key, pm.less) + pm.root = merge(left, right) + left.decref() + mid.decref() + right.decref() + root.decref() +} + +// merge two trees while preserving the weight invariant. +// All nodes in left must have smaller keys than any node in right. +// +// merge(left:-0, right:-0) (result:+1) +// Merge borrows its arguments without affecting their refcount +// and returns a new reference that the caller is expected to call decref. 
+func merge(left, right *mapNode) *mapNode { + switch { + case left == nil: + return right.incref() + case right == nil: + return left.incref() + case left.weight > right.weight: + root := left.shallowCloneWithRef() + root.left = left.left.incref() + root.right = merge(left.right, right) + return root + default: + root := right.shallowCloneWithRef() + root.left = merge(left, right.left) + root.right = right.right.incref() + return root + } +} diff --git a/internal/persistent/map_test.go b/internal/persistent/map_test.go new file mode 100644 index 00000000000..9585956100b --- /dev/null +++ b/internal/persistent/map_test.go @@ -0,0 +1,316 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package persistent + +import ( + "fmt" + "math/rand" + "reflect" + "sync/atomic" + "testing" +) + +type mapEntry struct { + key int + value int +} + +type validatedMap struct { + impl *Map + expected map[int]int + deleted map[mapEntry]struct{} + seen map[mapEntry]struct{} +} + +func TestSimpleMap(t *testing.T) { + deletedEntries := make(map[mapEntry]struct{}) + seenEntries := make(map[mapEntry]struct{}) + + m1 := &validatedMap{ + impl: NewMap(func(a, b interface{}) bool { + return a.(int) < b.(int) + }), + expected: make(map[int]int), + deleted: deletedEntries, + seen: seenEntries, + } + + m3 := m1.clone() + validateRef(t, m1, m3) + m3.insert(t, 8, 8) + validateRef(t, m1, m3) + m3.destroy() + + assertSameMap(t, deletedEntries, map[mapEntry]struct{}{ + {key: 8, value: 8}: {}, + }) + + validateRef(t, m1) + m1.insert(t, 1, 1) + validateRef(t, m1) + m1.insert(t, 2, 2) + validateRef(t, m1) + m1.insert(t, 3, 3) + validateRef(t, m1) + m1.remove(t, 2) + validateRef(t, m1) + m1.insert(t, 6, 6) + validateRef(t, m1) + + assertSameMap(t, deletedEntries, map[mapEntry]struct{}{ + {key: 2, value: 2}: {}, + {key: 8, value: 8}: {}, + }) + + m2 := m1.clone() + validateRef(t, m1, m2) + m1.insert(t, 6, 60) + validateRef(t, m1, m2) + m1.remove(t, 1) + validateRef(t, m1, m2) + + for i := 10; i < 14; i++ { + m1.insert(t, i, i) + validateRef(t, m1, m2) + } + + m1.insert(t, 10, 100) + validateRef(t, m1, m2) + + m1.remove(t, 12) + validateRef(t, m1, m2) + + m2.insert(t, 4, 4) + validateRef(t, m1, m2) + m2.insert(t, 5, 5) + validateRef(t, m1, m2) + + m1.destroy() + + assertSameMap(t, deletedEntries, map[mapEntry]struct{}{ + {key: 2, value: 2}: {}, + {key: 6, value: 60}: {}, + {key: 8, value: 8}: {}, + {key: 10, value: 10}: {}, + {key: 10, value: 100}: {}, + {key: 11, value: 11}: {}, + {key: 12, value: 12}: {}, + {key: 13, value: 13}: {}, + }) + + m2.insert(t, 7, 7) + validateRef(t, m2) + + m2.destroy() + + assertSameMap(t, seenEntries, deletedEntries) +} + +func TestRandomMap(t *testing.T) { + deletedEntries := make(map[mapEntry]struct{}) + seenEntries := make(map[mapEntry]struct{}) + + m := &validatedMap{ + impl: NewMap(func(a, b interface{}) bool { + return a.(int) < b.(int) + }), + expected: make(map[int]int), + deleted: deletedEntries, + seen: seenEntries, + } + + keys := make([]int, 0, 1000) + for i := 0; i < 1000; i++ { + key := rand.Int() + m.insert(t, key, key) + keys = append(keys, key) + + if i%10 == 1 { + index := rand.Intn(len(keys)) + last := len(keys) - 1 + key = keys[index] + keys[index], keys[last] = keys[last], keys[index] + keys = keys[:last] + + m.remove(t, key) + } + } + + m.destroy() + assertSameMap(t, seenEntries, deletedEntries) +} + +func (vm *validatedMap) onDelete(t *testing.T, key, value int) 
{ + entry := mapEntry{key: key, value: value} + if _, ok := vm.deleted[entry]; ok { + t.Fatalf("tried to delete entry twice, key: %d, value: %d", key, value) + } + vm.deleted[entry] = struct{}{} +} + +func validateRef(t *testing.T, maps ...*validatedMap) { + t.Helper() + + actualCountByEntry := make(map[mapEntry]int32) + nodesByEntry := make(map[mapEntry]map[*mapNode]struct{}) + expectedCountByEntry := make(map[mapEntry]int32) + for i, m := range maps { + dfsRef(m.impl.root, actualCountByEntry, nodesByEntry) + dumpMap(t, fmt.Sprintf("%d:", i), m.impl.root) + } + for entry, nodes := range nodesByEntry { + expectedCountByEntry[entry] = int32(len(nodes)) + } + assertSameMap(t, expectedCountByEntry, actualCountByEntry) +} + +func dfsRef(node *mapNode, countByEntry map[mapEntry]int32, nodesByEntry map[mapEntry]map[*mapNode]struct{}) { + if node == nil { + return + } + + entry := mapEntry{key: node.key.(int), value: node.value.value.(int)} + countByEntry[entry] = atomic.LoadInt32(&node.value.refCount) + + nodes, ok := nodesByEntry[entry] + if !ok { + nodes = make(map[*mapNode]struct{}) + nodesByEntry[entry] = nodes + } + nodes[node] = struct{}{} + + dfsRef(node.left, countByEntry, nodesByEntry) + dfsRef(node.right, countByEntry, nodesByEntry) +} + +func dumpMap(t *testing.T, prefix string, n *mapNode) { + if n == nil { + t.Logf("%s nil", prefix) + return + } + t.Logf("%s {key: %v, value: %v (ref: %v), ref: %v, weight: %v}", prefix, n.key, n.value.value, n.value.refCount, n.refCount, n.weight) + dumpMap(t, prefix+"l", n.left) + dumpMap(t, prefix+"r", n.right) +} + +func (vm *validatedMap) validate(t *testing.T) { + t.Helper() + + validateNode(t, vm.impl.root, vm.impl.less) + + for key, value := range vm.expected { + entry := mapEntry{key: key, value: value} + if _, ok := vm.deleted[entry]; ok { + t.Fatalf("entry is deleted prematurely, key: %d, value: %d", key, value) + } + } + + actualMap := make(map[int]int, len(vm.expected)) + vm.impl.Range(func(key, value interface{}) { + if other, ok := actualMap[key.(int)]; ok { + t.Fatalf("key is present twice, key: %d, first value: %d, second value: %d", key, value, other) + } + actualMap[key.(int)] = value.(int) + }) + + assertSameMap(t, actualMap, vm.expected) +} + +func validateNode(t *testing.T, node *mapNode, less func(a, b interface{}) bool) { + if node == nil { + return + } + + if node.left != nil { + if less(node.key, node.left.key) { + t.Fatalf("left child has larger key: %v vs %v", node.left.key, node.key) + } + if node.left.weight > node.weight { + t.Fatalf("left child has larger weight: %v vs %v", node.left.weight, node.weight) + } + } + + if node.right != nil { + if less(node.right.key, node.key) { + t.Fatalf("right child has smaller key: %v vs %v", node.right.key, node.key) + } + if node.right.weight > node.weight { + t.Fatalf("right child has larger weight: %v vs %v", node.right.weight, node.weight) + } + } + + validateNode(t, node.left, less) + validateNode(t, node.right, less) +} + +func (vm *validatedMap) insert(t *testing.T, key, value int) { + vm.seen[mapEntry{key: key, value: value}] = struct{}{} + vm.impl.Store(key, value, func(deletedKey, deletedValue interface{}) { + if deletedKey != key || deletedValue != value { + t.Fatalf("unexpected passed in deleted entry: %v/%v, expected: %v/%v", deletedKey, deletedValue, key, value) + } + vm.onDelete(t, key, value) + }) + vm.expected[key] = value + vm.validate(t) + + loadValue, ok := vm.impl.Load(key) + if !ok || loadValue != value { + t.Fatalf("unexpected load result after insertion, key: 
%v, expected: %v, got: %v (%v)", key, value, loadValue, ok) + } +} + +func (vm *validatedMap) remove(t *testing.T, key int) { + vm.impl.Delete(key) + delete(vm.expected, key) + vm.validate(t) + + loadValue, ok := vm.impl.Load(key) + if ok { + t.Fatalf("unexpected load result after removal, key: %v, got: %v", key, loadValue) + } +} + +func (vm *validatedMap) clone() *validatedMap { + expected := make(map[int]int, len(vm.expected)) + for key, value := range vm.expected { + expected[key] = value + } + + return &validatedMap{ + impl: vm.impl.Clone(), + expected: expected, + deleted: vm.deleted, + seen: vm.seen, + } +} + +func (vm *validatedMap) destroy() { + vm.impl.Destroy() +} + +func assertSameMap(t *testing.T, map1, map2 interface{}) { + t.Helper() + + if !reflect.DeepEqual(map1, map2) { + t.Fatalf("different maps:\n%v\nvs\n%v", map1, map2) + } +} + +func isSameMap(map1, map2 reflect.Value) bool { + if map1.Len() != map2.Len() { + return false + } + iter := map1.MapRange() + for iter.Next() { + key := iter.Key() + value1 := iter.Value() + value2 := map2.MapIndex(key) + if value2.IsZero() || !reflect.DeepEqual(value1.Interface(), value2.Interface()) { + return false + } + } + return true +} From 3f5f798e2a0fc1b948638897b74eaa09eb0567ca Mon Sep 17 00:00:00 2001 From: Ruslan Nigmatullin Date: Sun, 12 Jun 2022 03:38:32 +0000 Subject: [PATCH 042/136] internal/lsp/cache: use persistent map for storing files in the snapshot This on average reduces latency from 34ms to 25ms on internal codebase. Updates golang/go#45686 Change-Id: I57b05e5679620d8481b1f1a051645cf1cc00aca5 Reviewed-on: https://go-review.googlesource.com/c/tools/+/413654 TryBot-Result: Gopher Robot Reviewed-by: Robert Findley gopls-CI: kokoro Reviewed-by: Alan Donovan Run-TryBot: Alan Donovan --- internal/lsp/cache/maps.go | 81 ++++++++++++++++++++++++++-------- internal/lsp/cache/session.go | 2 +- internal/lsp/cache/snapshot.go | 75 ++++++++++++++++--------------- 3 files changed, 101 insertions(+), 57 deletions(-) diff --git a/internal/lsp/cache/maps.go b/internal/lsp/cache/maps.go index 70f8039bdac..cad4465db89 100644 --- a/internal/lsp/cache/maps.go +++ b/internal/lsp/cache/maps.go @@ -5,18 +5,63 @@ package cache import ( + "golang.org/x/tools/internal/lsp/source" "golang.org/x/tools/internal/persistent" "golang.org/x/tools/internal/span" ) // TODO(euroelessar): Use generics once support for go1.17 is dropped. 
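The TODO above refers to go1.17 compatibility, which is why each wrapper below is written out by hand for its key and value types. A hypothetical generic equivalent, requiring go1.18 and therefore not part of this patch, could collapse filesMap, goFilesMap, and parseKeysByURIMap into a single type, roughly:

    package cache // hypothetical sketch only, not part of the patch

    import "golang.org/x/tools/internal/persistent"

    // typedMap is an illustrative generic wrapper over persistent.Map; the
    // name and shape are assumptions, not code from this change.
    type typedMap[K, V any] struct {
        impl *persistent.Map
    }

    func newTypedMap[K, V any](less func(a, b K) bool) typedMap[K, V] {
        return typedMap[K, V]{
            impl: persistent.NewMap(func(a, b interface{}) bool {
                return less(a.(K), b.(K))
            }),
        }
    }

    func (m typedMap[K, V]) Load(key K) (V, bool) {
        v, ok := m.impl.Load(key)
        if !ok {
            var zero V
            return zero, false
        }
        return v.(V), true
    }

    func (m typedMap[K, V]) Store(key K, value V) {
        m.impl.Store(key, value, nil)
    }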
+type filesMap struct { + impl *persistent.Map +} + +func newFilesMap() filesMap { + return filesMap{ + impl: persistent.NewMap(func(a, b interface{}) bool { + return a.(span.URI) < b.(span.URI) + }), + } +} + +func (m filesMap) Clone() filesMap { + return filesMap{ + impl: m.impl.Clone(), + } +} + +func (m filesMap) Destroy() { + m.impl.Destroy() +} + +func (m filesMap) Load(key span.URI) (source.VersionedFileHandle, bool) { + value, ok := m.impl.Load(key) + if !ok { + return nil, false + } + return value.(source.VersionedFileHandle), true +} + +func (m filesMap) Range(do func(key span.URI, value source.VersionedFileHandle)) { + m.impl.Range(func(key, value interface{}) { + do(key.(span.URI), value.(source.VersionedFileHandle)) + }) +} + +func (m filesMap) Store(key span.URI, value source.VersionedFileHandle) { + m.impl.Store(key, value, nil) +} + +func (m filesMap) Delete(key span.URI) { + m.impl.Delete(key) +} + type goFilesMap struct { impl *persistent.Map } -func newGoFilesMap() *goFilesMap { - return &goFilesMap{ +func newGoFilesMap() goFilesMap { + return goFilesMap{ impl: persistent.NewMap(func(a, b interface{}) bool { return parseKeyLess(a.(parseKey), b.(parseKey)) }), @@ -33,17 +78,17 @@ func parseKeyLess(a, b parseKey) bool { return a.file.URI < b.file.URI } -func (m *goFilesMap) Clone() *goFilesMap { - return &goFilesMap{ +func (m goFilesMap) Clone() goFilesMap { + return goFilesMap{ impl: m.impl.Clone(), } } -func (m *goFilesMap) Destroy() { +func (m goFilesMap) Destroy() { m.impl.Destroy() } -func (m *goFilesMap) Load(key parseKey) (*parseGoHandle, bool) { +func (m goFilesMap) Load(key parseKey) (*parseGoHandle, bool) { value, ok := m.impl.Load(key) if !ok { return nil, false @@ -51,19 +96,19 @@ func (m *goFilesMap) Load(key parseKey) (*parseGoHandle, bool) { return value.(*parseGoHandle), true } -func (m *goFilesMap) Range(do func(key parseKey, value *parseGoHandle)) { +func (m goFilesMap) Range(do func(key parseKey, value *parseGoHandle)) { m.impl.Range(func(key, value interface{}) { do(key.(parseKey), value.(*parseGoHandle)) }) } -func (m *goFilesMap) Store(key parseKey, value *parseGoHandle, release func()) { +func (m goFilesMap) Store(key parseKey, value *parseGoHandle, release func()) { m.impl.Store(key, value, func(key, value interface{}) { release() }) } -func (m *goFilesMap) Delete(key parseKey) { +func (m goFilesMap) Delete(key parseKey) { m.impl.Delete(key) } @@ -71,25 +116,25 @@ type parseKeysByURIMap struct { impl *persistent.Map } -func newParseKeysByURIMap() *parseKeysByURIMap { - return &parseKeysByURIMap{ +func newParseKeysByURIMap() parseKeysByURIMap { + return parseKeysByURIMap{ impl: persistent.NewMap(func(a, b interface{}) bool { return a.(span.URI) < b.(span.URI) }), } } -func (m *parseKeysByURIMap) Clone() *parseKeysByURIMap { - return &parseKeysByURIMap{ +func (m parseKeysByURIMap) Clone() parseKeysByURIMap { + return parseKeysByURIMap{ impl: m.impl.Clone(), } } -func (m *parseKeysByURIMap) Destroy() { +func (m parseKeysByURIMap) Destroy() { m.impl.Destroy() } -func (m *parseKeysByURIMap) Load(key span.URI) ([]parseKey, bool) { +func (m parseKeysByURIMap) Load(key span.URI) ([]parseKey, bool) { value, ok := m.impl.Load(key) if !ok { return nil, false @@ -97,16 +142,16 @@ func (m *parseKeysByURIMap) Load(key span.URI) ([]parseKey, bool) { return value.([]parseKey), true } -func (m *parseKeysByURIMap) Range(do func(key span.URI, value []parseKey)) { +func (m parseKeysByURIMap) Range(do func(key span.URI, value []parseKey)) { m.impl.Range(func(key, value 
interface{}) { do(key.(span.URI), value.([]parseKey)) }) } -func (m *parseKeysByURIMap) Store(key span.URI, value []parseKey) { +func (m parseKeysByURIMap) Store(key span.URI, value []parseKey) { m.impl.Store(key, value, nil) } -func (m *parseKeysByURIMap) Delete(key span.URI) { +func (m parseKeysByURIMap) Delete(key span.URI) { m.impl.Delete(key) } diff --git a/internal/lsp/cache/session.go b/internal/lsp/cache/session.go index 7dbccf7f6ed..4a7a5b2f4a6 100644 --- a/internal/lsp/cache/session.go +++ b/internal/lsp/cache/session.go @@ -233,7 +233,7 @@ func (s *Session) createView(ctx context.Context, name string, folder span.URI, generation: s.cache.store.Generation(generationName(v, 0)), packages: make(map[packageKey]*packageHandle), meta: &metadataGraph{}, - files: make(map[span.URI]source.VersionedFileHandle), + files: newFilesMap(), goFiles: newGoFilesMap(), parseKeysByURI: newParseKeysByURIMap(), symbols: make(map[span.URI]*symbolHandle), diff --git a/internal/lsp/cache/snapshot.go b/internal/lsp/cache/snapshot.go index b2ac78208d7..60cf4167ec9 100644 --- a/internal/lsp/cache/snapshot.go +++ b/internal/lsp/cache/snapshot.go @@ -74,11 +74,11 @@ type snapshot struct { // files maps file URIs to their corresponding FileHandles. // It may invalidated when a file's content changes. - files map[span.URI]source.VersionedFileHandle + files filesMap // goFiles maps a parseKey to its parseGoHandle. - goFiles *goFilesMap - parseKeysByURI *parseKeysByURIMap + goFiles goFilesMap + parseKeysByURI parseKeysByURIMap // TODO(rfindley): consider merging this with files to reduce burden on clone. symbols map[span.URI]*symbolHandle @@ -136,6 +136,7 @@ type actionKey struct { func (s *snapshot) Destroy(destroyedBy string) { s.generation.Destroy(destroyedBy) + s.files.Destroy() s.goFiles.Destroy() s.parseKeysByURI.Destroy() } @@ -173,11 +174,11 @@ func (s *snapshot) Templates() map[span.URI]source.VersionedFileHandle { defer s.mu.Unlock() tmpls := map[span.URI]source.VersionedFileHandle{} - for k, fh := range s.files { + s.files.Range(func(k span.URI, fh source.VersionedFileHandle) { if s.view.FileKind(fh) == source.Tmpl { tmpls[k] = fh } - } + }) return tmpls } @@ -461,27 +462,27 @@ func (s *snapshot) buildOverlay() map[string][]byte { defer s.mu.Unlock() overlays := make(map[string][]byte) - for uri, fh := range s.files { + s.files.Range(func(uri span.URI, fh source.VersionedFileHandle) { overlay, ok := fh.(*overlay) if !ok { - continue + return } if overlay.saved { - continue + return } // TODO(rstambler): Make sure not to send overlays outside of the current view. 
overlays[uri.Filename()] = overlay.text - } + }) return overlays } -func hashUnsavedOverlays(files map[span.URI]source.VersionedFileHandle) source.Hash { +func hashUnsavedOverlays(files filesMap) source.Hash { var unsaved []string - for uri, fh := range files { + files.Range(func(uri span.URI, fh source.VersionedFileHandle) { if overlay, ok := fh.(*overlay); ok && !overlay.saved { unsaved = append(unsaved, uri.Filename()) } - } + }) sort.Strings(unsaved) return source.Hashf("%s", unsaved) } @@ -869,9 +870,9 @@ func (s *snapshot) collectAllKnownSubdirs(ctx context.Context) { s.knownSubdirs = map[span.URI]struct{}{} s.knownSubdirsPatternCache = "" - for uri := range s.files { + s.files.Range(func(uri span.URI, fh source.VersionedFileHandle) { s.addKnownSubdirLocked(uri, dirs) - } + }) } func (s *snapshot) getKnownSubdirs(wsDirs []span.URI) []span.URI { @@ -957,11 +958,11 @@ func (s *snapshot) knownFilesInDir(ctx context.Context, dir span.URI) []span.URI s.mu.Lock() defer s.mu.Unlock() - for uri := range s.files { + s.files.Range(func(uri span.URI, fh source.VersionedFileHandle) { if source.InDir(dir.Filename(), uri.Filename()) { files = append(files, uri) } - } + }) return files } @@ -1020,8 +1021,7 @@ func (s *snapshot) Symbols(ctx context.Context) map[span.URI][]source.Symbol { resultMu sync.Mutex result = make(map[span.URI][]source.Symbol) ) - for uri, f := range s.files { - uri, f := uri, f + s.files.Range(func(uri span.URI, f source.VersionedFileHandle) { // TODO(adonovan): upgrade errgroup and use group.SetLimit(nprocs). iolimit <- struct{}{} // acquire token group.Go(func() error { @@ -1035,7 +1035,7 @@ func (s *snapshot) Symbols(ctx context.Context) map[span.URI][]source.Symbol { resultMu.Unlock() return nil }) - } + }) // Keep going on errors, but log the first failure. // Partial results are better than no symbol results. if err := group.Wait(); err != nil { @@ -1326,7 +1326,8 @@ func (s *snapshot) FindFile(uri span.URI) source.VersionedFileHandle { s.mu.Lock() defer s.mu.Unlock() - return s.files[f.URI()] + result, _ := s.files.Load(f.URI()) + return result } // GetVersionedFile returns a File for the given URI. If the file is unknown it @@ -1348,7 +1349,7 @@ func (s *snapshot) GetFile(ctx context.Context, uri span.URI) (source.FileHandle } func (s *snapshot) getFileLocked(ctx context.Context, f *fileBase) (source.VersionedFileHandle, error) { - if fh, ok := s.files[f.URI()]; ok { + if fh, ok := s.files.Load(f.URI()); ok { return fh, nil } @@ -1357,7 +1358,7 @@ func (s *snapshot) getFileLocked(ctx context.Context, f *fileBase) (source.Versi return nil, err } closed := &closedFile{fh} - s.files[f.URI()] = closed + s.files.Store(f.URI(), closed) return closed, nil } @@ -1373,16 +1374,17 @@ func (s *snapshot) openFiles() []source.VersionedFileHandle { defer s.mu.Unlock() var open []source.VersionedFileHandle - for _, fh := range s.files { + s.files.Range(func(uri span.URI, fh source.VersionedFileHandle) { if s.isOpenLocked(fh.URI()) { open = append(open, fh) } - } + }) return open } func (s *snapshot) isOpenLocked(uri span.URI) bool { - _, open := s.files[uri].(*overlay) + fh, _ := s.files.Load(uri) + _, open := fh.(*overlay) return open } @@ -1610,29 +1612,29 @@ func (s *snapshot) orphanedFiles() []source.VersionedFileHandle { defer s.mu.Unlock() var files []source.VersionedFileHandle - for uri, fh := range s.files { + s.files.Range(func(uri span.URI, fh source.VersionedFileHandle) { // Don't try to reload metadata for go.mod files. 
if s.view.FileKind(fh) != source.Go { - continue + return } // If the URI doesn't belong to this view, then it's not in a workspace // package and should not be reloaded directly. if !contains(s.view.session.viewsOf(uri), s.view) { - continue + return } // If the file is not open and is in a vendor directory, don't treat it // like a workspace package. if _, ok := fh.(*overlay); !ok && inVendor(uri) { - continue + return } // Don't reload metadata for files we've already deemed unloadable. if _, ok := s.unloadableFiles[uri]; ok { - continue + return } if s.noValidMetadataForURILocked(uri) { files = append(files, fh) } - } + }) return files } @@ -1701,7 +1703,7 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC initializedErr: s.initializedErr, packages: make(map[packageKey]*packageHandle, len(s.packages)), actions: make(map[actionKey]*actionHandle, len(s.actions)), - files: make(map[span.URI]source.VersionedFileHandle, len(s.files)), + files: s.files.Clone(), goFiles: s.goFiles.Clone(), parseKeysByURI: s.parseKeysByURI.Clone(), symbols: make(map[span.URI]*symbolHandle, len(s.symbols)), @@ -1721,9 +1723,6 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC } // Copy all of the FileHandles. - for k, v := range s.files { - result.files[k] = v - } for k, v := range s.symbols { if change, ok := changes[k]; ok { if change.exists { @@ -1807,7 +1806,7 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC } // The original FileHandle for this URI is cached on the snapshot. - originalFH := s.files[uri] + originalFH, _ := s.files.Load(uri) var originalOpen, newOpen bool _, originalOpen = originalFH.(*overlay) _, newOpen = change.fileHandle.(*overlay) @@ -1852,9 +1851,9 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC delete(result.parseWorkHandles, uri) // Handle the invalidated file; it may have new contents or not exist. if !change.exists { - delete(result.files, uri) + result.files.Delete(uri) } else { - result.files[uri] = change.fileHandle + result.files.Store(uri, change.fileHandle) } // Make sure to remove the changed file from the unloadable set. From 22ab2538d44ec522fbfca114ab0cdc436e9eddd5 Mon Sep 17 00:00:00 2001 From: Suzy Mueller Date: Thu, 23 Jun 2022 13:38:50 -0400 Subject: [PATCH 043/136] internal/lsp: rename viewport to range The final LSP spec for 3.17 changed the name of ViewPort to Range for both InlayHints and InlineValues. This manually updates just these fields in our protocol. 
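For illustration, a request now carries the visible range under the new field name. This is only a sketch, not code from this change: server, doc, and visible are placeholders for a protocol.Server, a TextDocumentIdentifier, and the client-supplied Range, using the protocol types updated below.

	// Ask for inlay hints covering the currently visible part of the
	// document; this field was named ViewPort before the final 3.17 spec.
	hints, err := server.InlayHint(ctx, &protocol.InlayHintParams{
		TextDocument: doc,
		Range:        visible, // renamed from ViewPort
	})

The returned hints and err are handled by the caller as before; only the field name changes.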
Change-Id: I0303a36536016ca59c87dc45f55fadcd80e72bfc Reviewed-on: https://go-review.googlesource.com/c/tools/+/413677 Reviewed-by: Robert Findley Run-TryBot: Suzy Mueller TryBot-Result: Gopher Robot gopls-CI: kokoro --- internal/lsp/inlay_hint.go | 2 +- internal/lsp/lsp_test.go | 2 +- internal/lsp/protocol/tsprotocol.go | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/internal/lsp/inlay_hint.go b/internal/lsp/inlay_hint.go index b2fd028d728..8d8a419c235 100644 --- a/internal/lsp/inlay_hint.go +++ b/internal/lsp/inlay_hint.go @@ -17,5 +17,5 @@ func (s *Server) inlayHint(ctx context.Context, params *protocol.InlayHintParams if !ok { return nil, err } - return source.InlayHint(ctx, snapshot, fh, params.ViewPort) + return source.InlayHint(ctx, snapshot, fh, params.Range) } diff --git a/internal/lsp/lsp_test.go b/internal/lsp/lsp_test.go index 2ec833b860e..e8febec93f3 100644 --- a/internal/lsp/lsp_test.go +++ b/internal/lsp/lsp_test.go @@ -943,7 +943,7 @@ func (r *runner) InlayHints(t *testing.T, spn span.Span) { TextDocument: protocol.TextDocumentIdentifier{ URI: protocol.URIFromSpanURI(uri), }, - // TODO: add ViewPort + // TODO: add Range }) if err != nil { t.Fatal(err) diff --git a/internal/lsp/protocol/tsprotocol.go b/internal/lsp/protocol/tsprotocol.go index 647aabc2ee1..5dd3d09e188 100644 --- a/internal/lsp/protocol/tsprotocol.go +++ b/internal/lsp/protocol/tsprotocol.go @@ -2866,7 +2866,7 @@ type InlayHintParams struct { /** * The visible document range for which inlay hints should be computed. */ - ViewPort Range `json:"viewPort"` + Range Range `json:"range"` } /** @@ -2988,7 +2988,7 @@ type InlineValueParams struct { /** * The visible document range for which inline values should be computed. */ - ViewPort Range `json:"viewPort"` + Range Range `json:"range"` /** * Additional information about the context in which inline values were * requested. From 2994e99415f52ae83985b566c317a726c14ca8ab Mon Sep 17 00:00:00 2001 From: Ruslan Nigmatullin Date: Thu, 23 Jun 2022 22:40:01 +0000 Subject: [PATCH 044/136] internal/persistent: change map to use set/get as method names Purely a style change, no expected behavior difference. 
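For reference, a short usage sketch with the new method names; uri and fh stand in for a span.URI key and a source.VersionedFileHandle value, and the optional release callback accepted by Set is unchanged.

	m := persistent.NewMap(func(a, b interface{}) bool {
		return a.(span.URI) < b.(span.URI)
	})
	defer m.Destroy()
	m.Set(uri, fh, nil)          // previously m.Store(uri, fh, nil)
	if v, ok := m.Get(uri); ok { // previously m.Load(uri)
		_ = v.(source.VersionedFileHandle)
	}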
Change-Id: Ib882eb54537126b31d20dde65c4a517d5452a8b0 Reviewed-on: https://go-review.googlesource.com/c/tools/+/413661 gopls-CI: kokoro Reviewed-by: Alan Donovan Run-TryBot: Robert Findley Reviewed-by: Robert Findley TryBot-Result: Gopher Robot --- internal/lsp/cache/maps.go | 24 ++++++++++----------- internal/lsp/cache/snapshot.go | 26 +++++++++++----------- internal/persistent/map.go | 10 ++++----- internal/persistent/map_test.go | 38 ++++++++++++++++----------------- 4 files changed, 49 insertions(+), 49 deletions(-) diff --git a/internal/lsp/cache/maps.go b/internal/lsp/cache/maps.go index cad4465db89..91b0e77e87e 100644 --- a/internal/lsp/cache/maps.go +++ b/internal/lsp/cache/maps.go @@ -34,8 +34,8 @@ func (m filesMap) Destroy() { m.impl.Destroy() } -func (m filesMap) Load(key span.URI) (source.VersionedFileHandle, bool) { - value, ok := m.impl.Load(key) +func (m filesMap) Get(key span.URI) (source.VersionedFileHandle, bool) { + value, ok := m.impl.Get(key) if !ok { return nil, false } @@ -48,8 +48,8 @@ func (m filesMap) Range(do func(key span.URI, value source.VersionedFileHandle)) }) } -func (m filesMap) Store(key span.URI, value source.VersionedFileHandle) { - m.impl.Store(key, value, nil) +func (m filesMap) Set(key span.URI, value source.VersionedFileHandle) { + m.impl.Set(key, value, nil) } func (m filesMap) Delete(key span.URI) { @@ -88,8 +88,8 @@ func (m goFilesMap) Destroy() { m.impl.Destroy() } -func (m goFilesMap) Load(key parseKey) (*parseGoHandle, bool) { - value, ok := m.impl.Load(key) +func (m goFilesMap) Get(key parseKey) (*parseGoHandle, bool) { + value, ok := m.impl.Get(key) if !ok { return nil, false } @@ -102,8 +102,8 @@ func (m goFilesMap) Range(do func(key parseKey, value *parseGoHandle)) { }) } -func (m goFilesMap) Store(key parseKey, value *parseGoHandle, release func()) { - m.impl.Store(key, value, func(key, value interface{}) { +func (m goFilesMap) Set(key parseKey, value *parseGoHandle, release func()) { + m.impl.Set(key, value, func(key, value interface{}) { release() }) } @@ -134,8 +134,8 @@ func (m parseKeysByURIMap) Destroy() { m.impl.Destroy() } -func (m parseKeysByURIMap) Load(key span.URI) ([]parseKey, bool) { - value, ok := m.impl.Load(key) +func (m parseKeysByURIMap) Get(key span.URI) ([]parseKey, bool) { + value, ok := m.impl.Get(key) if !ok { return nil, false } @@ -148,8 +148,8 @@ func (m parseKeysByURIMap) Range(do func(key span.URI, value []parseKey)) { }) } -func (m parseKeysByURIMap) Store(key span.URI, value []parseKey) { - m.impl.Store(key, value, nil) +func (m parseKeysByURIMap) Set(key span.URI, value []parseKey) { + m.impl.Set(key, value, nil) } func (m parseKeysByURIMap) Delete(key span.URI) { diff --git a/internal/lsp/cache/snapshot.go b/internal/lsp/cache/snapshot.go index 60cf4167ec9..c8d60853332 100644 --- a/internal/lsp/cache/snapshot.go +++ b/internal/lsp/cache/snapshot.go @@ -673,7 +673,7 @@ func (s *snapshot) transitiveReverseDependencies(id PackageID, ids map[PackageID func (s *snapshot) getGoFile(key parseKey) *parseGoHandle { s.mu.Lock() defer s.mu.Unlock() - if result, ok := s.goFiles.Load(key); ok { + if result, ok := s.goFiles.Get(key); ok { return result } return nil @@ -682,14 +682,14 @@ func (s *snapshot) getGoFile(key parseKey) *parseGoHandle { func (s *snapshot) addGoFile(key parseKey, pgh *parseGoHandle, release func()) *parseGoHandle { s.mu.Lock() defer s.mu.Unlock() - if result, ok := s.goFiles.Load(key); ok { + if result, ok := s.goFiles.Get(key); ok { release() return result } - s.goFiles.Store(key, pgh, release) 
- keys, _ := s.parseKeysByURI.Load(key.file.URI) + s.goFiles.Set(key, pgh, release) + keys, _ := s.parseKeysByURI.Get(key.file.URI) keys = append([]parseKey{key}, keys...) - s.parseKeysByURI.Store(key.file.URI, keys) + s.parseKeysByURI.Set(key.file.URI, keys) return pgh } @@ -1326,7 +1326,7 @@ func (s *snapshot) FindFile(uri span.URI) source.VersionedFileHandle { s.mu.Lock() defer s.mu.Unlock() - result, _ := s.files.Load(f.URI()) + result, _ := s.files.Get(f.URI()) return result } @@ -1349,7 +1349,7 @@ func (s *snapshot) GetFile(ctx context.Context, uri span.URI) (source.FileHandle } func (s *snapshot) getFileLocked(ctx context.Context, f *fileBase) (source.VersionedFileHandle, error) { - if fh, ok := s.files.Load(f.URI()); ok { + if fh, ok := s.files.Get(f.URI()); ok { return fh, nil } @@ -1358,7 +1358,7 @@ func (s *snapshot) getFileLocked(ctx context.Context, f *fileBase) (source.Versi return nil, err } closed := &closedFile{fh} - s.files.Store(f.URI(), closed) + s.files.Set(f.URI(), closed) return closed, nil } @@ -1383,7 +1383,7 @@ func (s *snapshot) openFiles() []source.VersionedFileHandle { } func (s *snapshot) isOpenLocked(uri span.URI) bool { - fh, _ := s.files.Load(uri) + fh, _ := s.files.Get(uri) _, open := fh.(*overlay) return open } @@ -1748,7 +1748,7 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC } for uri := range changes { - keys, ok := result.parseKeysByURI.Load(uri) + keys, ok := result.parseKeysByURI.Get(uri) if ok { for _, key := range keys { result.goFiles.Delete(key) @@ -1806,7 +1806,7 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC } // The original FileHandle for this URI is cached on the snapshot. - originalFH, _ := s.files.Load(uri) + originalFH, _ := s.files.Get(uri) var originalOpen, newOpen bool _, originalOpen = originalFH.(*overlay) _, newOpen = change.fileHandle.(*overlay) @@ -1853,7 +1853,7 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC if !change.exists { result.files.Delete(uri) } else { - result.files.Store(uri, change.fileHandle) + result.files.Set(uri, change.fileHandle) } // Make sure to remove the changed file from the unloadable set. @@ -2197,7 +2197,7 @@ func metadataChanges(ctx context.Context, lockedSnapshot *snapshot, oldFH, newFH // lockedSnapshot must be locked. func peekOrParse(ctx context.Context, lockedSnapshot *snapshot, fh source.FileHandle, mode source.ParseMode) (*source.ParsedGoFile, error) { key := parseKey{file: fh.FileIdentity(), mode: mode} - if pgh, ok := lockedSnapshot.goFiles.Load(key); ok { + if pgh, ok := lockedSnapshot.goFiles.Get(key); ok { cached := pgh.handle.Cached(lockedSnapshot.generation) if cached != nil { cached := cached.(*parseGoData) diff --git a/internal/persistent/map.go b/internal/persistent/map.go index bbcb72b6ee9..9c17ad09a7f 100644 --- a/internal/persistent/map.go +++ b/internal/persistent/map.go @@ -140,9 +140,9 @@ func (node *mapNode) forEach(f func(key, value interface{})) { node.right.forEach(f) } -// Load returns the value stored in the map for a key, or nil if no entry is -// present. The ok result indicates whether an entry was found in the map. -func (pm *Map) Load(key interface{}) (interface{}, bool) { +// Get returns the map value associated with the specified key, or nil if no entry +// is present. The ok result indicates whether an entry was found in the map. 
+func (pm *Map) Get(key interface{}) (interface{}, bool) { node := pm.root for node != nil { if pm.less(key, node.key) { @@ -156,10 +156,10 @@ func (pm *Map) Load(key interface{}) (interface{}, bool) { return nil, false } -// Store sets the value for a key. +// Set updates the value associated with the specified key. // If release is non-nil, it will be called with entry's key and value once the // key is no longer contained in the map or any clone. -func (pm *Map) Store(key, value interface{}, release func(key, value interface{})) { +func (pm *Map) Set(key, value interface{}, release func(key, value interface{})) { first := pm.root second := newNodeWithRef(key, value, release) pm.root = union(first, second, pm.less, true) diff --git a/internal/persistent/map_test.go b/internal/persistent/map_test.go index 9585956100b..059f0da4c03 100644 --- a/internal/persistent/map_test.go +++ b/internal/persistent/map_test.go @@ -39,7 +39,7 @@ func TestSimpleMap(t *testing.T) { m3 := m1.clone() validateRef(t, m1, m3) - m3.insert(t, 8, 8) + m3.set(t, 8, 8) validateRef(t, m1, m3) m3.destroy() @@ -48,15 +48,15 @@ func TestSimpleMap(t *testing.T) { }) validateRef(t, m1) - m1.insert(t, 1, 1) + m1.set(t, 1, 1) validateRef(t, m1) - m1.insert(t, 2, 2) + m1.set(t, 2, 2) validateRef(t, m1) - m1.insert(t, 3, 3) + m1.set(t, 3, 3) validateRef(t, m1) m1.remove(t, 2) validateRef(t, m1) - m1.insert(t, 6, 6) + m1.set(t, 6, 6) validateRef(t, m1) assertSameMap(t, deletedEntries, map[mapEntry]struct{}{ @@ -66,25 +66,25 @@ func TestSimpleMap(t *testing.T) { m2 := m1.clone() validateRef(t, m1, m2) - m1.insert(t, 6, 60) + m1.set(t, 6, 60) validateRef(t, m1, m2) m1.remove(t, 1) validateRef(t, m1, m2) for i := 10; i < 14; i++ { - m1.insert(t, i, i) + m1.set(t, i, i) validateRef(t, m1, m2) } - m1.insert(t, 10, 100) + m1.set(t, 10, 100) validateRef(t, m1, m2) m1.remove(t, 12) validateRef(t, m1, m2) - m2.insert(t, 4, 4) + m2.set(t, 4, 4) validateRef(t, m1, m2) - m2.insert(t, 5, 5) + m2.set(t, 5, 5) validateRef(t, m1, m2) m1.destroy() @@ -100,7 +100,7 @@ func TestSimpleMap(t *testing.T) { {key: 13, value: 13}: {}, }) - m2.insert(t, 7, 7) + m2.set(t, 7, 7) validateRef(t, m2) m2.destroy() @@ -124,7 +124,7 @@ func TestRandomMap(t *testing.T) { keys := make([]int, 0, 1000) for i := 0; i < 1000; i++ { key := rand.Int() - m.insert(t, key, key) + m.set(t, key, key) keys = append(keys, key) if i%10 == 1 { @@ -245,9 +245,9 @@ func validateNode(t *testing.T, node *mapNode, less func(a, b interface{}) bool) validateNode(t, node.right, less) } -func (vm *validatedMap) insert(t *testing.T, key, value int) { +func (vm *validatedMap) set(t *testing.T, key, value int) { vm.seen[mapEntry{key: key, value: value}] = struct{}{} - vm.impl.Store(key, value, func(deletedKey, deletedValue interface{}) { + vm.impl.Set(key, value, func(deletedKey, deletedValue interface{}) { if deletedKey != key || deletedValue != value { t.Fatalf("unexpected passed in deleted entry: %v/%v, expected: %v/%v", deletedKey, deletedValue, key, value) } @@ -256,9 +256,9 @@ func (vm *validatedMap) insert(t *testing.T, key, value int) { vm.expected[key] = value vm.validate(t) - loadValue, ok := vm.impl.Load(key) - if !ok || loadValue != value { - t.Fatalf("unexpected load result after insertion, key: %v, expected: %v, got: %v (%v)", key, value, loadValue, ok) + gotValue, ok := vm.impl.Get(key) + if !ok || gotValue != value { + t.Fatalf("unexpected get result after insertion, key: %v, expected: %v, got: %v (%v)", key, value, gotValue, ok) } } @@ -267,9 +267,9 @@ func (vm 
*validatedMap) remove(t *testing.T, key int) { delete(vm.expected, key) vm.validate(t) - loadValue, ok := vm.impl.Load(key) + gotValue, ok := vm.impl.Get(key) if ok { - t.Fatalf("unexpected load result after removal, key: %v, got: %v", key, loadValue) + t.Fatalf("unexpected get result after removal, key: %v, got: %v", key, gotValue) } } From 60ca6366e648a6c37776ee8ae7c756d2c2576767 Mon Sep 17 00:00:00 2001 From: Jamal Carvalho Date: Fri, 24 Jun 2022 17:11:38 +0000 Subject: [PATCH 045/136] internal/lsp: use camel case for inlay hint config fields To properly format these field names in the vscode config ui these fields should be camel case. Change-Id: I3b8b8fb6371172ecb464710f7d91b9fc67e0ed42 Reviewed-on: https://go-review.googlesource.com/c/tools/+/413684 TryBot-Result: Gopher Robot Run-TryBot: Jamal Carvalho Reviewed-by: Suzy Mueller gopls-CI: kokoro --- gopls/doc/inlayHints.md | 28 ++++++++++++++-------------- internal/lsp/source/api_json.go | 28 ++++++++++++++-------------- internal/lsp/source/inlay_hint.go | 14 +++++++------- 3 files changed, 35 insertions(+), 35 deletions(-) diff --git a/gopls/doc/inlayHints.md b/gopls/doc/inlayHints.md index a4fd3e51554..15957b52ede 100644 --- a/gopls/doc/inlayHints.md +++ b/gopls/doc/inlayHints.md @@ -3,23 +3,23 @@ This document describes the inlay hints that `gopls` uses inside the editor. -## **assign_variable_types** +## **assignVariableTypes** Enable/disable inlay hints for variable types in assign statements: i/* int/*, j/* int/* := 0, len(r)-1 -**Disabled by default. Enable it by setting `"hints": {"assign_variable_types": true}`.** +**Disabled by default. Enable it by setting `"hints": {"assignVariableTypes": true}`.** -## **composite_literal_fields** +## **compositeLiteralFields** Enable/disable inlay hints for composite literal field names: {in: "Hello, world", want: "dlrow ,olleH"} -**Disabled by default. Enable it by setting `"hints": {"composite_literal_fields": true}`.** +**Disabled by default. Enable it by setting `"hints": {"compositeLiteralFields": true}`.** -## **composite_literal_types** +## **compositeLiteralTypes** Enable/disable inlay hints for composite literal types: @@ -29,9 +29,9 @@ Enable/disable inlay hints for composite literal types: /*struct{ in string; want string }*/{"Hello, world", "dlrow ,olleH"}, } -**Disabled by default. Enable it by setting `"hints": {"composite_literal_types": true}`.** +**Disabled by default. Enable it by setting `"hints": {"compositeLiteralTypes": true}`.** -## **constant_values** +## **constantValues** Enable/disable inlay hints for constant values: @@ -42,25 +42,25 @@ Enable/disable inlay hints for constant values: KindErrorf/* = 3*/ ) -**Disabled by default. Enable it by setting `"hints": {"constant_values": true}`.** +**Disabled by default. Enable it by setting `"hints": {"constantValues": true}`.** -## **function_type_parameters** +## **functionTypeParameters** Enable/disable inlay hints for implicit type parameters on generic functions: myFoo/*[int, string]*/(1, "hello") -**Disabled by default. Enable it by setting `"hints": {"function_type_parameters": true}`.** +**Disabled by default. Enable it by setting `"hints": {"functionTypeParameters": true}`.** -## **parameter_names** +## **parameterNames** Enable/disable inlay hints for parameter names: parseInt(/* str: */ "123", /* radix: */ 8) -**Disabled by default. Enable it by setting `"hints": {"parameter_names": true}`.** +**Disabled by default. 
Enable it by setting `"hints": {"parameterNames": true}`.** -## **range_variable_types** +## **rangeVariableTypes** Enable/disable inlay hints for variable types in range statements: @@ -68,6 +68,6 @@ Enable/disable inlay hints for variable types in range statements: fmt.Println(k, v) } -**Disabled by default. Enable it by setting `"hints": {"range_variable_types": true}`.** +**Disabled by default. Enable it by setting `"hints": {"rangeVariableTypes": true}`.** diff --git a/internal/lsp/source/api_json.go b/internal/lsp/source/api_json.go index 4188d9d06fe..ef683f31a01 100755 --- a/internal/lsp/source/api_json.go +++ b/internal/lsp/source/api_json.go @@ -511,37 +511,37 @@ var GeneratedAPIJSON = &APIJSON{ Doc: "hints specify inlay hints that users want to see.\nA full list of hints that gopls uses can be found\n[here](https://github.com/golang/tools/blob/master/gopls/doc/inlayHints.md).\n", EnumKeys: EnumKeys{Keys: []EnumKey{ { - Name: "\"assign_variable_types\"", + Name: "\"assignVariableTypes\"", Doc: "Enable/disable inlay hints for variable types in assign statements:\n\n\ti/* int/*, j/* int/* := 0, len(r)-1", Default: "false", }, { - Name: "\"composite_literal_fields\"", + Name: "\"compositeLiteralFields\"", Doc: "Enable/disable inlay hints for composite literal field names:\n\n\t{in: \"Hello, world\", want: \"dlrow ,olleH\"}", Default: "false", }, { - Name: "\"composite_literal_types\"", + Name: "\"compositeLiteralTypes\"", Doc: "Enable/disable inlay hints for composite literal types:\n\n\tfor _, c := range []struct {\n\t\tin, want string\n\t}{\n\t\t/*struct{ in string; want string }*/{\"Hello, world\", \"dlrow ,olleH\"},\n\t}", Default: "false", }, { - Name: "\"constant_values\"", + Name: "\"constantValues\"", Doc: "Enable/disable inlay hints for constant values:\n\n\tconst (\n\t\tKindNone Kind = iota/* = 0*/\n\t\tKindPrint/* = 1*/\n\t\tKindPrintf/* = 2*/\n\t\tKindErrorf/* = 3*/\n\t)", Default: "false", }, { - Name: "\"function_type_parameters\"", + Name: "\"functionTypeParameters\"", Doc: "Enable/disable inlay hints for implicit type parameters on generic functions:\n\n\tmyFoo/*[int, string]*/(1, \"hello\")", Default: "false", }, { - Name: "\"parameter_names\"", + Name: "\"parameterNames\"", Doc: "Enable/disable inlay hints for parameter names:\n\n\tparseInt(/* str: */ \"123\", /* radix: */ 8)", Default: "false", }, { - Name: "\"range_variable_types\"", + Name: "\"rangeVariableTypes\"", Doc: "Enable/disable inlay hints for variable types in range statements:\n\n\tfor k/* int*/, v/* string/* := range []string{} {\n\t\tfmt.Println(k, v)\n\t}", Default: "false", }, @@ -1026,31 +1026,31 @@ var GeneratedAPIJSON = &APIJSON{ }, Hints: []*HintJSON{ { - Name: "assign_variable_types", + Name: "assignVariableTypes", Doc: "Enable/disable inlay hints for variable types in assign statements:\n\n\ti/* int/*, j/* int/* := 0, len(r)-1", }, { - Name: "composite_literal_fields", + Name: "compositeLiteralFields", Doc: "Enable/disable inlay hints for composite literal field names:\n\n\t{in: \"Hello, world\", want: \"dlrow ,olleH\"}", }, { - Name: "composite_literal_types", + Name: "compositeLiteralTypes", Doc: "Enable/disable inlay hints for composite literal types:\n\n\tfor _, c := range []struct {\n\t\tin, want string\n\t}{\n\t\t/*struct{ in string; want string }*/{\"Hello, world\", \"dlrow ,olleH\"},\n\t}", }, { - Name: "constant_values", + Name: "constantValues", Doc: "Enable/disable inlay hints for constant values:\n\n\tconst (\n\t\tKindNone Kind = iota/* = 0*/\n\t\tKindPrint/* = 
1*/\n\t\tKindPrintf/* = 2*/\n\t\tKindErrorf/* = 3*/\n\t)", }, { - Name: "function_type_parameters", + Name: "functionTypeParameters", Doc: "Enable/disable inlay hints for implicit type parameters on generic functions:\n\n\tmyFoo/*[int, string]*/(1, \"hello\")", }, { - Name: "parameter_names", + Name: "parameterNames", Doc: "Enable/disable inlay hints for parameter names:\n\n\tparseInt(/* str: */ \"123\", /* radix: */ 8)", }, { - Name: "range_variable_types", + Name: "rangeVariableTypes", Doc: "Enable/disable inlay hints for variable types in range statements:\n\n\tfor k/* int*/, v/* string/* := range []string{} {\n\t\tfmt.Println(k, v)\n\t}", }, }, diff --git a/internal/lsp/source/inlay_hint.go b/internal/lsp/source/inlay_hint.go index 99e1ad09d82..0c147283532 100644 --- a/internal/lsp/source/inlay_hint.go +++ b/internal/lsp/source/inlay_hint.go @@ -32,13 +32,13 @@ type Hint struct { } const ( - ParameterNames = "parameter_names" - AssignVariableTypes = "assign_variable_types" - ConstantValues = "constant_values" - RangeVariableTypes = "range_variable_types" - CompositeLiteralTypes = "composite_literal_types" - CompositeLiteralFieldNames = "composite_literal_fields" - FunctionTypeParameters = "function_type_parameters" + ParameterNames = "parameterNames" + AssignVariableTypes = "assignVariableTypes" + ConstantValues = "constantValues" + RangeVariableTypes = "rangeVariableTypes" + CompositeLiteralTypes = "compositeLiteralTypes" + CompositeLiteralFieldNames = "compositeLiteralFields" + FunctionTypeParameters = "functionTypeParameters" ) var AllInlayHints = map[string]*Hint{ From e1ec1f32302c374d496c1be1dc701b6c5a064a00 Mon Sep 17 00:00:00 2001 From: Robert Findley Date: Fri, 24 Jun 2022 16:31:42 -0400 Subject: [PATCH 046/136] internal/imports: use a module resolver if GOWORK is set Previously, gopls would fall back on a gopath resolver when running goimports from a directory containing go.work (but not go.mod). Fix this by update the code to recognize that GOWORK also puts goimports into module mode. All the work to _support_ go.work had already been done, but the tests were only passing because they were setting GO111MODULE=on explicitly (and therefore GOMOD=/dev/null was satisfying the pre-existing check). Also add a test for the regression in gopls. 
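The resulting resolver selection can be summarized by the sketch below. It is a simplification of the GetResolver check in this change, not code from the CL itself; env stands for the go environment values gopls already queries (see RequiredGoEnvVars).

	// Module mode applies when either a main module (GOMOD) or a workspace
	// file (GOWORK) is in effect; otherwise fall back to the GOPATH resolver.
	func usesModuleResolver(env map[string]string) bool {
		return len(env["GOMOD"]) > 0 || len(env["GOWORK"]) > 0
	}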
Fixes golang/go#52784 Change-Id: I31df6f71a949a5668e8dc001b3ee25ad26f2f927 Reviewed-on: https://go-review.googlesource.com/c/tools/+/413689 TryBot-Result: Gopher Robot Run-TryBot: Robert Findley gopls-CI: kokoro Reviewed-by: Michael Matloob --- gopls/internal/regtest/misc/imports_test.go | 46 ++++++++++++++++ internal/imports/fix.go | 4 +- internal/imports/mod.go | 14 ++++- internal/imports/mod_test.go | 61 +++++++++++---------- internal/lsp/cache/load.go | 2 +- 5 files changed, 93 insertions(+), 34 deletions(-) diff --git a/gopls/internal/regtest/misc/imports_test.go b/gopls/internal/regtest/misc/imports_test.go index 4ae2be6bf10..1250e78e776 100644 --- a/gopls/internal/regtest/misc/imports_test.go +++ b/gopls/internal/regtest/misc/imports_test.go @@ -214,3 +214,49 @@ func TestA(t *testing.T) { ) }) } + +// Test for golang/go#52784 +func TestGoWorkImports(t *testing.T) { + testenv.NeedsGo1Point(t, 18) + const pkg = ` +-- go.work -- +go 1.19 + +use ( + ./caller + ./mod +) +-- caller/go.mod -- +module caller.com + +go 1.18 + +require mod.com v0.0.0 + +replace mod.com => ../mod +-- caller/caller.go -- +package main + +func main() { + a.Test() +} +-- mod/go.mod -- +module mod.com + +go 1.18 +-- mod/a/a.go -- +package a + +func Test() { +} +` + Run(t, pkg, func(t *testing.T, env *Env) { + env.OpenFile("caller/caller.go") + env.Await(env.DiagnosticAtRegexp("caller/caller.go", "a.Test")) + + // Saving caller.go should trigger goimports, which should find a.Test in + // the mod.com module, thanks to the go.work file. + env.SaveBuffer("caller/caller.go") + env.Await(EmptyDiagnostics("caller/caller.go")) + }) +} diff --git a/internal/imports/fix.go b/internal/imports/fix.go index d859617b774..9e373d64ebc 100644 --- a/internal/imports/fix.go +++ b/internal/imports/fix.go @@ -796,7 +796,7 @@ func GetPackageExports(ctx context.Context, wrapped func(PackageExport), searchP return getCandidatePkgs(ctx, callback, filename, filePkg, env) } -var RequiredGoEnvVars = []string{"GO111MODULE", "GOFLAGS", "GOINSECURE", "GOMOD", "GOMODCACHE", "GONOPROXY", "GONOSUMDB", "GOPATH", "GOPROXY", "GOROOT", "GOSUMDB"} +var RequiredGoEnvVars = []string{"GO111MODULE", "GOFLAGS", "GOINSECURE", "GOMOD", "GOMODCACHE", "GONOPROXY", "GONOSUMDB", "GOPATH", "GOPROXY", "GOROOT", "GOSUMDB", "GOWORK"} // ProcessEnv contains environment variables and settings that affect the use of // the go command, the go/build package, etc. 
@@ -906,7 +906,7 @@ func (e *ProcessEnv) GetResolver() (Resolver, error) { if err := e.init(); err != nil { return nil, err } - if len(e.Env["GOMOD"]) == 0 { + if len(e.Env["GOMOD"]) == 0 && len(e.Env["GOWORK"]) == 0 { e.resolver = newGopathResolver(e) return e.resolver, nil } diff --git a/internal/imports/mod.go b/internal/imports/mod.go index 2bcf41f5fa7..46693f24339 100644 --- a/internal/imports/mod.go +++ b/internal/imports/mod.go @@ -70,9 +70,17 @@ func (r *ModuleResolver) init() error { Logf: r.env.Logf, WorkingDir: r.env.WorkingDir, } - vendorEnabled, mainModVendor, err := gocommand.VendorEnabled(context.TODO(), inv, r.env.GocmdRunner) - if err != nil { - return err + + vendorEnabled := false + var mainModVendor *gocommand.ModuleJSON + + // Module vendor directories are ignored in workspace mode: + // https://go.googlesource.com/proposal/+/master/design/45713-workspace.md + if len(r.env.Env["GOWORK"]) == 0 { + vendorEnabled, mainModVendor, err = gocommand.VendorEnabled(context.TODO(), inv, r.env.GocmdRunner) + if err != nil { + return err + } } if mainModVendor != nil && vendorEnabled { diff --git a/internal/imports/mod_test.go b/internal/imports/mod_test.go index 5f71805fa77..8063dbe0f74 100644 --- a/internal/imports/mod_test.go +++ b/internal/imports/mod_test.go @@ -29,7 +29,7 @@ import ( // Tests that we can find packages in the stdlib. func TestScanStdlib(t *testing.T) { - mt := setup(t, ` + mt := setup(t, nil, ` -- go.mod -- module x `, "") @@ -42,7 +42,7 @@ module x // where the module is in scope -- here we have to figure out the import path // without any help from go list. func TestScanOutOfScopeNestedModule(t *testing.T) { - mt := setup(t, ` + mt := setup(t, nil, ` -- go.mod -- module x @@ -68,7 +68,7 @@ package x`, "") // Tests that we don't find a nested module contained in a local replace target. // The code for this case is too annoying to write, so it's just ignored. func TestScanNestedModuleInLocalReplace(t *testing.T) { - mt := setup(t, ` + mt := setup(t, nil, ` -- go.mod -- module x @@ -107,7 +107,7 @@ package z // Tests that path encoding is handled correctly. Adapted from mod_case.txt. func TestModCase(t *testing.T) { - mt := setup(t, ` + mt := setup(t, nil, ` -- go.mod -- module x @@ -124,7 +124,7 @@ import _ "rsc.io/QUOTE/QUOTE" // Not obviously relevant to goimports. Adapted from mod_domain_root.txt anyway. func TestModDomainRoot(t *testing.T) { - mt := setup(t, ` + mt := setup(t, nil, ` -- go.mod -- module x @@ -140,7 +140,7 @@ import _ "example.com" // Tests that scanning the module cache > 1 time is able to find the same module. func TestModMultipleScans(t *testing.T) { - mt := setup(t, ` + mt := setup(t, nil, ` -- go.mod -- module x @@ -159,7 +159,7 @@ import _ "example.com" // Tests that scanning the module cache > 1 time is able to find the same module // in the module cache. func TestModMultipleScansWithSubdirs(t *testing.T) { - mt := setup(t, ` + mt := setup(t, nil, ` -- go.mod -- module x @@ -178,7 +178,7 @@ import _ "rsc.io/quote" // Tests that scanning the module cache > 1 after changing a package in module cache to make it unimportable // is able to find the same module. func TestModCacheEditModFile(t *testing.T) { - mt := setup(t, ` + mt := setup(t, nil, ` -- go.mod -- module x @@ -219,7 +219,7 @@ import _ "rsc.io/quote" // Tests that -mod=vendor works. Adapted from mod_vendor_build.txt. 
func TestModVendorBuild(t *testing.T) { - mt := setup(t, ` + mt := setup(t, nil, ` -- go.mod -- module m go 1.12 @@ -250,7 +250,7 @@ import _ "rsc.io/sampler" // Tests that -mod=vendor is auto-enabled only for go1.14 and higher. // Vaguely inspired by mod_vendor_auto.txt. func TestModVendorAuto(t *testing.T) { - mt := setup(t, ` + mt := setup(t, nil, ` -- go.mod -- module m go 1.14 @@ -276,7 +276,7 @@ import _ "rsc.io/sampler" // Tests that a module replace works. Adapted from mod_list.txt. We start with // go.mod2; the first part of the test is irrelevant. func TestModList(t *testing.T) { - mt := setup(t, ` + mt := setup(t, nil, ` -- go.mod -- module x require rsc.io/quote v1.5.1 @@ -293,7 +293,7 @@ import _ "rsc.io/quote" // Tests that a local replace works. Adapted from mod_local_replace.txt. func TestModLocalReplace(t *testing.T) { - mt := setup(t, ` + mt := setup(t, nil, ` -- x/y/go.mod -- module x/y require zz v1.0.0 @@ -317,7 +317,7 @@ package z // Tests that the package at the root of the main module can be found. // Adapted from the first part of mod_multirepo.txt. func TestModMultirepo1(t *testing.T) { - mt := setup(t, ` + mt := setup(t, nil, ` -- go.mod -- module rsc.io/quote @@ -333,7 +333,7 @@ package quote // of mod_multirepo.txt (We skip the case where it doesn't have a go.mod // entry -- we just don't work in that case.) func TestModMultirepo3(t *testing.T) { - mt := setup(t, ` + mt := setup(t, nil, ` -- go.mod -- module rsc.io/quote @@ -352,7 +352,7 @@ import _ "rsc.io/quote/v2" // Tests that a nested module is found in the module cache, even though // it's checked out. Adapted from the fourth part of mod_multirepo.txt. func TestModMultirepo4(t *testing.T) { - mt := setup(t, ` + mt := setup(t, nil, ` -- go.mod -- module rsc.io/quote require rsc.io/quote/v2 v2.0.1 @@ -376,7 +376,7 @@ import _ "rsc.io/quote/v2" // Tests a simple module dependency. Adapted from the first part of mod_replace.txt. func TestModReplace1(t *testing.T) { - mt := setup(t, ` + mt := setup(t, nil, ` -- go.mod -- module quoter @@ -392,7 +392,7 @@ package main // Tests a local replace. Adapted from the second part of mod_replace.txt. func TestModReplace2(t *testing.T) { - mt := setup(t, ` + mt := setup(t, nil, ` -- go.mod -- module quoter @@ -418,7 +418,7 @@ import "rsc.io/sampler" // Tests that a module can be replaced by a different module path. Adapted // from the third part of mod_replace.txt. func TestModReplace3(t *testing.T) { - mt := setup(t, ` + mt := setup(t, nil, ` -- go.mod -- module quoter @@ -451,7 +451,7 @@ package quote // mod_replace_import.txt, with example.com/v changed to /vv because Go 1.11 // thinks /v is an invalid major version. 
func TestModReplaceImport(t *testing.T) { - mt := setup(t, ` + mt := setup(t, nil, ` -- go.mod -- module example.com/m @@ -556,7 +556,7 @@ package v func TestModWorkspace(t *testing.T) { testenv.NeedsGo1Point(t, 18) - mt := setup(t, ` + mt := setup(t, nil, ` -- go.work -- go 1.18 @@ -592,7 +592,7 @@ package b func TestModWorkspaceReplace(t *testing.T) { testenv.NeedsGo1Point(t, 18) - mt := setup(t, ` + mt := setup(t, nil, ` -- go.work -- use m @@ -651,7 +651,7 @@ func G() { func TestModWorkspaceReplaceOverride(t *testing.T) { testenv.NeedsGo1Point(t, 18) - mt := setup(t, `-- go.work -- + mt := setup(t, nil, `-- go.work -- use m use n replace example.com/dep => ./dep3 @@ -716,7 +716,7 @@ func G() { func TestModWorkspacePrune(t *testing.T) { testenv.NeedsGo1Point(t, 18) - mt := setup(t, ` + mt := setup(t, nil, ` -- go.work -- go 1.18 @@ -885,7 +885,7 @@ package z // Tests that we handle GO111MODULE=on with no go.mod file. See #30855. func TestNoMainModule(t *testing.T) { testenv.NeedsGo1Point(t, 12) - mt := setup(t, ` + mt := setup(t, map[string]string{"GO111MODULE": "on"}, ` -- x.go -- package x `, "") @@ -993,7 +993,9 @@ type modTest struct { // setup builds a test environment from a txtar and supporting modules // in testdata/mod, along the lines of TestScript in cmd/go. -func setup(t *testing.T, main, wd string) *modTest { +// +// extraEnv is applied on top of the default test env. +func setup(t *testing.T, extraEnv map[string]string, main, wd string) *modTest { t.Helper() testenv.NeedsGo1Point(t, 11) testenv.NeedsTool(t, "go") @@ -1023,13 +1025,16 @@ func setup(t *testing.T, main, wd string) *modTest { Env: map[string]string{ "GOPATH": filepath.Join(dir, "gopath"), "GOMODCACHE": "", - "GO111MODULE": "on", + "GO111MODULE": "auto", "GOSUMDB": "off", "GOPROXY": proxydir.ToURL(proxyDir), }, WorkingDir: filepath.Join(mainDir, wd), GocmdRunner: &gocommand.Runner{}, } + for k, v := range extraEnv { + env.Env[k] = v + } if *testDebug { env.Logf = log.Printf } @@ -1168,7 +1173,7 @@ func removeDir(dir string) { // Tests that findModFile can find the mod files from a path in the module cache. func TestFindModFileModCache(t *testing.T) { - mt := setup(t, ` + mt := setup(t, nil, ` -- go.mod -- module x @@ -1220,7 +1225,7 @@ func TestInvalidModCache(t *testing.T) { } func TestGetCandidatesRanking(t *testing.T) { - mt := setup(t, ` + mt := setup(t, nil, ` -- go.mod -- module example.com diff --git a/internal/lsp/cache/load.go b/internal/lsp/cache/load.go index 5ce49f00d43..3c6795370d1 100644 --- a/internal/lsp/cache/load.go +++ b/internal/lsp/cache/load.go @@ -253,7 +253,7 @@ func (m *moduleErrorMap) Error() string { var buf bytes.Buffer fmt.Fprintf(&buf, "%d modules have errors:\n", len(paths)) for _, path := range paths { - fmt.Fprintf(&buf, "\t%s", m.errs[path][0].Msg) + fmt.Fprintf(&buf, "\t%s:%s\n", path, m.errs[path][0].Msg) } return buf.String() From c36379be2b347665c06d85bea46f9d4b977ec3ce Mon Sep 17 00:00:00 2001 From: Robert Findley Date: Fri, 24 Jun 2022 11:31:51 -0400 Subject: [PATCH 047/136] internal/lsp/cache: don't set new metadata if existing is valid If gopls believes it has valid metadata for a package, don't set new metadata. This is consistent with previous behavior that was changed in CL 340851. In principle this shouldn't matter, but in practice there are places where gopls doesn't yet want to invalidate packages, *even though* their metadata may have changed (such as while editing a go.mod file before saving). 
In the future we should eliminate these places, but for now we should let snapshot.clone control this invalidation. This also reduces the number of type-checked packages we invalidate on load. Change-Id: I0cc9bd4186245bec401332198de0047ff37e7ec7 Reviewed-on: https://go-review.googlesource.com/c/tools/+/413681 Run-TryBot: Robert Findley TryBot-Result: Gopher Robot gopls-CI: kokoro Reviewed-by: Alan Donovan --- internal/lsp/cache/graph.go | 1 - internal/lsp/cache/load.go | 38 +++++++++++++++++++++++++++------- internal/lsp/cache/snapshot.go | 5 +---- 3 files changed, 32 insertions(+), 12 deletions(-) diff --git a/internal/lsp/cache/graph.go b/internal/lsp/cache/graph.go index 36e658b3a86..3f247739fd7 100644 --- a/internal/lsp/cache/graph.go +++ b/internal/lsp/cache/graph.go @@ -18,7 +18,6 @@ import ( // TODO(rfindley): make this type immutable, so that it may be shared across // snapshots. type metadataGraph struct { - // metadata maps package IDs to their associated metadata. metadata map[PackageID]*KnownMetadata diff --git a/internal/lsp/cache/load.go b/internal/lsp/cache/load.go index 3c6795370d1..bdafdcd115b 100644 --- a/internal/lsp/cache/load.go +++ b/internal/lsp/cache/load.go @@ -444,24 +444,42 @@ func getWorkspaceDir(ctx context.Context, h *memoize.Handle, g *memoize.Generati // metadata exists for all dependencies. func computeMetadataUpdates(ctx context.Context, g *metadataGraph, pkgPath PackagePath, pkg *packages.Package, cfg *packages.Config, query []string, updates map[PackageID]*KnownMetadata, path []PackageID) error { id := PackageID(pkg.ID) - if new := updates[id]; new != nil { - return nil - } if source.IsCommandLineArguments(pkg.ID) { suffix := ":" + strings.Join(query, ",") id = PackageID(string(id) + suffix) pkgPath = PackagePath(string(pkgPath) + suffix) } + + // If we have valid metadata for this package, don't update. This minimizes + // the amount of subsequent invalidation. + // + // TODO(rfindley): perform a sanity check that metadata matches here. If not, + // we have an invalidation bug elsewhere. + if existing := g.metadata[id]; existing != nil && existing.Valid { + return nil + } + if _, ok := updates[id]; ok { // If we've already seen this dependency, there may be an import cycle, or // we may have reached the same package transitively via distinct paths. // Check the path to confirm. + + // TODO(rfindley): this doesn't look right. Any single piece of new + // metadata could theoretically introduce import cycles in the metadata + // graph. What's the point of this limited check here (and is it even + // possible to get an import cycle in data from go/packages)? Consider + // simply returning, so that this function need not return an error. + // + // We should consider doing a more complete guard against import cycles + // elsewhere. for _, prev := range path { if prev == id { return fmt.Errorf("import cycle detected: %q", id) } } + return nil } + // Recreate the metadata rather than reusing it to avoid locking. m := &KnownMetadata{ Metadata: &Metadata{ @@ -504,6 +522,14 @@ func computeMetadataUpdates(ctx context.Context, g *metadataGraph, pkgPath Packa } for importPath, importPkg := range pkg.Imports { + // TODO(rfindley): in rare cases it is possible that the import package + // path is not the same as the package path of the import. 
That is to say + // (quoting adonovan): + // "The importPath string is the path by which one package is imported from + // another, but that needn't be the same as its internal name (sometimes + // called the "package path") used to prefix its linker symbols" + // + // We should not set this package path on the metadata of the dep. importPkgPath := PackagePath(importPath) importID := PackageID(importPkg.ID) @@ -517,10 +543,8 @@ func computeMetadataUpdates(ctx context.Context, g *metadataGraph, pkgPath Packa m.MissingDeps[importPkgPath] = struct{}{} continue } - if noValidMetadataForID(g, importID) { - if err := computeMetadataUpdates(ctx, g, importPkgPath, importPkg, cfg, query, updates, append(path, id)); err != nil { - event.Error(ctx, "error in dependency", err) - } + if err := computeMetadataUpdates(ctx, g, importPkgPath, importPkg, cfg, query, updates, append(path, id)); err != nil { + event.Error(ctx, "error in dependency", err) } } diff --git a/internal/lsp/cache/snapshot.go b/internal/lsp/cache/snapshot.go index c8d60853332..05f892808fa 100644 --- a/internal/lsp/cache/snapshot.go +++ b/internal/lsp/cache/snapshot.go @@ -1304,11 +1304,8 @@ func (s *snapshot) noValidMetadataForURILocked(uri span.URI) bool { func (s *snapshot) noValidMetadataForID(id PackageID) bool { s.mu.Lock() defer s.mu.Unlock() - return noValidMetadataForID(s.meta, id) -} -func noValidMetadataForID(g *metadataGraph, id PackageID) bool { - m := g.metadata[id] + m := s.meta.metadata[id] return m == nil || !m.Valid } From 93a03c2c548821d80e09ec6159b80b6c8bee8887 Mon Sep 17 00:00:00 2001 From: Robert Findley Date: Fri, 24 Jun 2022 12:42:15 -0400 Subject: [PATCH 048/136] internal/lsp/cache: invalidate reverse closure when loading packages When setting new metadata for a package ID, we invalidate the corresponding type-checked packages. Whenever we invalidate a package, we must also invalidate its reverse transitive closure. Otherwise we may end up in a scenario where the go/types import graph does not match gopls' view of the import graph. Change-Id: I8db6ff3e4a722656e6dde7907e7c0470375c4847 Reviewed-on: https://go-review.googlesource.com/c/tools/+/413683 Run-TryBot: Robert Findley Reviewed-by: Alan Donovan TryBot-Result: Gopher Robot gopls-CI: kokoro --- internal/lsp/cache/graph.go | 41 ++++++++++++++++++++++++++++++++++ internal/lsp/cache/load.go | 18 +++++++++++---- internal/lsp/cache/snapshot.go | 29 +++++++----------------- 3 files changed, 63 insertions(+), 25 deletions(-) diff --git a/internal/lsp/cache/graph.go b/internal/lsp/cache/graph.go index 3f247739fd7..dc7d4faef78 100644 --- a/internal/lsp/cache/graph.go +++ b/internal/lsp/cache/graph.go @@ -128,3 +128,44 @@ func (g *metadataGraph) build() { } } } + +// reverseTransitiveClosure calculates the set of packages that transitively +// reach an id in ids via their Deps. The result also includes given ids. +// +// If includeInvalid is false, the algorithm ignores packages with invalid +// metadata (including those in the given list of ids). +func (g *metadataGraph) reverseTransitiveClosure(includeInvalid bool, ids ...PackageID) map[PackageID]struct{} { + seen := make(map[PackageID]struct{}) + var visitAll func([]PackageID) + visitAll = func(ids []PackageID) { + for _, id := range ids { + if _, ok := seen[id]; ok { + continue + } + m := g.metadata[id] + // Only use invalid metadata if we support it. 
+ if m == nil || !(m.Valid || includeInvalid) { + continue + } + seen[id] = struct{}{} + visitAll(g.importedBy[id]) + } + } + visitAll(ids) + return seen +} + +func collectReverseTransitiveClosure(g *metadataGraph, includeInvalid bool, ids []PackageID, seen map[PackageID]struct{}) { + for _, id := range ids { + if _, ok := seen[id]; ok { + continue + } + m := g.metadata[id] + // Only use invalid metadata if we support it. + if m == nil || !(m.Valid || includeInvalid) { + continue + } + seen[id] = struct{}{} + collectReverseTransitiveClosure(g, includeInvalid, g.importedBy[id], seen) + } +} diff --git a/internal/lsp/cache/load.go b/internal/lsp/cache/load.go index bdafdcd115b..db9a06d4dee 100644 --- a/internal/lsp/cache/load.go +++ b/internal/lsp/cache/load.go @@ -204,18 +204,28 @@ func (s *snapshot) load(ctx context.Context, allowNetwork bool, scopes ...interf } } + var loadedIDs []PackageID + for id := range updates { + loadedIDs = append(loadedIDs, id) + } + s.mu.Lock() + + // invalidate the reverse transitive closure of packages that have changed. + invalidatedPackages := s.meta.reverseTransitiveClosure(true, loadedIDs...) s.meta = s.meta.Clone(updates) + // Invalidate any packages we may have associated with this metadata. // - // TODO(rfindley): if we didn't already invalidate these in snapshot.clone, - // shouldn't we invalidate the reverse transitive closure? - for _, m := range updates { + // TODO(rfindley): this should not be necessary, as we should have already + // invalidated in snapshot.clone. + for id := range invalidatedPackages { for _, mode := range []source.ParseMode{source.ParseHeader, source.ParseExported, source.ParseFull} { - key := packageKey{mode, m.ID} + key := packageKey{mode, id} delete(s.packages, key) } } + s.workspacePackages = computeWorkspacePackagesLocked(s, s.meta) s.dumpWorkspace("load") s.mu.Unlock() diff --git a/internal/lsp/cache/snapshot.go b/internal/lsp/cache/snapshot.go index 05f892808fa..8194750b331 100644 --- a/internal/lsp/cache/snapshot.go +++ b/internal/lsp/cache/snapshot.go @@ -70,6 +70,10 @@ type snapshot struct { builtin span.URI // meta holds loaded metadata. + // + // meta is guarded by mu, but the metadataGraph itself is immutable. + // TODO(rfindley): in many places we hold mu while operating on meta, even + // though we only need to hold mu while reading the pointer. meta *metadataGraph // files maps file URIs to their corresponding FileHandles. @@ -627,8 +631,10 @@ func (s *snapshot) GetReverseDependencies(ctx context.Context, id string) ([]sou if err := s.awaitLoaded(ctx); err != nil { return nil, err } - ids := make(map[PackageID]struct{}) - s.transitiveReverseDependencies(PackageID(id), ids) + s.mu.Lock() + meta := s.meta + s.mu.Unlock() + ids := meta.reverseTransitiveClosure(s.useInvalidMetadata(), PackageID(id)) // Make sure to delete the original package ID from the map. delete(ids, PackageID(id)) @@ -652,24 +658,6 @@ func (s *snapshot) checkedPackage(ctx context.Context, id PackageID, mode source return ph.check(ctx, s) } -// transitiveReverseDependencies populates the ids map with package IDs -// belonging to the provided package and its transitive reverse dependencies. -func (s *snapshot) transitiveReverseDependencies(id PackageID, ids map[PackageID]struct{}) { - if _, ok := ids[id]; ok { - return - } - m := s.getMetadata(id) - // Only use invalid metadata if we support it. 
- if m == nil || !(m.Valid || s.useInvalidMetadata()) { - return - } - ids[id] = struct{}{} - importedBy := s.getImportedBy(id) - for _, parentID := range importedBy { - s.transitiveReverseDependencies(parentID, ids) - } -} - func (s *snapshot) getGoFile(key parseKey) *parseGoHandle { s.mu.Lock() defer s.mu.Unlock() @@ -1879,7 +1867,6 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC } // Invalidate reverse dependencies too. - // TODO(heschi): figure out the locking model and use transitiveReverseDeps? // idsToInvalidate keeps track of transitive reverse dependencies. // If an ID is present in the map, invalidate its types. // If an ID's value is true, invalidate its metadata too. From 10494c735e6ba5ec3a5eed252fc61a116a21a31d Mon Sep 17 00:00:00 2001 From: Ben Sarah Golightly Date: Fri, 24 Jun 2022 23:26:15 +0100 Subject: [PATCH 049/136] cmd/digraph: fix typo Change-Id: I086edda41c57b603afa660afb9396e17ba6c1a36 Reviewed-on: https://go-review.googlesource.com/c/tools/+/414074 Reviewed-by: Ian Lance Taylor Auto-Submit: Ian Lance Taylor TryBot-Result: Gopher Robot Reviewed-by: Robert Findley gopls-CI: kokoro Run-TryBot: Ian Lance Taylor --- cmd/digraph/digraph.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/digraph/digraph.go b/cmd/digraph/digraph.go index 62cb08d23a8..69d84ad5012 100644 --- a/cmd/digraph/digraph.go +++ b/cmd/digraph/digraph.go @@ -34,7 +34,7 @@ The support commands are: sccs all strongly connected components (one per line) scc - the set of nodes nodes strongly connected to the specified one + the set of nodes strongly connected to the specified one focus the subgraph containing all directed paths that pass through the specified node From 56116ec0159179782987cb761912b6fd3fa997ee Mon Sep 17 00:00:00 2001 From: Robert Findley Date: Sun, 26 Jun 2022 15:17:46 -0400 Subject: [PATCH 050/136] internal/memoize: don't destroy reference counted handles Unlike generational handles, when reference counted handles are evicted from the Store we don't know that they are also no longer in use by active goroutines. Destroying them causes goroutine leaks. Also fix a data race because Handle.mu was not acquired in the release func returned by GetHandle. 
Change-Id: Ida7bb6961a035dd24ef8566c7e4faa6890296b5b Reviewed-on: https://go-review.googlesource.com/c/tools/+/414455 Run-TryBot: Robert Findley gopls-CI: kokoro Reviewed-by: Alan Donovan TryBot-Result: Gopher Robot --- internal/lsp/cache/parse.go | 2 +- internal/memoize/memoize.go | 70 +++++++++++++++++++++++--------- internal/memoize/memoize_test.go | 64 +++++++++++++++++++++++------ 3 files changed, 104 insertions(+), 32 deletions(-) diff --git a/internal/lsp/cache/parse.go b/internal/lsp/cache/parse.go index 376524bd324..f7b4f9c7031 100644 --- a/internal/lsp/cache/parse.go +++ b/internal/lsp/cache/parse.go @@ -61,7 +61,7 @@ func (s *snapshot) parseGoHandle(ctx context.Context, fh source.FileHandle, mode parseHandle, release := s.generation.GetHandle(key, func(ctx context.Context, arg memoize.Arg) interface{} { snapshot := arg.(*snapshot) return parseGo(ctx, snapshot.FileSet(), fh, mode) - }, nil) + }) pgh := &parseGoHandle{ handle: parseHandle, diff --git a/internal/memoize/memoize.go b/internal/memoize/memoize.go index 48a642c990e..6477abbb810 100644 --- a/internal/memoize/memoize.go +++ b/internal/memoize/memoize.go @@ -83,18 +83,18 @@ func (g *Generation) Destroy(destroyedBy string) { g.store.mu.Lock() defer g.store.mu.Unlock() - for _, e := range g.store.handles { - if !e.trackGenerations { + for _, h := range g.store.handles { + if !h.trackGenerations { continue } - e.mu.Lock() - if _, ok := e.generations[g]; ok { - delete(e.generations, g) // delete even if it's dead, in case of dangling references to the entry. - if len(e.generations) == 0 { - e.destroy(g.store) + h.mu.Lock() + if _, ok := h.generations[g]; ok { + delete(h.generations, g) // delete even if it's dead, in case of dangling references to the entry. + if len(h.generations) == 0 { + h.destroy(g.store) } } - e.mu.Unlock() + h.mu.Unlock() } delete(g.store.generations, g) } @@ -120,6 +120,12 @@ type Function func(ctx context.Context, arg Arg) interface{} type state int +// TODO(rfindley): remove stateDestroyed; Handles should not need to know +// whether or not they have been destroyed. +// +// TODO(rfindley): also consider removing stateIdle. Why create a handle if you +// aren't certain you're going to need its result? And if you know you need its +// result, why wait to begin computing it? const ( stateIdle = iota stateRunning @@ -139,6 +145,12 @@ const ( // they decrement waiters. If it drops to zero, the inner context is cancelled, // computation is abandoned, and state resets to idle to start the process over // again. +// +// Handles may be tracked by generations, or directly reference counted, as +// determined by the trackGenerations field. See the field comments for more +// information about the differences between these two forms. +// +// TODO(rfindley): eliminate generational handles. type Handle struct { key interface{} mu sync.Mutex @@ -159,6 +171,11 @@ type Handle struct { value interface{} // cleanup, if non-nil, is used to perform any necessary clean-up on values // produced by function. + // + // cleanup is never set for reference counted handles. + // + // TODO(rfindley): remove this field once workspace folders no longer need to + // be tracked. cleanup func(interface{}) // If trackGenerations is set, this handle tracks generations in which it @@ -190,19 +207,27 @@ func (g *Generation) Bind(key interface{}, function Function, cleanup func(inter // // As in opposite to Bind it returns a release callback which has to be called // once this reference to handle is not needed anymore. 
-func (g *Generation) GetHandle(key interface{}, function Function, cleanup func(interface{})) (*Handle, func()) { - handle := g.getHandle(key, function, cleanup, false) +func (g *Generation) GetHandle(key interface{}, function Function) (*Handle, func()) { + h := g.getHandle(key, function, nil, false) store := g.store release := func() { + // Acquire store.mu before mutating refCounter store.mu.Lock() defer store.mu.Unlock() - handle.refCounter-- - if handle.refCounter == 0 { - handle.destroy(store) + h.mu.Lock() + defer h.mu.Unlock() + + h.refCounter-- + if h.refCounter == 0 { + // Don't call h.destroy: for reference counted handles we can't know when + // they are no longer reachable from runnable goroutines. For example, + // gopls could have a current operation that is using a packageHandle. + // Destroying the handle here would cause that operation to hang. + delete(store.handles, h.key) } } - return handle, release + return h, release } func (g *Generation) getHandle(key interface{}, function Function, cleanup func(interface{}), trackGenerations bool) *Handle { @@ -252,13 +277,13 @@ func (s *Store) DebugOnlyIterate(f func(k, v interface{})) { s.mu.Lock() defer s.mu.Unlock() - for k, e := range s.handles { + for k, h := range s.handles { var v interface{} - e.mu.Lock() - if e.state == stateCompleted { - v = e.value + h.mu.Lock() + if h.state == stateCompleted { + v = h.value } - e.mu.Unlock() + h.mu.Unlock() if v == nil { continue } @@ -278,6 +303,7 @@ func (g *Generation) Inherit(h *Handle) { h.incrementRef(g) } +// destroy marks h as destroyed. h.mu and store.mu must be held. func (h *Handle) destroy(store *Store) { h.state = stateDestroyed if h.cleanup != nil && h.value != nil { @@ -409,6 +435,12 @@ func (h *Handle) run(ctx context.Context, g *Generation, arg Arg) (interface{}, } return } + + if h.cleanup != nil && h.value != nil { + // Clean up before overwriting an existing value. + h.cleanup(h.value) + } + // At this point v will be cleaned up whenever h is destroyed. 
h.value = v h.function = nil diff --git a/internal/memoize/memoize_test.go b/internal/memoize/memoize_test.go index bffbfc2f6b3..ae387b8d049 100644 --- a/internal/memoize/memoize_test.go +++ b/internal/memoize/memoize_test.go @@ -7,7 +7,9 @@ package memoize_test import ( "context" "strings" + "sync" "testing" + "time" "golang.org/x/tools/internal/memoize" ) @@ -112,15 +114,12 @@ func TestHandleRefCounting(t *testing.T) { g1 := s.Generation("g1") v1 := false v2 := false - cleanup := func(v interface{}) { - *(v.(*bool)) = true - } h1, release1 := g1.GetHandle("key1", func(context.Context, memoize.Arg) interface{} { return &v1 - }, nil) + }) h2, release2 := g1.GetHandle("key2", func(context.Context, memoize.Arg) interface{} { return &v2 - }, cleanup) + }) expectGet(t, h1, g1, &v1) expectGet(t, h2, g1, &v2) @@ -131,7 +130,7 @@ func TestHandleRefCounting(t *testing.T) { h2Copy, release2Copy := g2.GetHandle("key2", func(context.Context, memoize.Arg) interface{} { return &v1 - }, nil) + }) if h2 != h2Copy { t.Error("NewHandle returned a new value while old is not destroyed yet") } @@ -140,24 +139,65 @@ func TestHandleRefCounting(t *testing.T) { release2() if got, want := v2, false; got != want { - t.Error("after destroying first v2 ref, v2 is cleaned up") + t.Errorf("after destroying first v2 ref, got %v, want %v", got, want) } release2Copy() - if got, want := v2, true; got != want { - t.Error("after destroying second v2 ref, v2 is not cleaned up") - } if got, want := v1, false; got != want { - t.Error("after destroying v2, v1 is cleaned up") + t.Errorf("after destroying v2, got %v, want %v", got, want) } release1() g3 := s.Generation("g3") h2Copy, release2Copy = g3.GetHandle("key2", func(context.Context, memoize.Arg) interface{} { return &v2 - }, cleanup) + }) if h2 == h2Copy { t.Error("NewHandle returned previously destroyed value") } release2Copy() g3.Destroy("by test") } + +func TestHandleDestroyedWhileRunning(t *testing.T) { + // Test that calls to Handle.Get return even if the handle is destroyed while + // running. + + s := &memoize.Store{} + g := s.Generation("g") + c := make(chan int) + + var v int + h, release := g.GetHandle("key", func(ctx context.Context, _ memoize.Arg) interface{} { + <-c + <-c + if err := ctx.Err(); err != nil { + t.Errorf("ctx.Err() = %v, want nil", err) + } + return &v + }) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) // arbitrary timeout; may be removed if it causes flakes + defer cancel() + + var wg sync.WaitGroup + wg.Add(1) + var got interface{} + var err error + go func() { + got, err = h.Get(ctx, g, nil) + wg.Done() + }() + + c <- 0 // send once to enter the handle function + release() // release before the handle function returns + c <- 0 // let the handle function proceed + + wg.Wait() + + if err != nil { + t.Errorf("Get() failed: %v", err) + } + if got != &v { + t.Errorf("Get() = %v, want %v", got, v) + } +} From 66bbba3d58b08b34aaf65516ae70d820b70b7b3e Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Mon, 27 Jun 2022 12:43:16 -0400 Subject: [PATCH 051/136] internal/memoize: remove unused Store.generations map This change removes an unused map, renames Store.mu, and add minor commentary. 
Change-Id: I2f064ff0daf87e0f73930bc980760a453d18e70a Reviewed-on: https://go-review.googlesource.com/c/tools/+/414494 TryBot-Result: Gopher Robot Run-TryBot: Alan Donovan gopls-CI: kokoro Reviewed-by: Robert Findley --- internal/memoize/memoize.go | 52 ++++++++++++++++--------------------- 1 file changed, 22 insertions(+), 30 deletions(-) diff --git a/internal/memoize/memoize.go b/internal/memoize/memoize.go index 6477abbb810..a758deeb7f8 100644 --- a/internal/memoize/memoize.go +++ b/internal/memoize/memoize.go @@ -31,27 +31,15 @@ var ( // Store binds keys to functions, returning handles that can be used to access // the functions results. type Store struct { - mu sync.Mutex - // handles is the set of values stored. - handles map[interface{}]*Handle - - // generations is the set of generations live in this store. - generations map[*Generation]struct{} + handlesMu sync.Mutex // lock ordering: Store.handlesMu before Handle.mu + handles map[interface{}]*Handle } // Generation creates a new Generation associated with s. Destroy must be // called on the returned Generation once it is no longer in use. name is // for debugging purposes only. func (s *Store) Generation(name string) *Generation { - s.mu.Lock() - defer s.mu.Unlock() - if s.handles == nil { - s.handles = map[interface{}]*Handle{} - s.generations = map[*Generation]struct{}{} - } - g := &Generation{store: s, name: name} - s.generations[g] = struct{}{} - return g + return &Generation{store: s, name: name} } // A Generation is a logical point in time of the cache life-cycle. Cache @@ -81,8 +69,8 @@ func (g *Generation) Destroy(destroyedBy string) { panic("Destroy on generation " + g.name + " already destroyed by " + prevDestroyedBy) } - g.store.mu.Lock() - defer g.store.mu.Unlock() + g.store.handlesMu.Lock() + defer g.store.handlesMu.Unlock() for _, h := range g.store.handles { if !h.trackGenerations { continue @@ -96,7 +84,6 @@ func (g *Generation) Destroy(destroyedBy string) { } h.mu.Unlock() } - delete(g.store.generations, g) } // Acquire creates a new reference to g, and returns a func to release that @@ -153,7 +140,7 @@ const ( // TODO(rfindley): eliminate generational handles. type Handle struct { key interface{} - mu sync.Mutex + mu sync.Mutex // lock ordering: Store.handlesMu before Handle.mu // generations is the set of generations in which this handle is valid. 
generations map[*Generation]struct{} @@ -211,9 +198,9 @@ func (g *Generation) GetHandle(key interface{}, function Function) (*Handle, fun h := g.getHandle(key, function, nil, false) store := g.store release := func() { - // Acquire store.mu before mutating refCounter - store.mu.Lock() - defer store.mu.Unlock() + // Acquire store.handlesMu before mutating refCounter + store.handlesMu.Lock() + defer store.handlesMu.Unlock() h.mu.Lock() defer h.mu.Unlock() @@ -239,8 +226,8 @@ func (g *Generation) getHandle(key interface{}, function Function, cleanup func( if atomic.LoadUint32(&g.destroyed) != 0 { panic("operation on generation " + g.name + " destroyed by " + g.destroyedBy) } - g.store.mu.Lock() - defer g.store.mu.Unlock() + g.store.handlesMu.Lock() + defer g.store.handlesMu.Unlock() h, ok := g.store.handles[key] if !ok { h = &Handle{ @@ -252,6 +239,10 @@ func (g *Generation) getHandle(key interface{}, function Function, cleanup func( if trackGenerations { h.generations = make(map[*Generation]struct{}, 1) } + + if g.store.handles == nil { + g.store.handles = map[interface{}]*Handle{} + } g.store.handles[key] = h } @@ -261,10 +252,11 @@ func (g *Generation) getHandle(key interface{}, function Function, cleanup func( // Stats returns the number of each type of value in the store. func (s *Store) Stats() map[reflect.Type]int { - s.mu.Lock() - defer s.mu.Unlock() - result := map[reflect.Type]int{} + + s.handlesMu.Lock() + defer s.handlesMu.Unlock() + for k := range s.handles { result[reflect.TypeOf(k)]++ } @@ -274,8 +266,8 @@ func (s *Store) Stats() map[reflect.Type]int { // DebugOnlyIterate iterates through all live cache entries and calls f on them. // It should only be used for debugging purposes. func (s *Store) DebugOnlyIterate(f func(k, v interface{})) { - s.mu.Lock() - defer s.mu.Unlock() + s.handlesMu.Lock() + defer s.handlesMu.Unlock() for k, h := range s.handles { var v interface{} @@ -303,7 +295,7 @@ func (g *Generation) Inherit(h *Handle) { h.incrementRef(g) } -// destroy marks h as destroyed. h.mu and store.mu must be held. +// destroy marks h as destroyed. h.mu and store.handlesMu must be held. func (h *Handle) destroy(store *Store) { h.state = stateDestroyed if h.cleanup != nil && h.value != nil { From ec0831a43429a1ab34768947d9c4874fa9906e60 Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Fri, 24 Jun 2022 18:10:35 -0400 Subject: [PATCH 052/136] refactor/satisfy: don't crash on type parameters This change causes the satisfy constraint pass to correctly handle type parameters. In nearly all cases this means calling coreType(T) instead of T.Underlying(). This, and the addition of cases for C[T] and C[X, Y], should make the code robust to generic syntax. However, it is still not clear what the semantics of constraints are for the renaming tool. That work is left to a follow-up. Also, add a test suite that exercises all the basic operators, using generics in each case. 
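For background, a standalone sketch (Go 1.18+, public go/types API only, not the internal typeparams helpers the change actually uses) of why Underlying() is the wrong tool for type parameters: the underlying type of a type parameter is its constraint interface, so an assertion such as x.Underlying().(*types.Slice) can never succeed and previously caused the crash. The core type ([]int below) is what the traversal needs.

    package main

    import (
        "fmt"
        "go/ast"
        "go/parser"
        "go/token"
        "go/types"
    )

    const src = `package p

    func Append[S ~[]int](s S) S {
        return append(s, 1) // fine: the core type of S is []int
    }
    `

    func main() {
        fset := token.NewFileSet()
        f, err := parser.ParseFile(fset, "p.go", src, 0)
        if err != nil {
            panic(err)
        }
        info := &types.Info{Defs: make(map[*ast.Ident]types.Object)}
        var conf types.Config
        if _, err := conf.Check("p", fset, []*ast.File{f}, info); err != nil {
            panic(err)
        }
        for ident, obj := range info.Defs {
            if ident.Name != "s" {
                continue
            }
            T := obj.Type()
            fmt.Println(T)              // S
            fmt.Println(T.Underlying()) // the constraint interface, not a slice
            _, ok := T.Underlying().(*types.Slice)
            fmt.Println(ok) // false: the old code effectively assumed true here
        }
    }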
Fixes golang/go#52940 Change-Id: Ic1261eb551c99b582c35fadaa148b979532588df Reviewed-on: https://go-review.googlesource.com/c/tools/+/413690 Reviewed-by: Robert Findley --- refactor/satisfy/find.go | 78 ++++++++---- refactor/satisfy/find_test.go | 226 ++++++++++++++++++++++++++++++++++ 2 files changed, 277 insertions(+), 27 deletions(-) create mode 100644 refactor/satisfy/find_test.go diff --git a/refactor/satisfy/find.go b/refactor/satisfy/find.go index ff4212b7645..91fb7de0279 100644 --- a/refactor/satisfy/find.go +++ b/refactor/satisfy/find.go @@ -10,10 +10,8 @@ // // THIS PACKAGE IS EXPERIMENTAL AND MAY CHANGE AT ANY TIME. // -// It is provided only for the gorename tool. Ideally this -// functionality will become part of the type-checker in due course, -// since it is computing it anyway, and it is robust for ill-typed -// inputs, which this package is not. +// It is provided only for the gopls tool. It requires well-typed inputs. +// package satisfy // import "golang.org/x/tools/refactor/satisfy" // NOTES: @@ -25,9 +23,6 @@ package satisfy // import "golang.org/x/tools/refactor/satisfy" // ... // }}) // -// TODO(adonovan): make this robust against ill-typed input. -// Or move it into the type-checker. -// // Assignability conversions are possible in the following places: // - in assignments y = x, y := x, var y = x. // - from call argument types to formal parameter types @@ -51,11 +46,15 @@ import ( "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/go/types/typeutil" + "golang.org/x/tools/internal/typeparams" ) // A Constraint records the fact that the RHS type does and must // satisfy the LHS type, which is an interface. // The names are suggestive of an assignment statement LHS = RHS. +// +// The constraint is implicitly universally quantified over any type +// parameters appearing within the two types. type Constraint struct { LHS, RHS types.Type } @@ -129,13 +128,13 @@ func (f *Finder) exprN(e ast.Expr) types.Type { case *ast.CallExpr: // x, err := f(args) - sig := f.expr(e.Fun).Underlying().(*types.Signature) + sig := coreType(f.expr(e.Fun)).(*types.Signature) f.call(sig, e.Args) case *ast.IndexExpr: // y, ok := x[i] x := f.expr(e.X) - f.assign(f.expr(e.Index), x.Underlying().(*types.Map).Key()) + f.assign(f.expr(e.Index), coreType(x).(*types.Map).Key()) case *ast.TypeAssertExpr: // y, ok := x.(T) @@ -215,7 +214,7 @@ func (f *Finder) builtin(obj *types.Builtin, sig *types.Signature, args []ast.Ex f.expr(args[1]) } else { // append(x, y, z) - tElem := s.Underlying().(*types.Slice).Elem() + tElem := coreType(s).(*types.Slice).Elem() for _, arg := range args[1:] { f.assign(tElem, f.expr(arg)) } @@ -224,7 +223,7 @@ func (f *Finder) builtin(obj *types.Builtin, sig *types.Signature, args []ast.Ex case "delete": m := f.expr(args[0]) k := f.expr(args[1]) - f.assign(m.Underlying().(*types.Map).Key(), k) + f.assign(coreType(m).(*types.Map).Key(), k) default: // ordinary call @@ -358,6 +357,7 @@ func (f *Finder) expr(e ast.Expr) types.Type { f.sig = saved case *ast.CompositeLit: + // No need for coreType here: go1.18 disallows P{...} for type param P. 
switch T := deref(tv.Type).Underlying().(type) { case *types.Struct: for i, elem := range e.Elts { @@ -403,12 +403,20 @@ func (f *Finder) expr(e ast.Expr) types.Type { } case *ast.IndexExpr: - x := f.expr(e.X) - i := f.expr(e.Index) - if ux, ok := x.Underlying().(*types.Map); ok { - f.assign(ux.Key(), i) + if instance(f.info, e.X) { + // f[T] or C[T] -- generic instantiation + } else { + // x[i] or m[k] -- index or lookup operation + x := f.expr(e.X) + i := f.expr(e.Index) + if ux, ok := coreType(x).(*types.Map); ok { + f.assign(ux.Key(), i) + } } + case *typeparams.IndexListExpr: + // f[X, Y] -- generic instantiation + case *ast.SliceExpr: f.expr(e.X) if e.Low != nil { @@ -439,7 +447,7 @@ func (f *Finder) expr(e ast.Expr) types.Type { } } // ordinary call - f.call(f.expr(e.Fun).Underlying().(*types.Signature), e.Args) + f.call(coreType(f.expr(e.Fun)).(*types.Signature), e.Args) } case *ast.StarExpr: @@ -499,7 +507,7 @@ func (f *Finder) stmt(s ast.Stmt) { case *ast.SendStmt: ch := f.expr(s.Chan) val := f.expr(s.Value) - f.assign(ch.Underlying().(*types.Chan).Elem(), val) + f.assign(coreType(ch).(*types.Chan).Elem(), val) case *ast.IncDecStmt: f.expr(s.X) @@ -647,35 +655,35 @@ func (f *Finder) stmt(s ast.Stmt) { if s.Key != nil { k := f.expr(s.Key) var xelem types.Type - // keys of array, *array, slice, string aren't interesting - switch ux := x.Underlying().(type) { + // Keys of array, *array, slice, string aren't interesting + // since the RHS key type is just an int. + switch ux := coreType(x).(type) { case *types.Chan: xelem = ux.Elem() case *types.Map: xelem = ux.Key() } if xelem != nil { - f.assign(xelem, k) + f.assign(k, xelem) } } if s.Value != nil { val := f.expr(s.Value) var xelem types.Type - // values of strings aren't interesting - switch ux := x.Underlying().(type) { + // Values of type strings aren't interesting because + // the RHS value type is just a rune. + switch ux := coreType(x).(type) { case *types.Array: xelem = ux.Elem() - case *types.Chan: - xelem = ux.Elem() case *types.Map: xelem = ux.Elem() case *types.Pointer: // *array - xelem = deref(ux).(*types.Array).Elem() + xelem = coreType(deref(ux)).(*types.Array).Elem() case *types.Slice: xelem = ux.Elem() } if xelem != nil { - f.assign(xelem, val) + f.assign(val, xelem) } } } @@ -690,7 +698,7 @@ func (f *Finder) stmt(s ast.Stmt) { // deref returns a pointer's element type; otherwise it returns typ. func deref(typ types.Type) types.Type { - if p, ok := typ.Underlying().(*types.Pointer); ok { + if p, ok := coreType(typ).(*types.Pointer); ok { return p.Elem() } return typ @@ -699,3 +707,19 @@ func deref(typ types.Type) types.Type { func unparen(e ast.Expr) ast.Expr { return astutil.Unparen(e) } func isInterface(T types.Type) bool { return types.IsInterface(T) } + +func coreType(T types.Type) types.Type { return typeparams.CoreType(T) } + +func instance(info *types.Info, expr ast.Expr) bool { + var id *ast.Ident + switch x := expr.(type) { + case *ast.Ident: + id = x + case *ast.SelectorExpr: + id = x.Sel + default: + return false + } + _, ok := typeparams.GetInstances(info)[id] + return ok +} diff --git a/refactor/satisfy/find_test.go b/refactor/satisfy/find_test.go new file mode 100644 index 00000000000..234bce905d3 --- /dev/null +++ b/refactor/satisfy/find_test.go @@ -0,0 +1,226 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package satisfy_test + +import ( + "fmt" + "go/ast" + "go/parser" + "go/token" + "go/types" + "reflect" + "sort" + "testing" + + "golang.org/x/tools/internal/typeparams" + "golang.org/x/tools/refactor/satisfy" +) + +// This test exercises various operations on core types of type parameters. +// (It also provides pretty decent coverage of the non-generic operations.) +func TestGenericCoreOperations(t *testing.T) { + if !typeparams.Enabled { + t.Skip("!typeparams.Enabled") + } + + const src = `package foo + +type I interface { f() } + +type impl struct{} +func (impl) f() {} + +// A big pile of single-serving types that implement I. +type A struct{impl} +type B struct{impl} +type C struct{impl} +type D struct{impl} +type E struct{impl} +type F struct{impl} +type G struct{impl} +type H struct{impl} +type J struct{impl} +type K struct{impl} +type L struct{impl} +type M struct{impl} +type N struct{impl} +type O struct{impl} +type P struct{impl} +type Q struct{impl} +type R struct{impl} +type S struct{impl} +type T struct{impl} +type U struct{impl} + +type Generic[T any] struct{impl} +func (Generic[T]) g(T) {} + +type GI[T any] interface{ + g(T) +} + +func _[Slice interface{ []I }](s Slice) Slice { + s[0] = L{} // I <- L + return append(s, A{}) // I <- A +} + +func _[Func interface{ func(I) B }](fn Func) { + b := fn(C{}) // I <- C + var _ I = b // I <- B +} + +func _[Chan interface{ chan D }](ch Chan) { + var i I + for i = range ch {} // I <- D + _ = i +} + +func _[Chan interface{ chan E }](ch Chan) { + var _ I = <-ch // I <- E +} + +func _[Chan interface{ chan I }](ch Chan) { + ch <- F{} // I <- F +} + +func _[Map interface{ map[G]H }](m Map) { + var k, v I + for k, v = range m {} // I <- G, I <- H + _, _ = k, v +} + +func _[Map interface{ map[I]K }](m Map) { + var _ I = m[J{}] // I <- J, I <- K + delete(m, R{}) // I <- R + _, _ = m[J{}] +} + +func _[Array interface{ [1]I }](a Array) { + a[0] = M{} // I <- M +} + +func _[Array interface{ [1]N }](a Array) { + var _ I = a[0] // I <- N +} + +func _[Array interface{ [1]O }](a Array) { + var v I + for _, v = range a {} // I <- O + _ = v +} + +func _[ArrayPtr interface{ *[1]P }](a ArrayPtr) { + var v I + for _, v = range a {} // I <- P + _ = v +} + +func _[Slice interface{ []Q }](s Slice) { + var v I + for _, v = range s {} // I <- Q + _ = v +} + +func _[Func interface{ func() (S, bool) }](fn Func) { + var i I + i, _ = fn() // I <- S + _ = i +} + +func _() I { + var _ I = T{} // I <- T + var _ I = Generic[T]{} // I <- Generic[T] + var _ I = Generic[string]{} // I <- Generic[string] + return U{} // I <- U +} + +var _ GI[string] = Generic[string]{} // GI[string] <- Generic[string] + +// universally quantified constraints: +// the type parameter may appear on the left, the right, or both sides. 
+ +func _[T any](g Generic[T]) GI[T] { + return g // GI[T] <- Generic[T] +} + +func _[T any]() { + type GI2[T any] interface{ g(string) } + var _ GI2[T] = Generic[string]{} // GI2[T] <- Generic[string] +} + +type Gen2[T any] struct{} +func (f Gen2[T]) g(string) { global = f } // GI[string] <- Gen2[T] + +var global GI[string] + +` + got := constraints(t, src) + want := []string{ + "p.GI2[T] <- p.Generic[string]", // implicitly "forall T" quantified + "p.GI[T] <- p.Generic[T]", // implicitly "forall T" quantified + "p.GI[string] <- p.Gen2[T]", // implicitly "forall T" quantified + "p.GI[string] <- p.Generic[string]", + "p.I <- p.A", + "p.I <- p.B", + "p.I <- p.C", + "p.I <- p.D", + "p.I <- p.E", + "p.I <- p.F", + "p.I <- p.G", + "p.I <- p.Generic[p.T]", + "p.I <- p.Generic[string]", + "p.I <- p.H", + "p.I <- p.J", + "p.I <- p.K", + "p.I <- p.L", + "p.I <- p.M", + "p.I <- p.N", + "p.I <- p.O", + "p.I <- p.P", + "p.I <- p.Q", + "p.I <- p.R", + "p.I <- p.S", + "p.I <- p.T", + "p.I <- p.U", + } + if !reflect.DeepEqual(got, want) { + t.Fatalf("found unexpected constraints: got %s, want %s", got, want) + } +} + +func constraints(t *testing.T, src string) []string { + // parse + fset := token.NewFileSet() + f, err := parser.ParseFile(fset, "p.go", src, 0) + if err != nil { + t.Fatal(err) // parse error + } + files := []*ast.File{f} + + // type-check + info := &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Scopes: make(map[ast.Node]*types.Scope), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + } + typeparams.InitInstanceInfo(info) + conf := types.Config{} + if _, err := conf.Check("p", fset, files, info); err != nil { + t.Fatal(err) // type error + } + + // gather constraints + var finder satisfy.Finder + finder.Find(info, files) + var constraints []string + for c := range finder.Result { + constraints = append(constraints, fmt.Sprintf("%v <- %v", c.LHS, c.RHS)) + } + sort.Strings(constraints) + return constraints +} From 7404bd2ffdbcd390fb7b678d9a82da10fcbf48f9 Mon Sep 17 00:00:00 2001 From: Robert Findley Date: Mon, 27 Jun 2022 15:23:55 -0400 Subject: [PATCH 053/136] all: gofmt some recent file changes Change-Id: I62d2d35275964b35032e36d6ed3c9f4a31176f91 Reviewed-on: https://go-review.googlesource.com/c/tools/+/414495 Run-TryBot: Robert Findley gopls-CI: kokoro TryBot-Result: Gopher Robot Reviewed-by: Alan Donovan --- internal/lsp/source/workspace_symbol.go | 1 - refactor/satisfy/find.go | 1 - 2 files changed, 2 deletions(-) diff --git a/internal/lsp/source/workspace_symbol.go b/internal/lsp/source/workspace_symbol.go index c7cfe5c9ef8..0822de0810d 100644 --- a/internal/lsp/source/workspace_symbol.go +++ b/internal/lsp/source/workspace_symbol.go @@ -287,7 +287,6 @@ func (c comboMatcher) match(chunks []string) (int, float64) { // of zero indicates no match. // - A symbolizer determines how we extract the symbol for an object. This // enables the 'symbolStyle' configuration option. -// func collectSymbols(ctx context.Context, views []View, matcherType SymbolMatcher, symbolizer symbolizer, query string) ([]protocol.SymbolInformation, error) { // Extract symbols from all files. diff --git a/refactor/satisfy/find.go b/refactor/satisfy/find.go index 91fb7de0279..aacb56bce8b 100644 --- a/refactor/satisfy/find.go +++ b/refactor/satisfy/find.go @@ -11,7 +11,6 @@ // THIS PACKAGE IS EXPERIMENTAL AND MAY CHANGE AT ANY TIME. 
// // It is provided only for the gopls tool. It requires well-typed inputs. -// package satisfy // import "golang.org/x/tools/refactor/satisfy" // NOTES: From 2a900561e78a69afe5828ff8388aeb0dfc6220dc Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Tue, 28 Jun 2022 09:12:07 -0400 Subject: [PATCH 054/136] go/gcexportdata: fix Find for Go modules Find needs to invoke the go command to find the package export data. It cannot rely on GOPATH-based file location heuristics. This has the nice side effect of automatically compiling the code, removing the possibility of stale export data. Ideally Find would use go/packages, but go/packages imports this package for the export data parser (not for Find itself), so we have to make do with an explicit go command invocation. Marked both Find and NewImporter deprecated: using go/packages will be faster for nearly all use cases, because it can gather info about multiple packages in a single go command invocation. They are essentially unused anyway. Removed the file name print from the example because the file may be in the cache, in which case it will not be named "fmt.a". Change-Id: I7940c90e230b22df9dcbfc8103a69a2d18df3bb0 Reviewed-on: https://go-review.googlesource.com/c/tools/+/310515 Reviewed-by: Alan Donovan TryBot-Result: Gopher Robot gopls-CI: kokoro Run-TryBot: Russ Cox Auto-Submit: Russ Cox --- go/gcexportdata/example_test.go | 2 -- go/gcexportdata/gcexportdata.go | 20 ++++++++++++++++++-- go/gcexportdata/importer.go | 3 +++ 3 files changed, 21 insertions(+), 4 deletions(-) diff --git a/go/gcexportdata/example_test.go b/go/gcexportdata/example_test.go index e81e705b1c4..cdd68f49c53 100644 --- a/go/gcexportdata/example_test.go +++ b/go/gcexportdata/example_test.go @@ -30,7 +30,6 @@ func ExampleRead() { log.Fatalf("can't find export data for fmt") } fmt.Printf("Package path: %s\n", path) - fmt.Printf("Export data: %s\n", filepath.Base(filename)) // Open and read the file. f, err := os.Open(filename) @@ -80,7 +79,6 @@ func ExampleRead() { // Output: // // Package path: fmt - // Export data: fmt.a // Package members: Println found // Println type: func(a ...any) (n int, err error) // Println location: $GOROOT/src/fmt/print.go:123:1 diff --git a/go/gcexportdata/gcexportdata.go b/go/gcexportdata/gcexportdata.go index d50826dbf7e..ddc276cfbcb 100644 --- a/go/gcexportdata/gcexportdata.go +++ b/go/gcexportdata/gcexportdata.go @@ -22,26 +22,42 @@ package gcexportdata // import "golang.org/x/tools/go/gcexportdata" import ( "bufio" "bytes" + "encoding/json" "fmt" "go/token" "go/types" "io" "io/ioutil" + "os/exec" "golang.org/x/tools/go/internal/gcimporter" ) // Find returns the name of an object (.o) or archive (.a) file // containing type information for the specified import path, -// using the workspace layout conventions of go/build. +// using the go command. // If no file was found, an empty filename is returned. // // A relative srcDir is interpreted relative to the current working directory. // // Find also returns the package's resolved (canonical) import path, // reflecting the effects of srcDir and vendoring on importPath. +// +// Deprecated: Use the higher-level API in golang.org/x/tools/go/packages, +// which is more efficient. 
func Find(importPath, srcDir string) (filename, path string) { - return gcimporter.FindPkg(importPath, srcDir) + cmd := exec.Command("go", "list", "-json", "-export", "--", importPath) + cmd.Dir = srcDir + out, err := cmd.CombinedOutput() + if err != nil { + return "", "" + } + var data struct { + ImportPath string + Export string + } + json.Unmarshal(out, &data) + return data.Export, data.ImportPath } // NewReader returns a reader for the export data section of an object diff --git a/go/gcexportdata/importer.go b/go/gcexportdata/importer.go index fe6ed93215c..37a7247e268 100644 --- a/go/gcexportdata/importer.go +++ b/go/gcexportdata/importer.go @@ -22,6 +22,9 @@ import ( // version-skew problems described in the documentation of this package, // or to control the FileSet or access the imports map populated during // package loading. +// +// Deprecated: Use the higher-level API in golang.org/x/tools/go/packages, +// which is more efficient. func NewImporter(fset *token.FileSet, imports map[string]*types.Package) types.ImporterFrom { return importer{fset, imports} } From e5b33249972ac93113ee6a34ee08f8096d06271c Mon Sep 17 00:00:00 2001 From: Suzy Mueller Date: Tue, 14 Jun 2022 15:18:07 -0400 Subject: [PATCH 055/136] internal/lsp: add InlayHint regtests Add regtests for inlay hints to verify we can turn on and off different inlay hints. Change-Id: Id88450c40c048b6c2544d22a0d3eadb57b70a723 Reviewed-on: https://go-review.googlesource.com/c/tools/+/411911 Reviewed-by: Robert Findley Reviewed-by: Jamal Carvalho gopls-CI: kokoro TryBot-Result: Gopher Robot Run-TryBot: Suzy Mueller --- .../regtest/inlayHints/inlayHints_test.go | 72 +++++++++++++++++++ internal/lsp/fake/editor.go | 21 ++++++ internal/lsp/regtest/wrappers.go | 11 +++ 3 files changed, 104 insertions(+) create mode 100644 gopls/internal/regtest/inlayHints/inlayHints_test.go diff --git a/gopls/internal/regtest/inlayHints/inlayHints_test.go b/gopls/internal/regtest/inlayHints/inlayHints_test.go new file mode 100644 index 00000000000..67931fbdc83 --- /dev/null +++ b/gopls/internal/regtest/inlayHints/inlayHints_test.go @@ -0,0 +1,72 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package inlayHint + +import ( + "testing" + + "golang.org/x/tools/gopls/internal/hooks" + "golang.org/x/tools/internal/lsp/bug" + . "golang.org/x/tools/internal/lsp/regtest" + "golang.org/x/tools/internal/lsp/source" + "golang.org/x/tools/internal/testenv" +) + +func TestMain(m *testing.M) { + bug.PanicOnBugs = true + Main(m, hooks.Options) +} +func TestEnablingInlayHints(t *testing.T) { + testenv.NeedsGo1Point(t, 14) // Test fails on 1.13. 
+ const workspace = ` +-- go.mod -- +module inlayHint.test +go 1.12 +-- lib.go -- +package lib +type Number int +const ( + Zero Number = iota + One + Two +) +` + tests := []struct { + label string + enabled map[string]bool + wantInlayHint bool + }{ + { + label: "default", + wantInlayHint: false, + }, + { + label: "enable const", + enabled: map[string]bool{source.ConstantValues: true}, + wantInlayHint: true, + }, + { + label: "enable parameter names", + enabled: map[string]bool{source.ParameterNames: true}, + wantInlayHint: false, + }, + } + for _, test := range tests { + t.Run(test.label, func(t *testing.T) { + WithOptions( + EditorConfig{ + Settings: map[string]interface{}{ + "hints": test.enabled, + }, + }, + ).Run(t, workspace, func(t *testing.T, env *Env) { + env.OpenFile("lib.go") + lens := env.InlayHints("lib.go") + if gotInlayHint := len(lens) > 0; gotInlayHint != test.wantInlayHint { + t.Errorf("got inlayHint: %t, want %t", gotInlayHint, test.wantInlayHint) + } + }) + }) + } +} diff --git a/internal/lsp/fake/editor.go b/internal/lsp/fake/editor.go index 06b90bb84e5..0fc99a04982 100644 --- a/internal/lsp/fake/editor.go +++ b/internal/lsp/fake/editor.go @@ -1114,6 +1114,27 @@ func (e *Editor) Symbols(ctx context.Context, sym string) ([]protocol.SymbolInfo return ans, err } +// CodeLens executes a codelens request on the server. +func (e *Editor) InlayHint(ctx context.Context, path string) ([]protocol.InlayHint, error) { + if e.Server == nil { + return nil, nil + } + e.mu.Lock() + _, ok := e.buffers[path] + e.mu.Unlock() + if !ok { + return nil, fmt.Errorf("buffer %q is not open", path) + } + params := &protocol.InlayHintParams{ + TextDocument: e.textDocumentIdentifier(path), + } + hints, err := e.Server.InlayHint(ctx, params) + if err != nil { + return nil, err + } + return hints, nil +} + // References executes a reference request on the server. func (e *Editor) References(ctx context.Context, path string, pos Pos) ([]protocol.Location, error) { if e.Server == nil { diff --git a/internal/lsp/regtest/wrappers.go b/internal/lsp/regtest/wrappers.go index 9031e71f1f1..96e2de96271 100644 --- a/internal/lsp/regtest/wrappers.go +++ b/internal/lsp/regtest/wrappers.go @@ -358,6 +358,17 @@ func (e *Env) ExecuteCommand(params *protocol.ExecuteCommandParams, result inter } } +// InlayHints calls textDocument/inlayHints for the given path, calling t.Fatal on +// any error. 
+func (e *Env) InlayHints(path string) []protocol.InlayHint { + e.T.Helper() + hints, err := e.Editor.InlayHint(e.Ctx, path) + if err != nil { + e.T.Fatal(err) + } + return hints +} + // WorkspaceSymbol calls workspace/symbol func (e *Env) WorkspaceSymbol(sym string) []protocol.SymbolInformation { e.T.Helper() From 0248714391a4231dea84e35fc04f8b65a609821e Mon Sep 17 00:00:00 2001 From: Robert Findley Date: Tue, 28 Jun 2022 14:50:14 -0400 Subject: [PATCH 056/136] internal/lsp: add additional instrumentation around package loading Add some additional logging to help debug golang/go#53586 For golang/go#53586 Change-Id: I0574fb01be47b265cd6e412855794bc2cb836dff Reviewed-on: https://go-review.googlesource.com/c/tools/+/414854 gopls-CI: kokoro Reviewed-by: Alan Donovan TryBot-Result: Gopher Robot Run-TryBot: Robert Findley Auto-Submit: Robert Findley --- internal/lsp/cache/load.go | 14 +++++++++++--- internal/lsp/source/completion/completion.go | 2 +- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/internal/lsp/cache/load.go b/internal/lsp/cache/load.go index db9a06d4dee..da0b246c54f 100644 --- a/internal/lsp/cache/load.go +++ b/internal/lsp/cache/load.go @@ -15,6 +15,7 @@ import ( "path/filepath" "sort" "strings" + "sync/atomic" "time" "golang.org/x/tools/go/packages" @@ -28,12 +29,17 @@ import ( "golang.org/x/tools/internal/span" ) +var loadID uint64 // atomic identifier for loads + // load calls packages.Load for the given scopes, updating package metadata, // import graph, and mapped files with the result. // // The resulting error may wrap the moduleErrorMap error type, representing // errors associated with specific modules. func (s *snapshot) load(ctx context.Context, allowNetwork bool, scopes ...interface{}) (err error) { + id := atomic.AddUint64(&loadID, 1) + eventName := fmt.Sprintf("go/packages.Load #%d", id) // unique name for logging + var query []string var containsDir bool // for logging @@ -138,9 +144,9 @@ func (s *snapshot) load(ctx context.Context, allowNetwork bool, scopes ...interf return ctx.Err() } if err != nil { - event.Error(ctx, "go/packages.Load", err, tag.Snapshot.Of(s.ID()), tag.Directory.Of(cfg.Dir), tag.Query.Of(query), tag.PackageCount.Of(len(pkgs))) + event.Error(ctx, eventName, err, tag.Snapshot.Of(s.ID()), tag.Directory.Of(cfg.Dir), tag.Query.Of(query), tag.PackageCount.Of(len(pkgs))) } else { - event.Log(ctx, "go/packages.Load", tag.Snapshot.Of(s.ID()), tag.Directory.Of(cfg.Dir), tag.Query.Of(query), tag.PackageCount.Of(len(pkgs))) + event.Log(ctx, eventName, tag.Snapshot.Of(s.ID()), tag.Directory.Of(cfg.Dir), tag.Query.Of(query), tag.PackageCount.Of(len(pkgs))) } if len(pkgs) == 0 { if err == nil { @@ -168,7 +174,7 @@ func (s *snapshot) load(ctx context.Context, allowNetwork bool, scopes ...interf } if !containsDir || s.view.Options().VerboseOutput { - event.Log(ctx, "go/packages.Load", + event.Log(ctx, eventName, tag.Snapshot.Of(s.ID()), tag.Package.Of(pkg.ID), tag.Files.Of(pkg.CompiledGoFiles)) @@ -209,6 +215,8 @@ func (s *snapshot) load(ctx context.Context, allowNetwork bool, scopes ...interf loadedIDs = append(loadedIDs, id) } + event.Log(ctx, fmt.Sprintf("%s: updating metadata for %d packages", eventName, len(updates))) + s.mu.Lock() // invalidate the reverse transitive closure of packages that have changed. 
diff --git a/internal/lsp/source/completion/completion.go b/internal/lsp/source/completion/completion.go index bb1c68d2238..0c1ff3f21b0 100644 --- a/internal/lsp/source/completion/completion.go +++ b/internal/lsp/source/completion/completion.go @@ -441,7 +441,7 @@ func Completion(ctx context.Context, snapshot source.Snapshot, fh source.FileHan items, surrounding, innerErr := packageClauseCompletions(ctx, snapshot, fh, protoPos) if innerErr != nil { // return the error for GetParsedFile since it's more relevant in this situation. - return nil, nil, fmt.Errorf("getting file for Completion: %w (package completions: %v)", err, innerErr) + return nil, nil, fmt.Errorf("getting file %s for Completion: %w (package completions: %v)", fh.URI(), err, innerErr) } return items, surrounding, nil } From 7743d1d949f1006ad12b190d68996acafc84d1d6 Mon Sep 17 00:00:00 2001 From: Suzy Mueller Date: Wed, 15 Jun 2022 15:43:22 -0400 Subject: [PATCH 057/136] internal/lsp: respect range for inlay hints This is an optimization to avoid calculating inlayhints that are not in the requested range. Change-Id: I311f297d2998ae7d0db822eac540b1c12cae6e23 Reviewed-on: https://go-review.googlesource.com/c/tools/+/412455 gopls-CI: kokoro TryBot-Result: Gopher Robot Run-TryBot: Suzy Mueller Reviewed-by: Jamal Carvalho --- internal/lsp/source/inlay_hint.go | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/internal/lsp/source/inlay_hint.go b/internal/lsp/source/inlay_hint.go index 0c147283532..009cc52fdd5 100644 --- a/internal/lsp/source/inlay_hint.go +++ b/internal/lsp/source/inlay_hint.go @@ -104,7 +104,7 @@ var AllInlayHints = map[string]*Hint{ }, } -func InlayHint(ctx context.Context, snapshot Snapshot, fh FileHandle, _ protocol.Range) ([]protocol.InlayHint, error) { +func InlayHint(ctx context.Context, snapshot Snapshot, fh FileHandle, pRng protocol.Range) ([]protocol.InlayHint, error) { ctx, done := event.Start(ctx, "source.InlayHint") defer done() @@ -132,8 +132,23 @@ func InlayHint(ctx context.Context, snapshot Snapshot, fh FileHandle, _ protocol info := pkg.GetTypesInfo() q := Qualifier(pgf.File, pkg.GetTypes(), info) + // Set the range to the full file if the range is not valid. + start, end := pgf.File.Pos(), pgf.File.End() + if pRng.Start.Line < pRng.End.Line || pRng.Start.Character < pRng.End.Character { + // Adjust start and end for the specified range. + rng, err := pgf.Mapper.RangeToSpanRange(pRng) + if err != nil { + return nil, err + } + start, end = rng.Start, rng.End + } + var hints []protocol.InlayHint ast.Inspect(pgf.File, func(node ast.Node) bool { + // If not in range, we can stop looking. + if node == nil || node.End() < start || node.Pos() > end { + return false + } for _, fn := range enabledHints { hints = append(hints, fn(node, tmap, info, &q)...) } From c10541a14b3e5db8a9ae9fad7640035745288ab3 Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Wed, 29 Jun 2022 11:50:37 -0400 Subject: [PATCH 058/136] go/analysis/passes/fieldalignment: document "false sharing" ...and don't claim that the most compact field order is optimal. The exception is relatively obscure, but the fact that it exists is important because it means it is not safe to apply the code transformation unconditionally. 
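For context, a small illustration of that exception (invented types; the 64-byte cache line is an assumption and varies by CPU): the compact layout below is what a purely size-driven ordering favors, while the padded variant spends 56 extra bytes to keep two write-hot counters on separate cache lines.

    package main

    import (
        "fmt"
        "unsafe"
    )

    // compact is the size-optimal layout: a and b are adjacent and will
    // normally share one 64-byte cache line, so a goroutine writing a and
    // another writing b invalidate each other's cached copy on every
    // store ("false sharing").
    type compact struct {
        a uint64
        b uint64
    }

    // padded wastes 56 bytes so that a and b land on different cache
    // lines; concurrent writers no longer contend.
    type padded struct {
        a uint64
        _ [56]byte // assumes a 64-byte cache line
        b uint64
    }

    func main() {
        var c compact
        var p padded
        fmt.Println(unsafe.Sizeof(c), unsafe.Offsetof(c.b)) // 16 8
        fmt.Println(unsafe.Sizeof(p), unsafe.Offsetof(p.b)) // 72 64
    }

Which layout is better depends on runtime behavior the analyzer cannot see, which is why the reordering remains a suggestion rather than an automatic rewrite.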
Change-Id: I391fbc1872b578d5340dd7c8fded48be30b820e0 Reviewed-on: https://go-review.googlesource.com/c/tools/+/415057 Reviewed-by: Robert Findley --- go/analysis/passes/fieldalignment/fieldalignment.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/go/analysis/passes/fieldalignment/fieldalignment.go b/go/analysis/passes/fieldalignment/fieldalignment.go index 78afe94ab30..aff663046a3 100644 --- a/go/analysis/passes/fieldalignment/fieldalignment.go +++ b/go/analysis/passes/fieldalignment/fieldalignment.go @@ -23,7 +23,7 @@ import ( const Doc = `find structs that would use less memory if their fields were sorted This analyzer find structs that can be rearranged to use less memory, and provides -a suggested edit with the optimal order. +a suggested edit with the most compact order. Note that there are two different diagnostics reported. One checks struct size, and the other reports "pointer bytes" used. Pointer bytes is how many bytes of the @@ -41,6 +41,11 @@ has 24 pointer bytes because it has to scan further through the *uint32. struct { string; uint32 } has 8 because it can stop immediately after the string pointer. + +Be aware that the most compact order is not always the most efficient. +In rare cases it may cause two variables each updated by its own goroutine +to occupy the same CPU cache line, inducing a form of memory contention +known as "false sharing" that slows down both goroutines. ` var Analyzer = &analysis.Analyzer{ From b84d509d6ffee06a6a8d82fd218009fc7f548e76 Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Wed, 29 Jun 2022 13:02:25 -0400 Subject: [PATCH 059/136] gopls/doc: regenerate documentation This change should have been included in https://go-review.googlesource.com/c/tools/+/415057 but I hastily submitted it without a CI run thinking "how can a doc only change break something?". Well now I know. Sorry. :( Change-Id: Ib0fd25fddd7f9580961b44dcad032d4851684f63 Reviewed-on: https://go-review.googlesource.com/c/tools/+/415058 Reviewed-by: Dmitri Shuralyov Run-TryBot: Alan Donovan gopls-CI: kokoro TryBot-Result: Gopher Robot Reviewed-by: Robert Findley Reviewed-by: Bryan Mills Auto-Submit: Bryan Mills --- gopls/doc/analyzers.md | 7 ++++++- internal/lsp/source/api_json.go | 4 ++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/gopls/doc/analyzers.md b/gopls/doc/analyzers.md index f5c83d5771d..fd65c3a2a9d 100644 --- a/gopls/doc/analyzers.md +++ b/gopls/doc/analyzers.md @@ -131,7 +131,7 @@ of the second argument is not a pointer to a type implementing error. find structs that would use less memory if their fields were sorted This analyzer find structs that can be rearranged to use less memory, and provides -a suggested edit with the optimal order. +a suggested edit with the most compact order. Note that there are two different diagnostics reported. One checks struct size, and the other reports "pointer bytes" used. Pointer bytes is how many bytes of the @@ -150,6 +150,11 @@ has 24 pointer bytes because it has to scan further through the *uint32. has 8 because it can stop immediately after the string pointer. +Be aware that the most compact order is not always the most efficient. +In rare cases it may cause two variables each updated by its own goroutine +to occupy the same CPU cache line, inducing a form of memory contention +known as "false sharing" that slows down both goroutines. + **Disabled by default. 
Enable it by setting `"analyses": {"fieldalignment": true}`.** diff --git a/internal/lsp/source/api_json.go b/internal/lsp/source/api_json.go index ef683f31a01..4e2183cf4e6 100755 --- a/internal/lsp/source/api_json.go +++ b/internal/lsp/source/api_json.go @@ -280,7 +280,7 @@ var GeneratedAPIJSON = &APIJSON{ }, { Name: "\"fieldalignment\"", - Doc: "find structs that would use less memory if their fields were sorted\n\nThis analyzer find structs that can be rearranged to use less memory, and provides\na suggested edit with the optimal order.\n\nNote that there are two different diagnostics reported. One checks struct size,\nand the other reports \"pointer bytes\" used. Pointer bytes is how many bytes of the\nobject that the garbage collector has to potentially scan for pointers, for example:\n\n\tstruct { uint32; string }\n\nhave 16 pointer bytes because the garbage collector has to scan up through the string's\ninner pointer.\n\n\tstruct { string; *uint32 }\n\nhas 24 pointer bytes because it has to scan further through the *uint32.\n\n\tstruct { string; uint32 }\n\nhas 8 because it can stop immediately after the string pointer.\n", + Doc: "find structs that would use less memory if their fields were sorted\n\nThis analyzer find structs that can be rearranged to use less memory, and provides\na suggested edit with the most compact order.\n\nNote that there are two different diagnostics reported. One checks struct size,\nand the other reports \"pointer bytes\" used. Pointer bytes is how many bytes of the\nobject that the garbage collector has to potentially scan for pointers, for example:\n\n\tstruct { uint32; string }\n\nhave 16 pointer bytes because the garbage collector has to scan up through the string's\ninner pointer.\n\n\tstruct { string; *uint32 }\n\nhas 24 pointer bytes because it has to scan further through the *uint32.\n\n\tstruct { string; uint32 }\n\nhas 8 because it can stop immediately after the string pointer.\n\nBe aware that the most compact order is not always the most efficient.\nIn rare cases it may cause two variables each updated by its own goroutine\nto occupy the same CPU cache line, inducing a form of memory contention\nknown as \"false sharing\" that slows down both goroutines.\n", Default: "false", }, { @@ -866,7 +866,7 @@ var GeneratedAPIJSON = &APIJSON{ }, { Name: "fieldalignment", - Doc: "find structs that would use less memory if their fields were sorted\n\nThis analyzer find structs that can be rearranged to use less memory, and provides\na suggested edit with the optimal order.\n\nNote that there are two different diagnostics reported. One checks struct size,\nand the other reports \"pointer bytes\" used. Pointer bytes is how many bytes of the\nobject that the garbage collector has to potentially scan for pointers, for example:\n\n\tstruct { uint32; string }\n\nhave 16 pointer bytes because the garbage collector has to scan up through the string's\ninner pointer.\n\n\tstruct { string; *uint32 }\n\nhas 24 pointer bytes because it has to scan further through the *uint32.\n\n\tstruct { string; uint32 }\n\nhas 8 because it can stop immediately after the string pointer.\n", + Doc: "find structs that would use less memory if their fields were sorted\n\nThis analyzer find structs that can be rearranged to use less memory, and provides\na suggested edit with the most compact order.\n\nNote that there are two different diagnostics reported. One checks struct size,\nand the other reports \"pointer bytes\" used. 
Pointer bytes is how many bytes of the\nobject that the garbage collector has to potentially scan for pointers, for example:\n\n\tstruct { uint32; string }\n\nhave 16 pointer bytes because the garbage collector has to scan up through the string's\ninner pointer.\n\n\tstruct { string; *uint32 }\n\nhas 24 pointer bytes because it has to scan further through the *uint32.\n\n\tstruct { string; uint32 }\n\nhas 8 because it can stop immediately after the string pointer.\n\nBe aware that the most compact order is not always the most efficient.\nIn rare cases it may cause two variables each updated by its own goroutine\nto occupy the same CPU cache line, inducing a form of memory contention\nknown as \"false sharing\" that slows down both goroutines.\n", }, { Name: "httpresponse", From 1a196f04970c04bea7d88492e2618c03b60d4789 Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Thu, 30 Jun 2022 10:50:45 -0400 Subject: [PATCH 060/136] internal/lsp/cache: don't build symbol info for non-Go files Our symbol query only searches Go files, so there is no point to building (broken) symbol information for non-Go files. Doing so introduces a lot more symbol handles that need to be tracked and walked. Change-Id: I96dd62766d079805fcb1d16eb361adfc0c31eea1 Reviewed-on: https://go-review.googlesource.com/c/tools/+/415199 Reviewed-by: Alan Donovan TryBot-Result: Gopher Robot Run-TryBot: Robert Findley gopls-CI: kokoro --- internal/lsp/cache/snapshot.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/internal/lsp/cache/snapshot.go b/internal/lsp/cache/snapshot.go index 8194750b331..e94ad7ba09c 100644 --- a/internal/lsp/cache/snapshot.go +++ b/internal/lsp/cache/snapshot.go @@ -1010,6 +1010,10 @@ func (s *snapshot) Symbols(ctx context.Context) map[span.URI][]source.Symbol { result = make(map[span.URI][]source.Symbol) ) s.files.Range(func(uri span.URI, f source.VersionedFileHandle) { + if s.View().FileKind(f) != source.Go { + return // workspace symbols currently supports only Go files. + } + // TODO(adonovan): upgrade errgroup and use group.SetLimit(nprocs). iolimit <- struct{}{} // acquire token group.Go(func() error { From 8865782bc0d76401a05a438cccb05486ca1e5c62 Mon Sep 17 00:00:00 2001 From: Suzy Mueller Date: Tue, 28 Jun 2022 15:44:45 -0400 Subject: [PATCH 061/136] internal/lsp: add text edits for unkeyed literals Add text edits that a user can accept to make the unkeyed composite literals keyed from the inlay hints. The text edits modify all of the unkeyed fields in a composite literal, since a mixture of keyed and unkeyed fields are not allowed. Change-Id: I0683fbaa5e22bc004b91c98fc09e495e797826ee Reviewed-on: https://go-review.googlesource.com/c/tools/+/414855 TryBot-Result: Gopher Robot gopls-CI: kokoro Reviewed-by: Jamal Carvalho Run-TryBot: Suzy Mueller --- internal/lsp/protocol/tsprotocol.go | 8 ++++++++ internal/lsp/source/inlay_hint.go | 11 ++++++++++- 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/internal/lsp/protocol/tsprotocol.go b/internal/lsp/protocol/tsprotocol.go index 5dd3d09e188..3a284bf45a6 100644 --- a/internal/lsp/protocol/tsprotocol.go +++ b/internal/lsp/protocol/tsprotocol.go @@ -2744,6 +2744,14 @@ type InlayHint = struct { * should fall back to a reasonable default. */ Kind InlayHintKind `json:"kind,omitempty"` + /** + * Optional text edits that are performed when accepting this inlay hint. 
+ * + * *Note* that edits are expected to change the document so that the inlay + * hint (or its nearest variant) is now part of the document and the inlay + * hint itself is now obsolete. + */ + TextEdits []TextEdit `json:"textEdits,omitempty"` /** * The tooltip text when you hover over this item. */ diff --git a/internal/lsp/source/inlay_hint.go b/internal/lsp/source/inlay_hint.go index 009cc52fdd5..967752b5c51 100644 --- a/internal/lsp/source/inlay_hint.go +++ b/internal/lsp/source/inlay_hint.go @@ -344,7 +344,7 @@ func compositeLiteralFields(node ast.Node, tmap *lsppos.TokenMapper, info *types } var hints []protocol.InlayHint - + var allEdits []protocol.TextEdit for i, v := range compLit.Elts { if _, ok := v.(*ast.KeyValueExpr); !ok { start, ok := tmap.Position(v.Pos()) @@ -360,8 +360,17 @@ func compositeLiteralFields(node ast.Node, tmap *lsppos.TokenMapper, info *types Kind: protocol.Parameter, PaddingRight: true, }) + allEdits = append(allEdits, protocol.TextEdit{ + Range: protocol.Range{Start: start, End: start}, + NewText: strct.Field(i).Name() + ": ", + }) } } + // It is not allowed to have a mix of keyed and unkeyed fields, so + // have the text edits add keys to all fields. + for i := range hints { + hints[i].TextEdits = allEdits + } return hints } From 8314b7aa0d97fe43e050787d55308e25f265bc63 Mon Sep 17 00:00:00 2001 From: Suzy Mueller Date: Mon, 27 Jun 2022 18:15:05 -0400 Subject: [PATCH 062/136] go/analysis: add suggested fix for unkeyed composite literals Include a suggested fix with the diagnostic for unkeyed composite literals. This suggested fix will add the name of each of the fields. For golang/go#53062 Change-Id: I0c33191ff3cf66c95a9a055848274cc2b0c38224 Reviewed-on: https://go-review.googlesource.com/c/tools/+/414674 gopls-CI: kokoro TryBot-Result: Gopher Robot Reviewed-by: Alan Donovan Run-TryBot: Suzy Mueller --- go/analysis/passes/composite/composite.go | 41 ++++- .../passes/composite/composite_test.go | 2 +- .../passes/composite/testdata/src/a/a.go | 17 +++ .../composite/testdata/src/a/a.go.golden | 144 ++++++++++++++++++ .../testdata/src/a/a_fuzz_test.go.golden | 16 ++ .../testdata/src/typeparams/typeparams.go | 10 +- .../src/typeparams/typeparams.go.golden | 27 ++++ 7 files changed, 245 insertions(+), 12 deletions(-) create mode 100644 go/analysis/passes/composite/testdata/src/a/a.go.golden create mode 100644 go/analysis/passes/composite/testdata/src/a/a_fuzz_test.go.golden create mode 100644 go/analysis/passes/composite/testdata/src/typeparams/typeparams.go.golden diff --git a/go/analysis/passes/composite/composite.go b/go/analysis/passes/composite/composite.go index d3670aca97a..64e184d3439 100644 --- a/go/analysis/passes/composite/composite.go +++ b/go/analysis/passes/composite/composite.go @@ -7,6 +7,7 @@ package composite import ( + "fmt" "go/ast" "go/types" "strings" @@ -83,7 +84,8 @@ func run(pass *analysis.Pass) (interface{}, error) { } for _, typ := range structuralTypes { under := deref(typ.Underlying()) - if _, ok := under.(*types.Struct); !ok { + strct, ok := under.(*types.Struct) + if !ok { // skip non-struct composite literals continue } @@ -92,20 +94,47 @@ func run(pass *analysis.Pass) (interface{}, error) { continue } - // check if the CompositeLit contains an unkeyed field + // check if the struct contains an unkeyed field allKeyValue := true - for _, e := range cl.Elts { + var suggestedFixAvailable = len(cl.Elts) == strct.NumFields() + var missingKeys []analysis.TextEdit + for i, e := range cl.Elts { if _, ok := e.(*ast.KeyValueExpr); !ok { 
allKeyValue = false - break + if i >= strct.NumFields() { + break + } + field := strct.Field(i) + if !field.Exported() { + // Adding unexported field names for structs not defined + // locally will not work. + suggestedFixAvailable = false + break + } + missingKeys = append(missingKeys, analysis.TextEdit{ + Pos: e.Pos(), + End: e.Pos(), + NewText: []byte(fmt.Sprintf("%s: ", field.Name())), + }) } } if allKeyValue { - // all the composite literal fields are keyed + // all the struct fields are keyed continue } - pass.ReportRangef(cl, "%s composite literal uses unkeyed fields", typeName) + diag := analysis.Diagnostic{ + Pos: cl.Pos(), + End: cl.End(), + Message: fmt.Sprintf("%s struct literal uses unkeyed fields", typeName), + } + if suggestedFixAvailable { + diag.SuggestedFixes = []analysis.SuggestedFix{{ + Message: "Add field names to struct literal", + TextEdits: missingKeys, + }} + } + pass.Report(diag) return } }) diff --git a/go/analysis/passes/composite/composite_test.go b/go/analysis/passes/composite/composite_test.go index 952de8bfdad..7afaaa7ffd4 100644 --- a/go/analysis/passes/composite/composite_test.go +++ b/go/analysis/passes/composite/composite_test.go @@ -18,5 +18,5 @@ func Test(t *testing.T) { if typeparams.Enabled { pkgs = append(pkgs, "typeparams") } - analysistest.Run(t, testdata, composite.Analyzer, pkgs...) + analysistest.RunWithSuggestedFixes(t, testdata, composite.Analyzer, pkgs...) } diff --git a/go/analysis/passes/composite/testdata/src/a/a.go b/go/analysis/passes/composite/testdata/src/a/a.go index 3a5bc203b03..cd69d395173 100644 --- a/go/analysis/passes/composite/testdata/src/a/a.go +++ b/go/analysis/passes/composite/testdata/src/a/a.go @@ -11,6 +11,7 @@ import ( "go/scanner" "go/token" "image" + "sync" "unicode" ) @@ -79,6 +80,18 @@ var badStructLiteral = flag.Flag{ // want "unkeyed fields" nil, // Value "DefValue", } +var tooManyFieldsStructLiteral = flag.Flag{ // want "unkeyed fields" + "Name", + "Usage", + nil, // Value + "DefValue", + "Extra Field", +} +var tooFewFieldsStructLiteral = flag.Flag{ // want "unkeyed fields" + "Name", + "Usage", + nil, // Value +} var delta [3]rune @@ -100,6 +113,10 @@ var badScannerErrorList = scanner.ErrorList{ &scanner.Error{token.Position{}, "foobar"}, // want "unkeyed fields" } +// sync.Mutex has unexported fields. We expect a diagnostic but no +// suggested fix. +var mu = sync.Mutex{0, 0} // want "unkeyed fields" + // Check whitelisted structs: if vet is run with --compositewhitelist=false, // this line triggers an error. var whitelistedPoint = image.Point{1, 2} diff --git a/go/analysis/passes/composite/testdata/src/a/a.go.golden b/go/analysis/passes/composite/testdata/src/a/a.go.golden new file mode 100644 index 00000000000..fe73a2e0a1d --- /dev/null +++ b/go/analysis/passes/composite/testdata/src/a/a.go.golden @@ -0,0 +1,144 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file contains the test for untagged struct literals. 
+ +package a + +import ( + "flag" + "go/scanner" + "go/token" + "image" + "sync" + "unicode" +) + +var Okay1 = []string{ + "Name", + "Usage", + "DefValue", +} + +var Okay2 = map[string]bool{ + "Name": true, + "Usage": true, + "DefValue": true, +} + +var Okay3 = struct { + X string + Y string + Z string +}{ + "Name", + "Usage", + "DefValue", +} + +var Okay4 = []struct { + A int + B int +}{ + {1, 2}, + {3, 4}, +} + +type MyStruct struct { + X string + Y string + Z string +} + +var Okay5 = &MyStruct{ + "Name", + "Usage", + "DefValue", +} + +var Okay6 = []MyStruct{ + {"foo", "bar", "baz"}, + {"aa", "bb", "cc"}, +} + +var Okay7 = []*MyStruct{ + {"foo", "bar", "baz"}, + {"aa", "bb", "cc"}, +} + +// Testing is awkward because we need to reference things from a separate package +// to trigger the warnings. + +var goodStructLiteral = flag.Flag{ + Name: "Name", + Usage: "Usage", +} +var badStructLiteral = flag.Flag{ // want "unkeyed fields" + Name: "Name", + Usage: "Usage", + Value: nil, // Value + DefValue: "DefValue", +} +var tooManyFieldsStructLiteral = flag.Flag{ // want "unkeyed fields" + "Name", + "Usage", + nil, // Value + "DefValue", + "Extra Field", +} +var tooFewFieldsStructLiteral = flag.Flag{ // want "unkeyed fields" + "Name", + "Usage", + nil, // Value +} + +var delta [3]rune + +// SpecialCase is a named slice of CaseRange to test issue 9171. +var goodNamedSliceLiteral = unicode.SpecialCase{ + {Lo: 1, Hi: 2, Delta: delta}, + unicode.CaseRange{Lo: 1, Hi: 2, Delta: delta}, +} +var badNamedSliceLiteral = unicode.SpecialCase{ + {Lo: 1, Hi: 2, Delta: delta}, // want "unkeyed fields" + unicode.CaseRange{Lo: 1, Hi: 2, Delta: delta}, // want "unkeyed fields" +} + +// ErrorList is a named slice, so no warnings should be emitted. +var goodScannerErrorList = scanner.ErrorList{ + &scanner.Error{Msg: "foobar"}, +} +var badScannerErrorList = scanner.ErrorList{ + &scanner.Error{Pos: token.Position{}, Msg: "foobar"}, // want "unkeyed fields" +} + +// sync.Mutex has unexported fields. We expect a diagnostic but no +// suggested fix. +var mu = sync.Mutex{0, 0} // want "unkeyed fields" + +// Check whitelisted structs: if vet is run with --compositewhitelist=false, +// this line triggers an error. +var whitelistedPoint = image.Point{1, 2} + +// Do not check type from unknown package. +// See issue 15408. +var unknownPkgVar = unicode.NoSuchType{"foo", "bar"} + +// A named pointer slice of CaseRange to test issue 23539. In +// particular, we're interested in how some slice elements omit their +// type. +var goodNamedPointerSliceLiteral = []*unicode.CaseRange{ + {Lo: 1, Hi: 2}, + &unicode.CaseRange{Lo: 1, Hi: 2}, +} +var badNamedPointerSliceLiteral = []*unicode.CaseRange{ + {Lo: 1, Hi: 2, Delta: delta}, // want "unkeyed fields" + &unicode.CaseRange{Lo: 1, Hi: 2, Delta: delta}, // want "unkeyed fields" +} + +// unicode.Range16 is whitelisted, so there'll be no vet error +var range16 = unicode.Range16{0xfdd0, 0xfdef, 1} + +// unicode.Range32 is whitelisted, so there'll be no vet error +var range32 = unicode.Range32{0x1fffe, 0x1ffff, 1} diff --git a/go/analysis/passes/composite/testdata/src/a/a_fuzz_test.go.golden b/go/analysis/passes/composite/testdata/src/a/a_fuzz_test.go.golden new file mode 100644 index 00000000000..20b652e88dd --- /dev/null +++ b/go/analysis/passes/composite/testdata/src/a/a_fuzz_test.go.golden @@ -0,0 +1,16 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build go1.18 +// +build go1.18 + +package a + +import "testing" + +var fuzzTargets = []testing.InternalFuzzTarget{ + {"Fuzz", Fuzz}, +} + +func Fuzz(f *testing.F) {} diff --git a/go/analysis/passes/composite/testdata/src/typeparams/typeparams.go b/go/analysis/passes/composite/testdata/src/typeparams/typeparams.go index dd5d57efed4..f9a5e1fb105 100644 --- a/go/analysis/passes/composite/testdata/src/typeparams/typeparams.go +++ b/go/analysis/passes/composite/testdata/src/typeparams/typeparams.go @@ -6,7 +6,7 @@ package typeparams import "typeparams/lib" -type localStruct struct { F int } +type localStruct struct{ F int } func F[ T1 ~struct{ f int }, @@ -20,8 +20,8 @@ func F[ _ = T1{2} _ = T2a{2} _ = T2b{2} // want "unkeyed fields" - _ = T3{1,2} - _ = T4{1,2} - _ = T5{1:2} - _ = T6{1:2} + _ = T3{1, 2} + _ = T4{1, 2} + _ = T5{1: 2} + _ = T6{1: 2} } diff --git a/go/analysis/passes/composite/testdata/src/typeparams/typeparams.go.golden b/go/analysis/passes/composite/testdata/src/typeparams/typeparams.go.golden new file mode 100644 index 00000000000..66cd9158cb6 --- /dev/null +++ b/go/analysis/passes/composite/testdata/src/typeparams/typeparams.go.golden @@ -0,0 +1,27 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeparams + +import "typeparams/lib" + +type localStruct struct{ F int } + +func F[ + T1 ~struct{ f int }, + T2a localStruct, + T2b lib.Struct, + T3 ~[]int, + T4 lib.Slice, + T5 ~map[int]int, + T6 lib.Map, +]() { + _ = T1{2} + _ = T2a{2} + _ = T2b{F: 2} // want "unkeyed fields" + _ = T3{1, 2} + _ = T4{1, 2} + _ = T5{1: 2} + _ = T6{1: 2} +} From e8e5b37084ab41357340419ff15cba5fb08af935 Mon Sep 17 00:00:00 2001 From: Rob Findley Date: Thu, 30 Jun 2022 14:01:50 -0400 Subject: [PATCH 063/136] internal/lsp/cache: don't construct a new metadata graph if no changes Change-Id: I3f074d1fd29cf7ad0323cec76154f9b2e31f7356 Reviewed-on: https://go-review.googlesource.com/c/tools/+/415494 Run-TryBot: Robert Findley gopls-CI: kokoro TryBot-Result: Gopher Robot Reviewed-by: Alan Donovan --- internal/lsp/cache/graph.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/internal/lsp/cache/graph.go b/internal/lsp/cache/graph.go index dc7d4faef78..ad39aa8d862 100644 --- a/internal/lsp/cache/graph.go +++ b/internal/lsp/cache/graph.go @@ -32,6 +32,10 @@ type metadataGraph struct { // Clone creates a new metadataGraph, applying the given updates to the // receiver. func (g *metadataGraph) Clone(updates map[PackageID]*KnownMetadata) *metadataGraph { + if len(updates) == 0 { + // Optimization: since the graph is immutable, we can return the receiver. + return g + } result := &metadataGraph{metadata: make(map[PackageID]*KnownMetadata, len(g.metadata))} // Copy metadata. for id, m := range g.metadata { From c77473fa95f1277e5bf9708de95d29e1ff350d34 Mon Sep 17 00:00:00 2001 From: Robert Findley Date: Thu, 30 Jun 2022 15:13:28 -0400 Subject: [PATCH 064/136] gopls: upgrade staticcheck to v0.3.2 Selectively upgrade only staticcheck, to pick up fixes for support to generic code. 
Change-Id: Ia625c4d46780139aa6e70447eebe1b6d476d4722 Reviewed-on: https://go-review.googlesource.com/c/tools/+/415495 TryBot-Result: Gopher Robot Reviewed-by: Alan Donovan gopls-CI: kokoro Run-TryBot: Robert Findley --- gopls/go.mod | 2 +- gopls/go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/gopls/go.mod b/gopls/go.mod index 5dc62d3df0a..bd118e226bb 100644 --- a/gopls/go.mod +++ b/gopls/go.mod @@ -11,7 +11,7 @@ require ( golang.org/x/sys v0.0.0-20220209214540-3681064d5158 golang.org/x/tools v0.1.11-0.20220523181440-ccb10502d1a5 golang.org/x/vuln v0.0.0-20220613164644-4eb5ba49563c - honnef.co/go/tools v0.3.0 + honnef.co/go/tools v0.3.2 mvdan.cc/gofumpt v0.3.0 mvdan.cc/xurls/v2 v2.4.0 ) diff --git a/gopls/go.sum b/gopls/go.sum index 91f552ef905..73a55fbad82 100644 --- a/gopls/go.sum +++ b/gopls/go.sum @@ -89,6 +89,8 @@ gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.2.2/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= honnef.co/go/tools v0.3.0 h1:2LdYUZ7CIxnYgskbUZfY7FPggmqnh6shBqfWa8Tn3XU= honnef.co/go/tools v0.3.0/go.mod h1:vlRD9XErLMGT+mDuofSr0mMMquscM/1nQqtRSsh6m70= +honnef.co/go/tools v0.3.2 h1:ytYb4rOqyp1TSa2EPvNVwtPQJctSELKaMyLfqNP4+34= +honnef.co/go/tools v0.3.2/go.mod h1:jzwdWgg7Jdq75wlfblQxO4neNaFFSvgc1tD5Wv8U0Yw= mvdan.cc/gofumpt v0.3.0 h1:kTojdZo9AcEYbQYhGuLf/zszYthRdhDNDUi2JKTxas4= mvdan.cc/gofumpt v0.3.0/go.mod h1:0+VyGZWleeIj5oostkOex+nDBA0eyavuDnDusAJ8ylo= mvdan.cc/unparam v0.0.0-20211214103731-d0ef000c54e5 h1:Jh3LAeMt1eGpxomyu3jVkmVZWW2MxZ1qIIV2TZ/nRio= From 9358addbaa72dd292045f8ba280aba316ca8eff1 Mon Sep 17 00:00:00 2001 From: Robert Findley Date: Tue, 28 Jun 2022 20:25:04 -0400 Subject: [PATCH 065/136] internal/lsp/cache: remove unused function This function was actually left behind, after making suggested changes to CL 413683. Change-Id: I6933e870ded9da5af06724c28839c37d58fb4cdc Reviewed-on: https://go-review.googlesource.com/c/tools/+/414856 TryBot-Result: Gopher Robot Reviewed-by: Alan Donovan gopls-CI: kokoro Run-TryBot: Robert Findley --- internal/lsp/cache/graph.go | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/internal/lsp/cache/graph.go b/internal/lsp/cache/graph.go index ad39aa8d862..88c9f147195 100644 --- a/internal/lsp/cache/graph.go +++ b/internal/lsp/cache/graph.go @@ -158,18 +158,3 @@ func (g *metadataGraph) reverseTransitiveClosure(includeInvalid bool, ids ...Pac visitAll(ids) return seen } - -func collectReverseTransitiveClosure(g *metadataGraph, includeInvalid bool, ids []PackageID, seen map[PackageID]struct{}) { - for _, id := range ids { - if _, ok := seen[id]; ok { - continue - } - m := g.metadata[id] - // Only use invalid metadata if we support it. - if m == nil || !(m.Valid || includeInvalid) { - continue - } - seen[id] = struct{}{} - collectReverseTransitiveClosure(g, includeInvalid, g.importedBy[id], seen) - } -} From fa4babcd9abca1cbd669c8dd725f770ba4f75800 Mon Sep 17 00:00:00 2001 From: Ruslan Nigmatullin Date: Sun, 12 Jun 2022 04:22:04 +0000 Subject: [PATCH 066/136] internal/lsp/cache: use persistent map for storing packages in the snapshot This on average reduces latency from 25ms to 12ms on internal codebase. 
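
For illustration, a rough sketch of why the persistent map is cheaper
here (the string keys and values below are invented; the real map is
keyed by packageKey with *packageHandle values, as in maps.go). Clone
shares the underlying tree instead of copying every entry, so cloning
a snapshot no longer scales with the number of cached packages, and
only the invalidated keys are deleted afterwards. Since
internal/persistent is an internal package, this sketch only compiles
inside x/tools:

```
package main

import (
	"fmt"

	"golang.org/x/tools/internal/persistent"
)

func main() {
	// Keys are ordered by a caller-supplied comparison, as in newPackagesMap.
	m := persistent.NewMap(func(a, b interface{}) bool {
		return a.(string) < b.(string)
	})
	noop := func(key, value interface{}) {} // release callback; unused here
	m.Set("fmt", "handle-1", noop)
	m.Set("net/http", "handle-2", noop)

	clone := m.Clone() // O(1): shares structure with the original
	clone.Delete("fmt")

	_, inOriginal := m.Get("fmt")  // true: the original is unaffected
	_, inClone := clone.Get("fmt") // false
	fmt.Println(inOriginal, inClone)

	clone.Destroy()
	m.Destroy()
}
```
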
Updates golang/go#45686 Change-Id: I49c8f09f8e54b7b486d7ff7eb8f4ba9f0d90b278 Reviewed-on: https://go-review.googlesource.com/c/tools/+/413655 gopls-CI: kokoro Run-TryBot: Robert Findley TryBot-Result: Gopher Robot Reviewed-by: Robert Findley Reviewed-by: Alan Donovan --- internal/lsp/cache/check.go | 6 ++-- internal/lsp/cache/load.go | 4 +-- internal/lsp/cache/maps.go | 51 ++++++++++++++++++++++++++++++++++ internal/lsp/cache/session.go | 2 +- internal/lsp/cache/snapshot.go | 34 ++++++++++++----------- internal/lsp/source/view.go | 4 +++ 6 files changed, 79 insertions(+), 22 deletions(-) diff --git a/internal/lsp/cache/check.go b/internal/lsp/cache/check.go index f09fc298a98..797298a4192 100644 --- a/internal/lsp/cache/check.go +++ b/internal/lsp/cache/check.go @@ -108,7 +108,7 @@ func (s *snapshot) buildPackageHandle(ctx context.Context, id PackageID, mode so m := ph.m key := ph.key - h := s.generation.Bind(key, func(ctx context.Context, arg memoize.Arg) interface{} { + h, release := s.generation.GetHandle(key, func(ctx context.Context, arg memoize.Arg) interface{} { snapshot := arg.(*snapshot) // Begin loading the direct dependencies, in parallel. @@ -128,14 +128,14 @@ func (s *snapshot) buildPackageHandle(ctx context.Context, id PackageID, mode so wg.Wait() return data - }, nil) + }) ph.handle = h // Cache the handle in the snapshot. If a package handle has already // been cached, addPackage will return the cached value. This is fine, // since the original package handle above will have no references and be // garbage collected. - ph = s.addPackageHandle(ph) + ph = s.addPackageHandle(ph, release) return ph, nil } diff --git a/internal/lsp/cache/load.go b/internal/lsp/cache/load.go index da0b246c54f..3fb67a7a98b 100644 --- a/internal/lsp/cache/load.go +++ b/internal/lsp/cache/load.go @@ -228,9 +228,9 @@ func (s *snapshot) load(ctx context.Context, allowNetwork bool, scopes ...interf // TODO(rfindley): this should not be necessary, as we should have already // invalidated in snapshot.clone. 
for id := range invalidatedPackages { - for _, mode := range []source.ParseMode{source.ParseHeader, source.ParseExported, source.ParseFull} { + for _, mode := range source.AllParseModes { key := packageKey{mode, id} - delete(s.packages, key) + s.packages.Delete(key) } } diff --git a/internal/lsp/cache/maps.go b/internal/lsp/cache/maps.go index 91b0e77e87e..14026abd92b 100644 --- a/internal/lsp/cache/maps.go +++ b/internal/lsp/cache/maps.go @@ -155,3 +155,54 @@ func (m parseKeysByURIMap) Set(key span.URI, value []parseKey) { func (m parseKeysByURIMap) Delete(key span.URI) { m.impl.Delete(key) } + +type packagesMap struct { + impl *persistent.Map +} + +func newPackagesMap() packagesMap { + return packagesMap{ + impl: persistent.NewMap(func(a, b interface{}) bool { + left := a.(packageKey) + right := b.(packageKey) + if left.mode != right.mode { + return left.mode < right.mode + } + return left.id < right.id + }), + } +} + +func (m packagesMap) Clone() packagesMap { + return packagesMap{ + impl: m.impl.Clone(), + } +} + +func (m packagesMap) Destroy() { + m.impl.Destroy() +} + +func (m packagesMap) Get(key packageKey) (*packageHandle, bool) { + value, ok := m.impl.Get(key) + if !ok { + return nil, false + } + return value.(*packageHandle), true +} + +func (m packagesMap) Range(do func(key packageKey, value *packageHandle)) { + m.impl.Range(func(key, value interface{}) { + do(key.(packageKey), value.(*packageHandle)) + }) +} + +func (m packagesMap) Set(key packageKey, value *packageHandle, release func()) { + m.impl.Set(key, value, func(key, value interface{}) { + release() + }) +} + +func (m packagesMap) Delete(key packageKey) { + m.impl.Delete(key) +} diff --git a/internal/lsp/cache/session.go b/internal/lsp/cache/session.go index 4a7a5b2f4a6..8d8e63f13e8 100644 --- a/internal/lsp/cache/session.go +++ b/internal/lsp/cache/session.go @@ -231,7 +231,7 @@ func (s *Session) createView(ctx context.Context, name string, folder span.URI, cancel: cancel, initializeOnce: &sync.Once{}, generation: s.cache.store.Generation(generationName(v, 0)), - packages: make(map[packageKey]*packageHandle), + packages: newPackagesMap(), meta: &metadataGraph{}, files: newFilesMap(), goFiles: newGoFilesMap(), diff --git a/internal/lsp/cache/snapshot.go b/internal/lsp/cache/snapshot.go index e94ad7ba09c..39e958e0fc2 100644 --- a/internal/lsp/cache/snapshot.go +++ b/internal/lsp/cache/snapshot.go @@ -89,7 +89,7 @@ type snapshot struct { // packages maps a packageKey to a set of packageHandles to which that file belongs. // It may be invalidated when a file's content changes. - packages map[packageKey]*packageHandle + packages packagesMap // actions maps an actionkey to its actionHandle. actions map[actionKey]*actionHandle @@ -140,6 +140,7 @@ type actionKey struct { func (s *snapshot) Destroy(destroyedBy string) { s.generation.Destroy(destroyedBy) + s.packages.Destroy() s.files.Destroy() s.goFiles.Destroy() s.parseKeysByURI.Destroy() @@ -711,16 +712,17 @@ func (s *snapshot) getImportedBy(id PackageID) []PackageID { return s.meta.importedBy[id] } -func (s *snapshot) addPackageHandle(ph *packageHandle) *packageHandle { +func (s *snapshot) addPackageHandle(ph *packageHandle, release func()) *packageHandle { s.mu.Lock() defer s.mu.Unlock() // If the package handle has already been cached, // return the cached handle instead of overriding it. 
- if ph, ok := s.packages[ph.packageKey()]; ok { - return ph + if result, ok := s.packages.Get(ph.packageKey()); ok { + release() + return result } - s.packages[ph.packageKey()] = ph + s.packages.Set(ph.packageKey(), ph, release) return ph } @@ -1090,10 +1092,10 @@ func (s *snapshot) CachedImportPaths(ctx context.Context) (map[string]source.Pac defer s.mu.Unlock() results := map[string]source.Package{} - for _, ph := range s.packages { + s.packages.Range(func(key packageKey, ph *packageHandle) { cachedPkg, err := ph.cached(s.generation) if err != nil { - continue + return } for importPath, newPkg := range cachedPkg.imports { if oldPkg, ok := results[string(importPath)]; ok { @@ -1105,7 +1107,7 @@ func (s *snapshot) CachedImportPaths(ctx context.Context) (map[string]source.Pac results[string(importPath)] = newPkg } } - } + }) return results, nil } @@ -1134,7 +1136,8 @@ func (s *snapshot) getPackage(id PackageID, mode source.ParseMode) *packageHandl id: id, mode: mode, } - return s.packages[key] + ph, _ := s.packages.Get(key) + return ph } func (s *snapshot) getSymbolHandle(uri span.URI) *symbolHandle { @@ -1690,7 +1693,7 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC builtin: s.builtin, initializeOnce: s.initializeOnce, initializedErr: s.initializedErr, - packages: make(map[packageKey]*packageHandle, len(s.packages)), + packages: s.packages.Clone(), actions: make(map[actionKey]*actionHandle, len(s.actions)), files: s.files.Clone(), goFiles: s.goFiles.Clone(), @@ -1894,13 +1897,12 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC addRevDeps(id, invalidateMetadata) } - // Copy the package type information. - for k, v := range s.packages { - if _, ok := idsToInvalidate[k.id]; ok { - continue + // Delete invalidated package type information. + for id := range idsToInvalidate { + for _, mode := range source.AllParseModes { + key := packageKey{mode, id} + result.packages.Delete(key) } - newGen.Inherit(v.handle) - result.packages[k] = v } // Copy the package analysis information. diff --git a/internal/lsp/source/view.go b/internal/lsp/source/view.go index 73e1b7f89ed..98d11517d87 100644 --- a/internal/lsp/source/view.go +++ b/internal/lsp/source/view.go @@ -478,6 +478,10 @@ const ( ParseFull ) +// AllParseModes contains all possible values of ParseMode. +// It is used for cache invalidation on a file content change. +var AllParseModes = []ParseMode{ParseHeader, ParseExported, ParseFull} + // TypecheckMode controls what kind of parsing should be done (see ParseMode) // while type checking a package. type TypecheckMode int From 79fefdf61d2c7cb1291d58d68aab082144115b05 Mon Sep 17 00:00:00 2001 From: Ruslan Nigmatullin Date: Tue, 14 Jun 2022 18:14:38 +0000 Subject: [PATCH 067/136] internal/memoize: do not iterate all handles on generation destruction This allows reducing critical section of `g.store.mu` as the vast majority of entries do not rely on generation-based GC. 
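
The pattern in isolation, as a hedged sketch (the store/entry types
below are invented, not the memoize ones): keep a secondary set that
holds only the entries needing per-generation cleanup, so teardown
walks that small subset instead of every entry in the store.

```
package main

import "fmt"

type entry struct {
	key     string
	tracked bool // participates in generation-based GC
}

type store struct {
	entries map[string]*entry   // all entries
	tracked map[*entry]struct{} // only the generation-tracked subset
}

func (s *store) add(e *entry) {
	s.entries[e.key] = e
	if e.tracked {
		if s.tracked == nil {
			s.tracked = map[*entry]struct{}{}
		}
		s.tracked[e] = struct{}{}
	}
}

// destroyGeneration does work proportional to the tracked subset,
// not to len(s.entries), so the (elided) lock is held briefly.
func (s *store) destroyGeneration() {
	for e := range s.tracked {
		delete(s.entries, e.key)
		delete(s.tracked, e)
	}
}

func main() {
	s := &store{entries: map[string]*entry{}}
	s.add(&entry{key: "bound", tracked: true})
	s.add(&entry{key: "refcounted"})
	s.destroyGeneration()
	fmt.Println(len(s.entries)) // 1: the untracked entry survives
}
```
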
Change-Id: I985af0b38504ddedb22649290deac91797577b75 Reviewed-on: https://go-review.googlesource.com/c/tools/+/413656 Reviewed-by: David Chase Reviewed-by: Alan Donovan Run-TryBot: Robert Findley Reviewed-by: Robert Findley TryBot-Result: Gopher Robot gopls-CI: kokoro --- internal/memoize/memoize.go | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/internal/memoize/memoize.go b/internal/memoize/memoize.go index a758deeb7f8..7fa5340c5a3 100644 --- a/internal/memoize/memoize.go +++ b/internal/memoize/memoize.go @@ -33,6 +33,8 @@ var ( type Store struct { handlesMu sync.Mutex // lock ordering: Store.handlesMu before Handle.mu handles map[interface{}]*Handle + // handles which are bound to generations for GC purposes. + boundHandles map[*Handle]struct{} } // Generation creates a new Generation associated with s. Destroy must be @@ -71,10 +73,7 @@ func (g *Generation) Destroy(destroyedBy string) { g.store.handlesMu.Lock() defer g.store.handlesMu.Unlock() - for _, h := range g.store.handles { - if !h.trackGenerations { - continue - } + for h := range g.store.boundHandles { h.mu.Lock() if _, ok := h.generations[g]; ok { delete(h.generations, g) // delete even if it's dead, in case of dangling references to the entry. @@ -237,7 +236,11 @@ func (g *Generation) getHandle(key interface{}, function Function, cleanup func( trackGenerations: trackGenerations, } if trackGenerations { + if g.store.boundHandles == nil { + g.store.boundHandles = map[*Handle]struct{}{} + } h.generations = make(map[*Generation]struct{}, 1) + g.store.boundHandles[h] = struct{}{} } if g.store.handles == nil { @@ -302,6 +305,9 @@ func (h *Handle) destroy(store *Store) { h.cleanup(h.value) } delete(store.handles, h.key) + if h.trackGenerations { + delete(store.boundHandles, h) + } } func (h *Handle) incrementRef(g *Generation) { From 93bf1fcc7c91fd5ef6c2a85234ab9f6543936707 Mon Sep 17 00:00:00 2001 From: Davide Masserut Date: Sat, 25 Jun 2022 20:55:23 +0000 Subject: [PATCH 068/136] gopls: add range over channel postfix completion This adds a snippet that applies to variables of type chan. When used, it replaces `channel.range!` with the following snippet: ``` for e := range channel { | } ``` Where `|` indicates the location of the cursor. 
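
Under the hood this is one more entry in the postfix snippet table: a
text/template over completion metadata. A rough stand-alone sketch of
how such a body expands (tmplArgs is an invented stand-in; the real
template data in postfix_snippets.go has more fields and helpers):

```
package main

import (
	"os"
	"text/template"
)

// tmplArgs is a hypothetical stand-in for the real template data.
type tmplArgs struct {
	Kind     string // basic kind of the receiver, e.g. "chan"
	StmtOK   bool   // whether a statement is valid at the cursor
	X        string // the receiver expression, e.g. "foo"
	ElemType string // element type of the channel
}

func (tmplArgs) VarName(_ interface{}, fallback string) string { return fallback }
func (tmplArgs) Cursor() string                                { return "$0" }

const body = `{{if and (eq .Kind "chan") .StmtOK -}}
for {{.VarName .ElemType "e"}} := range {{.X}} {
	{{.Cursor}}
}
{{- end}}`

func main() {
	t := template.Must(template.New("range").Parse(body))
	// Prints the text that replaces "foo.range".
	t.Execute(os.Stdout, tmplArgs{Kind: "chan", StmtOK: true, X: "foo", ElemType: "int"})
}
```
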
Change-Id: I8b2f889b22b9f2c292041e5ca5f63c5d0ca98f11 GitHub-Last-Rev: 9cb894be80d0c5243a5e42779c3e96ba79aa66b5 GitHub-Pull-Request: golang/tools#386 Reviewed-on: https://go-review.googlesource.com/c/tools/+/414194 Reviewed-by: Hyang-Ah Hana Kim Reviewed-by: Robert Findley Run-TryBot: Robert Findley TryBot-Result: Gopher Robot gopls-CI: kokoro --- .../completion/postfix_snippet_test.go | 21 +++++++++++++++++++ .../lsp/source/completion/postfix_snippets.go | 8 +++++++ 2 files changed, 29 insertions(+) diff --git a/gopls/internal/regtest/completion/postfix_snippet_test.go b/gopls/internal/regtest/completion/postfix_snippet_test.go index 2674d555c5a..7e595aaad1e 100644 --- a/gopls/internal/regtest/completion/postfix_snippet_test.go +++ b/gopls/internal/regtest/completion/postfix_snippet_test.go @@ -264,6 +264,27 @@ for k := range foo { keys = append(keys, k) } +} +`, + }, + { + name: "channel_range", + before: ` +package foo + +func _() { + foo := make(chan int) + foo.range +} +`, + after: ` +package foo + +func _() { + foo := make(chan int) + for e := range foo { + $0 +} } `, }, diff --git a/internal/lsp/source/completion/postfix_snippets.go b/internal/lsp/source/completion/postfix_snippets.go index d7f0d90da9e..aa8454f8e98 100644 --- a/internal/lsp/source/completion/postfix_snippets.go +++ b/internal/lsp/source/completion/postfix_snippets.go @@ -149,6 +149,14 @@ for {{.VarName .KeyType "k"}}, {{.VarName .ElemType "v"}} := range {{.X}} { {{$keysVar}} = append({{$keysVar}}, {{$k}}) } {{end}}`, +}, { + label: "range", + details: "range over channel", + body: `{{if and (eq .Kind "chan") .StmtOK -}} +for {{.VarName .ElemType "e"}} := range {{.X}} { + {{.Cursor}} +} +{{- end}}`, }, { label: "var", details: "assign to variables", From ffc70b9ac150680da53ed53e8543fd0e938ce4f4 Mon Sep 17 00:00:00 2001 From: Muir Manders Date: Thu, 30 Jun 2022 19:44:46 -0700 Subject: [PATCH 069/136] lsp/completion: fix ranking of *types.PkgName candidates In Go 1.18 types.AssignableTo() started reporting that an invalid type is assignable to any interface. *types.PkgName (i.e. an import at the top of the file) has an invalid type for its Type(), so we started thinking all in scope imports were great candidates when the expected type was an interface. Fix by wrapping the AssignableTo (and AssertableTo) to explicitly return false if either operand is invalid. Updates golang/go#53595 Change-Id: Ie5a84b7f410ff5c73c6b7870e052bafaf3e21e99 Reviewed-on: https://go-review.googlesource.com/c/tools/+/415595 Reviewed-by: Hyang-Ah Hana Kim Reviewed-by: Robert Findley Run-TryBot: Robert Findley TryBot-Result: Gopher Robot gopls-CI: kokoro --- internal/lsp/source/completion/completion.go | 12 +++++------ internal/lsp/source/completion/util.go | 20 +++++++++++++++++++ internal/lsp/testdata/deep/deep.go | 7 +++++++ internal/lsp/testdata/summary.txt.golden | 2 +- .../lsp/testdata/summary_go1.18.txt.golden | 2 +- 5 files changed, 35 insertions(+), 8 deletions(-) diff --git a/internal/lsp/source/completion/completion.go b/internal/lsp/source/completion/completion.go index 0c1ff3f21b0..a2dfae69841 100644 --- a/internal/lsp/source/completion/completion.go +++ b/internal/lsp/source/completion/completion.go @@ -2314,7 +2314,7 @@ func (ci candidateInference) applyTypeNameModifiers(typ types.Type) types.Type { // matchesVariadic returns true if we are completing a variadic // parameter and candType is a compatible slice type. 
func (ci candidateInference) matchesVariadic(candType types.Type) bool { - return ci.variadic && ci.objType != nil && types.AssignableTo(candType, types.NewSlice(ci.objType)) + return ci.variadic && ci.objType != nil && assignableTo(candType, types.NewSlice(ci.objType)) } // findSwitchStmt returns an *ast.CaseClause's corresponding *ast.SwitchStmt or @@ -2640,7 +2640,7 @@ func (ci *candidateInference) candTypeMatches(cand *candidate) bool { return false } - if ci.convertibleTo != nil && types.ConvertibleTo(candType, ci.convertibleTo) { + if ci.convertibleTo != nil && convertibleTo(candType, ci.convertibleTo) { return true } @@ -2728,7 +2728,7 @@ func considerTypeConversion(from, to types.Type, path []types.Object) bool { return false } - if !types.ConvertibleTo(from, to) { + if !convertibleTo(from, to) { return false } @@ -2777,7 +2777,7 @@ func (ci *candidateInference) typeMatches(expType, candType types.Type) bool { // AssignableTo covers the case where the types are equal, but also handles // cases like assigning a concrete type to an interface type. - return types.AssignableTo(candType, expType) + return assignableTo(candType, expType) } // kindMatches reports whether candType's kind matches our expected @@ -2840,7 +2840,7 @@ func (ci *candidateInference) assigneesMatch(cand *candidate, sig *types.Signatu assignee = ci.assignees[i] } - if assignee == nil { + if assignee == nil || assignee == types.Typ[types.Invalid] { continue } @@ -2894,7 +2894,7 @@ func (c *completer) matchingTypeName(cand *candidate) bool { // // Where our expected type is "[]int", and we expect a type name. if c.inference.objType != nil { - return types.AssignableTo(candType, c.inference.objType) + return assignableTo(candType, c.inference.objType) } // Default to saying any type name is a match. diff --git a/internal/lsp/source/completion/util.go b/internal/lsp/source/completion/util.go index cd7849af262..e6d3bfd745f 100644 --- a/internal/lsp/source/completion/util.go +++ b/internal/lsp/source/completion/util.go @@ -321,3 +321,23 @@ func (c *completer) editText(from, to token.Pos, newText string) ([]protocol.Tex NewText: newText, }}) } + +// assignableTo is like types.AssignableTo, but returns false if +// either type is invalid. +func assignableTo(x, to types.Type) bool { + if x == types.Typ[types.Invalid] || to == types.Typ[types.Invalid] { + return false + } + + return types.AssignableTo(x, to) +} + +// convertibleTo is like types.ConvertibleTo, but returns false if +// either type is invalid. +func convertibleTo(x, to types.Type) bool { + if x == types.Typ[types.Invalid] || to == types.Typ[types.Invalid] { + return false + } + + return types.ConvertibleTo(x, to) +} diff --git a/internal/lsp/testdata/deep/deep.go b/internal/lsp/testdata/deep/deep.go index 6ed5ff83999..6908824f82f 100644 --- a/internal/lsp/testdata/deep/deep.go +++ b/internal/lsp/testdata/deep/deep.go @@ -28,6 +28,13 @@ func _() { wantsContext(c) //@rank(")", ctxBackground),rank(")", ctxTODO) } +func _() { + var cork struct{ err error } + cork.err //@item(deepCorkErr, "cork.err", "error", "field") + context //@item(deepContextPkg, "context", "\"context\"", "package") + var _ error = co //@rank(" //", deepCorkErr, deepContextPkg) +} + func _() { // deepCircle is circular. 
type deepCircle struct { diff --git a/internal/lsp/testdata/summary.txt.golden b/internal/lsp/testdata/summary.txt.golden index 0247551f8b9..b6c6c07b15d 100644 --- a/internal/lsp/testdata/summary.txt.golden +++ b/internal/lsp/testdata/summary.txt.golden @@ -6,7 +6,7 @@ CompletionSnippetCount = 106 UnimportedCompletionsCount = 5 DeepCompletionsCount = 5 FuzzyCompletionsCount = 8 -RankedCompletionsCount = 163 +RankedCompletionsCount = 164 CaseSensitiveCompletionsCount = 4 DiagnosticsCount = 37 FoldingRangesCount = 2 diff --git a/internal/lsp/testdata/summary_go1.18.txt.golden b/internal/lsp/testdata/summary_go1.18.txt.golden index 7e8da12d764..9fadf634090 100644 --- a/internal/lsp/testdata/summary_go1.18.txt.golden +++ b/internal/lsp/testdata/summary_go1.18.txt.golden @@ -6,7 +6,7 @@ CompletionSnippetCount = 116 UnimportedCompletionsCount = 5 DeepCompletionsCount = 5 FuzzyCompletionsCount = 8 -RankedCompletionsCount = 173 +RankedCompletionsCount = 174 CaseSensitiveCompletionsCount = 4 DiagnosticsCount = 37 FoldingRangesCount = 2 From bec0cf16be3beb52370134de9d720cefec13863f Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Mon, 27 Jun 2022 17:42:21 -0400 Subject: [PATCH 070/136] internal/lsp/cache: avoid Handle mechanism for workspace dir This change causes (*snapshot).getWorkspaceDir to create a temporary directory directly, rather than via the Store/Generation/Handle mechanism. The work is done at most once per snapshot, and the directory is deleted in Snapshot.Destroy. This removes the last remaining use of Handle's cleanup mechanism, which will be deleted in a follow-up. Change-Id: I32f09a67846d9b5577cb8849b226427f86443303 Reviewed-on: https://go-review.googlesource.com/c/tools/+/414499 gopls-CI: kokoro Run-TryBot: Alan Donovan Reviewed-by: Robert Findley TryBot-Result: Gopher Robot --- internal/lsp/cache/load.go | 86 ++++++++++++---------------------- internal/lsp/cache/snapshot.go | 18 ++++--- 2 files changed, 41 insertions(+), 63 deletions(-) diff --git a/internal/lsp/cache/load.go b/internal/lsp/cache/load.go index 3fb67a7a98b..d613dc3337e 100644 --- a/internal/lsp/cache/load.go +++ b/internal/lsp/cache/load.go @@ -7,7 +7,6 @@ package cache import ( "bytes" "context" - "crypto/sha256" "errors" "fmt" "io/ioutil" @@ -24,7 +23,6 @@ import ( "golang.org/x/tools/internal/lsp/debug/tag" "golang.org/x/tools/internal/lsp/protocol" "golang.org/x/tools/internal/lsp/source" - "golang.org/x/tools/internal/memoize" "golang.org/x/tools/internal/packagesinternal" "golang.org/x/tools/internal/span" ) @@ -384,77 +382,53 @@ func (s *snapshot) applyCriticalErrorToFiles(ctx context.Context, msg string, fi return srcDiags } -type workspaceDirKey string - -type workspaceDirData struct { - dir string - err error -} - -// getWorkspaceDir gets the URI for the workspace directory associated with -// this snapshot. The workspace directory is a temp directory containing the -// go.mod file computed from all active modules. +// getWorkspaceDir returns the URI for the workspace directory +// associated with this snapshot. The workspace directory is a +// temporary directory containing the go.mod file computed from all +// active modules. 
func (s *snapshot) getWorkspaceDir(ctx context.Context) (span.URI, error) { s.mu.Lock() - h := s.workspaceDirHandle + dir, err := s.workspaceDir, s.workspaceDirErr s.mu.Unlock() - if h != nil { - return getWorkspaceDir(ctx, h, s.generation) + if dir == "" && err == nil { // cache miss + dir, err = makeWorkspaceDir(ctx, s.workspace, s) + s.mu.Lock() + s.workspaceDir, s.workspaceDirErr = dir, err + s.mu.Unlock() } - file, err := s.workspace.modFile(ctx, s) + return span.URIFromPath(dir), err +} + +// makeWorkspaceDir creates a temporary directory containing a go.mod +// and go.sum file for each module in the workspace. +// Note: snapshot's mutex must be unlocked for it to satisfy FileSource. +func makeWorkspaceDir(ctx context.Context, workspace *workspace, fs source.FileSource) (string, error) { + file, err := workspace.modFile(ctx, fs) if err != nil { return "", err } - hash := sha256.New() modContent, err := file.Format() if err != nil { return "", err } - sumContent, err := s.workspace.sumFile(ctx, s) + sumContent, err := workspace.sumFile(ctx, fs) if err != nil { return "", err } - hash.Write(modContent) - hash.Write(sumContent) - key := workspaceDirKey(hash.Sum(nil)) - s.mu.Lock() - h = s.generation.Bind(key, func(context.Context, memoize.Arg) interface{} { - tmpdir, err := ioutil.TempDir("", "gopls-workspace-mod") - if err != nil { - return &workspaceDirData{err: err} - } - - for name, content := range map[string][]byte{ - "go.mod": modContent, - "go.sum": sumContent, - } { - filename := filepath.Join(tmpdir, name) - if err := ioutil.WriteFile(filename, content, 0644); err != nil { - os.RemoveAll(tmpdir) - return &workspaceDirData{err: err} - } - } - - return &workspaceDirData{dir: tmpdir} - }, func(v interface{}) { - d := v.(*workspaceDirData) - if d.dir != "" { - if err := os.RemoveAll(d.dir); err != nil { - event.Error(context.Background(), "cleaning workspace dir", err) - } - } - }) - s.workspaceDirHandle = h - s.mu.Unlock() - return getWorkspaceDir(ctx, h, s.generation) -} - -func getWorkspaceDir(ctx context.Context, h *memoize.Handle, g *memoize.Generation) (span.URI, error) { - v, err := h.Get(ctx, g, nil) + tmpdir, err := ioutil.TempDir("", "gopls-workspace-mod") if err != nil { return "", err } - return span.URIFromPath(v.(*workspaceDirData).dir), nil + for name, content := range map[string][]byte{ + "go.mod": modContent, + "go.sum": sumContent, + } { + if err := ioutil.WriteFile(filepath.Join(tmpdir, name), content, 0644); err != nil { + os.RemoveAll(tmpdir) // ignore error + return "", err + } + } + return tmpdir, nil } // computeMetadataUpdates populates the updates map with metadata updates to diff --git a/internal/lsp/cache/snapshot.go b/internal/lsp/cache/snapshot.go index 39e958e0fc2..259345bdc8f 100644 --- a/internal/lsp/cache/snapshot.go +++ b/internal/lsp/cache/snapshot.go @@ -115,8 +115,11 @@ type snapshot struct { modTidyHandles map[span.URI]*modTidyHandle modWhyHandles map[span.URI]*modWhyHandle - workspace *workspace - workspaceDirHandle *memoize.Handle + workspace *workspace // (not guarded by mu) + + // The cached result of makeWorkspaceDir, created on demand and deleted by Snapshot.Destroy. + workspaceDir string + workspaceDirErr error // knownSubdirs is the set of subdirectories in the workspace, used to // create glob patterns for file watching. 
@@ -144,6 +147,12 @@ func (s *snapshot) Destroy(destroyedBy string) { s.files.Destroy() s.goFiles.Destroy() s.parseKeysByURI.Destroy() + + if s.workspaceDir != "" { + if err := os.RemoveAll(s.workspaceDir); err != nil { + event.Error(context.Background(), "cleaning workspace dir", err) + } + } } func (s *snapshot) ID() uint64 { @@ -1709,11 +1718,6 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC workspace: newWorkspace, } - if !workspaceChanged && s.workspaceDirHandle != nil { - result.workspaceDirHandle = s.workspaceDirHandle - newGen.Inherit(s.workspaceDirHandle) - } - // Copy all of the FileHandles. for k, v := range s.symbols { if change, ok := changes[k]; ok { From f042799df4543ff8c76f0bf549b48338bcba94ac Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Fri, 1 Jul 2022 10:35:27 -0400 Subject: [PATCH 071/136] internal/memoize: delete Bind(cleanup) hook Now that the workspace directory uses Snapshot.Destroy to clean up (see https://go-review.googlesource.com/c/tools/+/414499) there is no need for this feature. Change-Id: Id5782273ce5030b4fb8f3b66a8d16a45a831ed91 Reviewed-on: https://go-review.googlesource.com/c/tools/+/414500 Reviewed-by: Robert Findley gopls-CI: kokoro Auto-Submit: Alan Donovan TryBot-Result: Gopher Robot Run-TryBot: Alan Donovan --- internal/lsp/cache/analysis.go | 2 +- internal/lsp/cache/mod.go | 6 +-- internal/lsp/cache/mod_tidy.go | 2 +- internal/lsp/cache/parse.go | 2 +- internal/lsp/cache/symbols.go | 2 +- internal/memoize/memoize.go | 67 ++++++++------------------------ internal/memoize/memoize_test.go | 45 ++------------------- 7 files changed, 27 insertions(+), 99 deletions(-) diff --git a/internal/lsp/cache/analysis.go b/internal/lsp/cache/analysis.go index 9f7a19c5c60..847ac2db8d0 100644 --- a/internal/lsp/cache/analysis.go +++ b/internal/lsp/cache/analysis.go @@ -147,7 +147,7 @@ func (s *snapshot) actionHandle(ctx context.Context, id PackageID, a *analysis.A } } return runAnalysis(ctx, snapshot, a, pkg, results) - }, nil) + }) act.handle = h act = s.addActionHandle(act) diff --git a/internal/lsp/cache/mod.go b/internal/lsp/cache/mod.go index c076f424dc9..843919d7b36 100644 --- a/internal/lsp/cache/mod.go +++ b/internal/lsp/cache/mod.go @@ -88,7 +88,7 @@ func (s *snapshot) ParseMod(ctx context.Context, modFH source.FileHandle) (*sour }, err: parseErr, } - }, nil) + }) pmh := &parseModHandle{handle: h} s.mu.Lock() @@ -162,7 +162,7 @@ func (s *snapshot) ParseWork(ctx context.Context, modFH source.FileHandle) (*sou }, err: parseErr, } - }, nil) + }) pwh := &parseWorkHandle{handle: h} s.mu.Lock() @@ -288,7 +288,7 @@ func (s *snapshot) ModWhy(ctx context.Context, fh source.FileHandle) (map[string why[req.Mod.Path] = whyList[i] } return &modWhyData{why: why} - }, nil) + }) mwh := &modWhyHandle{handle: h} s.mu.Lock() diff --git a/internal/lsp/cache/mod_tidy.go b/internal/lsp/cache/mod_tidy.go index bd2ff0c5f88..0f36249e057 100644 --- a/internal/lsp/cache/mod_tidy.go +++ b/internal/lsp/cache/mod_tidy.go @@ -138,7 +138,7 @@ func (s *snapshot) ModTidy(ctx context.Context, pm *source.ParsedModule) (*sourc TidiedContent: tempContents, }, } - }, nil) + }) mth := &modTidyHandle{handle: h} s.mu.Lock() diff --git a/internal/lsp/cache/parse.go b/internal/lsp/cache/parse.go index f7b4f9c7031..c3eae2f7643 100644 --- a/internal/lsp/cache/parse.go +++ b/internal/lsp/cache/parse.go @@ -130,7 +130,7 @@ func (s *snapshot) astCacheData(ctx context.Context, spkg source.Package, pos to } astHandle := s.generation.Bind(astCacheKey{pkgHandle.key, 
pgf.URI}, func(ctx context.Context, arg memoize.Arg) interface{} { return buildASTCache(pgf) - }, nil) + }) d, err := astHandle.Get(ctx, s.generation, s) if err != nil { diff --git a/internal/lsp/cache/symbols.go b/internal/lsp/cache/symbols.go index d56a036ff6e..f27178e2e7f 100644 --- a/internal/lsp/cache/symbols.go +++ b/internal/lsp/cache/symbols.go @@ -41,7 +41,7 @@ func (s *snapshot) buildSymbolHandle(ctx context.Context, fh source.FileHandle) snapshot := arg.(*snapshot) symbols, err := symbolize(snapshot, fh) return &symbolData{symbols, err} - }, nil) + }) sh := &symbolHandle{ handle: handle, diff --git a/internal/memoize/memoize.go b/internal/memoize/memoize.go index 7fa5340c5a3..4b84410d506 100644 --- a/internal/memoize/memoize.go +++ b/internal/memoize/memoize.go @@ -34,6 +34,7 @@ type Store struct { handlesMu sync.Mutex // lock ordering: Store.handlesMu before Handle.mu handles map[interface{}]*Handle // handles which are bound to generations for GC purposes. + // (It is the subset of values of 'handles' with trackGenerations enabled.) boundHandles map[*Handle]struct{} } @@ -78,7 +79,11 @@ func (g *Generation) Destroy(destroyedBy string) { if _, ok := h.generations[g]; ok { delete(h.generations, g) // delete even if it's dead, in case of dangling references to the entry. if len(h.generations) == 0 { - h.destroy(g.store) + h.state = stateDestroyed + delete(g.store.handles, h.key) + if h.trackGenerations { + delete(g.store.boundHandles, h) + } } } h.mu.Unlock() @@ -155,14 +160,6 @@ type Handle struct { function Function // value is set in completed state. value interface{} - // cleanup, if non-nil, is used to perform any necessary clean-up on values - // produced by function. - // - // cleanup is never set for reference counted handles. - // - // TODO(rfindley): remove this field once workspace folders no longer need to - // be tracked. - cleanup func(interface{}) // If trackGenerations is set, this handle tracks generations in which it // is valid, via the generations field. Otherwise, it is explicitly reference @@ -171,7 +168,7 @@ type Handle struct { refCounter int32 } -// Bind returns a handle for the given key and function. +// Bind returns a "generational" handle for the given key and function. // // Each call to bind will return the same handle if it is already bound. Bind // will always return a valid handle, creating one if needed. Each key can @@ -179,22 +176,18 @@ type Handle struct { // until the associated generation is destroyed. Bind does not cause the value // to be generated. // -// If cleanup is non-nil, it will be called on any non-nil values produced by -// function when they are no longer referenced. -// // It is responsibility of the caller to call Inherit on the handler whenever // it should still be accessible by a next generation. -func (g *Generation) Bind(key interface{}, function Function, cleanup func(interface{})) *Handle { - return g.getHandle(key, function, cleanup, true) +func (g *Generation) Bind(key interface{}, function Function) *Handle { + return g.getHandle(key, function, true) } -// GetHandle returns a handle for the given key and function with similar -// properties and behavior as Bind. -// -// As in opposite to Bind it returns a release callback which has to be called -// once this reference to handle is not needed anymore. +// GetHandle returns a "reference-counted" handle for the given key +// and function with similar properties and behavior as Bind. 
Unlike +// Bind, it returns a release callback which must be called once the +// handle is no longer needed. func (g *Generation) GetHandle(key interface{}, function Function) (*Handle, func()) { - h := g.getHandle(key, function, nil, false) + h := g.getHandle(key, function, false) store := g.store release := func() { // Acquire store.handlesMu before mutating refCounter @@ -206,7 +199,7 @@ func (g *Generation) GetHandle(key interface{}, function Function) (*Handle, fun h.refCounter-- if h.refCounter == 0 { - // Don't call h.destroy: for reference counted handles we can't know when + // Don't mark destroyed: for reference counted handles we can't know when // they are no longer reachable from runnable goroutines. For example, // gopls could have a current operation that is using a packageHandle. // Destroying the handle here would cause that operation to hang. @@ -216,7 +209,7 @@ func (g *Generation) GetHandle(key interface{}, function Function) (*Handle, fun return h, release } -func (g *Generation) getHandle(key interface{}, function Function, cleanup func(interface{}), trackGenerations bool) *Handle { +func (g *Generation) getHandle(key interface{}, function Function, trackGenerations bool) *Handle { // panic early if the function is nil // it would panic later anyway, but in a way that was much harder to debug if function == nil { @@ -232,7 +225,6 @@ func (g *Generation) getHandle(key interface{}, function Function, cleanup func( h = &Handle{ key: key, function: function, - cleanup: cleanup, trackGenerations: trackGenerations, } if trackGenerations { @@ -298,18 +290,6 @@ func (g *Generation) Inherit(h *Handle) { h.incrementRef(g) } -// destroy marks h as destroyed. h.mu and store.handlesMu must be held. -func (h *Handle) destroy(store *Store) { - h.state = stateDestroyed - if h.cleanup != nil && h.value != nil { - h.cleanup(h.value) - } - delete(store.handles, h.key) - if h.trackGenerations { - delete(store.boundHandles, h) - } -} - func (h *Handle) incrementRef(g *Generation) { h.mu.Lock() defer h.mu.Unlock() @@ -412,11 +392,6 @@ func (h *Handle) run(ctx context.Context, g *Generation, arg Arg) (interface{}, } v := function(childCtx, arg) if childCtx.Err() != nil { - // It's possible that v was computed despite the context cancellation. In - // this case we should ensure that it is cleaned up. - if h.cleanup != nil && v != nil { - h.cleanup(v) - } return } @@ -427,19 +402,9 @@ func (h *Handle) run(ctx context.Context, g *Generation, arg Arg) (interface{}, // checked childCtx above. Even so, that should be harmless, since each // run should produce the same results. if h.state != stateRunning { - // v will never be used, so ensure that it is cleaned up. - if h.cleanup != nil && v != nil { - h.cleanup(v) - } return } - if h.cleanup != nil && h.value != nil { - // Clean up before overwriting an existing value. - h.cleanup(h.value) - } - - // At this point v will be cleaned up whenever h is destroyed. h.value = v h.function = nil h.state = stateCompleted diff --git a/internal/memoize/memoize_test.go b/internal/memoize/memoize_test.go index ae387b8d049..48bb181173e 100644 --- a/internal/memoize/memoize_test.go +++ b/internal/memoize/memoize_test.go @@ -23,7 +23,7 @@ func TestGet(t *testing.T) { h := g.Bind("key", func(context.Context, memoize.Arg) interface{} { evaled++ return "res" - }, nil) + }) expectGet(t, h, g, "res") expectGet(t, h, g, "res") if evaled != 1 { @@ -50,7 +50,7 @@ func TestGenerations(t *testing.T) { s := &memoize.Store{} // Evaluate key in g1. 
g1 := s.Generation("g1") - h1 := g1.Bind("key", func(context.Context, memoize.Arg) interface{} { return "res" }, nil) + h1 := g1.Bind("key", func(context.Context, memoize.Arg) interface{} { return "res" }) expectGet(t, h1, g1, "res") // Get key in g2. It should inherit the value from g1. @@ -58,7 +58,7 @@ func TestGenerations(t *testing.T) { h2 := g2.Bind("key", func(context.Context, memoize.Arg) interface{} { t.Fatal("h2 should not need evaluation") return "error" - }, nil) + }) expectGet(t, h2, g2, "res") // With g1 destroyed, g2 should still work. @@ -68,47 +68,10 @@ func TestGenerations(t *testing.T) { // With all generations destroyed, key should be re-evaluated. g2.Destroy("TestGenerations") g3 := s.Generation("g3") - h3 := g3.Bind("key", func(context.Context, memoize.Arg) interface{} { return "new res" }, nil) + h3 := g3.Bind("key", func(context.Context, memoize.Arg) interface{} { return "new res" }) expectGet(t, h3, g3, "new res") } -func TestCleanup(t *testing.T) { - s := &memoize.Store{} - g1 := s.Generation("g1") - v1 := false - v2 := false - cleanup := func(v interface{}) { - *(v.(*bool)) = true - } - h1 := g1.Bind("key1", func(context.Context, memoize.Arg) interface{} { - return &v1 - }, nil) - h2 := g1.Bind("key2", func(context.Context, memoize.Arg) interface{} { - return &v2 - }, cleanup) - expectGet(t, h1, g1, &v1) - expectGet(t, h2, g1, &v2) - g2 := s.Generation("g2") - g2.Inherit(h1) - g2.Inherit(h2) - - g1.Destroy("TestCleanup") - expectGet(t, h1, g2, &v1) - expectGet(t, h2, g2, &v2) - for k, v := range map[string]*bool{"key1": &v1, "key2": &v2} { - if got, want := *v, false; got != want { - t.Errorf("after destroying g1, bound value %q is cleaned up", k) - } - } - g2.Destroy("TestCleanup") - if got, want := v1, false; got != want { - t.Error("after destroying g2, v1 is cleaned up") - } - if got, want := v2, true; got != want { - t.Error("after destroying g2, v2 is not cleaned up") - } -} - func TestHandleRefCounting(t *testing.T) { s := &memoize.Store{} g1 := s.Generation("g1") From 7b04e8b59ec2e614a9a351d6da0ae4ae69f4ca12 Mon Sep 17 00:00:00 2001 From: Ruslan Nigmatullin Date: Wed, 22 Jun 2022 23:05:59 +0000 Subject: [PATCH 072/136] internal/persistent: no-op deletion from map does not allocate We can use a property that split does a dfs search for the key before doing an actual work. This allows us to do a low-cost early return if there is no key to delete. 
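
This is essentially what the new assertion in map_test.go verifies; a
self-contained version of the same check (a hypothetical test using
only the exported API) would look like:

```
package persistent_test

import (
	"testing"

	"golang.org/x/tools/internal/persistent"
)

func TestNoopDeleteDoesNotAllocate(t *testing.T) {
	m := persistent.NewMap(func(a, b interface{}) bool { return a.(int) < b.(int) })
	m.Set(1, "one", func(key, value interface{}) {})

	allocs := testing.AllocsPerRun(10, func() {
		m.Delete(42) // absent key: split finds no match and returns early
	})
	if allocs > 0 {
		t.Errorf("no-op Delete allocated %v times per run, want 0", allocs)
	}
}
```
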
Change-Id: I6ed8068945f9f2dacc356d72b18afce04ec89a3c Reviewed-on: https://go-review.googlesource.com/c/tools/+/413659 gopls-CI: kokoro TryBot-Result: Gopher Robot Run-TryBot: Robert Findley Reviewed-by: Alan Donovan Reviewed-by: Dmitri Shuralyov --- internal/persistent/map.go | 21 ++++++++++++++++----- internal/persistent/map_test.go | 25 +++++++++---------------- 2 files changed, 25 insertions(+), 21 deletions(-) diff --git a/internal/persistent/map.go b/internal/persistent/map.go index 9c17ad09a7f..19b50480db4 100644 --- a/internal/persistent/map.go +++ b/internal/persistent/map.go @@ -185,7 +185,7 @@ func union(first, second *mapNode, less func(a, b interface{}) bool, overwrite b second, first, overwrite = first, second, !overwrite } - left, mid, right := split(second, first.key, less) + left, mid, right := split(second, first.key, less, false) var result *mapNode if overwrite && mid != nil { result = mid.shallowCloneWithRef() @@ -205,23 +205,31 @@ func union(first, second *mapNode, less func(a, b interface{}) bool, overwrite b // Return three new trees: left with all nodes with smaller than key, mid with // the node matching the key, right with all nodes larger than key. // If there are no nodes in one of trees, return nil instead of it. +// If requireMid is set (such as during deletion), then all return arguments +// are nil if mid is not found. // // split(n:-0) (left:+1, mid:+1, right:+1) // Split borrows n without affecting its refcount, and returns three // new references that that caller is expected to call decref. -func split(n *mapNode, key interface{}, less func(a, b interface{}) bool) (left, mid, right *mapNode) { +func split(n *mapNode, key interface{}, less func(a, b interface{}) bool, requireMid bool) (left, mid, right *mapNode) { if n == nil { return nil, nil, nil } if less(n.key, key) { - left, mid, right := split(n.right, key, less) + left, mid, right := split(n.right, key, less, requireMid) + if requireMid && mid == nil { + return nil, nil, nil + } newN := n.shallowCloneWithRef() newN.left = n.left.incref() newN.right = left return newN, mid, right } else if less(key, n.key) { - left, mid, right := split(n.left, key, less) + left, mid, right := split(n.left, key, less, requireMid) + if requireMid && mid == nil { + return nil, nil, nil + } newN := n.shallowCloneWithRef() newN.left = right newN.right = n.right.incref() @@ -234,7 +242,10 @@ func split(n *mapNode, key interface{}, less func(a, b interface{}) bool) (left, // Delete deletes the value for a key. 
func (pm *Map) Delete(key interface{}) { root := pm.root - left, mid, right := split(root, key, pm.less) + left, mid, right := split(root, key, pm.less, true) + if mid == nil { + return + } pm.root = merge(left, right) left.decref() mid.decref() diff --git a/internal/persistent/map_test.go b/internal/persistent/map_test.go index 059f0da4c03..bd2cbfa0e12 100644 --- a/internal/persistent/map_test.go +++ b/internal/persistent/map_test.go @@ -71,6 +71,15 @@ func TestSimpleMap(t *testing.T) { m1.remove(t, 1) validateRef(t, m1, m2) + gotAllocs := int(testing.AllocsPerRun(10, func() { + m1.impl.Delete(100) + m1.impl.Delete(1) + })) + wantAllocs := 0 + if gotAllocs != wantAllocs { + t.Errorf("wanted %d allocs, got %d", wantAllocs, gotAllocs) + } + for i := 10; i < 14; i++ { m1.set(t, i, i) validateRef(t, m1, m2) @@ -298,19 +307,3 @@ func assertSameMap(t *testing.T, map1, map2 interface{}) { t.Fatalf("different maps:\n%v\nvs\n%v", map1, map2) } } - -func isSameMap(map1, map2 reflect.Value) bool { - if map1.Len() != map2.Len() { - return false - } - iter := map1.MapRange() - for iter.Next() { - key := iter.Key() - value1 := iter.Value() - value2 := map2.MapIndex(key) - if value2.IsZero() || !reflect.DeepEqual(value1.Interface(), value2.Interface()) { - return false - } - } - return true -} From f487f3623ea4e13340147e56a278980f28781f8b Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Fri, 1 Jul 2022 11:52:34 -0400 Subject: [PATCH 073/136] internal/lsp/source: reduce allocation in workspace-symbols dynamicSymbolMatch is an allocation hotspot (9% of all bytes), because it allocates a 3-element []string that quickly becomes garbage. This change passes in an empty slice with spare capacity allowing the same array to be reused throughout the matchFile loop. BenchmarkSymbols on k8s shows -72% bytes, -88% allocs, -9% wall time. Change-Id: Id20c7cd649874a212e4d4c5f1aa095277b044a5b Reviewed-on: https://go-review.googlesource.com/c/tools/+/415500 TryBot-Result: Gopher Robot Run-TryBot: Alan Donovan gopls-CI: kokoro Reviewed-by: Robert Findley --- internal/lsp/source/workspace_symbol.go | 31 ++++++++++++++----------- 1 file changed, 17 insertions(+), 14 deletions(-) diff --git a/internal/lsp/source/workspace_symbol.go b/internal/lsp/source/workspace_symbol.go index 0822de0810d..6167c586a9a 100644 --- a/internal/lsp/source/workspace_symbol.go +++ b/internal/lsp/source/workspace_symbol.go @@ -83,17 +83,19 @@ type matcherFunc func(chunks []string) (int, float64) // []string{"myType.field"} or []string{"myType.", "field"}. // // See the comment for symbolCollector for more information. -type symbolizer func(name string, pkg Metadata, m matcherFunc) ([]string, float64) +// +// The space argument is an empty slice with spare capacity that may be used +// to allocate the result. 
+type symbolizer func(space []string, name string, pkg Metadata, m matcherFunc) ([]string, float64) -func fullyQualifiedSymbolMatch(name string, pkg Metadata, matcher matcherFunc) ([]string, float64) { - _, score := dynamicSymbolMatch(name, pkg, matcher) - if score > 0 { - return []string{pkg.PackagePath(), ".", name}, score +func fullyQualifiedSymbolMatch(space []string, name string, pkg Metadata, matcher matcherFunc) ([]string, float64) { + if _, score := dynamicSymbolMatch(space, name, pkg, matcher); score > 0 { + return append(space, pkg.PackagePath(), ".", name), score } return nil, 0 } -func dynamicSymbolMatch(name string, pkg Metadata, matcher matcherFunc) ([]string, float64) { +func dynamicSymbolMatch(space []string, name string, pkg Metadata, matcher matcherFunc) ([]string, float64) { var score float64 endsInPkgName := strings.HasSuffix(pkg.PackagePath(), pkg.PackageName()) @@ -101,14 +103,14 @@ func dynamicSymbolMatch(name string, pkg Metadata, matcher matcherFunc) ([]strin // If the package path does not end in the package name, we need to check the // package-qualified symbol as an extra pass first. if !endsInPkgName { - pkgQualified := []string{pkg.PackageName(), ".", name} + pkgQualified := append(space, pkg.PackageName(), ".", name) idx, score := matcher(pkgQualified) nameStart := len(pkg.PackageName()) + 1 if score > 0 { // If our match is contained entirely within the unqualified portion, // just return that. if idx >= nameStart { - return []string{name}, score + return append(space, name), score } // Lower the score for matches that include the package name. return pkgQualified, score * 0.8 @@ -116,13 +118,13 @@ func dynamicSymbolMatch(name string, pkg Metadata, matcher matcherFunc) ([]strin } // Now try matching the fully qualified symbol. - fullyQualified := []string{pkg.PackagePath(), ".", name} + fullyQualified := append(space, pkg.PackagePath(), ".", name) idx, score := matcher(fullyQualified) // As above, check if we matched just the unqualified symbol name. nameStart := len(pkg.PackagePath()) + 1 if idx >= nameStart { - return []string{name}, score + return append(space, name), score } // If our package path ends in the package name, we'll have skipped the @@ -131,7 +133,7 @@ func dynamicSymbolMatch(name string, pkg Metadata, matcher matcherFunc) ([]strin if endsInPkgName && idx >= 0 { pkgStart := len(pkg.PackagePath()) - len(pkg.PackageName()) if idx >= pkgStart { - return []string{pkg.PackageName(), ".", name}, score + return append(space, pkg.PackageName(), ".", name), score } } @@ -140,8 +142,8 @@ func dynamicSymbolMatch(name string, pkg Metadata, matcher matcherFunc) ([]strin return fullyQualified, score * 0.6 } -func packageSymbolMatch(name string, pkg Metadata, matcher matcherFunc) ([]string, float64) { - qualified := []string{pkg.PackageName(), ".", name} +func packageSymbolMatch(space []string, name string, pkg Metadata, matcher matcherFunc) ([]string, float64) { + qualified := append(space, pkg.PackageName(), ".", name) if _, s := matcher(qualified); s > 0 { return qualified, s } @@ -387,8 +389,9 @@ type symbolFile struct { // matchFile scans a symbol file and adds matching symbols to the store. func matchFile(store *symbolStore, symbolizer symbolizer, matcher matcherFunc, roots []string, i symbolFile) { + space := make([]string, 0, 3) for _, sym := range i.syms { - symbolParts, score := symbolizer(sym.Name, i.md, matcher) + symbolParts, score := symbolizer(space, sym.Name, i.md, matcher) // Check if the score is too low before applying any downranking. 
if store.tooLow(score) { From e92a18fd15832b49c64c4d7637711b91a50dcde8 Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Fri, 1 Jul 2022 12:22:37 -0400 Subject: [PATCH 074/136] internal/lsp/lsppos: reduce allocations in NewMapper Before, NewMapper accounts for 2.1% of bytes allocated in the WorkspaceSymbols benchmark. This change causes the newline index table to be allocated once instead of by appending. The function now accounts for 0.55%. Change-Id: I9172dd34ee2be9e7175e311d4a6518f1e6660a5f Reviewed-on: https://go-review.googlesource.com/c/tools/+/415501 Auto-Submit: Alan Donovan gopls-CI: kokoro Run-TryBot: Alan Donovan TryBot-Result: Gopher Robot Reviewed-by: Robert Findley --- internal/lsp/lsppos/lsppos.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/internal/lsp/lsppos/lsppos.go b/internal/lsp/lsppos/lsppos.go index 35f6f134854..6afad47b7d2 100644 --- a/internal/lsp/lsppos/lsppos.go +++ b/internal/lsp/lsppos/lsppos.go @@ -17,6 +17,7 @@ package lsppos import ( + "bytes" "errors" "sort" "unicode/utf8" @@ -36,9 +37,10 @@ type Mapper struct { // NewMapper creates a new Mapper for the given content. func NewMapper(content []byte) *Mapper { + nlines := bytes.Count(content, []byte("\n")) m := &Mapper{ content: content, - lines: []int{0}, + lines: make([]int, 1, nlines+1), // initially []int{0} } for offset, b := range content { if b == '\n' { From f79f3aac190554ef62f0257555394b31c29f0320 Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Tue, 14 Jun 2022 17:51:56 -0400 Subject: [PATCH 075/136] internal/lsp/cache: clarify buildPackageHandle Before, buildPackageHandle and buildKey were mutually recursive. Together they performed a sequential recursion over Metadata.Deps, calling GetFile and parseGoHandle for every file, and then finally (in postorder) binding a Handle for the type checking step. This change inlines buildKey to make the recursion more obvious, performs the recursion over dependencies first, followed by the reading of Go source files for this package, in parallel. (The IWL benchmark reports improvement but its variance is so high I'm not sure I trust it.) Other opportunities for parallelism are pointed out in new comments. The Bind operation for typechecking calls dep.check for each dependency in a separate goroutine. It no longer waits for each one since it is only prefetching the information that will be required during import processing, which will block until the information becomes available. Before, both reading and parsing appear to occur twice: once in buildPackageHandle and once in doTypeCheck. (Perhaps the second was a cache hits, but there's no need to rely on a cache.) Now, only file reading (GetFile) occurs in buildPackageHandle, since that's all that's needed for the packageKey. And parsing only occurs in doTypeCheck. The source.FileHandles are plumbed through as parameters. Also: - move parseGoHandles to a local function, since it exists only for buildPackageKey. It no longer parses, it only reads. - lots of TODO comments for possible optimizations, and typical measured times of various operations. - remove obsolete comment re: Bind and finalizers. 
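
The parallel file reading mentioned above is a plain errgroup fan-out;
a stripped-down sketch, with an invented fetch helper standing in for
the ~25us snapshot.GetFile call:

```
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

// fetch stands in for snapshot.GetFile.
func fetch(ctx context.Context, uri string) (string, error) {
	return "contents of " + uri, nil
}

// readAll reads all files concurrently and returns the first error, if any.
func readAll(ctx context.Context, uris []string) ([]string, error) {
	var group errgroup.Group
	results := make([]string, len(uris))
	for i, uri := range uris {
		i, uri := i, uri // capture loop variables for the closures
		group.Go(func() (err error) {
			results[i], err = fetch(ctx, uri)
			return
		})
	}
	if err := group.Wait(); err != nil {
		return nil, err
	}
	return results, nil
}

func main() {
	got, err := readAll(context.Background(), []string{"a.go", "b.go"})
	fmt.Println(got, err)
}
```
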
Change-Id: Iad049884607b73eaa6701bdf7771f96b042142d5 Reviewed-on: https://go-review.googlesource.com/c/tools/+/411913 Run-TryBot: Alan Donovan gopls-CI: kokoro Auto-Submit: Alan Donovan Reviewed-by: Robert Findley TryBot-Result: Gopher Robot --- internal/lsp/cache/cache.go | 4 +- internal/lsp/cache/check.go | 271 +++++++++++++++++---------------- internal/lsp/cache/mod_tidy.go | 4 +- internal/lsp/cache/symbols.go | 3 +- 4 files changed, 147 insertions(+), 135 deletions(-) diff --git a/internal/lsp/cache/cache.go b/internal/lsp/cache/cache.go index 3640272688f..8ac8b851a25 100644 --- a/internal/lsp/cache/cache.go +++ b/internal/lsp/cache/cache.go @@ -101,7 +101,7 @@ func (c *Cache) getFile(ctx context.Context, uri span.URI) (*fileHandle, error) return fh, nil } - fh, err := readFile(ctx, uri, fi) + fh, err := readFile(ctx, uri, fi) // ~25us if err != nil { return nil, err } @@ -126,7 +126,7 @@ func readFile(ctx context.Context, uri span.URI, fi os.FileInfo) (*fileHandle, e _ = ctx defer done() - data, err := ioutil.ReadFile(uri.Filename()) + data, err := ioutil.ReadFile(uri.Filename()) // ~20us if err != nil { return &fileHandle{ modTime: fi.ModTime(), diff --git a/internal/lsp/cache/check.go b/internal/lsp/cache/check.go index 797298a4192..aeb45635c3b 100644 --- a/internal/lsp/cache/check.go +++ b/internal/lsp/cache/check.go @@ -15,10 +15,12 @@ import ( "path/filepath" "regexp" "sort" + "strconv" "strings" "sync" "golang.org/x/mod/module" + "golang.org/x/sync/errgroup" "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/go/packages" "golang.org/x/tools/internal/event" @@ -37,7 +39,10 @@ type packageHandleKey source.Hash type packageHandle struct { handle *memoize.Handle - goFiles, compiledGoFiles []*parseGoHandle + // goFiles and compiledGoFiles are the lists of files in the package. + // The latter is the list of files seen by the type checker (in which + // those that import "C" have been replaced by generated code). + goFiles, compiledGoFiles []source.FileHandle // mode is the mode the files were parsed in. mode source.ParseMode @@ -57,14 +62,14 @@ func (ph *packageHandle) packageKey() packageKey { } func (ph *packageHandle) imports(ctx context.Context, s source.Snapshot) (result []string) { - for _, pgh := range ph.goFiles { - f, err := s.ParseGo(ctx, pgh.file, source.ParseHeader) + for _, goFile := range ph.goFiles { + f, err := s.ParseGo(ctx, goFile, source.ParseHeader) if err != nil { continue } seen := map[string]struct{}{} for _, impSpec := range f.File.Imports { - imp := strings.Trim(impSpec.Path.Value, `"`) + imp, _ := strconv.Unquote(impSpec.Path.Value) if _, ok := seen[imp]; !ok { seen[imp] = struct{}{} result = append(result, imp) @@ -82,7 +87,8 @@ type packageData struct { err error } -// buildPackageHandle returns a packageHandle for a given package and mode. +// buildPackageHandle returns a handle for the future results of +// type-checking the package identified by id in the given mode. // It assumes that the given ID already has metadata available, so it does not // attempt to reload missing or invalid metadata. The caller must reload // metadata if needed. @@ -91,86 +97,33 @@ func (s *snapshot) buildPackageHandle(ctx context.Context, id PackageID, mode so return ph, nil } - // Build the packageHandle for this ID and its dependencies. - ph, deps, err := s.buildKey(ctx, id, mode) - if err != nil { - return nil, err - } - - // Do not close over the packageHandle or the snapshot in the Bind function. 
- // This creates a cycle, which causes the finalizers to never run on the handles. - // The possible cycles are: - // - // packageHandle.h.function -> packageHandle - // packageHandle.h.function -> snapshot -> packageHandle - // - - m := ph.m - key := ph.key - - h, release := s.generation.GetHandle(key, func(ctx context.Context, arg memoize.Arg) interface{} { - snapshot := arg.(*snapshot) - - // Begin loading the direct dependencies, in parallel. - var wg sync.WaitGroup - for _, dep := range deps { - wg.Add(1) - go func(dep *packageHandle) { - dep.check(ctx, snapshot) - wg.Done() - }(dep) - } - - data := &packageData{} - data.pkg, data.err = typeCheck(ctx, snapshot, m.Metadata, mode, deps) - // Make sure that the workers above have finished before we return, - // especially in case of cancellation. - wg.Wait() - - return data - }) - ph.handle = h - - // Cache the handle in the snapshot. If a package handle has already - // been cached, addPackage will return the cached value. This is fine, - // since the original package handle above will have no references and be - // garbage collected. - ph = s.addPackageHandle(ph, release) - - return ph, nil -} - -// buildKey computes the key for a given packageHandle. -func (s *snapshot) buildKey(ctx context.Context, id PackageID, mode source.ParseMode) (*packageHandle, map[PackagePath]*packageHandle, error) { m := s.getMetadata(id) if m == nil { - return nil, nil, fmt.Errorf("no metadata for %s", id) - } - goFiles, err := s.parseGoHandles(ctx, m.GoFiles, mode) - if err != nil { - return nil, nil, err - } - compiledGoFiles, err := s.parseGoHandles(ctx, m.CompiledGoFiles, mode) - if err != nil { - return nil, nil, err - } - ph := &packageHandle{ - m: m, - goFiles: goFiles, - compiledGoFiles: compiledGoFiles, - mode: mode, + return nil, fmt.Errorf("no metadata for %s", id) } - // Make sure all of the depList are sorted. + + // For key stability, sort depList. + // TODO(adonovan): make m.Deps have a well defined order. depList := append([]PackageID{}, m.Deps...) sort.Slice(depList, func(i, j int) bool { return depList[i] < depList[j] }) - deps := make(map[PackagePath]*packageHandle) - // Begin computing the key by getting the depKeys for all dependencies. - var depKeys []packageHandleKey - for _, depID := range depList { + // This requires reading the transitive closure of dependencies' source files. + // + // It is tempting to parallelize the recursion here, but + // without de-duplication of subtasks this would lead to an + // exponential amount of work, and computing the key is + // expensive as it reads all the source files transitively. + // Notably, we don't update the s.packages cache until the + // entire key has been computed. + // TODO(adonovan): use a promise cache to ensure that the key + // for each package is computed by at most one thread, then do + // the recursive key building of dependencies in parallel. + deps := make(map[PackagePath]*packageHandle) + depKeys := make([]packageHandleKey, len(depList)) + for i, depID := range depList { depHandle, err := s.buildPackageHandle(ctx, depID, s.workspaceParseMode(depID)) // Don't use invalid metadata for dependencies if the top-level // metadata is valid. 
We only load top-level packages, so if the @@ -182,20 +135,90 @@ func (s *snapshot) buildKey(ctx context.Context, id PackageID, mode source.Parse event.Log(ctx, fmt.Sprintf("%s: invalid dep handle for %s", id, depID), tag.Snapshot.Of(s.id)) } + // This check ensures we break out of the slow + // buildPackageHandle recursion quickly when + // context cancelation is detected within GetFile. if ctx.Err() != nil { - return nil, nil, ctx.Err() + return nil, ctx.Err() // cancelled } - // One bad dependency should not prevent us from checking the entire package. - // Add a special key to mark a bad dependency. - depKeys = append(depKeys, packageHandleKey(source.Hashf("%s import not found", depID))) + + // One bad dependency should not prevent us from + // checking the entire package. Leave depKeys[i] unset. continue } + deps[depHandle.m.PkgPath] = depHandle - depKeys = append(depKeys, depHandle.key) + depKeys[i] = depHandle.key + } + + // Read both lists of files of this package, in parallel. + var group errgroup.Group + getFileHandles := func(files []span.URI) []source.FileHandle { + fhs := make([]source.FileHandle, len(files)) + for i, uri := range files { + i, uri := i, uri + group.Go(func() (err error) { + fhs[i], err = s.GetFile(ctx, uri) // ~25us + return + }) + } + return fhs } + goFiles := getFileHandles(m.GoFiles) + compiledGoFiles := getFileHandles(m.CompiledGoFiles) + if err := group.Wait(); err != nil { + return nil, err + } + + // All the file reading has now been done. + // Create a handle for the result of type checking. experimentalKey := s.View().Options().ExperimentalPackageCacheKey - ph.key = checkPackageKey(ph.m.ID, compiledGoFiles, m, depKeys, mode, experimentalKey) - return ph, deps, nil + key := computePackageKey(m.ID, compiledGoFiles, m, depKeys, mode, experimentalKey) + handle, release := s.generation.GetHandle(key, func(ctx context.Context, arg memoize.Arg) interface{} { + // TODO(adonovan): eliminate use of arg with this handle. + // (In all cases snapshot is equal to the enclosing s.) + snapshot := arg.(*snapshot) + + // Start type checking of direct dependencies, + // in parallel and asynchronously. + // As the type checker imports each of these + // packages, it will wait for its completion. + var wg sync.WaitGroup + for _, dep := range deps { + wg.Add(1) + go func(dep *packageHandle) { + dep.check(ctx, snapshot) // ignore result + wg.Done() + }(dep) + } + // The 'defer' below is unusual but intentional: + // it is not necessary that each call to dep.check + // complete before type checking begins, as the type + // checker will wait for those it needs. But they do + // need to complete before this function returns and + // the snapshot is possibly destroyed. + defer wg.Wait() + + pkg, err := typeCheck(ctx, snapshot, goFiles, compiledGoFiles, m.Metadata, mode, deps) + return &packageData{pkg, err} + }) + + ph := &packageHandle{ + handle: handle, + goFiles: goFiles, + compiledGoFiles: compiledGoFiles, + mode: mode, + m: m, + key: key, + } + + // Cache the handle in the snapshot. If a package handle has already + // been cached, addPackage will return the cached value. This is fine, + // since the original package handle above will have no references and be + // garbage collected. 
+ ph = s.addPackageHandle(ph, release) + + return ph, nil } func (s *snapshot) workspaceParseMode(id PackageID) source.ParseMode { @@ -214,7 +237,10 @@ func (s *snapshot) workspaceParseMode(id PackageID) source.ParseMode { return source.ParseExported } -func checkPackageKey(id PackageID, pghs []*parseGoHandle, m *KnownMetadata, deps []packageHandleKey, mode source.ParseMode, experimentalKey bool) packageHandleKey { +// computePackageKey returns a key representing the act of type checking +// a package named id containing the specified files, metadata, and +// dependency hashes. +func computePackageKey(id PackageID, files []source.FileHandle, m *KnownMetadata, deps []packageHandleKey, mode source.ParseMode, experimentalKey bool) packageHandleKey { // TODO(adonovan): opt: no need to materalize the bytes; hash them directly. // Also, use field separators to avoid spurious collisions. b := bytes.NewBuffer(nil) @@ -234,8 +260,8 @@ func checkPackageKey(id PackageID, pghs []*parseGoHandle, m *KnownMetadata, deps for _, dep := range deps { b.Write(dep[:]) } - for _, cgf := range pghs { - b.WriteString(cgf.file.FileIdentity().String()) + for _, file := range files { + b.WriteString(file.FileIdentity().String()) } return packageHandleKey(source.HashOf(b.Bytes())) } @@ -268,10 +294,6 @@ func hashConfig(config *packages.Config) source.Hash { return source.HashOf(b.Bytes()) } -func (ph *packageHandle) Check(ctx context.Context, s source.Snapshot) (source.Package, error) { - return ph.check(ctx, s.(*snapshot)) -} - func (ph *packageHandle) check(ctx context.Context, s *snapshot) (*pkg, error) { v, err := ph.handle.Get(ctx, s.generation, s) if err != nil { @@ -298,24 +320,15 @@ func (ph *packageHandle) cached(g *memoize.Generation) (*pkg, error) { return data.pkg, data.err } -func (s *snapshot) parseGoHandles(ctx context.Context, files []span.URI, mode source.ParseMode) ([]*parseGoHandle, error) { - pghs := make([]*parseGoHandle, 0, len(files)) - for _, uri := range files { - fh, err := s.GetFile(ctx, uri) - if err != nil { - return nil, err - } - pghs = append(pghs, s.parseGoHandle(ctx, fh, mode)) - } - return pghs, nil -} - -func typeCheck(ctx context.Context, snapshot *snapshot, m *Metadata, mode source.ParseMode, deps map[PackagePath]*packageHandle) (*pkg, error) { +// typeCheck type checks the parsed source files in compiledGoFiles. +// (The resulting pkg also holds the parsed but not type-checked goFiles.) +// deps holds the future results of type-checking the direct dependencies. 
+func typeCheck(ctx context.Context, snapshot *snapshot, goFiles, compiledGoFiles []source.FileHandle, m *Metadata, mode source.ParseMode, deps map[PackagePath]*packageHandle) (*pkg, error) { var filter *unexportedFilter if mode == source.ParseExported { filter = &unexportedFilter{uses: map[string]bool{}} } - pkg, err := doTypeCheck(ctx, snapshot, m, mode, deps, filter) + pkg, err := doTypeCheck(ctx, snapshot, goFiles, compiledGoFiles, m, mode, deps, filter) if err != nil { return nil, err } @@ -327,7 +340,7 @@ func typeCheck(ctx context.Context, snapshot *snapshot, m *Metadata, mode source missing, unexpected := filter.ProcessErrors(pkg.typeErrors) if len(unexpected) == 0 && len(missing) != 0 { event.Log(ctx, fmt.Sprintf("discovered missing identifiers: %v", missing), tag.Package.Of(string(m.ID))) - pkg, err = doTypeCheck(ctx, snapshot, m, mode, deps, filter) + pkg, err = doTypeCheck(ctx, snapshot, goFiles, compiledGoFiles, m, mode, deps, filter) if err != nil { return nil, err } @@ -335,7 +348,7 @@ func typeCheck(ctx context.Context, snapshot *snapshot, m *Metadata, mode source } if len(unexpected) != 0 || len(missing) != 0 { event.Log(ctx, fmt.Sprintf("falling back to safe trimming due to type errors: %v or still-missing identifiers: %v", unexpected, missing), tag.Package.Of(string(m.ID))) - pkg, err = doTypeCheck(ctx, snapshot, m, mode, deps, nil) + pkg, err = doTypeCheck(ctx, snapshot, goFiles, compiledGoFiles, m, mode, deps, nil) if err != nil { return nil, err } @@ -427,7 +440,7 @@ func typeCheck(ctx context.Context, snapshot *snapshot, m *Metadata, mode source var goVersionRx = regexp.MustCompile(`^go([1-9][0-9]*)\.(0|[1-9][0-9]*)$`) -func doTypeCheck(ctx context.Context, snapshot *snapshot, m *Metadata, mode source.ParseMode, deps map[PackagePath]*packageHandle, astFilter *unexportedFilter) (*pkg, error) { +func doTypeCheck(ctx context.Context, snapshot *snapshot, goFiles, compiledGoFiles []source.FileHandle, m *Metadata, mode source.ParseMode, deps map[PackagePath]*packageHandle, astFilter *unexportedFilter) (*pkg, error) { ctx, done := event.Start(ctx, "cache.typeCheck", tag.Package.Of(string(m.ID))) defer done() @@ -448,19 +461,19 @@ func doTypeCheck(ctx context.Context, snapshot *snapshot, m *Metadata, mode sour } typeparams.InitInstanceInfo(pkg.typesInfo) - for _, gf := range pkg.m.GoFiles { - // In the presence of line directives, we may need to report errors in - // non-compiled Go files, so we need to register them on the package. - // However, we only need to really parse them in ParseFull mode, when - // the user might actually be looking at the file. - fh, err := snapshot.GetFile(ctx, gf) - if err != nil { - return nil, err - } - goMode := source.ParseFull - if mode != source.ParseFull { - goMode = source.ParseHeader - } + // In the presence of line directives, we may need to report errors in + // non-compiled Go files, so we need to register them on the package. + // However, we only need to really parse them in ParseFull mode, when + // the user might actually be looking at the file. + goMode := source.ParseFull + if mode != source.ParseFull { + goMode = source.ParseHeader + } + + // Parse the GoFiles. (These aren't presented to the type + // checker but are part of the returned pkg.) + // TODO(adonovan): opt: parallelize parsing. 
+ for _, fh := range goFiles { pgf, err := snapshot.ParseGo(ctx, fh, goMode) if err != nil { return nil, err @@ -468,7 +481,8 @@ func doTypeCheck(ctx context.Context, snapshot *snapshot, m *Metadata, mode sour pkg.goFiles = append(pkg.goFiles, pgf) } - if err := parseCompiledGoFiles(ctx, snapshot, mode, pkg, astFilter); err != nil { + // Parse the CompiledGoFiles: those seen by the compiler/typechecker. + if err := parseCompiledGoFiles(ctx, compiledGoFiles, snapshot, mode, pkg, astFilter); err != nil { return nil, err } @@ -549,7 +563,7 @@ func doTypeCheck(ctx context.Context, snapshot *snapshot, m *Metadata, mode sour } // Type checking errors are handled via the config, so ignore them here. - _ = check.Files(files) + _ = check.Files(files) // 50us-15ms, depending on size of package // If the context was cancelled, we may have returned a ton of transient // errors to the type checker. Swallow them. @@ -559,21 +573,18 @@ func doTypeCheck(ctx context.Context, snapshot *snapshot, m *Metadata, mode sour return pkg, nil } -func parseCompiledGoFiles(ctx context.Context, snapshot *snapshot, mode source.ParseMode, pkg *pkg, astFilter *unexportedFilter) error { - for _, cgf := range pkg.m.CompiledGoFiles { - fh, err := snapshot.GetFile(ctx, cgf) - if err != nil { - return err - } - +func parseCompiledGoFiles(ctx context.Context, compiledGoFiles []source.FileHandle, snapshot *snapshot, mode source.ParseMode, pkg *pkg, astFilter *unexportedFilter) error { + // TODO(adonovan): opt: parallelize this loop, which takes 1-25ms. + for _, fh := range compiledGoFiles { var pgf *source.ParsedGoFile var fixed bool + var err error // Only parse Full through the cache -- we need to own Exported ASTs // to prune them. if mode == source.ParseFull { pgf, fixed, err = snapshot.parseGo(ctx, fh, mode) } else { - d := parseGo(ctx, snapshot.FileSet(), fh, mode) + d := parseGo(ctx, snapshot.FileSet(), fh, mode) // ~20us/KB pgf, fixed, err = d.parsed, d.fixed, d.err } if err != nil { diff --git a/internal/lsp/cache/mod_tidy.go b/internal/lsp/cache/mod_tidy.go index 0f36249e057..b81caabde5f 100644 --- a/internal/lsp/cache/mod_tidy.go +++ b/internal/lsp/cache/mod_tidy.go @@ -252,8 +252,8 @@ func modTidyDiagnostics(ctx context.Context, snapshot source.Snapshot, pm *sourc if len(missingImports) == 0 { continue } - for _, pgh := range ph.compiledGoFiles { - pgf, err := snapshot.ParseGo(ctx, pgh.file, source.ParseHeader) + for _, goFile := range ph.compiledGoFiles { + pgf, err := snapshot.ParseGo(ctx, goFile, source.ParseHeader) if err != nil { continue } diff --git a/internal/lsp/cache/symbols.go b/internal/lsp/cache/symbols.go index f27178e2e7f..50d7b123ec9 100644 --- a/internal/lsp/cache/symbols.go +++ b/internal/lsp/cache/symbols.go @@ -29,7 +29,8 @@ type symbolData struct { err error } -// buildSymbolHandle returns a handle to the result of symbolizing a file, +// buildSymbolHandle returns a handle to the future result of +// symbolizing the file identified by fh, // if necessary creating it and saving it in the snapshot. func (s *snapshot) buildSymbolHandle(ctx context.Context, fh source.FileHandle) *symbolHandle { if h := s.getSymbolHandle(fh.URI()); h != nil { From 698251aaa532d49ac69d2c416b0241afb2f65ea5 Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Fri, 1 Jul 2022 13:10:44 -0400 Subject: [PATCH 076/136] internal/lsp/cache: sort Metadata.Deps, for determinism ...so that each client doesn't have to. 
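
A minimal sketch of the intent, with stand-in types rather than the real gopls declarations: sort once where the metadata is constructed, so every consumer sees the same order without copying and re-sorting.

    package main

    import (
        "fmt"
        "sort"
    )

    type PackageID string

    type Metadata struct {
        ID   PackageID
        Deps []PackageID // direct dependencies, kept in string order
    }

    func newMetadata(id PackageID, deps []PackageID) *Metadata {
        m := &Metadata{ID: id, Deps: append([]PackageID(nil), deps...)}
        // Sort once, at construction, so no caller has to copy and re-sort.
        sort.Slice(m.Deps, func(i, j int) bool { return m.Deps[i] < m.Deps[j] })
        return m
    }

    func main() {
        m := newMetadata("example.com/a", []PackageID{"z", "c", "b"})
        fmt.Println(m.Deps) // [b c z]: deterministic without per-caller sorting
    }
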
Change-Id: I039c493031c5c90c4479741cf6f7572dad480808 Reviewed-on: https://go-review.googlesource.com/c/tools/+/415502 Run-TryBot: Alan Donovan Reviewed-by: Robert Findley Auto-Submit: Alan Donovan gopls-CI: kokoro TryBot-Result: Gopher Robot --- internal/lsp/cache/analysis.go | 8 +------- internal/lsp/cache/check.go | 11 ++--------- internal/lsp/cache/load.go | 1 + internal/lsp/cache/metadata.go | 2 +- 4 files changed, 5 insertions(+), 17 deletions(-) diff --git a/internal/lsp/cache/analysis.go b/internal/lsp/cache/analysis.go index 847ac2db8d0..db2ca2a8b09 100644 --- a/internal/lsp/cache/analysis.go +++ b/internal/lsp/cache/analysis.go @@ -10,7 +10,6 @@ import ( "go/ast" "go/types" "reflect" - "sort" "sync" "golang.org/x/sync/errgroup" @@ -122,13 +121,8 @@ func (s *snapshot) actionHandle(ctx context.Context, id PackageID, a *analysis.A // An analysis that consumes/produces facts // must run on the package's dependencies too. if len(a.FactTypes) > 0 { - importIDs := make([]string, 0, len(ph.m.Deps)) for _, importID := range ph.m.Deps { - importIDs = append(importIDs, string(importID)) - } - sort.Strings(importIDs) // for determinism - for _, importID := range importIDs { - depActionHandle, err := s.actionHandle(ctx, PackageID(importID), a) + depActionHandle, err := s.actionHandle(ctx, importID, a) if err != nil { return nil, err } diff --git a/internal/lsp/cache/check.go b/internal/lsp/cache/check.go index aeb45635c3b..5ded278f514 100644 --- a/internal/lsp/cache/check.go +++ b/internal/lsp/cache/check.go @@ -102,13 +102,6 @@ func (s *snapshot) buildPackageHandle(ctx context.Context, id PackageID, mode so return nil, fmt.Errorf("no metadata for %s", id) } - // For key stability, sort depList. - // TODO(adonovan): make m.Deps have a well defined order. - depList := append([]PackageID{}, m.Deps...) - sort.Slice(depList, func(i, j int) bool { - return depList[i] < depList[j] - }) - // Begin computing the key by getting the depKeys for all dependencies. // This requires reading the transitive closure of dependencies' source files. // @@ -122,8 +115,8 @@ func (s *snapshot) buildPackageHandle(ctx context.Context, id PackageID, mode so // for each package is computed by at most one thread, then do // the recursive key building of dependencies in parallel. deps := make(map[PackagePath]*packageHandle) - depKeys := make([]packageHandleKey, len(depList)) - for i, depID := range depList { + depKeys := make([]packageHandleKey, len(m.Deps)) + for i, depID := range m.Deps { depHandle, err := s.buildPackageHandle(ctx, depID, s.workspaceParseMode(depID)) // Don't use invalid metadata for dependencies if the top-level // metadata is valid. 
We only load top-level packages, so if the diff --git a/internal/lsp/cache/load.go b/internal/lsp/cache/load.go index d613dc3337e..00d4ab2cbb3 100644 --- a/internal/lsp/cache/load.go +++ b/internal/lsp/cache/load.go @@ -539,6 +539,7 @@ func computeMetadataUpdates(ctx context.Context, g *metadataGraph, pkgPath Packa event.Error(ctx, "error in dependency", err) } } + sort.Slice(m.Deps, func(i, j int) bool { return m.Deps[i] < m.Deps[j] }) // for determinism return nil } diff --git a/internal/lsp/cache/metadata.go b/internal/lsp/cache/metadata.go index b4da7130c23..486035f9390 100644 --- a/internal/lsp/cache/metadata.go +++ b/internal/lsp/cache/metadata.go @@ -32,7 +32,7 @@ type Metadata struct { ForTest PackagePath TypesSizes types.Sizes Errors []packages.Error - Deps []PackageID + Deps []PackageID // direct dependencies, in string order MissingDeps map[PackagePath]struct{} Module *packages.Module depsErrors []*packagesinternal.PackageError From afa4a9562fc7af3f6dfff75f7c0ac8106870d51e Mon Sep 17 00:00:00 2001 From: Ruslan Nigmatullin Date: Wed, 15 Jun 2022 02:28:52 +0000 Subject: [PATCH 077/136] internal/lsp/cache: persist known subdirs This on average reduces latency from 12ms to 4ms on internal codebase. Updates golang/go#45686 Change-Id: Id376fcd97ce375210f2ad8b88e42f6ca283d29d3 Reviewed-on: https://go-review.googlesource.com/c/tools/+/413657 Reviewed-by: Robert Findley Reviewed-by: Alan Donovan --- internal/lsp/cache/maps.go | 45 +++++++++++++++++++++++++++++ internal/lsp/cache/session.go | 18 +++++++----- internal/lsp/cache/snapshot.go | 51 ++++++++++++++++----------------- internal/persistent/map.go | 11 +++++++ internal/persistent/map_test.go | 33 +++++++++++++++++++++ 5 files changed, 125 insertions(+), 33 deletions(-) diff --git a/internal/lsp/cache/maps.go b/internal/lsp/cache/maps.go index 14026abd92b..f2958b0e121 100644 --- a/internal/lsp/cache/maps.go +++ b/internal/lsp/cache/maps.go @@ -206,3 +206,48 @@ func (m packagesMap) Set(key packageKey, value *packageHandle, release func()) { func (m packagesMap) Delete(key packageKey) { m.impl.Delete(key) } + +type knownDirsSet struct { + impl *persistent.Map +} + +func newKnownDirsSet() knownDirsSet { + return knownDirsSet{ + impl: persistent.NewMap(func(a, b interface{}) bool { + return a.(span.URI) < b.(span.URI) + }), + } +} + +func (s knownDirsSet) Clone() knownDirsSet { + return knownDirsSet{ + impl: s.impl.Clone(), + } +} + +func (s knownDirsSet) Destroy() { + s.impl.Destroy() +} + +func (s knownDirsSet) Contains(key span.URI) bool { + _, ok := s.impl.Get(key) + return ok +} + +func (s knownDirsSet) Range(do func(key span.URI)) { + s.impl.Range(func(key, value interface{}) { + do(key.(span.URI)) + }) +} + +func (s knownDirsSet) SetAll(other knownDirsSet) { + s.impl.SetAll(other.impl) +} + +func (s knownDirsSet) Insert(key span.URI) { + s.impl.Set(key, nil, nil) +} + +func (s knownDirsSet) Remove(key span.URI) { + s.impl.Delete(key) +} diff --git a/internal/lsp/cache/session.go b/internal/lsp/cache/session.go index 8d8e63f13e8..52b141a78d7 100644 --- a/internal/lsp/cache/session.go +++ b/internal/lsp/cache/session.go @@ -244,6 +244,7 @@ func (s *Session) createView(ctx context.Context, name string, folder span.URI, parseWorkHandles: make(map[span.URI]*parseWorkHandle), modTidyHandles: make(map[span.URI]*modTidyHandle), modWhyHandles: make(map[span.URI]*modWhyHandle), + knownSubdirs: newKnownDirsSet(), workspace: workspace, } @@ -537,9 +538,11 @@ func (s *Session) ExpandModificationsToDirectories(ctx context.Context, changes 
snapshots = append(snapshots, snapshot) } knownDirs := knownDirectories(ctx, snapshots) + defer knownDirs.Destroy() + var result []source.FileModification for _, c := range changes { - if _, ok := knownDirs[c.URI]; !ok { + if !knownDirs.Contains(c.URI) { result = append(result, c) continue } @@ -561,16 +564,17 @@ func (s *Session) ExpandModificationsToDirectories(ctx context.Context, changes // knownDirectories returns all of the directories known to the given // snapshots, including workspace directories and their subdirectories. -func knownDirectories(ctx context.Context, snapshots []*snapshot) map[span.URI]struct{} { - result := map[span.URI]struct{}{} +// It is responsibility of the caller to destroy the returned set. +func knownDirectories(ctx context.Context, snapshots []*snapshot) knownDirsSet { + result := newKnownDirsSet() for _, snapshot := range snapshots { dirs := snapshot.workspace.dirs(ctx, snapshot) for _, dir := range dirs { - result[dir] = struct{}{} - } - for _, dir := range snapshot.getKnownSubdirs(dirs) { - result[dir] = struct{}{} + result.Insert(dir) } + knownSubdirs := snapshot.getKnownSubdirs(dirs) + result.SetAll(knownSubdirs) + knownSubdirs.Destroy() } return result } diff --git a/internal/lsp/cache/snapshot.go b/internal/lsp/cache/snapshot.go index 259345bdc8f..85232ea8357 100644 --- a/internal/lsp/cache/snapshot.go +++ b/internal/lsp/cache/snapshot.go @@ -123,7 +123,7 @@ type snapshot struct { // knownSubdirs is the set of subdirectories in the workspace, used to // create glob patterns for file watching. - knownSubdirs map[span.URI]struct{} + knownSubdirs knownDirsSet knownSubdirsPatternCache string // unprocessedSubdirChanges are any changes that might affect the set of // subdirectories in the workspace. They are not reflected to knownSubdirs @@ -147,6 +147,7 @@ func (s *snapshot) Destroy(destroyedBy string) { s.files.Destroy() s.goFiles.Destroy() s.parseKeysByURI.Destroy() + s.knownSubdirs.Destroy() if s.workspaceDir != "" { if err := os.RemoveAll(s.workspaceDir); err != nil { @@ -842,17 +843,20 @@ func (s *snapshot) getKnownSubdirsPattern(wsDirs []span.URI) string { // It may change list of known subdirs and therefore invalidate the cache. 
s.applyKnownSubdirsChangesLocked(wsDirs) - if len(s.knownSubdirs) == 0 { - return "" - } - if s.knownSubdirsPatternCache == "" { - dirNames := make([]string, 0, len(s.knownSubdirs)) - for uri := range s.knownSubdirs { - dirNames = append(dirNames, uri.Filename()) + var builder strings.Builder + s.knownSubdirs.Range(func(uri span.URI) { + if builder.Len() == 0 { + builder.WriteString("{") + } else { + builder.WriteString(",") + } + builder.WriteString(uri.Filename()) + }) + if builder.Len() > 0 { + builder.WriteString("}") + s.knownSubdirsPatternCache = builder.String() } - sort.Strings(dirNames) - s.knownSubdirsPatternCache = fmt.Sprintf("{%s}", strings.Join(dirNames, ",")) } return s.knownSubdirsPatternCache @@ -867,14 +871,15 @@ func (s *snapshot) collectAllKnownSubdirs(ctx context.Context) { s.mu.Lock() defer s.mu.Unlock() - s.knownSubdirs = map[span.URI]struct{}{} + s.knownSubdirs.Destroy() + s.knownSubdirs = newKnownDirsSet() s.knownSubdirsPatternCache = "" s.files.Range(func(uri span.URI, fh source.VersionedFileHandle) { s.addKnownSubdirLocked(uri, dirs) }) } -func (s *snapshot) getKnownSubdirs(wsDirs []span.URI) []span.URI { +func (s *snapshot) getKnownSubdirs(wsDirs []span.URI) knownDirsSet { s.mu.Lock() defer s.mu.Unlock() @@ -882,11 +887,7 @@ func (s *snapshot) getKnownSubdirs(wsDirs []span.URI) []span.URI { // subdirectories. s.applyKnownSubdirsChangesLocked(wsDirs) - result := make([]span.URI, 0, len(s.knownSubdirs)) - for uri := range s.knownSubdirs { - result = append(result, uri) - } - return result + return s.knownSubdirs.Clone() } func (s *snapshot) applyKnownSubdirsChangesLocked(wsDirs []span.URI) { @@ -907,7 +908,7 @@ func (s *snapshot) addKnownSubdirLocked(uri span.URI, dirs []span.URI) { dir := filepath.Dir(uri.Filename()) // First check if the directory is already known, because then we can // return early. - if _, ok := s.knownSubdirs[span.URIFromPath(dir)]; ok { + if s.knownSubdirs.Contains(span.URIFromPath(dir)) { return } var matched span.URI @@ -926,10 +927,10 @@ func (s *snapshot) addKnownSubdirLocked(uri span.URI, dirs []span.URI) { break } uri := span.URIFromPath(dir) - if _, ok := s.knownSubdirs[uri]; ok { + if s.knownSubdirs.Contains(uri) { break } - s.knownSubdirs[uri] = struct{}{} + s.knownSubdirs.Insert(uri) dir = filepath.Dir(dir) s.knownSubdirsPatternCache = "" } @@ -939,11 +940,11 @@ func (s *snapshot) removeKnownSubdirLocked(uri span.URI) { dir := filepath.Dir(uri.Filename()) for dir != "" { uri := span.URIFromPath(dir) - if _, ok := s.knownSubdirs[uri]; !ok { + if !s.knownSubdirs.Contains(uri) { break } if info, _ := os.Stat(dir); info == nil { - delete(s.knownSubdirs, uri) + s.knownSubdirs.Remove(uri) s.knownSubdirsPatternCache = "" } dir = filepath.Dir(dir) @@ -1714,7 +1715,7 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC parseWorkHandles: make(map[span.URI]*parseWorkHandle, len(s.parseWorkHandles)), modTidyHandles: make(map[span.URI]*modTidyHandle, len(s.modTidyHandles)), modWhyHandles: make(map[span.URI]*modWhyHandle, len(s.modWhyHandles)), - knownSubdirs: make(map[span.URI]struct{}, len(s.knownSubdirs)), + knownSubdirs: s.knownSubdirs.Clone(), workspace: newWorkspace, } @@ -1771,9 +1772,7 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC // Add all of the known subdirectories, but don't update them for the // changed files. We need to rebuild the workspace module to know the // true set of known subdirectories, but we don't want to do that in clone. 
- for k, v := range s.knownSubdirs { - result.knownSubdirs[k] = v - } + result.knownSubdirs = s.knownSubdirs.Clone() result.knownSubdirsPatternCache = s.knownSubdirsPatternCache for _, c := range changes { result.unprocessedSubdirChanges = append(result.unprocessedSubdirChanges, c) diff --git a/internal/persistent/map.go b/internal/persistent/map.go index 19b50480db4..55b7065e9f7 100644 --- a/internal/persistent/map.go +++ b/internal/persistent/map.go @@ -28,6 +28,8 @@ import ( // client-provided function that implements a strict weak order. // // Maps can be Cloned in constant time. +// Get, Store, and Delete operations are done on average in logarithmic time. +// Maps can be Updated in O(m log(n/m)) time for maps of size n and m, where m < n. // // Values are reference counted, and a client-supplied release function // is called when a value is no longer referenced by a map or any clone. @@ -156,6 +158,15 @@ func (pm *Map) Get(key interface{}) (interface{}, bool) { return nil, false } +// SetAll updates the map with key/value pairs from the other map, overwriting existing keys. +// It is equivalent to calling Set for each entry in the other map but is more efficient. +// Both maps must have the same comparison function, otherwise behavior is undefined. +func (pm *Map) SetAll(other *Map) { + root := pm.root + pm.root = union(root, other.root, pm.less, true) + root.decref() +} + // Set updates the value associated with the specified key. // If release is non-nil, it will be called with entry's key and value once the // key is no longer contained in the map or any clone. diff --git a/internal/persistent/map_test.go b/internal/persistent/map_test.go index bd2cbfa0e12..1c413d78fa7 100644 --- a/internal/persistent/map_test.go +++ b/internal/persistent/map_test.go @@ -151,6 +151,31 @@ func TestRandomMap(t *testing.T) { assertSameMap(t, seenEntries, deletedEntries) } +func TestUpdate(t *testing.T) { + deletedEntries := make(map[mapEntry]struct{}) + seenEntries := make(map[mapEntry]struct{}) + + m1 := &validatedMap{ + impl: NewMap(func(a, b interface{}) bool { + return a.(int) < b.(int) + }), + expected: make(map[int]int), + deleted: deletedEntries, + seen: seenEntries, + } + m2 := m1.clone() + + m1.set(t, 1, 1) + m1.set(t, 2, 2) + m2.set(t, 2, 20) + m2.set(t, 3, 3) + m1.setAll(t, m2) + + m1.destroy() + m2.destroy() + assertSameMap(t, seenEntries, deletedEntries) +} + func (vm *validatedMap) onDelete(t *testing.T, key, value int) { entry := mapEntry{key: key, value: value} if _, ok := vm.deleted[entry]; ok { @@ -254,6 +279,14 @@ func validateNode(t *testing.T, node *mapNode, less func(a, b interface{}) bool) validateNode(t, node.right, less) } +func (vm *validatedMap) setAll(t *testing.T, other *validatedMap) { + vm.impl.SetAll(other.impl) + for key, value := range other.expected { + vm.expected[key] = value + } + vm.validate(t) +} + func (vm *validatedMap) set(t *testing.T, key, value int) { vm.seen[mapEntry{key: key, value: value}] = struct{}{} vm.impl.Set(key, value, func(deletedKey, deletedValue interface{}) { From d69bac6d882444114a07779657ae97128a4fac76 Mon Sep 17 00:00:00 2001 From: Ruslan Nigmatullin Date: Wed, 22 Jun 2022 22:39:54 +0000 Subject: [PATCH 078/136] internal/lsp/cache: cache isActiveLocked calculation across snapshots This speeds up workspace initialization time in DegradeClosed memory mode from 3m to 1m by avoiding unnecessary recomputation of results. 
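
The computation being cached is a memoized reachability check over the package graph. The sketch below is illustrative only: a plain map stands in for the persistent isActivePackageCache that gopls carries from snapshot to snapshot and resets when metadata changes or a file is opened or closed, and the open set abstracts the "package has an open file" test.

    package main

    import "fmt"

    type PackageID string

    type metadata struct {
        deps []PackageID
    }

    // isActive reports whether id has an open file or transitively depends on
    // a package that does. Results are memoized in cache; package graphs are
    // acyclic, so the recursion terminates.
    func isActive(id PackageID, graph map[PackageID]*metadata, open, cache map[PackageID]bool) (active bool) {
        if v, ok := cache[id]; ok {
            return v
        }
        defer func() { cache[id] = active }()
        if open[id] {
            return true
        }
        m := graph[id]
        if m == nil {
            return false
        }
        for _, dep := range m.deps {
            if isActive(dep, graph, open, cache) {
                return true
            }
        }
        return false
    }

    func main() {
        graph := map[PackageID]*metadata{
            "a": {deps: []PackageID{"b"}},
            "b": {deps: []PackageID{"c"}},
            "c": {},
        }
        open := map[PackageID]bool{"c": true}
        cache := map[PackageID]bool{}
        fmt.Println(isActive("a", graph, open, cache)) // true; "b" and "c" are now cached too
    }
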
Change-Id: Ie5df82952d50ab42125defd148136329f0d50a48 Reviewed-on: https://go-review.googlesource.com/c/tools/+/413658 Reviewed-by: Alan Donovan Reviewed-by: David Chase --- internal/lsp/cache/check.go | 2 +- internal/lsp/cache/load.go | 2 +- internal/lsp/cache/maps.go | 34 +++++++++++++++ internal/lsp/cache/session.go | 43 +++++++++--------- internal/lsp/cache/snapshot.go | 79 ++++++++++++++++++++-------------- 5 files changed, 104 insertions(+), 56 deletions(-) diff --git a/internal/lsp/cache/check.go b/internal/lsp/cache/check.go index 5ded278f514..7ebc777d762 100644 --- a/internal/lsp/cache/check.go +++ b/internal/lsp/cache/check.go @@ -224,7 +224,7 @@ func (s *snapshot) workspaceParseMode(id PackageID) source.ParseMode { if s.view.Options().MemoryMode == source.ModeNormal { return source.ParseFull } - if s.isActiveLocked(id, nil) { + if s.isActiveLocked(id) { return source.ParseFull } return source.ParseExported diff --git a/internal/lsp/cache/load.go b/internal/lsp/cache/load.go index 00d4ab2cbb3..e4fd6719872 100644 --- a/internal/lsp/cache/load.go +++ b/internal/lsp/cache/load.go @@ -220,7 +220,7 @@ func (s *snapshot) load(ctx context.Context, allowNetwork bool, scopes ...interf // invalidate the reverse transitive closure of packages that have changed. invalidatedPackages := s.meta.reverseTransitiveClosure(true, loadedIDs...) s.meta = s.meta.Clone(updates) - + s.resetIsActivePackageLocked() // Invalidate any packages we may have associated with this metadata. // // TODO(rfindley): this should not be necessary, as we should have already diff --git a/internal/lsp/cache/maps.go b/internal/lsp/cache/maps.go index f2958b0e121..af041777188 100644 --- a/internal/lsp/cache/maps.go +++ b/internal/lsp/cache/maps.go @@ -112,6 +112,40 @@ func (m goFilesMap) Delete(key parseKey) { m.impl.Delete(key) } +type isActivePackageCacheMap struct { + impl *persistent.Map +} + +func newIsActivePackageCacheMap() isActivePackageCacheMap { + return isActivePackageCacheMap{ + impl: persistent.NewMap(func(a, b interface{}) bool { + return a.(PackageID) < b.(PackageID) + }), + } +} + +func (m isActivePackageCacheMap) Clone() isActivePackageCacheMap { + return isActivePackageCacheMap{ + impl: m.impl.Clone(), + } +} + +func (m isActivePackageCacheMap) Destroy() { + m.impl.Destroy() +} + +func (m isActivePackageCacheMap) Get(key PackageID) (bool, bool) { + value, ok := m.impl.Get(key) + if !ok { + return false, false + } + return value.(bool), true +} + +func (m isActivePackageCacheMap) Set(key PackageID, value bool) { + m.impl.Set(key, value, nil) +} + type parseKeysByURIMap struct { impl *persistent.Map } diff --git a/internal/lsp/cache/session.go b/internal/lsp/cache/session.go index 52b141a78d7..aca46dd2a88 100644 --- a/internal/lsp/cache/session.go +++ b/internal/lsp/cache/session.go @@ -225,27 +225,28 @@ func (s *Session) createView(ctx context.Context, name string, folder span.URI, }, } v.snapshot = &snapshot{ - id: snapshotID, - view: v, - backgroundCtx: backgroundCtx, - cancel: cancel, - initializeOnce: &sync.Once{}, - generation: s.cache.store.Generation(generationName(v, 0)), - packages: newPackagesMap(), - meta: &metadataGraph{}, - files: newFilesMap(), - goFiles: newGoFilesMap(), - parseKeysByURI: newParseKeysByURIMap(), - symbols: make(map[span.URI]*symbolHandle), - actions: make(map[actionKey]*actionHandle), - workspacePackages: make(map[PackageID]PackagePath), - unloadableFiles: make(map[span.URI]struct{}), - parseModHandles: make(map[span.URI]*parseModHandle), - parseWorkHandles: 
make(map[span.URI]*parseWorkHandle), - modTidyHandles: make(map[span.URI]*modTidyHandle), - modWhyHandles: make(map[span.URI]*modWhyHandle), - knownSubdirs: newKnownDirsSet(), - workspace: workspace, + id: snapshotID, + view: v, + backgroundCtx: backgroundCtx, + cancel: cancel, + initializeOnce: &sync.Once{}, + generation: s.cache.store.Generation(generationName(v, 0)), + packages: newPackagesMap(), + meta: &metadataGraph{}, + files: newFilesMap(), + isActivePackageCache: newIsActivePackageCacheMap(), + goFiles: newGoFilesMap(), + parseKeysByURI: newParseKeysByURIMap(), + symbols: make(map[span.URI]*symbolHandle), + actions: make(map[actionKey]*actionHandle), + workspacePackages: make(map[PackageID]PackagePath), + unloadableFiles: make(map[span.URI]struct{}), + parseModHandles: make(map[span.URI]*parseModHandle), + parseWorkHandles: make(map[span.URI]*parseWorkHandle), + modTidyHandles: make(map[span.URI]*modTidyHandle), + modWhyHandles: make(map[span.URI]*modWhyHandle), + knownSubdirs: newKnownDirsSet(), + workspace: workspace, } // Initialize the view without blocking. diff --git a/internal/lsp/cache/snapshot.go b/internal/lsp/cache/snapshot.go index 85232ea8357..36dcafeaca6 100644 --- a/internal/lsp/cache/snapshot.go +++ b/internal/lsp/cache/snapshot.go @@ -91,6 +91,10 @@ type snapshot struct { // It may be invalidated when a file's content changes. packages packagesMap + // isActivePackageCache maps package ID to the cached value if it is active or not. + // It may be invalidated when metadata changes or a new file is opened or closed. + isActivePackageCache isActivePackageCacheMap + // actions maps an actionkey to its actionHandle. actions map[actionKey]*actionHandle @@ -144,6 +148,7 @@ type actionKey struct { func (s *snapshot) Destroy(destroyedBy string) { s.generation.Destroy(destroyedBy) s.packages.Destroy() + s.isActivePackageCache.Destroy() s.files.Destroy() s.goFiles.Destroy() s.parseKeysByURI.Destroy() @@ -754,24 +759,20 @@ func (s *snapshot) activePackageIDs() (ids []PackageID) { s.mu.Lock() defer s.mu.Unlock() - seen := make(map[PackageID]bool) for id := range s.workspacePackages { - if s.isActiveLocked(id, seen) { + if s.isActiveLocked(id) { ids = append(ids, id) } } return ids } -func (s *snapshot) isActiveLocked(id PackageID, seen map[PackageID]bool) (active bool) { - if seen == nil { - seen = make(map[PackageID]bool) - } - if seen, ok := seen[id]; ok { +func (s *snapshot) isActiveLocked(id PackageID) (active bool) { + if seen, ok := s.isActivePackageCache.Get(id); ok { return seen } defer func() { - seen[id] = active + s.isActivePackageCache.Set(id, active) }() m, ok := s.meta.metadata[id] if !ok { @@ -785,13 +786,18 @@ func (s *snapshot) isActiveLocked(id PackageID, seen map[PackageID]bool) (active // TODO(rfindley): it looks incorrect that we don't also check GoFiles here. // If a CGo file is open, we want to consider the package active. 
for _, dep := range m.Deps { - if s.isActiveLocked(dep, seen) { + if s.isActiveLocked(dep) { return true } } return false } +func (s *snapshot) resetIsActivePackageLocked() { + s.isActivePackageCache.Destroy() + s.isActivePackageCache = newIsActivePackageCacheMap() +} + const fileExtensions = "go,mod,sum,work" func (s *snapshot) fileWatchingGlobPatterns(ctx context.Context) map[string]struct{} { @@ -1287,6 +1293,7 @@ func (s *snapshot) clearShouldLoad(scopes ...interface{}) { } } s.meta = g.Clone(updates) + s.resetIsActivePackageLocked() } // noValidMetadataForURILocked reports whether there is any valid metadata for @@ -1377,7 +1384,7 @@ func (s *snapshot) openFiles() []source.VersionedFileHandle { var open []source.VersionedFileHandle s.files.Range(func(uri span.URI, fh source.VersionedFileHandle) { - if s.isOpenLocked(fh.URI()) { + if isFileOpen(fh) { open = append(open, fh) } }) @@ -1386,6 +1393,10 @@ func (s *snapshot) openFiles() []source.VersionedFileHandle { func (s *snapshot) isOpenLocked(uri span.URI) bool { fh, _ := s.files.Get(uri) + return isFileOpen(fh) +} + +func isFileOpen(fh source.VersionedFileHandle) bool { _, open := fh.(*overlay) return open } @@ -1695,28 +1706,29 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC newGen := s.view.session.cache.store.Generation(generationName(s.view, s.id+1)) bgCtx, cancel := context.WithCancel(bgCtx) result := &snapshot{ - id: s.id + 1, - generation: newGen, - view: s.view, - backgroundCtx: bgCtx, - cancel: cancel, - builtin: s.builtin, - initializeOnce: s.initializeOnce, - initializedErr: s.initializedErr, - packages: s.packages.Clone(), - actions: make(map[actionKey]*actionHandle, len(s.actions)), - files: s.files.Clone(), - goFiles: s.goFiles.Clone(), - parseKeysByURI: s.parseKeysByURI.Clone(), - symbols: make(map[span.URI]*symbolHandle, len(s.symbols)), - workspacePackages: make(map[PackageID]PackagePath, len(s.workspacePackages)), - unloadableFiles: make(map[span.URI]struct{}, len(s.unloadableFiles)), - parseModHandles: make(map[span.URI]*parseModHandle, len(s.parseModHandles)), - parseWorkHandles: make(map[span.URI]*parseWorkHandle, len(s.parseWorkHandles)), - modTidyHandles: make(map[span.URI]*modTidyHandle, len(s.modTidyHandles)), - modWhyHandles: make(map[span.URI]*modWhyHandle, len(s.modWhyHandles)), - knownSubdirs: s.knownSubdirs.Clone(), - workspace: newWorkspace, + id: s.id + 1, + generation: newGen, + view: s.view, + backgroundCtx: bgCtx, + cancel: cancel, + builtin: s.builtin, + initializeOnce: s.initializeOnce, + initializedErr: s.initializedErr, + packages: s.packages.Clone(), + isActivePackageCache: s.isActivePackageCache.Clone(), + actions: make(map[actionKey]*actionHandle, len(s.actions)), + files: s.files.Clone(), + goFiles: s.goFiles.Clone(), + parseKeysByURI: s.parseKeysByURI.Clone(), + symbols: make(map[span.URI]*symbolHandle, len(s.symbols)), + workspacePackages: make(map[PackageID]PackagePath, len(s.workspacePackages)), + unloadableFiles: make(map[span.URI]struct{}, len(s.unloadableFiles)), + parseModHandles: make(map[span.URI]*parseModHandle, len(s.parseModHandles)), + parseWorkHandles: make(map[span.URI]*parseWorkHandle, len(s.parseWorkHandles)), + modTidyHandles: make(map[span.URI]*modTidyHandle, len(s.modTidyHandles)), + modWhyHandles: make(map[span.URI]*modWhyHandle, len(s.modWhyHandles)), + knownSubdirs: s.knownSubdirs.Clone(), + workspace: newWorkspace, } // Copy all of the FileHandles. 
@@ -1975,9 +1987,10 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC result.meta = s.meta } - // Update workspace packages, if necessary. + // Update workspace and active packages, if necessary. if result.meta != s.meta || anyFileOpenedOrClosed { result.workspacePackages = computeWorkspacePackagesLocked(result, result.meta) + result.resetIsActivePackageLocked() } else { result.workspacePackages = s.workspacePackages } From b929f3bf4d57d9023b392ff552aa21a4845e1a07 Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Wed, 1 Jun 2022 15:20:56 -0400 Subject: [PATCH 079/136] internal/span: make NewRange accept File, not FileSet span.NewRange now accepts a *token.File and two token.Pos. It is the caller's responsibility to look up the File in the FileSet, if necessary (it usually isn't), and to ensure the Pos values are valid. Ditto NewMappedRange. This reduces the creep of Snapshot into functions that have no need to know about it. Also the bug.Report call in NewRange has been pushed up into the caller and logically eliminated in all but one case. I think we should aim for the invariant that functions that operate on a single file should accept a *token.File, not a FileSet; only functions that operate on sets of files (e.g. type checking, analysis) should use a FileSet. This is not always possible: some public functions accept a FileSet only to re-lookup a single file already known to the caller; if necessary we could provide token.File variants of these. This may ultimately allow us to create a new FileSet per call to the parser, so that Files and FileSets are in 1:1 correspondance and there is no global FileSet. (It currently grows without bound, on the order of kilobytes per keystroke.) FileSets containing multiple files, needed when we interact with the type checker and analysis, may be temporarily synthesized on demand from a set of Files, analogous to mmap'ing a few files into a blank address space. 
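
The principle can be shown with only the standard go/token package; the helper lineOf below is invented for the sketch, but gopls helpers such as span.NewRange now follow the same shape. A function that operates on a single file takes the *token.File, which the caller resolves from the FileSet once.

    package main

    import (
        "fmt"
        "go/parser"
        "go/token"
    )

    // lineOf needs only the one *token.File containing pos, not the FileSet.
    func lineOf(tok *token.File, pos token.Pos) int {
        return tok.Line(pos)
    }

    func main() {
        fset := token.NewFileSet()
        f, err := parser.ParseFile(fset, "x.go", "package x\n\nvar V int\n", 0)
        if err != nil {
            panic(err)
        }
        tok := fset.File(f.Pos()) // the caller looks up the file once
        fmt.Println(lineOf(tok, f.Name.Pos())) // 1: the package clause is on line 1
    }
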
Also: - replace File.Position(pos).Line by File.Line(pos) - replace pos == token.NoPos by pos.IsValid() - avoid fishy token.Pos conversions in link.go - other minor simplifications Change-Id: Ia3119e0ac7e193801fbafa81c8f48acfa14e9ae4 Reviewed-on: https://go-review.googlesource.com/c/tools/+/409935 Auto-Submit: Alan Donovan Reviewed-by: Robert Findley Run-TryBot: Alan Donovan gopls-CI: kokoro TryBot-Result: Gopher Robot --- go/analysis/analysistest/analysistest.go | 2 +- go/analysis/passes/tests/tests.go | 2 + go/packages/packagestest/expect.go | 16 +++---- internal/lsp/cache/check.go | 2 +- internal/lsp/cache/errors.go | 28 ++++++++--- internal/lsp/cache/load.go | 2 +- internal/lsp/cache/mod_tidy.go | 7 +-- internal/lsp/code_action.go | 2 +- internal/lsp/diagnostics.go | 2 +- internal/lsp/link.go | 27 +++++++---- internal/lsp/semantic.go | 2 +- internal/lsp/source/call_hierarchy.go | 20 +++++--- internal/lsp/source/code_lens.go | 10 ++-- internal/lsp/source/completion/completion.go | 16 ++++--- internal/lsp/source/completion/definition.go | 4 +- internal/lsp/source/completion/package.go | 12 ++--- internal/lsp/source/completion/util.go | 2 +- internal/lsp/source/extract.go | 23 +++++---- internal/lsp/source/fix.go | 11 ++++- internal/lsp/source/folding_range.go | 38 +++++++-------- internal/lsp/source/identifier.go | 4 +- internal/lsp/source/references.go | 9 ++-- internal/lsp/source/rename.go | 31 ++++--------- internal/lsp/source/rename_check.go | 19 ++++---- internal/lsp/source/util.go | 49 +++++++++++--------- internal/lsp/source/util_test.go | 2 +- internal/span/token.go | 42 +++++++++++------ 27 files changed, 217 insertions(+), 167 deletions(-) diff --git a/go/analysis/analysistest/analysistest.go b/go/analysis/analysistest/analysistest.go index 6ef2e7984fa..ea67807f78c 100644 --- a/go/analysis/analysistest/analysistest.go +++ b/go/analysis/analysistest/analysistest.go @@ -142,7 +142,7 @@ func RunWithSuggestedFixes(t Testing, dir string, a *analysis.Analyzer, patterns } fileContents[file] = contents } - spn, err := span.NewRange(act.Pass.Fset, edit.Pos, edit.End).Span() + spn, err := span.NewRange(file, edit.Pos, edit.End).Span() if err != nil { t.Errorf("error converting edit to span %s: %v", file.Name(), err) } diff --git a/go/analysis/passes/tests/tests.go b/go/analysis/passes/tests/tests.go index 56b20ebd519..cab2fa20fa5 100644 --- a/go/analysis/passes/tests/tests.go +++ b/go/analysis/passes/tests/tests.go @@ -475,10 +475,12 @@ func checkTest(pass *analysis.Pass, fn *ast.FuncDecl, prefix string) { if tparams := typeparams.ForFuncType(fn.Type); tparams != nil && len(tparams.List) > 0 { // Note: cmd/go/internal/load also errors about TestXXX and BenchmarkXXX functions with type parameters. // We have currently decided to also warn before compilation/package loading. This can help users in IDEs. + // TODO(adonovan): use ReportRangef(tparams). pass.Reportf(fn.Pos(), "%s has type parameters: it will not be run by go test as a %sXXX function", fn.Name.Name, prefix) } if !isTestSuffix(fn.Name.Name[len(prefix):]) { + // TODO(adonovan): use ReportRangef(fn.Name). 
pass.Reportf(fn.Pos(), "%s has malformed name: first letter after '%s' must not be lowercase", fn.Name.Name, prefix) } } diff --git a/go/packages/packagestest/expect.go b/go/packages/packagestest/expect.go index 430258681f5..841099c0cdc 100644 --- a/go/packages/packagestest/expect.go +++ b/go/packages/packagestest/expect.go @@ -409,6 +409,7 @@ func (e *Exported) buildConverter(pt reflect.Type) (converter, error) { } func (e *Exported) rangeConverter(n *expect.Note, args []interface{}) (span.Range, []interface{}, error) { + tokFile := e.ExpectFileSet.File(n.Pos) if len(args) < 1 { return span.Range{}, nil, fmt.Errorf("missing argument") } @@ -419,10 +420,9 @@ func (e *Exported) rangeConverter(n *expect.Note, args []interface{}) (span.Rang // handle the special identifiers switch arg { case eofIdentifier: - // end of file identifier, look up the current file - f := e.ExpectFileSet.File(n.Pos) - eof := f.Pos(f.Size()) - return span.NewRange(e.ExpectFileSet, eof, token.NoPos), args, nil + // end of file identifier + eof := tokFile.Pos(tokFile.Size()) + return span.NewRange(tokFile, eof, eof), args, nil default: // look up an marker by name mark, ok := e.markers[string(arg)] @@ -436,19 +436,19 @@ func (e *Exported) rangeConverter(n *expect.Note, args []interface{}) (span.Rang if err != nil { return span.Range{}, nil, err } - if start == token.NoPos { + if !start.IsValid() { return span.Range{}, nil, fmt.Errorf("%v: pattern %s did not match", e.ExpectFileSet.Position(n.Pos), arg) } - return span.NewRange(e.ExpectFileSet, start, end), args, nil + return span.NewRange(tokFile, start, end), args, nil case *regexp.Regexp: start, end, err := expect.MatchBefore(e.ExpectFileSet, e.FileContents, n.Pos, arg) if err != nil { return span.Range{}, nil, err } - if start == token.NoPos { + if !start.IsValid() { return span.Range{}, nil, fmt.Errorf("%v: pattern %s did not match", e.ExpectFileSet.Position(n.Pos), arg) } - return span.NewRange(e.ExpectFileSet, start, end), args, nil + return span.NewRange(tokFile, start, end), args, nil default: return span.Range{}, nil, fmt.Errorf("cannot convert %v to pos", arg) } diff --git a/internal/lsp/cache/check.go b/internal/lsp/cache/check.go index 7ebc777d762..aae6de0eea7 100644 --- a/internal/lsp/cache/check.go +++ b/internal/lsp/cache/check.go @@ -664,7 +664,7 @@ func (s *snapshot) depsErrors(ctx context.Context, pkg *pkg) ([]*source.Diagnost } for _, imp := range allImports[item] { - rng, err := source.NewMappedRange(s.FileSet(), imp.cgf.Mapper, imp.imp.Pos(), imp.imp.End()).Range() + rng, err := source.NewMappedRange(imp.cgf.Tok, imp.cgf.Mapper, imp.imp.Pos(), imp.imp.End()).Range() if err != nil { return nil, err } diff --git a/internal/lsp/cache/errors.go b/internal/lsp/cache/errors.go index 342f2bea5d7..a1aecb35c7b 100644 --- a/internal/lsp/cache/errors.go +++ b/internal/lsp/cache/errors.go @@ -16,6 +16,7 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/packages" "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/lsp/bug" "golang.org/x/tools/internal/lsp/command" "golang.org/x/tools/internal/lsp/protocol" "golang.org/x/tools/internal/lsp/source" @@ -82,7 +83,7 @@ func parseErrorDiagnostics(snapshot *snapshot, pkg *pkg, errList scanner.ErrorLi return nil, err } pos := pgf.Tok.Pos(e.Pos.Offset) - spn, err := span.NewRange(snapshot.FileSet(), pos, pos).Span() + spn, err := span.NewRange(pgf.Tok, pos, pos).Span() if err != nil { return nil, err } @@ -196,8 +197,15 @@ func analysisDiagnosticDiagnostics(snapshot 
*snapshot, pkg *pkg, a *analysis.Ana break } } - - spn, err := span.NewRange(snapshot.FileSet(), e.Pos, e.End).Span() + tokFile := snapshot.FileSet().File(e.Pos) + if tokFile == nil { + return nil, bug.Errorf("no file for position of %q diagnostic", e.Category) + } + end := e.End + if !end.IsValid() { + end = e.Pos + } + spn, err := span.NewRange(tokFile, e.Pos, end).Span() if err != nil { return nil, err } @@ -282,7 +290,11 @@ func suggestedAnalysisFixes(snapshot *snapshot, pkg *pkg, diag *analysis.Diagnos for _, fix := range diag.SuggestedFixes { edits := make(map[span.URI][]protocol.TextEdit) for _, e := range fix.TextEdits { - spn, err := span.NewRange(snapshot.FileSet(), e.Pos, e.End).Span() + tokFile := snapshot.FileSet().File(e.Pos) + if tokFile == nil { + return nil, bug.Errorf("no file for edit position") + } + spn, err := span.NewRange(tokFile, e.Pos, e.End).Span() if err != nil { return nil, err } @@ -310,7 +322,11 @@ func suggestedAnalysisFixes(snapshot *snapshot, pkg *pkg, diag *analysis.Diagnos func relatedInformation(pkg *pkg, fset *token.FileSet, diag *analysis.Diagnostic) ([]source.RelatedInformation, error) { var out []source.RelatedInformation for _, related := range diag.Related { - spn, err := span.NewRange(fset, related.Pos, related.End).Span() + tokFile := fset.File(related.Pos) + if tokFile == nil { + return nil, bug.Errorf("no file for %q diagnostic position", diag.Category) + } + spn, err := span.NewRange(tokFile, related.Pos, related.End).Span() if err != nil { return nil, err } @@ -397,7 +413,7 @@ func parseGoListImportCycleError(snapshot *snapshot, e packages.Error, pkg *pkg) // Search file imports for the import that is causing the import cycle. for _, imp := range cgf.File.Imports { if imp.Path.Value == circImp { - spn, err := span.NewRange(snapshot.FileSet(), imp.Pos(), imp.End()).Span() + spn, err := span.NewRange(cgf.Tok, imp.Pos(), imp.End()).Span() if err != nil { return msg, span.Span{}, false } diff --git a/internal/lsp/cache/load.go b/internal/lsp/cache/load.go index e4fd6719872..08c88ab8e74 100644 --- a/internal/lsp/cache/load.go +++ b/internal/lsp/cache/load.go @@ -359,7 +359,7 @@ func (s *snapshot) applyCriticalErrorToFiles(ctx context.Context, msg string, fi switch s.view.FileKind(fh) { case source.Go: if pgf, err := s.ParseGo(ctx, fh, source.ParseHeader); err == nil { - pkgDecl := span.NewRange(s.FileSet(), pgf.File.Package, pgf.File.Name.End()) + pkgDecl := span.NewRange(pgf.Tok, pgf.File.Package, pgf.File.Name.End()) if spn, err := pkgDecl.Span(); err == nil { rng, _ = pgf.Mapper.Range(spn) } diff --git a/internal/lsp/cache/mod_tidy.go b/internal/lsp/cache/mod_tidy.go index b81caabde5f..91394659503 100644 --- a/internal/lsp/cache/mod_tidy.go +++ b/internal/lsp/cache/mod_tidy.go @@ -8,6 +8,7 @@ import ( "context" "fmt" "go/ast" + "go/token" "io/ioutil" "os" "path/filepath" @@ -282,7 +283,7 @@ func modTidyDiagnostics(ctx context.Context, snapshot source.Snapshot, pm *sourc if !ok { return nil, fmt.Errorf("no missing module fix for %q (%q)", importPath, req.Mod.Path) } - srcErr, err := missingModuleForImport(snapshot, m, imp, req, fixes) + srcErr, err := missingModuleForImport(pgf.Tok, m, imp, req, fixes) if err != nil { return nil, err } @@ -445,11 +446,11 @@ func switchDirectness(req *modfile.Require, m *protocol.ColumnMapper, computeEdi // missingModuleForImport creates an error for a given import path that comes // from a missing module. 
-func missingModuleForImport(snapshot source.Snapshot, m *protocol.ColumnMapper, imp *ast.ImportSpec, req *modfile.Require, fixes []source.SuggestedFix) (*source.Diagnostic, error) { +func missingModuleForImport(file *token.File, m *protocol.ColumnMapper, imp *ast.ImportSpec, req *modfile.Require, fixes []source.SuggestedFix) (*source.Diagnostic, error) { if req.Syntax == nil { return nil, fmt.Errorf("no syntax for %v", req) } - spn, err := span.NewRange(snapshot.FileSet(), imp.Path.Pos(), imp.Path.End()).Span() + spn, err := span.NewRange(file, imp.Path.Pos(), imp.Path.End()).Span() if err != nil { return nil, err } diff --git a/internal/lsp/code_action.go b/internal/lsp/code_action.go index 9d78e3c9ac9..4147e17ce6d 100644 --- a/internal/lsp/code_action.go +++ b/internal/lsp/code_action.go @@ -294,7 +294,7 @@ func extractionFixes(ctx context.Context, snapshot source.Snapshot, pkg source.P } puri := protocol.URIFromSpanURI(uri) var commands []protocol.Command - if _, ok, methodOk, _ := source.CanExtractFunction(snapshot.FileSet(), srng, pgf.Src, pgf.File); ok { + if _, ok, methodOk, _ := source.CanExtractFunction(pgf.Tok, srng, pgf.Src, pgf.File); ok { cmd, err := command.NewApplyFixCommand("Extract function", command.ApplyFixArgs{ URI: puri, Fix: source.ExtractFunction, diff --git a/internal/lsp/diagnostics.go b/internal/lsp/diagnostics.go index 9648921ef5d..59fe1716b9b 100644 --- a/internal/lsp/diagnostics.go +++ b/internal/lsp/diagnostics.go @@ -467,7 +467,7 @@ func (s *Server) checkForOrphanedFile(ctx context.Context, snapshot source.Snaps if !pgf.File.Name.Pos().IsValid() { return nil } - spn, err := span.NewRange(snapshot.FileSet(), pgf.File.Name.Pos(), pgf.File.Name.End()).Span() + spn, err := span.NewRange(pgf.Tok, pgf.File.Name.Pos(), pgf.File.Name.End()).Span() if err != nil { return nil } diff --git a/internal/lsp/link.go b/internal/lsp/link.go index 7bb09b40355..a2962b6659a 100644 --- a/internal/lsp/link.go +++ b/internal/lsp/link.go @@ -68,7 +68,7 @@ func modLinks(ctx context.Context, snapshot source.Snapshot, fh source.FileHandl // dependency within the require statement. start, end := token.Pos(s+i), token.Pos(s+i+len(dep)) target := source.BuildLink(snapshot.View().Options().LinkTarget, "mod/"+req.Mod.String(), "") - l, err := toProtocolLink(snapshot, pm.Mapper, target, start, end, source.Mod) + l, err := toProtocolLink(nil, pm.Mapper, target, start, end, source.Mod) if err != nil { return nil, err } @@ -78,6 +78,10 @@ func modLinks(ctx context.Context, snapshot source.Snapshot, fh source.FileHandl if syntax := pm.File.Syntax; syntax == nil { return links, nil } + + // Create a throwaway token.File. + tokFile := token.NewFileSet().AddFile(fh.URI().Filename(), -1, len(pm.Mapper.Content)) + // Get all the links that are contained in the comments of the file. 
for _, expr := range pm.File.Syntax.Stmt { comments := expr.Comment() @@ -86,7 +90,8 @@ func modLinks(ctx context.Context, snapshot source.Snapshot, fh source.FileHandl } for _, section := range [][]modfile.Comment{comments.Before, comments.Suffix, comments.After} { for _, comment := range section { - l, err := findLinksInString(ctx, snapshot, comment.Token, token.Pos(comment.Start.Byte), pm.Mapper, source.Mod) + start := tokFile.Pos(comment.Start.Byte) + l, err := findLinksInString(ctx, snapshot, comment.Token, start, tokFile, pm.Mapper, source.Mod) if err != nil { return nil, err } @@ -144,7 +149,7 @@ func goLinks(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle start := imp.Path.Pos() + 1 end := imp.Path.End() - 1 target = source.BuildLink(view.Options().LinkTarget, target, "") - l, err := toProtocolLink(snapshot, pgf.Mapper, target, start, end, source.Go) + l, err := toProtocolLink(pgf.Tok, pgf.Mapper, target, start, end, source.Go) if err != nil { return nil, err } @@ -152,7 +157,7 @@ func goLinks(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle } } for _, s := range str { - l, err := findLinksInString(ctx, snapshot, s.Value, s.Pos(), pgf.Mapper, source.Go) + l, err := findLinksInString(ctx, snapshot, s.Value, s.Pos(), pgf.Tok, pgf.Mapper, source.Go) if err != nil { return nil, err } @@ -160,7 +165,7 @@ func goLinks(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle } for _, commentGroup := range pgf.File.Comments { for _, comment := range commentGroup.List { - l, err := findLinksInString(ctx, snapshot, comment.Text, comment.Pos(), pgf.Mapper, source.Go) + l, err := findLinksInString(ctx, snapshot, comment.Text, comment.Pos(), pgf.Tok, pgf.Mapper, source.Go) if err != nil { return nil, err } @@ -193,7 +198,8 @@ var acceptedSchemes = map[string]bool{ "https": true, } -func findLinksInString(ctx context.Context, snapshot source.Snapshot, src string, pos token.Pos, m *protocol.ColumnMapper, fileKind source.FileKind) ([]protocol.DocumentLink, error) { +// tokFile may be a throwaway File for non-Go files. 
+func findLinksInString(ctx context.Context, snapshot source.Snapshot, src string, pos token.Pos, tokFile *token.File, m *protocol.ColumnMapper, fileKind source.FileKind) ([]protocol.DocumentLink, error) { var links []protocol.DocumentLink for _, index := range snapshot.View().Options().URLRegexp.FindAllIndex([]byte(src), -1) { start, end := index[0], index[1] @@ -216,7 +222,7 @@ func findLinksInString(ctx context.Context, snapshot source.Snapshot, src string if !acceptedSchemes[linkURL.Scheme] { continue } - l, err := toProtocolLink(snapshot, m, linkURL.String(), startPos, endPos, fileKind) + l, err := toProtocolLink(tokFile, m, linkURL.String(), startPos, endPos, fileKind) if err != nil { return nil, err } @@ -234,7 +240,7 @@ func findLinksInString(ctx context.Context, snapshot source.Snapshot, src string } org, repo, number := matches[1], matches[2], matches[3] target := fmt.Sprintf("https://github.com/%s/%s/issues/%s", org, repo, number) - l, err := toProtocolLink(snapshot, m, target, startPos, endPos, fileKind) + l, err := toProtocolLink(tokFile, m, target, startPos, endPos, fileKind) if err != nil { return nil, err } @@ -255,11 +261,12 @@ var ( issueRegexp *regexp.Regexp ) -func toProtocolLink(snapshot source.Snapshot, m *protocol.ColumnMapper, target string, start, end token.Pos, fileKind source.FileKind) (protocol.DocumentLink, error) { +func toProtocolLink(tokFile *token.File, m *protocol.ColumnMapper, target string, start, end token.Pos, fileKind source.FileKind) (protocol.DocumentLink, error) { var rng protocol.Range switch fileKind { case source.Go: - spn, err := span.NewRange(snapshot.FileSet(), start, end).Span() + // TODO(adonovan): can we now use this logic for the Mod case too? + spn, err := span.NewRange(tokFile, start, end).Span() if err != nil { return protocol.DocumentLink{}, err } diff --git a/internal/lsp/semantic.go b/internal/lsp/semantic.go index 286d2fd160d..429dc0660b2 100644 --- a/internal/lsp/semantic.go +++ b/internal/lsp/semantic.go @@ -186,7 +186,7 @@ func (e *encoded) token(start token.Pos, leng int, typ tokenType, mods []string) } // want a line and column from start (in LSP coordinates) // [//line directives should be ignored] - rng := source.NewMappedRange(e.fset, e.pgf.Mapper, start, start+token.Pos(leng)) + rng := source.NewMappedRange(e.pgf.Tok, e.pgf.Mapper, start, start+token.Pos(leng)) lspRange, err := rng.Range() if err != nil { // possibly a //line directive. 
TODO(pjw): fix this somehow diff --git a/internal/lsp/source/call_hierarchy.go b/internal/lsp/source/call_hierarchy.go index c2c8a1866d0..4e7daf0f9bc 100644 --- a/internal/lsp/source/call_hierarchy.go +++ b/internal/lsp/source/call_hierarchy.go @@ -152,12 +152,12 @@ outer: kind = protocol.Function } - nameStart, nameEnd := nameIdent.NamePos, nameIdent.NamePos+token.Pos(len(nameIdent.Name)) + nameStart, nameEnd := nameIdent.Pos(), nameIdent.End() if funcLit != nil { nameStart, nameEnd = funcLit.Type.Func, funcLit.Type.Params.Pos() kind = protocol.Function } - rng, err := NewMappedRange(snapshot.FileSet(), pgf.Mapper, nameStart, nameEnd).Range() + rng, err := NewMappedRange(pgf.Tok, pgf.Mapper, nameStart, nameEnd).Range() if err != nil { return protocol.CallHierarchyItem{}, err } @@ -194,14 +194,22 @@ func OutgoingCalls(ctx context.Context, snapshot Snapshot, fh FileHandle, pos pr if _, ok := identifier.Declaration.obj.Type().Underlying().(*types.Signature); !ok { return nil, nil } - if identifier.Declaration.node == nil { + node := identifier.Declaration.node + if node == nil { return nil, nil } if len(identifier.Declaration.MappedRange) == 0 { return nil, nil } declMappedRange := identifier.Declaration.MappedRange[0] - callExprs, err := collectCallExpressions(snapshot.FileSet(), declMappedRange.m, identifier.Declaration.node) + // TODO(adonovan): avoid Fileset.File call by somehow getting at + // declMappedRange.spanRange.TokFile, or making Identifier retain the + // token.File of the identifier and its declaration, since it looks up both anyway. + tokFile := snapshot.FileSet().File(node.Pos()) + if tokFile == nil { + return nil, fmt.Errorf("no file for position") + } + callExprs, err := collectCallExpressions(tokFile, declMappedRange.m, node) if err != nil { return nil, err } @@ -210,7 +218,7 @@ func OutgoingCalls(ctx context.Context, snapshot Snapshot, fh FileHandle, pos pr } // collectCallExpressions collects call expression ranges inside a function. 
-func collectCallExpressions(fset *token.FileSet, mapper *protocol.ColumnMapper, node ast.Node) ([]protocol.Range, error) { +func collectCallExpressions(tokFile *token.File, mapper *protocol.ColumnMapper, node ast.Node) ([]protocol.Range, error) { type callPos struct { start, end token.Pos } @@ -240,7 +248,7 @@ func collectCallExpressions(fset *token.FileSet, mapper *protocol.ColumnMapper, callRanges := []protocol.Range{} for _, call := range callPositions { - callRange, err := NewMappedRange(fset, mapper, call.start, call.end).Range() + callRange, err := NewMappedRange(tokFile, mapper, call.start, call.end).Range() if err != nil { return nil, err } diff --git a/internal/lsp/source/code_lens.go b/internal/lsp/source/code_lens.go index 0ab857ac600..0e9453a662b 100644 --- a/internal/lsp/source/code_lens.go +++ b/internal/lsp/source/code_lens.go @@ -67,7 +67,7 @@ func runTestCodeLens(ctx context.Context, snapshot Snapshot, fh FileHandle) ([]p return nil, err } // add a code lens to the top of the file which runs all benchmarks in the file - rng, err := NewMappedRange(snapshot.FileSet(), pgf.Mapper, pgf.File.Package, pgf.File.Package).Range() + rng, err := NewMappedRange(pgf.Tok, pgf.Mapper, pgf.File.Package, pgf.File.Package).Range() if err != nil { return nil, err } @@ -111,7 +111,7 @@ func TestsAndBenchmarks(ctx context.Context, snapshot Snapshot, fh FileHandle) ( continue } - rng, err := NewMappedRange(snapshot.FileSet(), pgf.Mapper, d.Pos(), fn.End()).Range() + rng, err := NewMappedRange(pgf.Tok, pgf.Mapper, fn.Pos(), fn.End()).Range() if err != nil { return out, err } @@ -177,7 +177,7 @@ func goGenerateCodeLens(ctx context.Context, snapshot Snapshot, fh FileHandle) ( if !strings.HasPrefix(l.Text, ggDirective) { continue } - rng, err := NewMappedRange(snapshot.FileSet(), pgf.Mapper, l.Pos(), l.Pos()+token.Pos(len(ggDirective))).Range() + rng, err := NewMappedRange(pgf.Tok, pgf.Mapper, l.Pos(), l.Pos()+token.Pos(len(ggDirective))).Range() if err != nil { return nil, err } @@ -214,7 +214,7 @@ func regenerateCgoLens(ctx context.Context, snapshot Snapshot, fh FileHandle) ([ if c == nil { return nil, nil } - rng, err := NewMappedRange(snapshot.FileSet(), pgf.Mapper, c.Pos(), c.EndPos).Range() + rng, err := NewMappedRange(pgf.Tok, pgf.Mapper, c.Pos(), c.End()).Range() if err != nil { return nil, err } @@ -231,7 +231,7 @@ func toggleDetailsCodeLens(ctx context.Context, snapshot Snapshot, fh FileHandle if err != nil { return nil, err } - rng, err := NewMappedRange(snapshot.FileSet(), pgf.Mapper, pgf.File.Package, pgf.File.Package).Range() + rng, err := NewMappedRange(pgf.Tok, pgf.Mapper, pgf.File.Package, pgf.File.Package).Range() if err != nil { return nil, err } diff --git a/internal/lsp/source/completion/completion.go b/internal/lsp/source/completion/completion.go index a2dfae69841..be613d3e3e3 100644 --- a/internal/lsp/source/completion/completion.go +++ b/internal/lsp/source/completion/completion.go @@ -173,8 +173,9 @@ type completer struct { // file is the AST of the file associated with this completion request. file *ast.File - // pos is the position at which the request was triggered. - pos token.Pos + // (tokFile, pos) is the position at which the request was triggered. + tokFile *token.File + pos token.Pos // path is the path of AST nodes enclosing the position. path []ast.Node @@ -325,7 +326,7 @@ func (c *completer) setSurrounding(ident *ast.Ident) { content: ident.Name, cursor: c.pos, // Overwrite the prefix only. 
- rng: span.NewRange(c.snapshot.FileSet(), ident.Pos(), ident.End()), + rng: span.NewRange(c.tokFile, ident.Pos(), ident.End()), } c.setMatcherFromPrefix(c.surrounding.Prefix()) @@ -347,7 +348,7 @@ func (c *completer) getSurrounding() *Selection { c.surrounding = &Selection{ content: "", cursor: c.pos, - rng: span.NewRange(c.snapshot.FileSet(), c.pos, c.pos), + rng: span.NewRange(c.tokFile, c.pos, c.pos), } } return c.surrounding @@ -486,7 +487,7 @@ func Completion(ctx context.Context, snapshot source.Snapshot, fh source.FileHan qual := types.RelativeTo(pkg.GetTypes()) objStr = types.ObjectString(obj, qual) } - ans, sel := definition(path, obj, snapshot.FileSet(), fh) + ans, sel := definition(path, obj, pgf.Tok, fh) if ans != nil { sort.Slice(ans, func(i, j int) bool { return ans[i].Score > ans[j].Score @@ -513,6 +514,7 @@ func Completion(ctx context.Context, snapshot source.Snapshot, fh source.FileHan }, fh: fh, filename: fh.URI().Filename(), + tokFile: pgf.Tok, file: pgf.File, path: path, pos: pos, @@ -798,7 +800,7 @@ func (c *completer) populateImportCompletions(ctx context.Context, searchImport c.surrounding = &Selection{ content: content, cursor: c.pos, - rng: span.NewRange(c.snapshot.FileSet(), start, end), + rng: span.NewRange(c.tokFile, start, end), } seenImports := make(map[string]struct{}) @@ -1018,7 +1020,7 @@ func (c *completer) setSurroundingForComment(comments *ast.CommentGroup) { c.surrounding = &Selection{ content: cursorComment.Text[start:end], cursor: c.pos, - rng: span.NewRange(c.snapshot.FileSet(), token.Pos(int(cursorComment.Slash)+start), token.Pos(int(cursorComment.Slash)+end)), + rng: span.NewRange(c.tokFile, token.Pos(int(cursorComment.Slash)+start), token.Pos(int(cursorComment.Slash)+end)), } c.setMatcherFromPrefix(c.surrounding.Prefix()) } diff --git a/internal/lsp/source/completion/definition.go b/internal/lsp/source/completion/definition.go index 44d5a33b2f4..7644fc443d6 100644 --- a/internal/lsp/source/completion/definition.go +++ b/internal/lsp/source/completion/definition.go @@ -23,7 +23,7 @@ import ( // BenchmarkFoo(b *testing.B), FuzzFoo(f *testing.F) // path[0] is known to be *ast.Ident -func definition(path []ast.Node, obj types.Object, fset *token.FileSet, fh source.FileHandle) ([]CompletionItem, *Selection) { +func definition(path []ast.Node, obj types.Object, tokFile *token.File, fh source.FileHandle) ([]CompletionItem, *Selection) { if _, ok := obj.(*types.Func); !ok { return nil, nil // not a function at all } @@ -40,7 +40,7 @@ func definition(path []ast.Node, obj types.Object, fset *token.FileSet, fh sourc sel := &Selection{ content: "", cursor: pos, - rng: span.NewRange(fset, pos, pos), + rng: span.NewRange(tokFile, pos, pos), } var ans []CompletionItem diff --git a/internal/lsp/source/completion/package.go b/internal/lsp/source/completion/package.go index 21244efb5ec..566d8ee2a05 100644 --- a/internal/lsp/source/completion/package.go +++ b/internal/lsp/source/completion/package.go @@ -104,7 +104,7 @@ func packageCompletionSurrounding(fset *token.FileSet, pgf *source.ParsedGoFile, return &Selection{ content: name.Name, cursor: cursor, - rng: span.NewRange(fset, name.Pos(), name.End()), + rng: span.NewRange(tok, name.Pos(), name.End()), }, nil } } @@ -141,7 +141,7 @@ func packageCompletionSurrounding(fset *token.FileSet, pgf *source.ParsedGoFile, return &Selection{ content: content, cursor: cursor, - rng: span.NewRange(fset, start, end), + rng: span.NewRange(tok, start, end), }, nil } } @@ -154,7 +154,7 @@ func packageCompletionSurrounding(fset 
*token.FileSet, pgf *source.ParsedGoFile, } // If the cursor is in a comment, don't offer any completions. - if cursorInComment(fset, cursor, pgf.Src) { + if cursorInComment(fset.File(cursor), cursor, pgf.Src) { return nil, fmt.Errorf("cursor in comment") } @@ -168,13 +168,13 @@ func packageCompletionSurrounding(fset *token.FileSet, pgf *source.ParsedGoFile, return &Selection{ content: "", cursor: cursor, - rng: span.NewRange(fset, start, end), + rng: span.NewRange(tok, start, end), }, nil } -func cursorInComment(fset *token.FileSet, cursor token.Pos, src []byte) bool { +func cursorInComment(file *token.File, cursor token.Pos, src []byte) bool { var s scanner.Scanner - s.Init(fset.File(cursor), src, func(_ token.Position, _ string) {}, scanner.ScanComments) + s.Init(file, src, func(_ token.Position, _ string) {}, scanner.ScanComments) for { pos, tok, lit := s.Scan() if pos <= cursor && cursor <= token.Pos(int(pos)+len(lit)) { diff --git a/internal/lsp/source/completion/util.go b/internal/lsp/source/completion/util.go index e6d3bfd745f..e0a264bef9e 100644 --- a/internal/lsp/source/completion/util.go +++ b/internal/lsp/source/completion/util.go @@ -311,7 +311,7 @@ func isBasicKind(t types.Type, k types.BasicInfo) bool { } func (c *completer) editText(from, to token.Pos, newText string) ([]protocol.TextEdit, error) { - rng := source.NewMappedRange(c.snapshot.FileSet(), c.mapper, from, to) + rng := source.NewMappedRange(c.tokFile, c.mapper, from, to) spn, err := rng.Span() if err != nil { return nil, err diff --git a/internal/lsp/source/extract.go b/internal/lsp/source/extract.go index 90999d821a6..a4e0a148adb 100644 --- a/internal/lsp/source/extract.go +++ b/internal/lsp/source/extract.go @@ -18,11 +18,13 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/lsp/bug" "golang.org/x/tools/internal/lsp/safetoken" "golang.org/x/tools/internal/span" ) func extractVariable(fset *token.FileSet, rng span.Range, src []byte, file *ast.File, _ *types.Package, info *types.Info) (*analysis.SuggestedFix, error) { + tokFile := fset.File(file.Pos()) expr, path, ok, err := CanExtractVariable(rng, file) if !ok { return nil, fmt.Errorf("extractVariable: cannot extract %s: %v", fset.Position(rng.Start), err) @@ -60,11 +62,7 @@ func extractVariable(fset *token.FileSet, rng span.Range, src []byte, file *ast. 
if insertBeforeStmt == nil { return nil, fmt.Errorf("cannot find location to insert extraction") } - tok := fset.File(expr.Pos()) - if tok == nil { - return nil, fmt.Errorf("no file for pos %v", fset.Position(file.Pos())) - } - indent, err := calculateIndentation(src, tok, insertBeforeStmt) + indent, err := calculateIndentation(src, tokFile, insertBeforeStmt) if err != nil { return nil, err } @@ -217,7 +215,12 @@ func extractFunctionMethod(fset *token.FileSet, rng span.Range, src []byte, file if isMethod { errorPrefix = "extractMethod" } - p, ok, methodOk, err := CanExtractFunction(fset, rng, src, file) + + tok := fset.File(file.Pos()) + if tok == nil { + return nil, bug.Errorf("no file for position") + } + p, ok, methodOk, err := CanExtractFunction(tok, rng, src, file) if (!ok && !isMethod) || (!methodOk && isMethod) { return nil, fmt.Errorf("%s: cannot extract %s: %v", errorPrefix, fset.Position(rng.Start), err) @@ -344,7 +347,7 @@ func extractFunctionMethod(fset *token.FileSet, rng span.Range, src []byte, file if v.obj.Parent() == nil { return nil, fmt.Errorf("parent nil") } - isUsed, firstUseAfter := objUsed(info, span.NewRange(fset, rng.End, v.obj.Parent().End()), v.obj) + isUsed, firstUseAfter := objUsed(info, span.NewRange(tok, rng.End, v.obj.Parent().End()), v.obj) if v.assigned && isUsed && !varOverridden(info, firstUseAfter, v.obj, v.free, outer) { returnTypes = append(returnTypes, &ast.Field{Type: typ}) returns = append(returns, identifier) @@ -941,14 +944,10 @@ type fnExtractParams struct { // CanExtractFunction reports whether the code in the given range can be // extracted to a function. -func CanExtractFunction(fset *token.FileSet, rng span.Range, src []byte, file *ast.File) (*fnExtractParams, bool, bool, error) { +func CanExtractFunction(tok *token.File, rng span.Range, src []byte, file *ast.File) (*fnExtractParams, bool, bool, error) { if rng.Start == rng.End { return nil, false, false, fmt.Errorf("start and end are equal") } - tok := fset.File(file.Pos()) - if tok == nil { - return nil, false, false, fmt.Errorf("no file for pos %v", fset.Position(file.Pos())) - } var err error rng, err = adjustRangeForWhitespace(rng, tok, src) if err != nil { diff --git a/internal/lsp/source/fix.go b/internal/lsp/source/fix.go index 6a7f77dab36..dce279e2016 100644 --- a/internal/lsp/source/fix.go +++ b/internal/lsp/source/fix.go @@ -14,6 +14,7 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/internal/lsp/analysis/fillstruct" "golang.org/x/tools/internal/lsp/analysis/undeclaredname" + "golang.org/x/tools/internal/lsp/bug" "golang.org/x/tools/internal/lsp/protocol" "golang.org/x/tools/internal/span" ) @@ -84,7 +85,15 @@ func ApplyFix(ctx context.Context, fix string, snapshot Snapshot, fh VersionedFi fset := snapshot.FileSet() editsPerFile := map[span.URI]*protocol.TextDocumentEdit{} for _, edit := range suggestion.TextEdits { - spn, err := span.NewRange(fset, edit.Pos, edit.End).Span() + tokFile := fset.File(edit.Pos) + if tokFile == nil { + return nil, bug.Errorf("no file for edit position") + } + end := edit.End + if !end.IsValid() { + end = edit.Pos + } + spn, err := span.NewRange(tokFile, edit.Pos, end).Span() if err != nil { return nil, err } diff --git a/internal/lsp/source/folding_range.go b/internal/lsp/source/folding_range.go index 576308f9967..b70cb4decd8 100644 --- a/internal/lsp/source/folding_range.go +++ b/internal/lsp/source/folding_range.go @@ -41,13 +41,11 @@ func FoldingRange(ctx context.Context, snapshot Snapshot, fh FileHandle, lineFol return nil, nil } - 
fset := snapshot.FileSet() - // Get folding ranges for comments separately as they are not walked by ast.Inspect. - ranges = append(ranges, commentsFoldingRange(fset, pgf.Mapper, pgf.File)...) + ranges = append(ranges, commentsFoldingRange(pgf.Tok, pgf.Mapper, pgf.File)...) visit := func(n ast.Node) bool { - rng := foldingRangeFunc(fset, pgf.Mapper, n, lineFoldingOnly) + rng := foldingRangeFunc(pgf.Tok, pgf.Mapper, n, lineFoldingOnly) if rng != nil { ranges = append(ranges, rng) } @@ -66,7 +64,7 @@ func FoldingRange(ctx context.Context, snapshot Snapshot, fh FileHandle, lineFol } // foldingRangeFunc calculates the line folding range for ast.Node n -func foldingRangeFunc(fset *token.FileSet, m *protocol.ColumnMapper, n ast.Node, lineFoldingOnly bool) *FoldingRangeInfo { +func foldingRangeFunc(tokFile *token.File, m *protocol.ColumnMapper, n ast.Node, lineFoldingOnly bool) *FoldingRangeInfo { // TODO(suzmue): include trailing empty lines before the closing // parenthesis/brace. var kind protocol.FoldingRangeKind @@ -78,7 +76,7 @@ func foldingRangeFunc(fset *token.FileSet, m *protocol.ColumnMapper, n ast.Node, if num := len(n.List); num != 0 { startList, endList = n.List[0].Pos(), n.List[num-1].End() } - start, end = validLineFoldingRange(fset, n.Lbrace, n.Rbrace, startList, endList, lineFoldingOnly) + start, end = validLineFoldingRange(tokFile, n.Lbrace, n.Rbrace, startList, endList, lineFoldingOnly) case *ast.CaseClause: // Fold from position of ":" to end. start, end = n.Colon+1, n.End() @@ -94,7 +92,7 @@ func foldingRangeFunc(fset *token.FileSet, m *protocol.ColumnMapper, n ast.Node, if num := len(n.List); num != 0 { startList, endList = n.List[0].Pos(), n.List[num-1].End() } - start, end = validLineFoldingRange(fset, n.Opening, n.Closing, startList, endList, lineFoldingOnly) + start, end = validLineFoldingRange(tokFile, n.Opening, n.Closing, startList, endList, lineFoldingOnly) case *ast.GenDecl: // If this is an import declaration, set the kind to be protocol.Imports. if n.Tok == token.IMPORT { @@ -105,7 +103,7 @@ func foldingRangeFunc(fset *token.FileSet, m *protocol.ColumnMapper, n ast.Node, if num := len(n.Specs); num != 0 { startSpecs, endSpecs = n.Specs[0].Pos(), n.Specs[num-1].End() } - start, end = validLineFoldingRange(fset, n.Lparen, n.Rparen, startSpecs, endSpecs, lineFoldingOnly) + start, end = validLineFoldingRange(tokFile, n.Lparen, n.Rparen, startSpecs, endSpecs, lineFoldingOnly) case *ast.BasicLit: // Fold raw string literals from position of "`" to position of "`". if n.Kind == token.STRING && len(n.Value) >= 2 && n.Value[0] == '`' && n.Value[len(n.Value)-1] == '`' { @@ -117,7 +115,7 @@ func foldingRangeFunc(fset *token.FileSet, m *protocol.ColumnMapper, n ast.Node, if num := len(n.Elts); num != 0 { startElts, endElts = n.Elts[0].Pos(), n.Elts[num-1].End() } - start, end = validLineFoldingRange(fset, n.Lbrace, n.Rbrace, startElts, endElts, lineFoldingOnly) + start, end = validLineFoldingRange(tokFile, n.Lbrace, n.Rbrace, startElts, endElts, lineFoldingOnly) } // Check that folding positions are valid. @@ -125,18 +123,18 @@ func foldingRangeFunc(fset *token.FileSet, m *protocol.ColumnMapper, n ast.Node, return nil } // in line folding mode, do not fold if the start and end lines are the same. 
- if lineFoldingOnly && fset.Position(start).Line == fset.Position(end).Line { + if lineFoldingOnly && tokFile.Line(start) == tokFile.Line(end) { return nil } return &FoldingRangeInfo{ - MappedRange: NewMappedRange(fset, m, start, end), + MappedRange: NewMappedRange(tokFile, m, start, end), Kind: kind, } } // validLineFoldingRange returns start and end token.Pos for folding range if the range is valid. // returns token.NoPos otherwise, which fails token.IsValid check -func validLineFoldingRange(fset *token.FileSet, open, close, start, end token.Pos, lineFoldingOnly bool) (token.Pos, token.Pos) { +func validLineFoldingRange(tokFile *token.File, open, close, start, end token.Pos, lineFoldingOnly bool) (token.Pos, token.Pos) { if lineFoldingOnly { if !open.IsValid() || !close.IsValid() { return token.NoPos, token.NoPos @@ -146,8 +144,8 @@ func validLineFoldingRange(fset *token.FileSet, open, close, start, end token.Po // as an example, the example below should *not* fold: // var x = [2]string{"d", // "e" } - if fset.Position(open).Line == fset.Position(start).Line || - fset.Position(close).Line == fset.Position(end).Line { + if tokFile.Line(open) == tokFile.Line(start) || + tokFile.Line(close) == tokFile.Line(end) { return token.NoPos, token.NoPos } @@ -159,25 +157,25 @@ func validLineFoldingRange(fset *token.FileSet, open, close, start, end token.Po // commentsFoldingRange returns the folding ranges for all comment blocks in file. // The folding range starts at the end of the first line of the comment block, and ends at the end of the // comment block and has kind protocol.Comment. -func commentsFoldingRange(fset *token.FileSet, m *protocol.ColumnMapper, file *ast.File) (comments []*FoldingRangeInfo) { +func commentsFoldingRange(tokFile *token.File, m *protocol.ColumnMapper, file *ast.File) (comments []*FoldingRangeInfo) { for _, commentGrp := range file.Comments { - startGrp, endGrp := fset.Position(commentGrp.Pos()), fset.Position(commentGrp.End()) - if startGrp.Line == endGrp.Line { + startGrpLine, endGrpLine := tokFile.Line(commentGrp.Pos()), tokFile.Line(commentGrp.End()) + if startGrpLine == endGrpLine { // Don't fold single line comments. continue } firstComment := commentGrp.List[0] startPos, endLinePos := firstComment.Pos(), firstComment.End() - startCmmnt, endCmmnt := fset.Position(startPos), fset.Position(endLinePos) - if startCmmnt.Line != endCmmnt.Line { + startCmmntLine, endCmmntLine := tokFile.Line(startPos), tokFile.Line(endLinePos) + if startCmmntLine != endCmmntLine { // If the first comment spans multiple lines, then we want to have the // folding range start at the end of the first line. endLinePos = token.Pos(int(startPos) + len(strings.Split(firstComment.Text, "\n")[0])) } comments = append(comments, &FoldingRangeInfo{ // Fold from the end of the first line comment to the end of the comment block. - MappedRange: NewMappedRange(fset, m, endLinePos, commentGrp.End()), + MappedRange: NewMappedRange(tokFile, m, endLinePos, commentGrp.End()), Kind: protocol.Comment, }) } diff --git a/internal/lsp/source/identifier.go b/internal/lsp/source/identifier.go index 40655e20779..c87725c4854 100644 --- a/internal/lsp/source/identifier.go +++ b/internal/lsp/source/identifier.go @@ -226,7 +226,7 @@ func findIdentifier(ctx context.Context, snapshot Snapshot, pkg Package, pgf *Pa // The builtin package isn't in the dependency graph, so the usual // utilities won't work here. 
- rng := NewMappedRange(snapshot.FileSet(), builtin.Mapper, decl.Pos(), decl.Pos()+token.Pos(len(result.Name))) + rng := NewMappedRange(builtin.Tok, builtin.Mapper, decl.Pos(), decl.Pos()+token.Pos(len(result.Name))) result.Declaration.MappedRange = append(result.Declaration.MappedRange, rng) return result, nil } @@ -267,7 +267,7 @@ func findIdentifier(ctx context.Context, snapshot Snapshot, pkg Package, pgf *Pa } name := method.Names[0].Name result.Declaration.node = method - rng := NewMappedRange(snapshot.FileSet(), builtin.Mapper, method.Pos(), method.Pos()+token.Pos(len(name))) + rng := NewMappedRange(builtin.Tok, builtin.Mapper, method.Pos(), method.Pos()+token.Pos(len(name))) result.Declaration.MappedRange = append(result.Declaration.MappedRange, rng) return result, nil } diff --git a/internal/lsp/source/references.go b/internal/lsp/source/references.go index 85bf41a21b0..a3d32a6d717 100644 --- a/internal/lsp/source/references.go +++ b/internal/lsp/source/references.go @@ -48,6 +48,7 @@ func References(ctx context.Context, s Snapshot, f FileHandle, pp protocol.Posit return nil, err } + packageName := pgf.File.Name.Name // from package decl packageNameStart, err := safetoken.Offset(pgf.Tok, pgf.File.Name.Pos()) if err != nil { return nil, err @@ -75,8 +76,8 @@ func References(ctx context.Context, s Snapshot, f FileHandle, pp protocol.Posit for _, imp := range f.File.Imports { if path, err := strconv.Unquote(imp.Path.Value); err == nil && path == renamingPkg.PkgPath() { refs = append(refs, &ReferenceInfo{ - Name: pgf.File.Name.Name, - MappedRange: NewMappedRange(s.FileSet(), f.Mapper, imp.Pos(), imp.End()), + Name: packageName, + MappedRange: NewMappedRange(f.Tok, f.Mapper, imp.Pos(), imp.End()), }) } } @@ -86,8 +87,8 @@ func References(ctx context.Context, s Snapshot, f FileHandle, pp protocol.Posit // Find internal references to the package within the package itself for _, f := range renamingPkg.CompiledGoFiles() { refs = append(refs, &ReferenceInfo{ - Name: pgf.File.Name.Name, - MappedRange: NewMappedRange(s.FileSet(), f.Mapper, f.File.Name.Pos(), f.File.Name.End()), + Name: packageName, + MappedRange: NewMappedRange(f.Tok, f.Mapper, f.File.Name.Pos(), f.File.Name.End()), }) } diff --git a/internal/lsp/source/rename.go b/internal/lsp/source/rename.go index 6312bcb1296..503422aa906 100644 --- a/internal/lsp/source/rename.go +++ b/internal/lsp/source/rename.go @@ -238,15 +238,15 @@ func (r *renamer) update() (map[span.URI][]diff.TextEdit, error) { continue } lines := strings.Split(comment.Text, "\n") - tok := r.fset.File(comment.Pos()) - commentLine := tok.Position(comment.Pos()).Line + tokFile := r.fset.File(comment.Pos()) + commentLine := tokFile.Line(comment.Pos()) for i, line := range lines { lineStart := comment.Pos() if i > 0 { - lineStart = tok.LineStart(commentLine + i) + lineStart = tokFile.LineStart(commentLine + i) } for _, locs := range docRegexp.FindAllIndex([]byte(line), -1) { - rng := span.NewRange(r.fset, lineStart+token.Pos(locs[0]), lineStart+token.Pos(locs[1])) + rng := span.NewRange(tokFile, lineStart+token.Pos(locs[0]), lineStart+token.Pos(locs[1])) spn, err := rng.Span() if err != nil { return nil, err @@ -265,7 +265,7 @@ func (r *renamer) update() (map[span.URI][]diff.TextEdit, error) { // docComment returns the doc for an identifier. 
func (r *renamer) docComment(pkg Package, id *ast.Ident) *ast.CommentGroup { - _, nodes, _ := pathEnclosingInterval(r.fset, pkg, id.Pos(), id.End()) + _, tokFile, nodes, _ := pathEnclosingInterval(r.fset, pkg, id.Pos(), id.End()) for _, node := range nodes { switch decl := node.(type) { case *ast.FuncDecl: @@ -294,25 +294,14 @@ func (r *renamer) docComment(pkg Package, id *ast.Ident) *ast.CommentGroup { return nil } - var file *ast.File - for _, f := range pkg.GetSyntax() { - if f.Pos() <= id.Pos() && id.Pos() <= f.End() { - file = f - break - } - } - if file == nil { - return nil - } - - identLine := r.fset.Position(id.Pos()).Line - for _, comment := range file.Comments { + identLine := tokFile.Line(id.Pos()) + for _, comment := range nodes[len(nodes)-1].(*ast.File).Comments { if comment.Pos() > id.Pos() { // Comment is after the identifier. continue } - lastCommentLine := r.fset.Position(comment.End()).Line + lastCommentLine := tokFile.Line(comment.End()) if lastCommentLine+1 == identLine { return comment } @@ -328,7 +317,7 @@ func (r *renamer) docComment(pkg Package, id *ast.Ident) *ast.CommentGroup { func (r *renamer) updatePkgName(pkgName *types.PkgName) (*diff.TextEdit, error) { // Modify ImportSpec syntax to add or remove the Name as needed. pkg := r.packages[pkgName.Pkg()] - _, path, _ := pathEnclosingInterval(r.fset, pkg, pkgName.Pos(), pkgName.Pos()) + _, tokFile, path, _ := pathEnclosingInterval(r.fset, pkg, pkgName.Pos(), pkgName.Pos()) if len(path) < 2 { return nil, fmt.Errorf("no path enclosing interval for %s", pkgName.Name()) } @@ -350,7 +339,7 @@ func (r *renamer) updatePkgName(pkgName *types.PkgName) (*diff.TextEdit, error) EndPos: spec.EndPos, } - rng := span.NewRange(r.fset, spec.Pos(), spec.End()) + rng := span.NewRange(tokFile, spec.Pos(), spec.End()) spn, err := rng.Span() if err != nil { return nil, err diff --git a/internal/lsp/source/rename_check.go b/internal/lsp/source/rename_check.go index b17f9b87067..6fb7ddf9ba5 100644 --- a/internal/lsp/source/rename_check.go +++ b/internal/lsp/source/rename_check.go @@ -372,7 +372,7 @@ func (r *renamer) checkStructField(from *types.Var) { if !ok { return } - pkg, path, _ := pathEnclosingInterval(r.fset, fromPkg, from.Pos(), from.Pos()) + pkg, _, path, _ := pathEnclosingInterval(r.fset, fromPkg, from.Pos(), from.Pos()) if pkg == nil || path == nil { return } @@ -821,13 +821,13 @@ func someUse(info *types.Info, obj types.Object) *ast.Ident { return nil } -// pathEnclosingInterval returns the Package and ast.Node that +// pathEnclosingInterval returns the Package, token.File, and ast.Node that // contain source interval [start, end), and all the node's ancestors // up to the AST root. It searches all ast.Files of all packages. // exact is defined as for astutil.PathEnclosingInterval. // // The zero value is returned if not found. 
-func pathEnclosingInterval(fset *token.FileSet, pkg Package, start, end token.Pos) (resPkg Package, path []ast.Node, exact bool) { +func pathEnclosingInterval(fset *token.FileSet, pkg Package, start, end token.Pos) (resPkg Package, tokFile *token.File, path []ast.Node, exact bool) { pkgs := []Package{pkg} for _, f := range pkg.GetSyntax() { for _, imp := range f.Imports { @@ -840,35 +840,36 @@ func pathEnclosingInterval(fset *token.FileSet, pkg Package, start, end token.Po } importPkg, err := pkg.GetImport(importPath) if err != nil { - return nil, nil, false + return nil, nil, nil, false } pkgs = append(pkgs, importPkg) } } for _, p := range pkgs { for _, f := range p.GetSyntax() { - if f.Pos() == token.NoPos { + if !f.Pos().IsValid() { // This can happen if the parser saw // too many errors and bailed out. // (Use parser.AllErrors to prevent that.) continue } - if !tokenFileContainsPos(fset.File(f.Pos()), start) { + tokFile := fset.File(f.Pos()) + if !tokenFileContainsPos(tokFile, start) { continue } if path, exact := astutil.PathEnclosingInterval(f, start, end); path != nil { - return pkg, path, exact + return pkg, tokFile, path, exact } } } - return nil, nil, false + return nil, nil, nil, false } // TODO(adonovan): make this a method: func (*token.File) Contains(token.Pos) func tokenFileContainsPos(tf *token.File, pos token.Pos) bool { p := int(pos) base := tf.Base() - return base <= p && p < base+tf.Size() + return base <= p && p <= base+tf.Size() } func objectKind(obj types.Object) string { diff --git a/internal/lsp/source/util.go b/internal/lsp/source/util.go index b8a7fc9135f..10970389290 100644 --- a/internal/lsp/source/util.go +++ b/internal/lsp/source/util.go @@ -30,26 +30,22 @@ type MappedRange struct { m *protocol.ColumnMapper // a mapper of the edited source (package.GoFiles) } -// NewMappedRange returns a MappedRange for the given start and end token.Pos. +// NewMappedRange returns a MappedRange for the given file and valid start/end token.Pos. // // By convention, start and end are assumed to be positions in the compiled (== // type checked) source, whereas the column mapper m maps positions in the -// user-edited source. Note that these may not be the same, as when using CGo: +// user-edited source. Note that these may not be the same, as when using goyacc or CGo: // CompiledGoFiles contains generated files, whose positions (via // token.File.Position) point to locations in the edited file -- the file // containing `import "C"`. 
-func NewMappedRange(fset *token.FileSet, m *protocol.ColumnMapper, start, end token.Pos) MappedRange { - if tf := fset.File(start); tf == nil { - bug.Report("nil file", nil) - } else { - mapped := m.TokFile.Name() - adjusted := tf.PositionFor(start, true) // adjusted position - if adjusted.Filename != mapped { - bug.Reportf("mapped file %q does not match start position file %q", mapped, adjusted.Filename) - } +func NewMappedRange(file *token.File, m *protocol.ColumnMapper, start, end token.Pos) MappedRange { + mapped := m.TokFile.Name() + adjusted := file.PositionFor(start, true) // adjusted position + if adjusted.Filename != mapped { + bug.Reportf("mapped file %q does not match start position file %q", mapped, adjusted.Filename) } return MappedRange{ - spanRange: span.NewRange(fset, start, end), + spanRange: span.NewRange(file, start, end), m: m, } } @@ -134,7 +130,10 @@ func nodeToProtocolRange(snapshot Snapshot, pkg Package, n ast.Node) (protocol.R return mrng.Range() } +// objToMappedRange returns the MappedRange for the object's declaring +// identifier (or string literal, for an import). func objToMappedRange(snapshot Snapshot, pkg Package, obj types.Object) (MappedRange, error) { + nameLen := len(obj.Name()) if pkgName, ok := obj.(*types.PkgName); ok { // An imported Go package has a package-local, unqualified name. // When the name matches the imported package name, there is no @@ -147,29 +146,35 @@ func objToMappedRange(snapshot Snapshot, pkg Package, obj types.Object) (MappedR // When the identifier does not appear in the source, have the range // of the object be the import path, including quotes. if pkgName.Imported().Name() == pkgName.Name() { - return posToMappedRange(snapshot, pkg, obj.Pos(), obj.Pos()+token.Pos(len(pkgName.Imported().Path())+2)) + nameLen = len(pkgName.Imported().Path()) + len(`""`) } } - return nameToMappedRange(snapshot, pkg, obj.Pos(), obj.Name()) -} - -func nameToMappedRange(snapshot Snapshot, pkg Package, pos token.Pos, name string) (MappedRange, error) { - return posToMappedRange(snapshot, pkg, pos, pos+token.Pos(len(name))) + return posToMappedRange(snapshot, pkg, obj.Pos(), obj.Pos()+token.Pos(nameLen)) } +// posToMappedRange returns the MappedRange for the given [start, end) span, +// which must be among the transitive dependencies of pkg. func posToMappedRange(snapshot Snapshot, pkg Package, pos, end token.Pos) (MappedRange, error) { - logicalFilename := snapshot.FileSet().File(pos).Position(pos).Filename + tokFile := snapshot.FileSet().File(pos) + // Subtle: it is not safe to simplify this to tokFile.Name + // because, due to //line directives, a Position within a + // token.File may have a different filename than the File itself. + logicalFilename := tokFile.Position(pos).Filename pgf, _, err := findFileInDeps(pkg, span.URIFromPath(logicalFilename)) if err != nil { return MappedRange{}, err } if !pos.IsValid() { - return MappedRange{}, fmt.Errorf("invalid position for %v", pos) + return MappedRange{}, fmt.Errorf("invalid start position") } if !end.IsValid() { - return MappedRange{}, fmt.Errorf("invalid position for %v", end) + return MappedRange{}, fmt.Errorf("invalid end position") } - return NewMappedRange(snapshot.FileSet(), pgf.Mapper, pos, end), nil + // It is fishy that pgf.Mapper (from the parsed Go file) is + // accompanied here not by pgf.Tok but by tokFile from the global + // FileSet, which is a distinct token.File that doesn't + // contain [pos,end). TODO(adonovan): clean this up. 
+ return NewMappedRange(tokFile, pgf.Mapper, pos, end), nil } // Matches cgo generated comment as well as the proposed standard: diff --git a/internal/lsp/source/util_test.go b/internal/lsp/source/util_test.go index 5d4e98f151c..fe505e4d06c 100644 --- a/internal/lsp/source/util_test.go +++ b/internal/lsp/source/util_test.go @@ -41,7 +41,7 @@ const a𐐀b = 42`) start := cf.Pos(bytes.Index(compiled, []byte("a𐐀b"))) end := start + token.Pos(len("a𐐀b")) - mr := NewMappedRange(fset, mapper, start, end) + mr := NewMappedRange(cf, mapper, start, end) gotRange, err := mr.Range() if err != nil { t.Fatal(err) diff --git a/internal/span/token.go b/internal/span/token.go index af01d7b8348..cae696db757 100644 --- a/internal/span/token.go +++ b/internal/span/token.go @@ -12,28 +12,40 @@ import ( ) // Range represents a source code range in token.Pos form. -// It also carries the FileSet that produced the positions, so that it is +// It also carries the token.File that produced the positions, so that it is // self contained. type Range struct { - Start token.Pos - End token.Pos - - // TokFile may be nil if Start or End is invalid. - // TODO: Eventually we should guarantee that it is non-nil. - TokFile *token.File + TokFile *token.File // non-nil + Start, End token.Pos // both IsValid() } -// NewRange creates a new Range from a FileSet and two positions. -// To represent a point pass a 0 as the end pos. -func NewRange(fset *token.FileSet, start, end token.Pos) Range { - tf := fset.File(start) - if tf == nil { - bug.Reportf("nil file") - } +// NewRange creates a new Range from a token.File and two valid positions within it. +// +// (If you only have a token.FileSet, use file = fset.File(start). But +// most callers know exactly which token.File they're dealing with and +// should pass it explicitly. Not only does this save a lookup, but it +// brings us a step closer to eliminating the global FileSet.) +func NewRange(file *token.File, start, end token.Pos) Range { + if file == nil { + panic("nil *token.File") + } + if !start.IsValid() || !end.IsValid() { + panic("invalid start/end token.Pos") + } + + // TODO(adonovan): ideally we would make this stronger assertion: + // + // // Assert that file is non-nil and contains start and end. + // _ = file.Offset(start) + // _ = file.Offset(end) + // + // but some callers (e.g. packageCompletionSurrounding, + // posToMappedRange) don't ensure this precondition. + return Range{ + TokFile: file, Start: start, End: end, - TokFile: tf, } } From 36430f4b355177a2580d8df0ec38bbf98556a14b Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Fri, 1 Jul 2022 17:28:48 -0400 Subject: [PATCH 080/136] internal/lsp/cache: use GetHandle not Bind for actions This change uses a persistent.Map for actions, just like packages. Actions are now reference counted rather than generational. Also: - note optimization opportunities. - minor cleanups. 
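The behavioral difference is that Bind tied a cached entry's lifetime to a generation, whereas GetHandle hands every caller a release func and keeps the entry alive until the last holder releases it. The toy cache below is a minimal sketch of that reference-counting shape only; it is not the real memoize or persistent API, and all names in it are invented for illustration.

package main

import (
	"fmt"
	"sync"
)

// entry is one cached value plus the number of live handles to it.
type entry struct {
	refs  int
	value interface{}
}

// Cache is a toy reference-counted cache.
type Cache struct {
	mu sync.Mutex
	m  map[string]*entry
}

func NewCache() *Cache { return &Cache{m: make(map[string]*entry)} }

// GetHandle returns the value for key, computing it on first use, along
// with a release func that each caller must invoke exactly once. The
// entry is evicted when the last holder releases it, not when some
// enclosing generation is destroyed.
func (c *Cache) GetHandle(key string, compute func() interface{}) (interface{}, func()) {
	c.mu.Lock()
	e, ok := c.m[key]
	if !ok {
		e = &entry{value: compute()}
		c.m[key] = e
	}
	e.refs++
	c.mu.Unlock()

	var once sync.Once
	release := func() {
		once.Do(func() {
			c.mu.Lock()
			defer c.mu.Unlock()
			if e.refs--; e.refs == 0 {
				delete(c.m, key)
			}
		})
	}
	return e.value, release
}

func main() {
	c := NewCache()
	v1, release1 := c.GetHandle("action", func() interface{} { return "result" })
	v2, release2 := c.GetHandle("action", func() interface{} { return "result" })
	fmt.Println(v1, v2) // computed once, shared by both holders
	release1()
	release2() // last release evicts the entry
}

In the change itself, addActionHandle installs the release func as the persistent.Map entry's destructor (s.actions.Set(key, ah, func(_, _ interface{}) { release() })), so dropping a key from the map is what releases the underlying handle.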
Change-Id: Ibbac8848a3beb3fe19056a7b160d2185155e7021 Reviewed-on: https://go-review.googlesource.com/c/tools/+/415504 gopls-CI: kokoro Auto-Submit: Alan Donovan Run-TryBot: Alan Donovan Reviewed-by: Robert Findley TryBot-Result: Gopher Robot --- internal/lsp/cache/analysis.go | 30 ++++++++++++------- internal/lsp/cache/maps.go | 24 +++++++++++---- internal/lsp/cache/session.go | 3 +- internal/lsp/cache/snapshot.go | 54 ++++++++++++++++++++++------------ 4 files changed, 75 insertions(+), 36 deletions(-) diff --git a/internal/lsp/cache/analysis.go b/internal/lsp/cache/analysis.go index db2ca2a8b09..4b437858ef3 100644 --- a/internal/lsp/cache/analysis.go +++ b/internal/lsp/cache/analysis.go @@ -85,12 +85,21 @@ type packageFactKey struct { } func (s *snapshot) actionHandle(ctx context.Context, id PackageID, a *analysis.Analyzer) (*actionHandle, error) { + // TODO(adonovan): opt: this block of code sequentially loads a package + // (and all its dependencies), then sequentially creates action handles + // for the direct dependencies (whose packages have by then been loaded + // as a consequence of ph.check) which does a sequential recursion + // down the action graph. Only once all that work is complete do we + // put a handle in the cache. As with buildPackageHandle, this does + // not exploit the natural parallelism in the problem, and the naive + // use of concurrency would lead to an exponential amount of duplicated + // work. We should instead use an atomically updated future cache + // and a parallel graph traversal. ph, err := s.buildPackageHandle(ctx, id, source.ParseFull) if err != nil { return nil, err } - act := s.getActionHandle(id, ph.mode, a) - if act != nil { + if act := s.getActionHandle(id, ph.mode, a); act != nil { return act, nil } if len(ph.key) == 0 { @@ -100,12 +109,9 @@ func (s *snapshot) actionHandle(ctx context.Context, id PackageID, a *analysis.A if err != nil { return nil, err } - act = &actionHandle{ - analyzer: a, - pkg: pkg, - } + + // Add a dependency on each required analyzer. var deps []*actionHandle - // Add a dependency on each required analyzers. for _, req := range a.Requires { reqActionHandle, err := s.actionHandle(ctx, id, req) if err != nil { @@ -131,7 +137,7 @@ func (s *snapshot) actionHandle(ctx context.Context, id PackageID, a *analysis.A } } - h := s.generation.Bind(buildActionKey(a, ph), func(ctx context.Context, arg memoize.Arg) interface{} { + handle, release := s.generation.GetHandle(buildActionKey(a, ph), func(ctx context.Context, arg memoize.Arg) interface{} { snapshot := arg.(*snapshot) // Analyze dependencies first. 
results, err := execAll(ctx, snapshot, deps) @@ -142,9 +148,13 @@ func (s *snapshot) actionHandle(ctx context.Context, id PackageID, a *analysis.A } return runAnalysis(ctx, snapshot, a, pkg, results) }) - act.handle = h - act = s.addActionHandle(act) + act := &actionHandle{ + analyzer: a, + pkg: pkg, + handle: handle, + } + act = s.addActionHandle(act, release) return act, nil } diff --git a/internal/lsp/cache/maps.go b/internal/lsp/cache/maps.go index af041777188..f8e03057cfd 100644 --- a/internal/lsp/cache/maps.go +++ b/internal/lsp/cache/maps.go @@ -197,16 +197,18 @@ type packagesMap struct { func newPackagesMap() packagesMap { return packagesMap{ impl: persistent.NewMap(func(a, b interface{}) bool { - left := a.(packageKey) - right := b.(packageKey) - if left.mode != right.mode { - return left.mode < right.mode - } - return left.id < right.id + return packageKeyLess(a.(packageKey), b.(packageKey)) }), } } +func packageKeyLess(x, y packageKey) bool { + if x.mode != y.mode { + return x.mode < y.mode + } + return x.id < y.id +} + func (m packagesMap) Clone() packagesMap { return packagesMap{ impl: m.impl.Clone(), @@ -285,3 +287,13 @@ func (s knownDirsSet) Insert(key span.URI) { func (s knownDirsSet) Remove(key span.URI) { s.impl.Delete(key) } + +// actionKeyLessInterface is the less-than relation for actionKey +// values wrapped in an interface. +func actionKeyLessInterface(a, b interface{}) bool { + x, y := a.(actionKey), b.(actionKey) + if x.analyzer.Name != y.analyzer.Name { + return x.analyzer.Name < y.analyzer.Name + } + return packageKeyLess(x.pkg, y.pkg) +} diff --git a/internal/lsp/cache/session.go b/internal/lsp/cache/session.go index aca46dd2a88..98d3c250433 100644 --- a/internal/lsp/cache/session.go +++ b/internal/lsp/cache/session.go @@ -16,6 +16,7 @@ import ( "golang.org/x/tools/internal/imports" "golang.org/x/tools/internal/lsp/progress" "golang.org/x/tools/internal/lsp/source" + "golang.org/x/tools/internal/persistent" "golang.org/x/tools/internal/span" "golang.org/x/tools/internal/xcontext" ) @@ -238,7 +239,7 @@ func (s *Session) createView(ctx context.Context, name string, folder span.URI, goFiles: newGoFilesMap(), parseKeysByURI: newParseKeysByURIMap(), symbols: make(map[span.URI]*symbolHandle), - actions: make(map[actionKey]*actionHandle), + actions: persistent.NewMap(actionKeyLessInterface), workspacePackages: make(map[PackageID]PackagePath), unloadableFiles: make(map[span.URI]struct{}), parseModHandles: make(map[span.URI]*parseModHandle), diff --git a/internal/lsp/cache/snapshot.go b/internal/lsp/cache/snapshot.go index 36dcafeaca6..93316653af4 100644 --- a/internal/lsp/cache/snapshot.go +++ b/internal/lsp/cache/snapshot.go @@ -36,6 +36,7 @@ import ( "golang.org/x/tools/internal/lsp/source" "golang.org/x/tools/internal/memoize" "golang.org/x/tools/internal/packagesinternal" + "golang.org/x/tools/internal/persistent" "golang.org/x/tools/internal/span" "golang.org/x/tools/internal/typesinternal" ) @@ -87,7 +88,7 @@ type snapshot struct { // TODO(rfindley): consider merging this with files to reduce burden on clone. symbols map[span.URI]*symbolHandle - // packages maps a packageKey to a set of packageHandles to which that file belongs. + // packages maps a packageKey to a *packageHandle. // It may be invalidated when a file's content changes. packages packagesMap @@ -95,8 +96,9 @@ type snapshot struct { // It may be invalidated when metadata changes or a new file is opened or closed. 
isActivePackageCache isActivePackageCacheMap - // actions maps an actionkey to its actionHandle. - actions map[actionKey]*actionHandle + // actions maps an actionKey to the handle for the future + // result of execution an analysis pass on a package. + actions *persistent.Map // from actionKey to *actionHandle // workspacePackages contains the workspace's packages, which are loaded // when the view is created. @@ -149,6 +151,7 @@ func (s *snapshot) Destroy(destroyedBy string) { s.generation.Destroy(destroyedBy) s.packages.Destroy() s.isActivePackageCache.Destroy() + s.actions.Destroy() s.files.Destroy() s.goFiles.Destroy() s.parseKeysByURI.Destroy() @@ -1177,9 +1180,6 @@ func (s *snapshot) addSymbolHandle(uri span.URI, sh *symbolHandle) *symbolHandle } func (s *snapshot) getActionHandle(id PackageID, m source.ParseMode, a *analysis.Analyzer) *actionHandle { - s.mu.Lock() - defer s.mu.Unlock() - key := actionKey{ pkg: packageKey{ id: id, @@ -1187,13 +1187,18 @@ func (s *snapshot) getActionHandle(id PackageID, m source.ParseMode, a *analysis }, analyzer: a, } - return s.actions[key] -} -func (s *snapshot) addActionHandle(ah *actionHandle) *actionHandle { s.mu.Lock() defer s.mu.Unlock() + ah, ok := s.actions.Get(key) + if !ok { + return nil + } + return ah.(*actionHandle) +} + +func (s *snapshot) addActionHandle(ah *actionHandle, release func()) *actionHandle { key := actionKey{ analyzer: ah.analyzer, pkg: packageKey{ @@ -1201,10 +1206,17 @@ func (s *snapshot) addActionHandle(ah *actionHandle) *actionHandle { mode: ah.pkg.mode, }, } - if ah, ok := s.actions[key]; ok { - return ah + + s.mu.Lock() + defer s.mu.Unlock() + + // If another thread since cached a different handle, + // return it instead of overriding it. + if result, ok := s.actions.Get(key); ok { + release() + return result.(*actionHandle) } - s.actions[key] = ah + s.actions.Set(key, ah, func(_, _ interface{}) { release() }) return ah } @@ -1716,7 +1728,7 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC initializedErr: s.initializedErr, packages: s.packages.Clone(), isActivePackageCache: s.isActivePackageCache.Clone(), - actions: make(map[actionKey]*actionHandle, len(s.actions)), + actions: s.actions.Clone(), files: s.files.Clone(), goFiles: s.goFiles.Clone(), parseKeysByURI: s.parseKeysByURI.Clone(), @@ -1920,13 +1932,17 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC } } - // Copy the package analysis information. - for k, v := range s.actions { - if _, ok := idsToInvalidate[k.pkg.id]; ok { - continue + // Copy actions. + // TODO(adonovan): opt: avoid iteration over s.actions. + var actionsToDelete []actionKey + s.actions.Range(func(k, _ interface{}) { + key := k.(actionKey) + if _, ok := idsToInvalidate[key.pkg.id]; ok { + actionsToDelete = append(actionsToDelete, key) } - newGen.Inherit(v.handle) - result.actions[k] = v + }) + for _, key := range actionsToDelete { + result.actions.Delete(key) } // If the workspace mode has changed, we must delete all metadata, as it From 8184d1ff7a52751ae937e76f2fd00333ed193799 Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Mon, 4 Jul 2022 10:00:43 -0400 Subject: [PATCH 081/136] internal/lsp/cache: use GetHandle not Bind in astCacheData This change replaces Bind (generational lifetime) with GetHandle (reference counting) for the cache of buildASTCache calls, as Bind is deprecated. Also: - add missing commentary, particularly on the question of why this cache is needed at all. 
- remove unused field astCacheData.err - simplify SignatureHelp to avoid unnecessary use of Declaration. - minor simplifications to logic surrounding FindPackageFromPos and PosTo{Decl,Field}. Change-Id: I2b7a798b84f23856037797fa6e9ccc5595422e7c Reviewed-on: https://go-review.googlesource.com/c/tools/+/415975 Reviewed-by: Robert Findley Run-TryBot: Alan Donovan TryBot-Result: Gopher Robot Auto-Submit: Alan Donovan gopls-CI: kokoro --- internal/lsp/cache/graph.go | 16 +++++------ internal/lsp/cache/parse.go | 39 +++++++++++++++++++++------ internal/lsp/source/signature_help.go | 11 +------- internal/lsp/source/types_format.go | 17 +++--------- internal/lsp/source/util.go | 12 ++++----- internal/lsp/source/view.go | 8 ++++-- 6 files changed, 55 insertions(+), 48 deletions(-) diff --git a/internal/lsp/cache/graph.go b/internal/lsp/cache/graph.go index 88c9f147195..c1beff82866 100644 --- a/internal/lsp/cache/graph.go +++ b/internal/lsp/cache/graph.go @@ -24,8 +24,8 @@ type metadataGraph struct { // importedBy maps package IDs to the list of packages that import them. importedBy map[PackageID][]PackageID - // ids maps file URIs to package IDs. A single file may belong to multiple - // packages due to tests packages. + // ids maps file URIs to package IDs, sorted by (!valid, cli, packageID). + // A single file may belong to multiple packages due to tests packages. ids map[span.URI][]PackageID } @@ -89,21 +89,21 @@ func (g *metadataGraph) build() { // 4: an invalid command-line-arguments package for uri, ids := range g.ids { sort.Slice(ids, func(i, j int) bool { - // Sort valid packages first. + // 1. valid packages appear earlier. validi := g.metadata[ids[i]].Valid validj := g.metadata[ids[j]].Valid if validi != validj { return validi } + // 2. command-line-args packages appear later. cli := source.IsCommandLineArguments(string(ids[i])) clj := source.IsCommandLineArguments(string(ids[j])) - if cli && !clj { - return false - } - if !cli && clj { - return true + if cli != clj { + return clj } + + // 3. packages appear in name order. return ids[i] < ids[j] }) diff --git a/internal/lsp/cache/parse.go b/internal/lsp/cache/parse.go index c3eae2f7643..712f26ad715 100644 --- a/internal/lsp/cache/parse.go +++ b/internal/lsp/cache/parse.go @@ -128,19 +128,37 @@ func (s *snapshot) astCacheData(ctx context.Context, spkg source.Package, pos to if err != nil { return nil, err } - astHandle := s.generation.Bind(astCacheKey{pkgHandle.key, pgf.URI}, func(ctx context.Context, arg memoize.Arg) interface{} { + + // TODO(adonovan): opt: is it necessary to cache this operation? + // + // I expect the main benefit of CL 221021, which introduced it, + // was the replacement of PathEnclosingInterval, whose + // traversal is allocation-intensive, by buildASTCache. + // + // When run on the largest file in k8s, buildASTCache took + // ~6ms, but I expect most of that cost could be eliminated by + // using a stripped-down version of PathEnclosingInterval that + // cares only about syntax trees and not tokens. A stateless + // utility function that is cheap enough to call for each Pos + // would be a nice simplification. + // + // (The basic approach would be to use ast.Inspect, compare + // each node with the search Pos, and bail out as soon + // as a match is found. The pre-order hook would return false + // to avoid descending into any tree whose End is before + // the search Pos.) + // + // A representative benchmark would help. 
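A rough sketch of that stateless lookup, assuming only the standard go/ast and go/token packages (the helper name fieldAt is invented for illustration, not code from this change): prune any subtree whose range cannot contain pos, and keep the innermost *ast.Field whose range does.

import (
	"go/ast"
	"go/token"
)

// fieldAt returns the innermost *ast.Field whose source range contains
// pos, or nil if there is none.
func fieldAt(file *ast.File, pos token.Pos) *ast.Field {
	var found *ast.Field
	ast.Inspect(file, func(n ast.Node) bool {
		if n == nil || n.End() <= pos || pos < n.Pos() {
			return false // this subtree cannot contain pos; don't descend
		}
		if f, ok := n.(*ast.Field); ok {
			found = f // a Field found deeper in the walk is more precise
		}
		return true
	})
	return found
}

Because the walk only descends into nodes that contain pos, the cost is proportional to the depth of the tree at that position rather than to the size of the file, which is the property the comment above is asking for.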
+ astHandle, release := s.generation.GetHandle(astCacheKey{pkgHandle.key, pgf.URI}, func(ctx context.Context, arg memoize.Arg) interface{} { return buildASTCache(pgf) }) + defer release() d, err := astHandle.Get(ctx, s.generation, s) if err != nil { return nil, err } - data := d.(*astCacheData) - if data.err != nil { - return nil, data.err - } - return data, nil + return d.(*astCacheData), nil } func (s *snapshot) PosToDecl(ctx context.Context, spkg source.Package, pos token.Pos) (ast.Decl, error) { @@ -159,10 +177,15 @@ func (s *snapshot) PosToField(ctx context.Context, spkg source.Package, pos toke return data.posToField[pos], nil } +// An astCacheData maps object positions to syntax nodes for a single Go file. type astCacheData struct { - err error + // Maps the position of each name declared by a func/var/const/type + // Decl to the Decl node. Also maps the name and type of each field + // (broadly defined) to its innermost enclosing Decl. + posToDecl map[token.Pos]ast.Decl - posToDecl map[token.Pos]ast.Decl + // Maps the position of the Name and Type of each field + // (broadly defined) to the Field node. posToField map[token.Pos]*ast.Field } diff --git a/internal/lsp/source/signature_help.go b/internal/lsp/source/signature_help.go index 813f67e7b3b..12e359008fe 100644 --- a/internal/lsp/source/signature_help.go +++ b/internal/lsp/source/signature_help.go @@ -102,16 +102,7 @@ FindCall: if err != nil { return nil, 0, err } - rng, err := objToMappedRange(snapshot, pkg, obj) - if err != nil { - return nil, 0, err - } - decl := Declaration{ - obj: obj, - node: node, - } - decl.MappedRange = append(decl.MappedRange, rng) - d, err := FindHoverContext(ctx, snapshot, pkg, decl.obj, decl.node, nil) + d, err := FindHoverContext(ctx, snapshot, pkg, obj, node, nil) if err != nil { return nil, 0, err } diff --git a/internal/lsp/source/types_format.go b/internal/lsp/source/types_format.go index 93344e08678..5e10a509005 100644 --- a/internal/lsp/source/types_format.go +++ b/internal/lsp/source/types_format.go @@ -259,10 +259,11 @@ func FormatVarType(ctx context.Context, snapshot Snapshot, srcpkg Package, obj * return types.TypeString(obj.Type(), qf) } - expr, err := varType(ctx, snapshot, pkg, obj) - if err != nil { + field, err := snapshot.PosToField(ctx, pkg, obj.Pos()) + if err != nil || field == nil { return types.TypeString(obj.Type(), qf) } + expr := field.Type // If the given expr refers to a type parameter, then use the // object's Type instead of the type parameter declaration. This helps @@ -286,18 +287,6 @@ func FormatVarType(ctx context.Context, snapshot Snapshot, srcpkg Package, obj * return fmted } -// varType returns the type expression for a *types.Var. -func varType(ctx context.Context, snapshot Snapshot, pkg Package, obj *types.Var) (ast.Expr, error) { - field, err := snapshot.PosToField(ctx, pkg, obj.Pos()) - if err != nil { - return nil, err - } - if field == nil { - return nil, fmt.Errorf("no declaration for object %s", obj.Name()) - } - return field.Type, nil -} - // qualifyExpr applies the "pkgName." prefix to any *ast.Ident in the expr. 
func qualifyExpr(expr ast.Expr, srcpkg, pkg Package, clonedInfo map[token.Pos]*types.PkgName, qf types.Qualifier) ast.Expr { ast.Inspect(expr, func(n ast.Node) bool { diff --git a/internal/lsp/source/util.go b/internal/lsp/source/util.go index 10970389290..8d205ee6cee 100644 --- a/internal/lsp/source/util.go +++ b/internal/lsp/source/util.go @@ -311,15 +311,15 @@ func FindPackageFromPos(ctx context.Context, snapshot Snapshot, pos token.Pos) ( for _, pkg := range pkgs { parsed, err := pkg.File(uri) if err != nil { + // TODO(adonovan): should this be a bug.Report or log.Fatal? + // The logic in Identifier seems to think so. + // Should it be a postcondition of PackagesForFile? + // And perhaps PackagesForFile should return the PGFs too. return nil, err } - if parsed == nil { - continue - } - if parsed.Tok.Base() != tok.Base() { - continue + if parsed != nil && parsed.Tok.Base() == tok.Base() { + return pkg, nil } - return pkg, nil } return nil, fmt.Errorf("no package for given file position") } diff --git a/internal/lsp/source/view.go b/internal/lsp/source/view.go index 98d11517d87..c8656153bd3 100644 --- a/internal/lsp/source/view.go +++ b/internal/lsp/source/view.go @@ -83,11 +83,15 @@ type Snapshot interface { // to quickly find corresponding *ast.Field node given a *types.Var. // We must refer to the AST to render type aliases properly when // formatting signatures and other types. + // May return (nil, nil) if the file didn't declare an object at that position. + // TODO(adonovan): seems like a bug? PosToField(ctx context.Context, pkg Package, pos token.Pos) (*ast.Field, error) // PosToDecl maps certain objects' positions to their surrounding // ast.Decl. This mapping is used when building the documentation // string for the objects. + // May return (nil, nil) if the file didn't declare an object at that position. + // TODO(adonovan): seems like a bug? PosToDecl(ctx context.Context, pkg Package, pos token.Pos) (ast.Decl, error) // DiagnosePackage returns basic diagnostics, including list, parse, and type errors @@ -147,8 +151,8 @@ type Snapshot interface { // IsBuiltin reports whether uri is part of the builtin package. IsBuiltin(ctx context.Context, uri span.URI) bool - // PackagesForFile returns the packages that this file belongs to, checked - // in mode. + // PackagesForFile returns an unordered list of packages that contain + // the file denoted by uri, type checked in the specified mode. PackagesForFile(ctx context.Context, uri span.URI, mode TypecheckMode, includeTestVariants bool) ([]Package, error) // PackageForFile returns a single package that this file belongs to, From 2aef121b8361efd5b8d56dd25a1ec046c50a4e01 Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Thu, 2 Jun 2022 13:33:11 -0400 Subject: [PATCH 082/136] internal/lsp: consolidate .go/go.mod link logic Now that we have a token.File (albeit throwaway) for a parsed go.mod file, we can combine the .go and go.mod logic for turning it into a protocol.DocumentLink. 
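The piece that makes the consolidation possible is the throwaway token.File created for the parsed go.mod: it turns byte offsets in a non-Go file into token.Pos values that the shared range helpers understand. Below is a minimal, self-contained sketch of that conversion; the file name and contents are made up for illustration.

package main

import (
	"bytes"
	"fmt"
	"go/token"
)

func main() {
	content := []byte("module example.com\n\n// see https://go.dev\n")

	// A throwaway token.File sized to the whole file: a negative base
	// lets the FileSet pick one, and SetLinesForContent records line
	// starts so positions can be rendered as line:column.
	tokFile := token.NewFileSet().AddFile("go.mod", -1, len(content))
	tokFile.SetLinesForContent(content)

	// Convert a byte-offset range into token.Pos, the currency used by
	// span.NewRange and the link helpers.
	off := bytes.Index(content, []byte("https://go.dev"))
	start := tokFile.Pos(off)
	end := tokFile.Pos(off + len("https://go.dev"))
	fmt.Println(tokFile.Position(start), tokFile.Position(end))
}

With both Go and go.mod positions expressed this way, toProtocolLink no longer needs a per-FileKind switch, which is exactly the simplification the diff below makes.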
Change-Id: Id1783644cbd450f0e8dc807beb8ba625675d8540 Reviewed-on: https://go-review.googlesource.com/c/tools/+/410136 Run-TryBot: Alan Donovan Reviewed-by: Robert Findley Auto-Submit: Alan Donovan TryBot-Result: Gopher Robot gopls-CI: kokoro --- internal/lsp/link.go | 66 +++++++++++++++----------------------------- 1 file changed, 22 insertions(+), 44 deletions(-) diff --git a/internal/lsp/link.go b/internal/lsp/link.go index a2962b6659a..65da8a54c31 100644 --- a/internal/lsp/link.go +++ b/internal/lsp/link.go @@ -49,6 +49,8 @@ func modLinks(ctx context.Context, snapshot source.Snapshot, fh source.FileHandl if err != nil { return nil, err } + tokFile := pm.Mapper.TokFile + var links []protocol.DocumentLink for _, req := range pm.File.Require { if req.Syntax == nil { @@ -66,9 +68,9 @@ func modLinks(ctx context.Context, snapshot source.Snapshot, fh source.FileHandl } // Shift the start position to the location of the // dependency within the require statement. - start, end := token.Pos(s+i), token.Pos(s+i+len(dep)) + start, end := tokFile.Pos(s+i), tokFile.Pos(s+i+len(dep)) target := source.BuildLink(snapshot.View().Options().LinkTarget, "mod/"+req.Mod.String(), "") - l, err := toProtocolLink(nil, pm.Mapper, target, start, end, source.Mod) + l, err := toProtocolLink(tokFile, pm.Mapper, target, start, end) if err != nil { return nil, err } @@ -79,9 +81,6 @@ func modLinks(ctx context.Context, snapshot source.Snapshot, fh source.FileHandl return links, nil } - // Create a throwaway token.File. - tokFile := token.NewFileSet().AddFile(fh.URI().Filename(), -1, len(pm.Mapper.Content)) - // Get all the links that are contained in the comments of the file. for _, expr := range pm.File.Syntax.Stmt { comments := expr.Comment() @@ -91,7 +90,7 @@ func modLinks(ctx context.Context, snapshot source.Snapshot, fh source.FileHandl for _, section := range [][]modfile.Comment{comments.Before, comments.Suffix, comments.After} { for _, comment := range section { start := tokFile.Pos(comment.Start.Byte) - l, err := findLinksInString(ctx, snapshot, comment.Token, start, tokFile, pm.Mapper, source.Mod) + l, err := findLinksInString(ctx, snapshot, comment.Token, start, tokFile, pm.Mapper) if err != nil { return nil, err } @@ -148,8 +147,8 @@ func goLinks(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle // Account for the quotation marks in the positions. 
start := imp.Path.Pos() + 1 end := imp.Path.End() - 1 - target = source.BuildLink(view.Options().LinkTarget, target, "") - l, err := toProtocolLink(pgf.Tok, pgf.Mapper, target, start, end, source.Go) + targetURL := source.BuildLink(view.Options().LinkTarget, target, "") + l, err := toProtocolLink(pgf.Tok, pgf.Mapper, targetURL, start, end) if err != nil { return nil, err } @@ -157,7 +156,7 @@ func goLinks(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle } } for _, s := range str { - l, err := findLinksInString(ctx, snapshot, s.Value, s.Pos(), pgf.Tok, pgf.Mapper, source.Go) + l, err := findLinksInString(ctx, snapshot, s.Value, s.Pos(), pgf.Tok, pgf.Mapper) if err != nil { return nil, err } @@ -165,7 +164,7 @@ func goLinks(ctx context.Context, snapshot source.Snapshot, fh source.FileHandle } for _, commentGroup := range pgf.File.Comments { for _, comment := range commentGroup.List { - l, err := findLinksInString(ctx, snapshot, comment.Text, comment.Pos(), pgf.Tok, pgf.Mapper, source.Go) + l, err := findLinksInString(ctx, snapshot, comment.Text, comment.Pos(), pgf.Tok, pgf.Mapper) if err != nil { return nil, err } @@ -199,7 +198,7 @@ var acceptedSchemes = map[string]bool{ } // tokFile may be a throwaway File for non-Go files. -func findLinksInString(ctx context.Context, snapshot source.Snapshot, src string, pos token.Pos, tokFile *token.File, m *protocol.ColumnMapper, fileKind source.FileKind) ([]protocol.DocumentLink, error) { +func findLinksInString(ctx context.Context, snapshot source.Snapshot, src string, pos token.Pos, tokFile *token.File, m *protocol.ColumnMapper) ([]protocol.DocumentLink, error) { var links []protocol.DocumentLink for _, index := range snapshot.View().Options().URLRegexp.FindAllIndex([]byte(src), -1) { start, end := index[0], index[1] @@ -222,7 +221,7 @@ func findLinksInString(ctx context.Context, snapshot source.Snapshot, src string if !acceptedSchemes[linkURL.Scheme] { continue } - l, err := toProtocolLink(tokFile, m, linkURL.String(), startPos, endPos, fileKind) + l, err := toProtocolLink(tokFile, m, linkURL.String(), startPos, endPos) if err != nil { return nil, err } @@ -239,8 +238,8 @@ func findLinksInString(ctx context.Context, snapshot source.Snapshot, src string continue } org, repo, number := matches[1], matches[2], matches[3] - target := fmt.Sprintf("https://github.com/%s/%s/issues/%s", org, repo, number) - l, err := toProtocolLink(tokFile, m, target, startPos, endPos, fileKind) + targetURL := fmt.Sprintf("https://github.com/%s/%s/issues/%s", org, repo, number) + l, err := toProtocolLink(tokFile, m, targetURL, startPos, endPos) if err != nil { return nil, err } @@ -261,38 +260,17 @@ var ( issueRegexp *regexp.Regexp ) -func toProtocolLink(tokFile *token.File, m *protocol.ColumnMapper, target string, start, end token.Pos, fileKind source.FileKind) (protocol.DocumentLink, error) { - var rng protocol.Range - switch fileKind { - case source.Go: - // TODO(adonovan): can we now use this logic for the Mod case too? 
- spn, err := span.NewRange(tokFile, start, end).Span() - if err != nil { - return protocol.DocumentLink{}, err - } - rng, err = m.Range(spn) - if err != nil { - return protocol.DocumentLink{}, err - } - case source.Mod: - s, e := int(start), int(end) - line, col, err := span.ToPosition(m.TokFile, s) - if err != nil { - return protocol.DocumentLink{}, err - } - start := span.NewPoint(line, col, s) - line, col, err = span.ToPosition(m.TokFile, e) - if err != nil { - return protocol.DocumentLink{}, err - } - end := span.NewPoint(line, col, e) - rng, err = m.Range(span.New(m.URI, start, end)) - if err != nil { - return protocol.DocumentLink{}, err - } +func toProtocolLink(tokFile *token.File, m *protocol.ColumnMapper, targetURL string, start, end token.Pos) (protocol.DocumentLink, error) { + spn, err := span.NewRange(tokFile, start, end).Span() + if err != nil { + return protocol.DocumentLink{}, err + } + rng, err := m.Range(spn) + if err != nil { + return protocol.DocumentLink{}, err } return protocol.DocumentLink{ Range: rng, - Target: target, + Target: targetURL, }, nil } From 1dfab61a4877c8b77d3b89afe7b36b74d3dba889 Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Mon, 4 Jul 2022 13:22:21 -0400 Subject: [PATCH 083/136] internal/lsp/cache: use GetHandle not Bind for 5 URI-keyed maps This change replaces the 5 remaining calls to Bind (generational lifetime) with GetHandle (reference counting). The handles are now stored in persistent.Maps, which simplifies the invalidation logic. All 5 have span.URIs as keys: symbolizeHandles parse{Mod,Work}Handles mod{Tidy,Why}Handles Also, factor the functions that use these maps to have a common form: - a fooImpl function that returns an R result and an error; - a foo wrapper that decorates it with caching. - a local fooResult type, defined struct{R; error} that is the cache entry. The functions for getting/setting map entries are all inlined. The fooHandle types are all replaced by *memoize.Handle, now that their use is local. No behavior change is intended. The other uses of Bind are deleted in these CLs: https://go-review.googlesource.com/c/tools/+/415975 (astCacheData) https://go-review.googlesource.com/c/tools/+/415504 (actions) Change-Id: I77cc4e828936fe171152ca13a12f7a639299e9e5 Reviewed-on: https://go-review.googlesource.com/c/tools/+/415976 Auto-Submit: Alan Donovan TryBot-Result: Gopher Robot Run-TryBot: Alan Donovan gopls-CI: kokoro Reviewed-by: Robert Findley --- internal/lsp/cache/maps.go | 13 +- internal/lsp/cache/mod.go | 408 ++++++++++++++++----------------- internal/lsp/cache/mod_tidy.go | 214 +++++++++-------- internal/lsp/cache/session.go | 10 +- internal/lsp/cache/snapshot.go | 149 +++--------- internal/lsp/cache/symbols.go | 59 ++--- internal/persistent/map.go | 9 +- 7 files changed, 405 insertions(+), 457 deletions(-) diff --git a/internal/lsp/cache/maps.go b/internal/lsp/cache/maps.go index f8e03057cfd..1ec34151540 100644 --- a/internal/lsp/cache/maps.go +++ b/internal/lsp/cache/maps.go @@ -16,11 +16,14 @@ type filesMap struct { impl *persistent.Map } +// uriLessInterface is the < relation for "any" values containing span.URIs. 
+func uriLessInterface(a, b interface{}) bool { + return a.(span.URI) < b.(span.URI) +} + func newFilesMap() filesMap { return filesMap{ - impl: persistent.NewMap(func(a, b interface{}) bool { - return a.(span.URI) < b.(span.URI) - }), + impl: persistent.NewMap(uriLessInterface), } } @@ -152,9 +155,7 @@ type parseKeysByURIMap struct { func newParseKeysByURIMap() parseKeysByURIMap { return parseKeysByURIMap{ - impl: persistent.NewMap(func(a, b interface{}) bool { - return a.(span.URI) < b.(span.URI) - }), + impl: persistent.NewMap(uriLessInterface), } } diff --git a/internal/lsp/cache/mod.go b/internal/lsp/cache/mod.go index 843919d7b36..1963feea5ac 100644 --- a/internal/lsp/cache/mod.go +++ b/internal/lsp/cache/mod.go @@ -24,152 +24,156 @@ import ( "golang.org/x/tools/internal/span" ) -type parseModHandle struct { - handle *memoize.Handle -} +// ParseMod parses a go.mod file, using a cache. It may return partial results and an error. +func (s *snapshot) ParseMod(ctx context.Context, fh source.FileHandle) (*source.ParsedModule, error) { + uri := fh.URI() -type parseModData struct { - parsed *source.ParsedModule + s.mu.Lock() + entry, hit := s.parseModHandles.Get(uri) + s.mu.Unlock() - // err is any error encountered while parsing the file. - err error -} + type parseModResult struct { + parsed *source.ParsedModule + err error + } -func (mh *parseModHandle) parse(ctx context.Context, snapshot *snapshot) (*source.ParsedModule, error) { - v, err := mh.handle.Get(ctx, snapshot.generation, snapshot) + // cache miss? + if !hit { + handle, release := s.generation.GetHandle(fh.FileIdentity(), func(ctx context.Context, _ memoize.Arg) interface{} { + parsed, err := parseModImpl(ctx, fh) + return parseModResult{parsed, err} + }) + + entry = handle + s.mu.Lock() + s.parseModHandles.Set(uri, entry, func(_, _ interface{}) { release() }) + s.mu.Unlock() + } + + // Await result. + v, err := entry.(*memoize.Handle).Get(ctx, s.generation, s) if err != nil { return nil, err } - data := v.(*parseModData) - return data.parsed, data.err + res := v.(parseModResult) + return res.parsed, res.err } -func (s *snapshot) ParseMod(ctx context.Context, modFH source.FileHandle) (*source.ParsedModule, error) { - if handle := s.getParseModHandle(modFH.URI()); handle != nil { - return handle.parse(ctx, s) - } - h := s.generation.Bind(modFH.FileIdentity(), func(ctx context.Context, _ memoize.Arg) interface{} { - _, done := event.Start(ctx, "cache.ParseModHandle", tag.URI.Of(modFH.URI())) - defer done() +// parseModImpl parses the go.mod file whose name and contents are in fh. +// It may return partial results and an error. +func parseModImpl(ctx context.Context, fh source.FileHandle) (*source.ParsedModule, error) { + _, done := event.Start(ctx, "cache.ParseMod", tag.URI.Of(fh.URI())) + defer done() - contents, err := modFH.Read() - if err != nil { - return &parseModData{err: err} - } - m := protocol.NewColumnMapper(modFH.URI(), contents) - file, parseErr := modfile.Parse(modFH.URI().Filename(), contents, nil) - // Attempt to convert the error to a standardized parse error. 
- var parseErrors []*source.Diagnostic - if parseErr != nil { - mfErrList, ok := parseErr.(modfile.ErrorList) - if !ok { - return &parseModData{err: fmt.Errorf("unexpected parse error type %v", parseErr)} - } - for _, mfErr := range mfErrList { - rng, err := rangeFromPositions(m, mfErr.Pos, mfErr.Pos) - if err != nil { - return &parseModData{err: err} - } - parseErrors = append(parseErrors, &source.Diagnostic{ - URI: modFH.URI(), - Range: rng, - Severity: protocol.SeverityError, - Source: source.ParseError, - Message: mfErr.Err.Error(), - }) + contents, err := fh.Read() + if err != nil { + return nil, err + } + m := protocol.NewColumnMapper(fh.URI(), contents) + file, parseErr := modfile.Parse(fh.URI().Filename(), contents, nil) + // Attempt to convert the error to a standardized parse error. + var parseErrors []*source.Diagnostic + if parseErr != nil { + mfErrList, ok := parseErr.(modfile.ErrorList) + if !ok { + return nil, fmt.Errorf("unexpected parse error type %v", parseErr) + } + for _, mfErr := range mfErrList { + rng, err := rangeFromPositions(m, mfErr.Pos, mfErr.Pos) + if err != nil { + return nil, err } + parseErrors = append(parseErrors, &source.Diagnostic{ + URI: fh.URI(), + Range: rng, + Severity: protocol.SeverityError, + Source: source.ParseError, + Message: mfErr.Err.Error(), + }) } - return &parseModData{ - parsed: &source.ParsedModule{ - URI: modFH.URI(), - Mapper: m, - File: file, - ParseErrors: parseErrors, - }, - err: parseErr, - } - }) + } + return &source.ParsedModule{ + URI: fh.URI(), + Mapper: m, + File: file, + ParseErrors: parseErrors, + }, parseErr +} + +// ParseWork parses a go.work file, using a cache. It may return partial results and an error. +// TODO(adonovan): move to new work.go file. +func (s *snapshot) ParseWork(ctx context.Context, fh source.FileHandle) (*source.ParsedWorkFile, error) { + uri := fh.URI() - pmh := &parseModHandle{handle: h} s.mu.Lock() - s.parseModHandles[modFH.URI()] = pmh + entry, hit := s.parseWorkHandles.Get(uri) s.mu.Unlock() - return pmh.parse(ctx, s) -} - -type parseWorkHandle struct { - handle *memoize.Handle -} + type parseWorkResult struct { + parsed *source.ParsedWorkFile + err error + } -type parseWorkData struct { - parsed *source.ParsedWorkFile + // cache miss? + if !hit { + handle, release := s.generation.GetHandle(fh.FileIdentity(), func(ctx context.Context, _ memoize.Arg) interface{} { + parsed, err := parseWorkImpl(ctx, fh) + return parseWorkResult{parsed, err} + }) - // err is any error encountered while parsing the file. - err error -} + entry = handle + s.mu.Lock() + s.parseWorkHandles.Set(uri, entry, func(_, _ interface{}) { release() }) + s.mu.Unlock() + } -func (mh *parseWorkHandle) parse(ctx context.Context, snapshot *snapshot) (*source.ParsedWorkFile, error) { - v, err := mh.handle.Get(ctx, snapshot.generation, snapshot) + // Await result. + v, err := entry.(*memoize.Handle).Get(ctx, s.generation, s) if err != nil { return nil, err } - data := v.(*parseWorkData) - return data.parsed, data.err + res := v.(parseWorkResult) + return res.parsed, res.err } -func (s *snapshot) ParseWork(ctx context.Context, modFH source.FileHandle) (*source.ParsedWorkFile, error) { - if handle := s.getParseWorkHandle(modFH.URI()); handle != nil { - return handle.parse(ctx, s) - } - h := s.generation.Bind(modFH.FileIdentity(), func(ctx context.Context, _ memoize.Arg) interface{} { - _, done := event.Start(ctx, "cache.ParseModHandle", tag.URI.Of(modFH.URI())) - defer done() +// parseWorkImpl parses a go.work file. 
It may return partial results and an error. +func parseWorkImpl(ctx context.Context, fh source.FileHandle) (*source.ParsedWorkFile, error) { + _, done := event.Start(ctx, "cache.ParseWork", tag.URI.Of(fh.URI())) + defer done() - contents, err := modFH.Read() - if err != nil { - return &parseWorkData{err: err} - } - m := protocol.NewColumnMapper(modFH.URI(), contents) - file, parseErr := modfile.ParseWork(modFH.URI().Filename(), contents, nil) - // Attempt to convert the error to a standardized parse error. - var parseErrors []*source.Diagnostic - if parseErr != nil { - mfErrList, ok := parseErr.(modfile.ErrorList) - if !ok { - return &parseWorkData{err: fmt.Errorf("unexpected parse error type %v", parseErr)} - } - for _, mfErr := range mfErrList { - rng, err := rangeFromPositions(m, mfErr.Pos, mfErr.Pos) - if err != nil { - return &parseWorkData{err: err} - } - parseErrors = append(parseErrors, &source.Diagnostic{ - URI: modFH.URI(), - Range: rng, - Severity: protocol.SeverityError, - Source: source.ParseError, - Message: mfErr.Err.Error(), - }) + contents, err := fh.Read() + if err != nil { + return nil, err + } + m := protocol.NewColumnMapper(fh.URI(), contents) + file, parseErr := modfile.ParseWork(fh.URI().Filename(), contents, nil) + // Attempt to convert the error to a standardized parse error. + var parseErrors []*source.Diagnostic + if parseErr != nil { + mfErrList, ok := parseErr.(modfile.ErrorList) + if !ok { + return nil, fmt.Errorf("unexpected parse error type %v", parseErr) + } + for _, mfErr := range mfErrList { + rng, err := rangeFromPositions(m, mfErr.Pos, mfErr.Pos) + if err != nil { + return nil, err } + parseErrors = append(parseErrors, &source.Diagnostic{ + URI: fh.URI(), + Range: rng, + Severity: protocol.SeverityError, + Source: source.ParseError, + Message: mfErr.Err.Error(), + }) } - return &parseWorkData{ - parsed: &source.ParsedWorkFile{ - URI: modFH.URI(), - Mapper: m, - File: file, - ParseErrors: parseErrors, - }, - err: parseErr, - } - }) - - pwh := &parseWorkHandle{handle: h} - s.mu.Lock() - s.parseWorkHandles[modFH.URI()] = pwh - s.mu.Unlock() - - return pwh.parse(ctx, s) + } + return &source.ParsedWorkFile{ + URI: fh.URI(), + Mapper: m, + File: file, + ParseErrors: parseErrors, + }, parseErr } // goSum reads the go.sum file for the go.mod file at modURI, if it exists. If @@ -198,104 +202,100 @@ func sumFilename(modURI span.URI) string { return strings.TrimSuffix(modURI.Filename(), ".mod") + ".sum" } -// modKey is uniquely identifies cached data for `go mod why` or dependencies -// to upgrade. -type modKey struct { - sessionID string - env source.Hash - view string - mod source.FileIdentity - verb modAction -} +// ModWhy returns the "go mod why" result for each module named in a +// require statement in the go.mod file. +// TODO(adonovan): move to new mod_why.go file. +func (s *snapshot) ModWhy(ctx context.Context, fh source.FileHandle) (map[string]string, error) { + uri := fh.URI() -type modAction int + if s.View().FileKind(fh) != source.Mod { + return nil, fmt.Errorf("%s is not a go.mod file", uri) + } -const ( - why modAction = iota - upgrade -) + s.mu.Lock() + entry, hit := s.modWhyHandles.Get(uri) + s.mu.Unlock() -type modWhyHandle struct { - handle *memoize.Handle -} + type modWhyResult struct { + why map[string]string + err error + } -type modWhyData struct { - // why keeps track of the `go mod why` results for each require statement - // in the go.mod file. - why map[string]string + // cache miss? 
+ if !hit { + // TODO(adonovan): use a simpler cache of promises that + // is shared across snapshots. See comment at modTidyKey. + type modWhyKey struct { + // TODO(rfindley): is sessionID used to identify overlays because modWhy + // looks at overlay state? In that case, I am not sure that this key + // is actually correct. The key should probably just be URI, and + // invalidated in clone when any import changes. + sessionID string + env source.Hash + view string + mod source.FileIdentity + } + key := modWhyKey{ + sessionID: s.view.session.id, + env: hashEnv(s), + mod: fh.FileIdentity(), + view: s.view.rootURI.Filename(), + } + handle, release := s.generation.GetHandle(key, func(ctx context.Context, arg memoize.Arg) interface{} { + why, err := modWhyImpl(ctx, arg.(*snapshot), fh) + return modWhyResult{why, err} + }) - err error -} + entry = handle + s.mu.Lock() + s.modWhyHandles.Set(uri, entry, func(_, _ interface{}) { release() }) + s.mu.Unlock() + } -func (mwh *modWhyHandle) why(ctx context.Context, snapshot *snapshot) (map[string]string, error) { - v, err := mwh.handle.Get(ctx, snapshot.generation, snapshot) + // Await result. + v, err := entry.(*memoize.Handle).Get(ctx, s.generation, s) if err != nil { return nil, err } - data := v.(*modWhyData) - return data.why, data.err + res := v.(modWhyResult) + return res.why, res.err } -func (s *snapshot) ModWhy(ctx context.Context, fh source.FileHandle) (map[string]string, error) { - if s.View().FileKind(fh) != source.Mod { - return nil, fmt.Errorf("%s is not a go.mod file", fh.URI()) +// modWhyImpl returns the result of "go mod why -m" on the specified go.mod file. +func modWhyImpl(ctx context.Context, snapshot *snapshot, fh source.FileHandle) (map[string]string, error) { + ctx, done := event.Start(ctx, "cache.ModWhy", tag.URI.Of(fh.URI())) + defer done() + + pm, err := snapshot.ParseMod(ctx, fh) + if err != nil { + return nil, err } - if handle := s.getModWhyHandle(fh.URI()); handle != nil { - return handle.why(ctx, s) + // No requires to explain. + if len(pm.File.Require) == 0 { + return nil, nil // empty result } - key := modKey{ - sessionID: s.view.session.id, - env: hashEnv(s), - mod: fh.FileIdentity(), - view: s.view.rootURI.Filename(), - verb: why, + // Run `go mod why` on all the dependencies. + inv := &gocommand.Invocation{ + Verb: "mod", + Args: []string{"why", "-m"}, + WorkingDir: filepath.Dir(fh.URI().Filename()), } - h := s.generation.Bind(key, func(ctx context.Context, arg memoize.Arg) interface{} { - ctx, done := event.Start(ctx, "cache.ModWhyHandle", tag.URI.Of(fh.URI())) - defer done() - - snapshot := arg.(*snapshot) - - pm, err := snapshot.ParseMod(ctx, fh) - if err != nil { - return &modWhyData{err: err} - } - // No requires to explain. - if len(pm.File.Require) == 0 { - return &modWhyData{} - } - // Run `go mod why` on all the dependencies. 
- inv := &gocommand.Invocation{ - Verb: "mod", - Args: []string{"why", "-m"}, - WorkingDir: filepath.Dir(fh.URI().Filename()), - } - for _, req := range pm.File.Require { - inv.Args = append(inv.Args, req.Mod.Path) - } - stdout, err := snapshot.RunGoCommandDirect(ctx, source.Normal, inv) - if err != nil { - return &modWhyData{err: err} - } - whyList := strings.Split(stdout.String(), "\n\n") - if len(whyList) != len(pm.File.Require) { - return &modWhyData{ - err: fmt.Errorf("mismatched number of results: got %v, want %v", len(whyList), len(pm.File.Require)), - } - } - why := make(map[string]string, len(pm.File.Require)) - for i, req := range pm.File.Require { - why[req.Mod.Path] = whyList[i] - } - return &modWhyData{why: why} - }) - - mwh := &modWhyHandle{handle: h} - s.mu.Lock() - s.modWhyHandles[fh.URI()] = mwh - s.mu.Unlock() - - return mwh.why(ctx, s) + for _, req := range pm.File.Require { + inv.Args = append(inv.Args, req.Mod.Path) + } + stdout, err := snapshot.RunGoCommandDirect(ctx, source.Normal, inv) + if err != nil { + return nil, err + } + whyList := strings.Split(stdout.String(), "\n\n") + if len(whyList) != len(pm.File.Require) { + return nil, fmt.Errorf("mismatched number of results: got %v, want %v", len(whyList), len(pm.File.Require)) + } + why := make(map[string]string, len(pm.File.Require)) + for i, req := range pm.File.Require { + why[req.Mod.Path] = whyList[i] + } + return why, nil } // extractGoCommandError tries to parse errors that come from the go command diff --git a/internal/lsp/cache/mod_tidy.go b/internal/lsp/cache/mod_tidy.go index 91394659503..84f369ef3d4 100644 --- a/internal/lsp/cache/mod_tidy.go +++ b/internal/lsp/cache/mod_tidy.go @@ -28,125 +28,139 @@ import ( "golang.org/x/tools/internal/span" ) -type modTidyKey struct { - sessionID string - env source.Hash - gomod source.FileIdentity - imports source.Hash - unsavedOverlays source.Hash - view string -} +// modTidyImpl runs "go mod tidy" on a go.mod file, using a cache. +// +// REVIEWERS: what does it mean to cache an operation that has side effects? +// Or are we de-duplicating operations in flight on the same file? +func (s *snapshot) ModTidy(ctx context.Context, pm *source.ParsedModule) (*source.TidiedModule, error) { + uri := pm.URI + if pm.File == nil { + return nil, fmt.Errorf("cannot tidy unparseable go.mod file: %v", uri) + } -type modTidyHandle struct { - handle *memoize.Handle -} + s.mu.Lock() + entry, hit := s.modTidyHandles.Get(uri) + s.mu.Unlock() -type modTidyData struct { - tidied *source.TidiedModule - err error -} + type modTidyResult struct { + tidied *source.TidiedModule + err error + } + + // Cache miss? + if !hit { + fh, err := s.GetFile(ctx, pm.URI) + if err != nil { + return nil, err + } + // If the file handle is an overlay, it may not be written to disk. + // The go.mod file has to be on disk for `go mod tidy` to work. + // TODO(rfindley): is this still true with Go 1.16 overlay support? + if _, ok := fh.(*overlay); ok { + if info, _ := os.Stat(fh.URI().Filename()); info == nil { + return nil, source.ErrNoModOnDisk + } + } + if criticalErr := s.GetCriticalError(ctx); criticalErr != nil { + return &source.TidiedModule{ + Diagnostics: criticalErr.DiagList, + }, nil + } + workspacePkgs, err := s.workspacePackageHandles(ctx) + if err != nil { + return nil, err + } + + s.mu.Lock() + overlayHash := hashUnsavedOverlays(s.files) + s.mu.Unlock() + + // There's little reason at to use the shared cache for mod + // tidy (and mod why) as their key includes the view and session. 
+ // TODO(adonovan): use a simpler cache of promises that + // is shared across snapshots. + type modTidyKey struct { + // TODO(rfindley): this key is also suspicious (see modWhyKey). + sessionID string + env source.Hash + gomod source.FileIdentity + imports source.Hash + unsavedOverlays source.Hash + view string + } + key := modTidyKey{ + sessionID: s.view.session.id, + view: s.view.folder.Filename(), + imports: s.hashImports(ctx, workspacePkgs), + unsavedOverlays: overlayHash, + gomod: fh.FileIdentity(), + env: hashEnv(s), + } + handle, release := s.generation.GetHandle(key, func(ctx context.Context, arg memoize.Arg) interface{} { + tidied, err := modTidyImpl(ctx, arg.(*snapshot), fh, pm, workspacePkgs) + return modTidyResult{tidied, err} + }) -func (mth *modTidyHandle) tidy(ctx context.Context, snapshot *snapshot) (*source.TidiedModule, error) { - v, err := mth.handle.Get(ctx, snapshot.generation, snapshot) + entry = handle + s.mu.Lock() + s.modTidyHandles.Set(uri, entry, func(_, _ interface{}) { release() }) + s.mu.Unlock() + } + + // Await result. + v, err := entry.(*memoize.Handle).Get(ctx, s.generation, s) if err != nil { return nil, err } - data := v.(*modTidyData) - return data.tidied, data.err + res := v.(modTidyResult) + return res.tidied, res.err } -func (s *snapshot) ModTidy(ctx context.Context, pm *source.ParsedModule) (*source.TidiedModule, error) { - if pm.File == nil { - return nil, fmt.Errorf("cannot tidy unparseable go.mod file: %v", pm.URI) - } - if handle := s.getModTidyHandle(pm.URI); handle != nil { - return handle.tidy(ctx, s) +// modTidyImpl runs "go mod tidy" on a go.mod file. +func modTidyImpl(ctx context.Context, snapshot *snapshot, fh source.FileHandle, pm *source.ParsedModule, workspacePkgs []*packageHandle) (*source.TidiedModule, error) { + ctx, done := event.Start(ctx, "cache.ModTidy", tag.URI.Of(fh.URI())) + defer done() + + inv := &gocommand.Invocation{ + Verb: "mod", + Args: []string{"tidy"}, + WorkingDir: filepath.Dir(fh.URI().Filename()), } - fh, err := s.GetFile(ctx, pm.URI) + tmpURI, inv, cleanup, err := snapshot.goCommandInvocation(ctx, source.WriteTemporaryModFile, inv) if err != nil { return nil, err } - // If the file handle is an overlay, it may not be written to disk. - // The go.mod file has to be on disk for `go mod tidy` to work. - if _, ok := fh.(*overlay); ok { - if info, _ := os.Stat(fh.URI().Filename()); info == nil { - return nil, source.ErrNoModOnDisk - } + // Keep the temporary go.mod file around long enough to parse it. + defer cleanup() + + if _, err := snapshot.view.session.gocmdRunner.Run(ctx, *inv); err != nil { + return nil, err } - if criticalErr := s.GetCriticalError(ctx); criticalErr != nil { - return &source.TidiedModule{ - Diagnostics: criticalErr.DiagList, - }, nil + + // Go directly to disk to get the temporary mod file, + // since it is always on disk. + tempContents, err := ioutil.ReadFile(tmpURI.Filename()) + if err != nil { + return nil, err } - workspacePkgs, err := s.workspacePackageHandles(ctx) + ideal, err := modfile.Parse(tmpURI.Filename(), tempContents, nil) if err != nil { + // We do not need to worry about the temporary file's parse errors + // since it has been "tidied". 
return nil, err } - s.mu.Lock() - overlayHash := hashUnsavedOverlays(s.files) - s.mu.Unlock() - - key := modTidyKey{ - sessionID: s.view.session.id, - view: s.view.folder.Filename(), - imports: s.hashImports(ctx, workspacePkgs), - unsavedOverlays: overlayHash, - gomod: fh.FileIdentity(), - env: hashEnv(s), - } - h := s.generation.Bind(key, func(ctx context.Context, arg memoize.Arg) interface{} { - ctx, done := event.Start(ctx, "cache.ModTidyHandle", tag.URI.Of(fh.URI())) - defer done() - - snapshot := arg.(*snapshot) - inv := &gocommand.Invocation{ - Verb: "mod", - Args: []string{"tidy"}, - WorkingDir: filepath.Dir(fh.URI().Filename()), - } - tmpURI, inv, cleanup, err := snapshot.goCommandInvocation(ctx, source.WriteTemporaryModFile, inv) - if err != nil { - return &modTidyData{err: err} - } - // Keep the temporary go.mod file around long enough to parse it. - defer cleanup() - - if _, err := s.view.session.gocmdRunner.Run(ctx, *inv); err != nil { - return &modTidyData{err: err} - } - // Go directly to disk to get the temporary mod file, since it is - // always on disk. - tempContents, err := ioutil.ReadFile(tmpURI.Filename()) - if err != nil { - return &modTidyData{err: err} - } - ideal, err := modfile.Parse(tmpURI.Filename(), tempContents, nil) - if err != nil { - // We do not need to worry about the temporary file's parse errors - // since it has been "tidied". - return &modTidyData{err: err} - } - // Compare the original and tidied go.mod files to compute errors and - // suggested fixes. - diagnostics, err := modTidyDiagnostics(ctx, snapshot, pm, ideal, workspacePkgs) - if err != nil { - return &modTidyData{err: err} - } - return &modTidyData{ - tidied: &source.TidiedModule{ - Diagnostics: diagnostics, - TidiedContent: tempContents, - }, - } - }) - - mth := &modTidyHandle{handle: h} - s.mu.Lock() - s.modTidyHandles[fh.URI()] = mth - s.mu.Unlock() + // Compare the original and tidied go.mod files to compute errors and + // suggested fixes. 
+ diagnostics, err := modTidyDiagnostics(ctx, snapshot, pm, ideal, workspacePkgs) + if err != nil { + return nil, err + } - return mth.tidy(ctx, s) + return &source.TidiedModule{ + Diagnostics: diagnostics, + TidiedContent: tempContents, + }, nil } func (s *snapshot) hashImports(ctx context.Context, wsPackages []*packageHandle) source.Hash { diff --git a/internal/lsp/cache/session.go b/internal/lsp/cache/session.go index 98d3c250433..80468bc5931 100644 --- a/internal/lsp/cache/session.go +++ b/internal/lsp/cache/session.go @@ -238,14 +238,14 @@ func (s *Session) createView(ctx context.Context, name string, folder span.URI, isActivePackageCache: newIsActivePackageCacheMap(), goFiles: newGoFilesMap(), parseKeysByURI: newParseKeysByURIMap(), - symbols: make(map[span.URI]*symbolHandle), + symbolizeHandles: persistent.NewMap(uriLessInterface), actions: persistent.NewMap(actionKeyLessInterface), workspacePackages: make(map[PackageID]PackagePath), unloadableFiles: make(map[span.URI]struct{}), - parseModHandles: make(map[span.URI]*parseModHandle), - parseWorkHandles: make(map[span.URI]*parseWorkHandle), - modTidyHandles: make(map[span.URI]*modTidyHandle), - modWhyHandles: make(map[span.URI]*modWhyHandle), + parseModHandles: persistent.NewMap(uriLessInterface), + parseWorkHandles: persistent.NewMap(uriLessInterface), + modTidyHandles: persistent.NewMap(uriLessInterface), + modWhyHandles: persistent.NewMap(uriLessInterface), knownSubdirs: newKnownDirsSet(), workspace: workspace, } diff --git a/internal/lsp/cache/snapshot.go b/internal/lsp/cache/snapshot.go index 93316653af4..b962435b62e 100644 --- a/internal/lsp/cache/snapshot.go +++ b/internal/lsp/cache/snapshot.go @@ -85,8 +85,9 @@ type snapshot struct { goFiles goFilesMap parseKeysByURI parseKeysByURIMap - // TODO(rfindley): consider merging this with files to reduce burden on clone. - symbols map[span.URI]*symbolHandle + // symbolizeHandles maps each file URI to a handle for the future + // result of computing the symbols declared in that file. + symbolizeHandles *persistent.Map // from span.URI to *memoize.Handle // packages maps a packageKey to a *packageHandle. // It may be invalidated when a file's content changes. @@ -109,17 +110,17 @@ type snapshot struct { // parseModHandles keeps track of any parseModHandles for the snapshot. // The handles need not refer to only the view's go.mod file. - parseModHandles map[span.URI]*parseModHandle + parseModHandles *persistent.Map // from span.URI to *memoize.Handle // parseWorkHandles keeps track of any parseWorkHandles for the snapshot. // The handles need not refer to only the view's go.work file. - parseWorkHandles map[span.URI]*parseWorkHandle + parseWorkHandles *persistent.Map // from span.URI to *memoize.Handle // Preserve go.mod-related handles to avoid garbage-collecting the results // of various calls to the go command. The handles need not refer to only // the view's go.mod file. 
- modTidyHandles map[span.URI]*modTidyHandle - modWhyHandles map[span.URI]*modWhyHandle + modTidyHandles *persistent.Map // from span.URI to *memoize.Handle + modWhyHandles *persistent.Map // from span.URI to *memoize.Handle workspace *workspace // (not guarded by mu) @@ -156,6 +157,11 @@ func (s *snapshot) Destroy(destroyedBy string) { s.goFiles.Destroy() s.parseKeysByURI.Destroy() s.knownSubdirs.Destroy() + s.symbolizeHandles.Destroy() + s.parseModHandles.Destroy() + s.parseWorkHandles.Destroy() + s.modTidyHandles.Destroy() + s.modWhyHandles.Destroy() if s.workspaceDir != "" { if err := os.RemoveAll(s.workspaceDir); err != nil { @@ -700,30 +706,6 @@ func (s *snapshot) addGoFile(key parseKey, pgh *parseGoHandle, release func()) * return pgh } -func (s *snapshot) getParseModHandle(uri span.URI) *parseModHandle { - s.mu.Lock() - defer s.mu.Unlock() - return s.parseModHandles[uri] -} - -func (s *snapshot) getParseWorkHandle(uri span.URI) *parseWorkHandle { - s.mu.Lock() - defer s.mu.Unlock() - return s.parseWorkHandles[uri] -} - -func (s *snapshot) getModWhyHandle(uri span.URI) *modWhyHandle { - s.mu.Lock() - defer s.mu.Unlock() - return s.modWhyHandles[uri] -} - -func (s *snapshot) getModTidyHandle(uri span.URI) *modTidyHandle { - s.mu.Lock() - defer s.mu.Unlock() - return s.modTidyHandles[uri] -} - func (s *snapshot) getImportedBy(id PackageID) []PackageID { s.mu.Lock() defer s.mu.Unlock() @@ -1039,12 +1021,12 @@ func (s *snapshot) Symbols(ctx context.Context) map[span.URI][]source.Symbol { iolimit <- struct{}{} // acquire token group.Go(func() error { defer func() { <-iolimit }() // release token - v, err := s.buildSymbolHandle(ctx, f).handle.Get(ctx, s.generation, s) + symbols, err := s.symbolize(ctx, f) if err != nil { return err } resultMu.Lock() - result[uri] = v.(*symbolData).symbols + result[uri] = symbols resultMu.Unlock() return nil }) @@ -1159,26 +1141,6 @@ func (s *snapshot) getPackage(id PackageID, mode source.ParseMode) *packageHandl return ph } -func (s *snapshot) getSymbolHandle(uri span.URI) *symbolHandle { - s.mu.Lock() - defer s.mu.Unlock() - - return s.symbols[uri] -} - -func (s *snapshot) addSymbolHandle(uri span.URI, sh *symbolHandle) *symbolHandle { - s.mu.Lock() - defer s.mu.Unlock() - - // If the package handle has already been cached, - // return the cached handle instead of overriding it. 
- if sh, ok := s.symbols[uri]; ok { - return sh - } - s.symbols[uri] = sh - return sh -} - func (s *snapshot) getActionHandle(id PackageID, m source.ParseMode, a *analysis.Analyzer) *actionHandle { key := actionKey{ pkg: packageKey{ @@ -1732,42 +1694,23 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC files: s.files.Clone(), goFiles: s.goFiles.Clone(), parseKeysByURI: s.parseKeysByURI.Clone(), - symbols: make(map[span.URI]*symbolHandle, len(s.symbols)), + symbolizeHandles: s.symbolizeHandles.Clone(), workspacePackages: make(map[PackageID]PackagePath, len(s.workspacePackages)), unloadableFiles: make(map[span.URI]struct{}, len(s.unloadableFiles)), - parseModHandles: make(map[span.URI]*parseModHandle, len(s.parseModHandles)), - parseWorkHandles: make(map[span.URI]*parseWorkHandle, len(s.parseWorkHandles)), - modTidyHandles: make(map[span.URI]*modTidyHandle, len(s.modTidyHandles)), - modWhyHandles: make(map[span.URI]*modWhyHandle, len(s.modWhyHandles)), + parseModHandles: s.parseModHandles.Clone(), + parseWorkHandles: s.parseWorkHandles.Clone(), + modTidyHandles: s.modTidyHandles.Clone(), + modWhyHandles: s.modWhyHandles.Clone(), knownSubdirs: s.knownSubdirs.Clone(), workspace: newWorkspace, } - // Copy all of the FileHandles. - for k, v := range s.symbols { - if change, ok := changes[k]; ok { - if change.exists { - result.symbols[k] = result.buildSymbolHandle(ctx, change.fileHandle) - } - continue - } - newGen.Inherit(v.handle) - result.symbols[k] = v - } - // Copy the set of unloadable files. for k, v := range s.unloadableFiles { result.unloadableFiles[k] = v } - // Copy all of the modHandles. - for k, v := range s.parseModHandles { - result.parseModHandles[k] = v - } - // Copy all of the parseWorkHandles. - for k, v := range s.parseWorkHandles { - result.parseWorkHandles[k] = v - } + // TODO(adonovan): merge loops over "changes". for uri := range changes { keys, ok := result.parseKeysByURI.Get(uri) if ok { @@ -1776,21 +1719,13 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC } result.parseKeysByURI.Delete(uri) } - } - // Copy all of the go.mod-related handles. They may be invalidated later, - // so we inherit them at the end of the function. - for k, v := range s.modTidyHandles { - if _, ok := changes[k]; ok { - continue - } - result.modTidyHandles[k] = v - } - for k, v := range s.modWhyHandles { - if _, ok := changes[k]; ok { - continue - } - result.modWhyHandles[k] = v + // Invalidate go.mod-related handles. + result.modTidyHandles.Delete(uri) + result.modWhyHandles.Delete(uri) + + // Invalidate handles for cached symbols. + result.symbolizeHandles.Delete(uri) } // Add all of the known subdirectories, but don't update them for the @@ -1857,17 +1792,16 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC // Invalidate the previous modTidyHandle if any of the files have been // saved or if any of the metadata has been invalidated. if invalidateMetadata || fileWasSaved(originalFH, change.fileHandle) { - // TODO(rstambler): Only delete mod handles for which the - // withoutURI is relevant. - for k := range s.modTidyHandles { - delete(result.modTidyHandles, k) - } - for k := range s.modWhyHandles { - delete(result.modWhyHandles, k) - } + // TODO(maybe): Only delete mod handles for + // which the withoutURI is relevant. + // Requires reverse-engineering the go command. (!) 
+ + result.modTidyHandles.Clear() + result.modWhyHandles.Clear() } - delete(result.parseModHandles, uri) - delete(result.parseWorkHandles, uri) + + result.parseModHandles.Delete(uri) + result.parseWorkHandles.Delete(uri) // Handle the invalidated file; it may have new contents or not exist. if !change.exists { result.files.Delete(uri) @@ -2011,19 +1945,6 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC result.workspacePackages = s.workspacePackages } - // Inherit all of the go.mod-related handles. - for _, v := range result.modTidyHandles { - newGen.Inherit(v.handle) - } - for _, v := range result.modWhyHandles { - newGen.Inherit(v.handle) - } - for _, v := range result.parseModHandles { - newGen.Inherit(v.handle) - } - for _, v := range result.parseWorkHandles { - newGen.Inherit(v.handle) - } // Don't bother copying the importedBy graph, // as it changes each time we update metadata. diff --git a/internal/lsp/cache/symbols.go b/internal/lsp/cache/symbols.go index 50d7b123ec9..ab031bf64b9 100644 --- a/internal/lsp/cache/symbols.go +++ b/internal/lsp/cache/symbols.go @@ -18,43 +18,48 @@ import ( "golang.org/x/tools/internal/memoize" ) -// A symbolHandle contains a handle to the result of symbolizing a file. -type symbolHandle struct { - handle *memoize.Handle -} +// symbolize returns the result of symbolizing the file identified by fh, using a cache. +func (s *snapshot) symbolize(ctx context.Context, fh source.FileHandle) ([]source.Symbol, error) { + uri := fh.URI() -// symbolData contains the data produced by extracting symbols from a file. -type symbolData struct { - symbols []source.Symbol - err error -} + s.mu.Lock() + entry, hit := s.symbolizeHandles.Get(uri) + s.mu.Unlock() -// buildSymbolHandle returns a handle to the future result of -// symbolizing the file identified by fh, -// if necessary creating it and saving it in the snapshot. -func (s *snapshot) buildSymbolHandle(ctx context.Context, fh source.FileHandle) *symbolHandle { - if h := s.getSymbolHandle(fh.URI()); h != nil { - return h + type symbolizeResult struct { + symbols []source.Symbol + err error } - type symbolHandleKey source.Hash - key := symbolHandleKey(fh.FileIdentity().Hash) - handle := s.generation.Bind(key, func(_ context.Context, arg memoize.Arg) interface{} { - snapshot := arg.(*snapshot) - symbols, err := symbolize(snapshot, fh) - return &symbolData{symbols, err} - }) - sh := &symbolHandle{ - handle: handle, + // Cache miss? + if !hit { + type symbolHandleKey source.Hash + key := symbolHandleKey(fh.FileIdentity().Hash) + handle, release := s.generation.GetHandle(key, func(_ context.Context, arg memoize.Arg) interface{} { + symbols, err := symbolizeImpl(arg.(*snapshot), fh) + return symbolizeResult{symbols, err} + }) + + entry = handle + + s.mu.Lock() + s.symbolizeHandles.Set(uri, entry, func(_, _ interface{}) { release() }) + s.mu.Unlock() } - return s.addSymbolHandle(fh.URI(), sh) + // Await result. + v, err := entry.(*memoize.Handle).Get(ctx, s.generation, s) + if err != nil { + return nil, err + } + res := v.(symbolizeResult) + return res.symbols, res.err } -// symbolize reads and parses a file and extracts symbols from it. +// symbolizeImpl reads and parses a file and extracts symbols from it. // It may use a parsed file already present in the cache but // otherwise does not populate the cache. 
-func symbolize(snapshot *snapshot, fh source.FileHandle) ([]source.Symbol, error) { +func symbolizeImpl(snapshot *snapshot, fh source.FileHandle) ([]source.Symbol, error) { src, err := fh.Read() if err != nil { return nil, err diff --git a/internal/persistent/map.go b/internal/persistent/map.go index 55b7065e9f7..f5dd10206b8 100644 --- a/internal/persistent/map.go +++ b/internal/persistent/map.go @@ -120,10 +120,17 @@ func (pm *Map) Clone() *Map { } } -// Destroy the persistent map. +// Destroy destroys the map. // // After Destroy, the Map should not be used again. func (pm *Map) Destroy() { + // The implementation of these two functions is the same, + // but their intent is different. + pm.Clear() +} + +// Clear removes all entries from the map. +func (pm *Map) Clear() { pm.root.decref() pm.root = nil } From 9c2a5567e347d6d57de667256c748abf757d1ee4 Mon Sep 17 00:00:00 2001 From: Robert Findley Date: Thu, 7 Jul 2022 18:41:33 -0400 Subject: [PATCH 084/136] internal/lsp/cache: fail addPackageHandle if metadata is stale If metadata is refreshed during the execution of buildPackageHandle, we should not store the resulting package handle in the snapshot, as it breaks the invariant that computed packages match the currently loaded metadata. This strictness revealed another bug: because of our fine-grained locking in snapshot.load, it is possible that we set valid metadata multiple times, leading to unnecessary invalidation and potential further races to package handles. Fix this by building all metadata when processing the go/packages result, and only filtering updates after grabbing the lock. For golang/go#53733 Change-Id: Ib63ae4fbd97d0d25d45fe04f9bcd835996db41da Reviewed-on: https://go-review.googlesource.com/c/tools/+/416224 Run-TryBot: Robert Findley TryBot-Result: Gopher Robot Reviewed-by: Alan Donovan gopls-CI: kokoro --- internal/lsp/cache/check.go | 4 +-- internal/lsp/cache/load.go | 60 ++++++++++++++++++++-------------- internal/lsp/cache/snapshot.go | 22 +++++++++++-- 3 files changed, 55 insertions(+), 31 deletions(-) diff --git a/internal/lsp/cache/check.go b/internal/lsp/cache/check.go index aae6de0eea7..c8b314a072d 100644 --- a/internal/lsp/cache/check.go +++ b/internal/lsp/cache/check.go @@ -209,9 +209,7 @@ func (s *snapshot) buildPackageHandle(ctx context.Context, id PackageID, mode so // been cached, addPackage will return the cached value. This is fine, // since the original package handle above will have no references and be // garbage collected. - ph = s.addPackageHandle(ph, release) - - return ph, nil + return s.addPackageHandle(ph, release) } func (s *snapshot) workspaceParseMode(id PackageID) source.ParseMode { diff --git a/internal/lsp/cache/load.go b/internal/lsp/cache/load.go index 08c88ab8e74..8937f934031 100644 --- a/internal/lsp/cache/load.go +++ b/internal/lsp/cache/load.go @@ -47,6 +47,8 @@ func (s *snapshot) load(ctx context.Context, allowNetwork bool, scopes ...interf if errors.Is(err, context.Canceled) { return } + // TODO(rfindley): merge these metadata updates with the updates below, to + // avoid updating the graph twice. s.clearShouldLoad(scopes...) }() @@ -154,7 +156,7 @@ func (s *snapshot) load(ctx context.Context, allowNetwork bool, scopes ...interf } moduleErrs := make(map[string][]packages.Error) // module path -> errors - updates := make(map[PackageID]*KnownMetadata) + newMetadata := make(map[PackageID]*KnownMetadata) for _, pkg := range pkgs { // The Go command returns synthetic list results for module queries that // encountered module errors. 
@@ -196,31 +198,48 @@ func (s *snapshot) load(ctx context.Context, allowNetwork bool, scopes ...interf } // Skip filtered packages. They may be added anyway if they're // dependencies of non-filtered packages. + // + // TODO(rfindley): why exclude metadata arbitrarily here? It should be safe + // to capture all metadata. if s.view.allFilesExcluded(pkg) { continue } - // TODO: once metadata is immutable, we shouldn't have to lock here. - s.mu.Lock() - err := computeMetadataUpdates(ctx, s.meta, PackagePath(pkg.PkgPath), pkg, cfg, query, updates, nil) - s.mu.Unlock() - if err != nil { + if err := buildMetadata(ctx, PackagePath(pkg.PkgPath), pkg, cfg, query, newMetadata, nil); err != nil { return err } } - var loadedIDs []PackageID - for id := range updates { - loadedIDs = append(loadedIDs, id) + s.mu.Lock() + + // Only update metadata where we don't already have valid metadata. + // + // We want to preserve an invariant that s.packages.Get(id).m.Metadata + // matches s.meta.metadata[id].Metadata. By avoiding overwriting valid + // metadata, we minimize the amount of invalidation required to preserve this + // invariant. + // + // TODO(rfindley): perform a sanity check that metadata matches here. If not, + // we have an invalidation bug elsewhere. + updates := make(map[PackageID]*KnownMetadata) + var updatedIDs []PackageID + for _, m := range newMetadata { + if existing := s.meta.metadata[m.ID]; existing == nil || !existing.Valid { + updates[m.ID] = m + updatedIDs = append(updatedIDs, m.ID) + } } event.Log(ctx, fmt.Sprintf("%s: updating metadata for %d packages", eventName, len(updates))) - s.mu.Lock() + // Invalidate the reverse transitive closure of packages that have changed. + // + // Note that the original metadata is being invalidated here, so we use the + // original metadata graph to compute the reverse closure. + invalidatedPackages := s.meta.reverseTransitiveClosure(true, updatedIDs...) - // invalidate the reverse transitive closure of packages that have changed. - invalidatedPackages := s.meta.reverseTransitiveClosure(true, loadedIDs...) s.meta = s.meta.Clone(updates) s.resetIsActivePackageLocked() + // Invalidate any packages we may have associated with this metadata. // // TODO(rfindley): this should not be necessary, as we should have already @@ -431,10 +450,10 @@ func makeWorkspaceDir(ctx context.Context, workspace *workspace, fs source.FileS return tmpdir, nil } -// computeMetadataUpdates populates the updates map with metadata updates to +// buildMetadata populates the updates map with metadata updates to // apply, based on the given pkg. It recurs through pkg.Imports to ensure that // metadata exists for all dependencies. -func computeMetadataUpdates(ctx context.Context, g *metadataGraph, pkgPath PackagePath, pkg *packages.Package, cfg *packages.Config, query []string, updates map[PackageID]*KnownMetadata, path []PackageID) error { +func buildMetadata(ctx context.Context, pkgPath PackagePath, pkg *packages.Package, cfg *packages.Config, query []string, updates map[PackageID]*KnownMetadata, path []PackageID) error { id := PackageID(pkg.ID) if source.IsCommandLineArguments(pkg.ID) { suffix := ":" + strings.Join(query, ",") @@ -442,21 +461,12 @@ func computeMetadataUpdates(ctx context.Context, g *metadataGraph, pkgPath Packa pkgPath = PackagePath(string(pkgPath) + suffix) } - // If we have valid metadata for this package, don't update. This minimizes - // the amount of subsequent invalidation. - // - // TODO(rfindley): perform a sanity check that metadata matches here. 
If not, - // we have an invalidation bug elsewhere. - if existing := g.metadata[id]; existing != nil && existing.Valid { - return nil - } - if _, ok := updates[id]; ok { // If we've already seen this dependency, there may be an import cycle, or // we may have reached the same package transitively via distinct paths. // Check the path to confirm. - // TODO(rfindley): this doesn't look right. Any single piece of new + // TODO(rfindley): this doesn't look sufficient. Any single piece of new // metadata could theoretically introduce import cycles in the metadata // graph. What's the point of this limited check here (and is it even // possible to get an import cycle in data from go/packages)? Consider @@ -535,7 +545,7 @@ func computeMetadataUpdates(ctx context.Context, g *metadataGraph, pkgPath Packa m.MissingDeps[importPkgPath] = struct{}{} continue } - if err := computeMetadataUpdates(ctx, g, importPkgPath, importPkg, cfg, query, updates, append(path, id)); err != nil { + if err := buildMetadata(ctx, importPkgPath, importPkg, cfg, query, updates, append(path, id)); err != nil { event.Error(ctx, "error in dependency", err) } } diff --git a/internal/lsp/cache/snapshot.go b/internal/lsp/cache/snapshot.go index b962435b62e..5623f24c550 100644 --- a/internal/lsp/cache/snapshot.go +++ b/internal/lsp/cache/snapshot.go @@ -91,6 +91,11 @@ type snapshot struct { // packages maps a packageKey to a *packageHandle. // It may be invalidated when a file's content changes. + // + // Invariants to preserve: + // - packages.Get(id).m.Metadata == meta.metadata[id].Metadata for all ids + // - if a package is in packages, then all of its dependencies should also + // be in packages, unless there is a missing import packages packagesMap // isActivePackageCache maps package ID to the cached value if it is active or not. @@ -712,18 +717,29 @@ func (s *snapshot) getImportedBy(id PackageID) []PackageID { return s.meta.importedBy[id] } -func (s *snapshot) addPackageHandle(ph *packageHandle, release func()) *packageHandle { +// addPackageHandle stores ph in the snapshot, or returns a pre-existing handle +// for the given package key, if it exists. +// +// An error is returned if the metadata used to build ph is no longer relevant. +func (s *snapshot) addPackageHandle(ph *packageHandle, release func()) (*packageHandle, error) { s.mu.Lock() defer s.mu.Unlock() + if s.meta.metadata[ph.m.ID].Metadata != ph.m.Metadata { + return nil, fmt.Errorf("stale metadata for %s", ph.m.ID) + } + // If the package handle has already been cached, // return the cached handle instead of overriding it. if result, ok := s.packages.Get(ph.packageKey()); ok { release() - return result + if result.m.Metadata != ph.m.Metadata { + return nil, bug.Errorf("existing package handle does not match for %s", ph.m.ID) + } + return result, nil } s.packages.Set(ph.packageKey(), ph, release) - return ph + return ph, nil } func (s *snapshot) workspacePackageIDs() (ids []PackageID) { From 8746177218db2b5640e4134ec99a5a905018b50c Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Thu, 7 Jul 2022 15:29:21 -0400 Subject: [PATCH 085/136] internal/lsp/cache: simplify ParseGo This change simplifes the ParseGo interface to make it consistent with the other handle+map operations: - ParseGoImpl is the basic parser. - The 'fixed' bool result is a field of ParsedGoFile. - ParseGo is the caching wrapper. The map accessors have been inlined into it. - goFiles (renamed parsedGoFiles) is now just a bare persistent.Map. 
- parseGoHandle is replaced by *memoize.Handle - the operations of "make a handle" and "wait for it" are no longer separate (since clients never want one without the other). - cachedPGF and peekOrParse have been combined into peekParseGoLocked. Change-Id: If01a6aaa7e6a8d78cb89c305e5279738e8e7bb55 Reviewed-on: https://go-review.googlesource.com/c/tools/+/416223 TryBot-Result: Gopher Robot Reviewed-by: Robert Findley gopls-CI: kokoro Run-TryBot: Alan Donovan --- internal/lsp/cache/check.go | 8 +- internal/lsp/cache/maps.go | 46 +--------- internal/lsp/cache/parse.go | 149 ++++++++++++++++----------------- internal/lsp/cache/session.go | 2 +- internal/lsp/cache/snapshot.go | 60 ++++--------- internal/lsp/cache/symbols.go | 10 ++- internal/lsp/source/view.go | 1 + 7 files changed, 104 insertions(+), 172 deletions(-) diff --git a/internal/lsp/cache/check.go b/internal/lsp/cache/check.go index c8b314a072d..c17288c9e15 100644 --- a/internal/lsp/cache/check.go +++ b/internal/lsp/cache/check.go @@ -568,15 +568,13 @@ func parseCompiledGoFiles(ctx context.Context, compiledGoFiles []source.FileHand // TODO(adonovan): opt: parallelize this loop, which takes 1-25ms. for _, fh := range compiledGoFiles { var pgf *source.ParsedGoFile - var fixed bool var err error // Only parse Full through the cache -- we need to own Exported ASTs // to prune them. if mode == source.ParseFull { - pgf, fixed, err = snapshot.parseGo(ctx, fh, mode) + pgf, err = snapshot.ParseGo(ctx, fh, mode) } else { - d := parseGo(ctx, snapshot.FileSet(), fh, mode) // ~20us/KB - pgf, fixed, err = d.parsed, d.fixed, d.err + pgf, err = parseGoImpl(ctx, snapshot.FileSet(), fh, mode) // ~20us/KB } if err != nil { return err @@ -587,7 +585,7 @@ func parseCompiledGoFiles(ctx context.Context, compiledGoFiles []source.FileHand } // If we have fixed parse errors in any of the files, we should hide type // errors, as they may be completely nonsensical. 
- pkg.hasFixedFiles = pkg.hasFixedFiles || fixed + pkg.hasFixedFiles = pkg.hasFixedFiles || pgf.Fixed } if mode != source.ParseExported { return nil diff --git a/internal/lsp/cache/maps.go b/internal/lsp/cache/maps.go index 1ec34151540..4bb3b3b2689 100644 --- a/internal/lsp/cache/maps.go +++ b/internal/lsp/cache/maps.go @@ -59,16 +59,8 @@ func (m filesMap) Delete(key span.URI) { m.impl.Delete(key) } -type goFilesMap struct { - impl *persistent.Map -} - -func newGoFilesMap() goFilesMap { - return goFilesMap{ - impl: persistent.NewMap(func(a, b interface{}) bool { - return parseKeyLess(a.(parseKey), b.(parseKey)) - }), - } +func parseKeyLessInterface(a, b interface{}) bool { + return parseKeyLess(a.(parseKey), b.(parseKey)) } func parseKeyLess(a, b parseKey) bool { @@ -81,40 +73,6 @@ func parseKeyLess(a, b parseKey) bool { return a.file.URI < b.file.URI } -func (m goFilesMap) Clone() goFilesMap { - return goFilesMap{ - impl: m.impl.Clone(), - } -} - -func (m goFilesMap) Destroy() { - m.impl.Destroy() -} - -func (m goFilesMap) Get(key parseKey) (*parseGoHandle, bool) { - value, ok := m.impl.Get(key) - if !ok { - return nil, false - } - return value.(*parseGoHandle), true -} - -func (m goFilesMap) Range(do func(key parseKey, value *parseGoHandle)) { - m.impl.Range(func(key, value interface{}) { - do(key.(parseKey), value.(*parseGoHandle)) - }) -} - -func (m goFilesMap) Set(key parseKey, value *parseGoHandle, release func()) { - m.impl.Set(key, value, func(key, value interface{}) { - release() - }) -} - -func (m goFilesMap) Delete(key parseKey) { - m.impl.Delete(key) -} - type isActivePackageCacheMap struct { impl *persistent.Map } diff --git a/internal/lsp/cache/parse.go b/internal/lsp/cache/parse.go index 712f26ad715..c8c751f0b2e 100644 --- a/internal/lsp/cache/parse.go +++ b/internal/lsp/cache/parse.go @@ -35,78 +35,76 @@ type parseKey struct { mode source.ParseMode } -type parseGoHandle struct { - handle *memoize.Handle - file source.FileHandle - mode source.ParseMode -} - -type parseGoData struct { - parsed *source.ParsedGoFile - - // If true, we adjusted the AST to make it type check better, and - // it may not match the source code. - fixed bool - err error // any other errors -} +// ParseGo parses the file whose contents are provided by fh, using a cache. +// The resulting tree may have be fixed up. +// +// The parser mode must not be ParseExported: that mode is used during +// type checking to destructively trim the tree to reduce work, +// which is not safe for values from a shared cache. +// TODO(adonovan): opt: shouldn't parseGoImpl do the trimming? +// Then we can cache the result since it would never change. +func (s *snapshot) ParseGo(ctx context.Context, fh source.FileHandle, mode source.ParseMode) (*source.ParsedGoFile, error) { + if mode == source.ParseExported { + panic("only type checking should use Exported") + } -func (s *snapshot) parseGoHandle(ctx context.Context, fh source.FileHandle, mode source.ParseMode) *parseGoHandle { key := parseKey{ file: fh.FileIdentity(), mode: mode, } - if pgh := s.getGoFile(key); pgh != nil { - return pgh - } - parseHandle, release := s.generation.GetHandle(key, func(ctx context.Context, arg memoize.Arg) interface{} { - snapshot := arg.(*snapshot) - return parseGo(ctx, snapshot.FileSet(), fh, mode) - }) - pgh := &parseGoHandle{ - handle: parseHandle, - file: fh, - mode: mode, + s.mu.Lock() + entry, hit := s.parsedGoFiles.Get(key) + s.mu.Unlock() + + // cache miss? 
+ if !hit { + handle, release := s.generation.GetHandle(key, func(ctx context.Context, arg memoize.Arg) interface{} { + parsed, err := parseGoImpl(ctx, arg.(*snapshot).FileSet(), fh, mode) + return parseGoResult{parsed, err} + }) + + s.mu.Lock() + // Check cache again in case another thread got there first. + if prev, ok := s.parsedGoFiles.Get(key); ok { + entry = prev + release() + } else { + entry = handle + s.parsedGoFiles.Set(key, entry, func(_, _ interface{}) { release() }) + } + s.mu.Unlock() } - return s.addGoFile(key, pgh, release) -} - -func (pgh *parseGoHandle) String() string { - return pgh.file.URI().Filename() -} -func (s *snapshot) ParseGo(ctx context.Context, fh source.FileHandle, mode source.ParseMode) (*source.ParsedGoFile, error) { - pgf, _, err := s.parseGo(ctx, fh, mode) - return pgf, err + // Await result. + v, err := entry.(*memoize.Handle).Get(ctx, s.generation, s) + if err != nil { + return nil, err + } + res := v.(parseGoResult) + return res.parsed, res.err } -func (s *snapshot) parseGo(ctx context.Context, fh source.FileHandle, mode source.ParseMode) (*source.ParsedGoFile, bool, error) { - if mode == source.ParseExported { - panic("only type checking should use Exported") +// peekParseGoLocked peeks at the cache used by ParseGo but does not +// populate it or wait for other threads to do so. On cache hit, it returns +// the cache result of parseGoImpl; otherwise it returns (nil, nil). +func (s *snapshot) peekParseGoLocked(fh source.FileHandle, mode source.ParseMode) (*source.ParsedGoFile, error) { + entry, hit := s.parsedGoFiles.Get(parseKey{fh.FileIdentity(), mode}) + if !hit { + return nil, nil // no-one has requested this file } - pgh := s.parseGoHandle(ctx, fh, mode) - d, err := pgh.handle.Get(ctx, s.generation, s) - if err != nil { - return nil, false, err + v := entry.(*memoize.Handle).Cached(s.generation) + if v == nil { + return nil, nil // parsing is still in progress } - data := d.(*parseGoData) - return data.parsed, data.fixed, data.err + res := v.(parseGoResult) + return res.parsed, res.err } -// cachedPGF returns the cached ParsedGoFile for the given ParseMode, if it -// has already been computed. Otherwise, it returns nil. -func (s *snapshot) cachedPGF(fh source.FileHandle, mode source.ParseMode) *source.ParsedGoFile { - key := parseKey{file: fh.FileIdentity(), mode: mode} - if pgh := s.getGoFile(key); pgh != nil { - cached := pgh.handle.Cached(s.generation) - if cached != nil { - cached := cached.(*parseGoData) - if cached.parsed != nil { - return cached.parsed - } - } - } - return nil +// parseGoResult holds the result of a call to parseGoImpl. +type parseGoResult struct { + parsed *source.ParsedGoFile + err error } type astCacheKey struct { @@ -274,17 +272,18 @@ func buildASTCache(pgf *source.ParsedGoFile) *astCacheData { return data } -func parseGo(ctx context.Context, fset *token.FileSet, fh source.FileHandle, mode source.ParseMode) *parseGoData { +// parseGoImpl parses the Go source file whose content is provided by fh. 
+func parseGoImpl(ctx context.Context, fset *token.FileSet, fh source.FileHandle, mode source.ParseMode) (*source.ParsedGoFile, error) { ctx, done := event.Start(ctx, "cache.parseGo", tag.File.Of(fh.URI().Filename())) defer done() ext := filepath.Ext(fh.URI().Filename()) if ext != ".go" && ext != "" { // files generated by cgo have no extension - return &parseGoData{err: fmt.Errorf("cannot parse non-Go file %s", fh.URI())} + return nil, fmt.Errorf("cannot parse non-Go file %s", fh.URI()) } src, err := fh.Read() if err != nil { - return &parseGoData{err: err} + return nil, err } parserMode := parser.AllErrors | parser.ParseComments @@ -346,22 +345,20 @@ func parseGo(ctx context.Context, fset *token.FileSet, fh source.FileHandle, mod } } - return &parseGoData{ - parsed: &source.ParsedGoFile{ - URI: fh.URI(), - Mode: mode, - Src: src, - File: file, - Tok: tok, - Mapper: &protocol.ColumnMapper{ - URI: fh.URI(), - TokFile: tok, - Content: src, - }, - ParseErr: parseErr, + return &source.ParsedGoFile{ + URI: fh.URI(), + Mode: mode, + Src: src, + Fixed: fixed, + File: file, + Tok: tok, + Mapper: &protocol.ColumnMapper{ + URI: fh.URI(), + TokFile: tok, + Content: src, }, - fixed: fixed, - } + ParseErr: parseErr, + }, nil } // An unexportedFilter removes as much unexported AST from a set of Files as possible. diff --git a/internal/lsp/cache/session.go b/internal/lsp/cache/session.go index 80468bc5931..9ea612a3ab4 100644 --- a/internal/lsp/cache/session.go +++ b/internal/lsp/cache/session.go @@ -236,7 +236,7 @@ func (s *Session) createView(ctx context.Context, name string, folder span.URI, meta: &metadataGraph{}, files: newFilesMap(), isActivePackageCache: newIsActivePackageCacheMap(), - goFiles: newGoFilesMap(), + parsedGoFiles: persistent.NewMap(parseKeyLessInterface), parseKeysByURI: newParseKeysByURIMap(), symbolizeHandles: persistent.NewMap(uriLessInterface), actions: persistent.NewMap(actionKeyLessInterface), diff --git a/internal/lsp/cache/snapshot.go b/internal/lsp/cache/snapshot.go index 5623f24c550..c228db9655b 100644 --- a/internal/lsp/cache/snapshot.go +++ b/internal/lsp/cache/snapshot.go @@ -81,8 +81,13 @@ type snapshot struct { // It may invalidated when a file's content changes. files filesMap - // goFiles maps a parseKey to its parseGoHandle. - goFiles goFilesMap + // parsedGoFiles maps a parseKey to the handle of the future result of parsing it. + parsedGoFiles *persistent.Map // from parseKey to *memoize.Handle + + // parseKeysByURI records the set of keys of parsedGoFiles that + // need to be invalidated for each URI. + // TODO(adonovan): opt: parseKey = ParseMode + URI, so this could + // be just a set of ParseModes, or we could loop over AllParseModes. 
parseKeysByURI parseKeysByURIMap // symbolizeHandles maps each file URI to a handle for the future @@ -159,7 +164,7 @@ func (s *snapshot) Destroy(destroyedBy string) { s.isActivePackageCache.Destroy() s.actions.Destroy() s.files.Destroy() - s.goFiles.Destroy() + s.parsedGoFiles.Destroy() s.parseKeysByURI.Destroy() s.knownSubdirs.Destroy() s.symbolizeHandles.Destroy() @@ -688,29 +693,6 @@ func (s *snapshot) checkedPackage(ctx context.Context, id PackageID, mode source return ph.check(ctx, s) } -func (s *snapshot) getGoFile(key parseKey) *parseGoHandle { - s.mu.Lock() - defer s.mu.Unlock() - if result, ok := s.goFiles.Get(key); ok { - return result - } - return nil -} - -func (s *snapshot) addGoFile(key parseKey, pgh *parseGoHandle, release func()) *parseGoHandle { - s.mu.Lock() - defer s.mu.Unlock() - if result, ok := s.goFiles.Get(key); ok { - release() - return result - } - s.goFiles.Set(key, pgh, release) - keys, _ := s.parseKeysByURI.Get(key.file.URI) - keys = append([]parseKey{key}, keys...) - s.parseKeysByURI.Set(key.file.URI, keys) - return pgh -} - func (s *snapshot) getImportedBy(id PackageID) []PackageID { s.mu.Lock() defer s.mu.Unlock() @@ -1708,7 +1690,7 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC isActivePackageCache: s.isActivePackageCache.Clone(), actions: s.actions.Clone(), files: s.files.Clone(), - goFiles: s.goFiles.Clone(), + parsedGoFiles: s.parsedGoFiles.Clone(), parseKeysByURI: s.parseKeysByURI.Clone(), symbolizeHandles: s.symbolizeHandles.Clone(), workspacePackages: make(map[PackageID]PackagePath, len(s.workspacePackages)), @@ -1731,7 +1713,7 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC keys, ok := result.parseKeysByURI.Get(uri) if ok { for _, key := range keys { - result.goFiles.Delete(key) + result.parsedGoFiles.Delete(key) } result.parseKeysByURI.Delete(uri) } @@ -2147,28 +2129,20 @@ func metadataChanges(ctx context.Context, lockedSnapshot *snapshot, oldFH, newFH return invalidate, pkgFileChanged, importDeleted } -// peekOrParse returns the cached ParsedGoFile if it exists, otherwise parses -// without caching. +// peekOrParse returns the cached ParsedGoFile if it exists, +// otherwise parses without populating the cache. // // It returns an error if the file could not be read (note that parsing errors // are stored in ParsedGoFile.ParseErr). // // lockedSnapshot must be locked. func peekOrParse(ctx context.Context, lockedSnapshot *snapshot, fh source.FileHandle, mode source.ParseMode) (*source.ParsedGoFile, error) { - key := parseKey{file: fh.FileIdentity(), mode: mode} - if pgh, ok := lockedSnapshot.goFiles.Get(key); ok { - cached := pgh.handle.Cached(lockedSnapshot.generation) - if cached != nil { - cached := cached.(*parseGoData) - if cached.parsed != nil { - return cached.parsed, nil - } - } + // Peek in the cache without populating it. + // We do this to reduce retained heap, not work. 
+ if parsed, _ := lockedSnapshot.peekParseGoLocked(fh, mode); parsed != nil { + return parsed, nil // cache hit } - - fset := token.NewFileSet() - data := parseGo(ctx, fset, fh, mode) - return data.parsed, data.err + return parseGoImpl(ctx, token.NewFileSet(), fh, mode) } func magicCommentsChanged(original *ast.File, current *ast.File) bool { diff --git a/internal/lsp/cache/symbols.go b/internal/lsp/cache/symbols.go index ab031bf64b9..4cbf8589025 100644 --- a/internal/lsp/cache/symbols.go +++ b/internal/lsp/cache/symbols.go @@ -70,9 +70,13 @@ func symbolizeImpl(snapshot *snapshot, fh source.FileHandle) ([]source.Symbol, e fileDesc *token.File ) - // If the file has already been fully parsed through the cache, we can just - // use the result. - if pgf := snapshot.cachedPGF(fh, source.ParseFull); pgf != nil { + // If the file has already been fully parsed through the + // cache, we can just use the result. But we don't want to + // populate the cache after a miss. + snapshot.mu.Lock() + pgf, _ := snapshot.peekParseGoLocked(fh, source.ParseFull) + snapshot.mu.Unlock() + if pgf != nil { file = pgf.File fileDesc = pgf.Tok } diff --git a/internal/lsp/source/view.go b/internal/lsp/source/view.go index c8656153bd3..caf18505856 100644 --- a/internal/lsp/source/view.go +++ b/internal/lsp/source/view.go @@ -297,6 +297,7 @@ type ParsedGoFile struct { // Source code used to build the AST. It may be different from the // actual content of the file if we have fixed the AST. Src []byte + Fixed bool Mapper *protocol.ColumnMapper ParseErr scanner.ErrorList } From 53ead67a981c04bcacdd4f593330c43ee9285578 Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Tue, 5 Jul 2022 10:30:58 -0400 Subject: [PATCH 086/136] internal/memoize: delete Generation and Bind Now that the lifetime of all handles in the store is determined by reference counting, we no longer need the generation feature. The Arg interface, renamed RefCounted, is now optional, and causes the lifetime of the argument to be extended for the duration of the Function call. This is important when the Get(ctx) context is cancelled, causing the function call to outlive Get: if Get's reference to the argument was borrowed, it needs to increase the refcount to prevent premature destruction. Also: - add missing snapshot.release() call in importsState.populateProcessEnv. - remove the --memoize_panic_on_destroyed flag. 
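To make the RefCounted contract above concrete: a cancelled Get can return before the memoized function has finished, so the function must hold its own reference to its argument rather than borrow the caller's. Below is a minimal, self-contained sketch of that idea; refCountedArg and get are toy names used only for illustration, not the memoize API.

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// refCountedArg stands in for a reference-counted value such as a snapshot:
// Destroy must wait until every acquired reference has been released.
type refCountedArg struct{ wg sync.WaitGroup }

func (a *refCountedArg) Acquire() func() { a.wg.Add(1); return a.wg.Done }
func (a *refCountedArg) Destroy()        { a.wg.Wait() }

// get mimics the shape of Handle.Get: it may return early when ctx is
// cancelled, so the goroutine running fn takes its own reference to arg
// instead of relying on the caller's borrowed one.
func get(ctx context.Context, arg *refCountedArg, fn func(*refCountedArg)) error {
	release := arg.Acquire() // keep arg alive for the duration of fn
	done := make(chan struct{})
	go func() {
		defer release()
		fn(arg)
		close(done)
	}()
	select {
	case <-done:
		return nil
	case <-ctx.Done():
		return ctx.Err() // fn keeps running; arg stays alive until it returns
	}
}

func main() {
	var arg refCountedArg
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()
	_ = get(ctx, &arg, func(*refCountedArg) { time.Sleep(50 * time.Millisecond) })
	arg.Destroy() // blocks until the still-running fn releases its reference
	fmt.Println("argument destroyed only after fn finished")
}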
Change-Id: I0b3d37c16f8b3f550bb10120c066b628c3db244b Reviewed-on: https://go-review.googlesource.com/c/tools/+/416076 Run-TryBot: Alan Donovan TryBot-Result: Gopher Robot Auto-Submit: Alan Donovan Reviewed-by: Robert Findley gopls-CI: kokoro --- internal/lsp/cache/analysis.go | 6 +- internal/lsp/cache/check.go | 8 +- internal/lsp/cache/imports.go | 3 +- internal/lsp/cache/mod.go | 12 +- internal/lsp/cache/mod_tidy.go | 9 +- internal/lsp/cache/parse.go | 10 +- internal/lsp/cache/session.go | 26 ++- internal/lsp/cache/snapshot.go | 48 +++-- internal/lsp/cache/symbols.go | 4 +- internal/lsp/cache/view.go | 4 +- internal/lsp/command.go | 2 +- internal/lsp/general.go | 3 +- internal/lsp/source/view.go | 13 +- internal/memoize/memoize.go | 295 +++++++------------------------ internal/memoize/memoize_test.go | 84 +++------ 15 files changed, 184 insertions(+), 343 deletions(-) diff --git a/internal/lsp/cache/analysis.go b/internal/lsp/cache/analysis.go index 4b437858ef3..e196d1c4a35 100644 --- a/internal/lsp/cache/analysis.go +++ b/internal/lsp/cache/analysis.go @@ -137,7 +137,7 @@ func (s *snapshot) actionHandle(ctx context.Context, id PackageID, a *analysis.A } } - handle, release := s.generation.GetHandle(buildActionKey(a, ph), func(ctx context.Context, arg memoize.Arg) interface{} { + handle, release := s.store.Handle(buildActionKey(a, ph), func(ctx context.Context, arg interface{}) interface{} { snapshot := arg.(*snapshot) // Analyze dependencies first. results, err := execAll(ctx, snapshot, deps) @@ -159,7 +159,7 @@ func (s *snapshot) actionHandle(ctx context.Context, id PackageID, a *analysis.A } func (act *actionHandle) analyze(ctx context.Context, snapshot *snapshot) ([]*source.Diagnostic, interface{}, error) { - d, err := act.handle.Get(ctx, snapshot.generation, snapshot) + d, err := snapshot.awaitHandle(ctx, act.handle) if err != nil { return nil, nil, err } @@ -189,7 +189,7 @@ func execAll(ctx context.Context, snapshot *snapshot, actions []*actionHandle) ( for _, act := range actions { act := act g.Go(func() error { - v, err := act.handle.Get(ctx, snapshot.generation, snapshot) + v, err := snapshot.awaitHandle(ctx, act.handle) if err != nil { return err } diff --git a/internal/lsp/cache/check.go b/internal/lsp/cache/check.go index c17288c9e15..4680c6e7285 100644 --- a/internal/lsp/cache/check.go +++ b/internal/lsp/cache/check.go @@ -167,7 +167,7 @@ func (s *snapshot) buildPackageHandle(ctx context.Context, id PackageID, mode so // Create a handle for the result of type checking. experimentalKey := s.View().Options().ExperimentalPackageCacheKey key := computePackageKey(m.ID, compiledGoFiles, m, depKeys, mode, experimentalKey) - handle, release := s.generation.GetHandle(key, func(ctx context.Context, arg memoize.Arg) interface{} { + handle, release := s.store.Handle(key, func(ctx context.Context, arg interface{}) interface{} { // TODO(adonovan): eliminate use of arg with this handle. // (In all cases snapshot is equal to the enclosing s.) 
snapshot := arg.(*snapshot) @@ -286,7 +286,7 @@ func hashConfig(config *packages.Config) source.Hash { } func (ph *packageHandle) check(ctx context.Context, s *snapshot) (*pkg, error) { - v, err := ph.handle.Get(ctx, s.generation, s) + v, err := s.awaitHandle(ctx, ph.handle) if err != nil { return nil, err } @@ -302,8 +302,8 @@ func (ph *packageHandle) ID() string { return string(ph.m.ID) } -func (ph *packageHandle) cached(g *memoize.Generation) (*pkg, error) { - v := ph.handle.Cached(g) +func (ph *packageHandle) cached() (*pkg, error) { + v := ph.handle.Cached() if v == nil { return nil, fmt.Errorf("no cached type information for %s", ph.m.PkgPath) } diff --git a/internal/lsp/cache/imports.go b/internal/lsp/cache/imports.go index f333f700ddf..710a1f3407a 100644 --- a/internal/lsp/cache/imports.go +++ b/internal/lsp/cache/imports.go @@ -143,11 +143,12 @@ func (s *importsState) populateProcessEnv(ctx context.Context, snapshot *snapsho // Take an extra reference to the snapshot so that its workspace directory // (if any) isn't destroyed while we're using it. - release := snapshot.generation.Acquire() + release := snapshot.Acquire() _, inv, cleanupInvocation, err := snapshot.goCommandInvocation(ctx, source.LoadWorkspace, &gocommand.Invocation{ WorkingDir: snapshot.view.rootURI.Filename(), }) if err != nil { + release() return nil, err } pe.WorkingDir = inv.WorkingDir diff --git a/internal/lsp/cache/mod.go b/internal/lsp/cache/mod.go index 1963feea5ac..79b3fd016d6 100644 --- a/internal/lsp/cache/mod.go +++ b/internal/lsp/cache/mod.go @@ -39,7 +39,7 @@ func (s *snapshot) ParseMod(ctx context.Context, fh source.FileHandle) (*source. // cache miss? if !hit { - handle, release := s.generation.GetHandle(fh.FileIdentity(), func(ctx context.Context, _ memoize.Arg) interface{} { + handle, release := s.store.Handle(fh.FileIdentity(), func(ctx context.Context, _ interface{}) interface{} { parsed, err := parseModImpl(ctx, fh) return parseModResult{parsed, err} }) @@ -51,7 +51,7 @@ func (s *snapshot) ParseMod(ctx context.Context, fh source.FileHandle) (*source. } // Await result. - v, err := entry.(*memoize.Handle).Get(ctx, s.generation, s) + v, err := s.awaitHandle(ctx, entry.(*memoize.Handle)) if err != nil { return nil, err } @@ -116,7 +116,7 @@ func (s *snapshot) ParseWork(ctx context.Context, fh source.FileHandle) (*source // cache miss? if !hit { - handle, release := s.generation.GetHandle(fh.FileIdentity(), func(ctx context.Context, _ memoize.Arg) interface{} { + handle, release := s.store.Handle(fh.FileIdentity(), func(ctx context.Context, _ interface{}) interface{} { parsed, err := parseWorkImpl(ctx, fh) return parseWorkResult{parsed, err} }) @@ -128,7 +128,7 @@ func (s *snapshot) ParseWork(ctx context.Context, fh source.FileHandle) (*source } // Await result. - v, err := entry.(*memoize.Handle).Get(ctx, s.generation, s) + v, err := s.awaitHandle(ctx, entry.(*memoize.Handle)) if err != nil { return nil, err } @@ -241,7 +241,7 @@ func (s *snapshot) ModWhy(ctx context.Context, fh source.FileHandle) (map[string mod: fh.FileIdentity(), view: s.view.rootURI.Filename(), } - handle, release := s.generation.GetHandle(key, func(ctx context.Context, arg memoize.Arg) interface{} { + handle, release := s.store.Handle(key, func(ctx context.Context, arg interface{}) interface{} { why, err := modWhyImpl(ctx, arg.(*snapshot), fh) return modWhyResult{why, err} }) @@ -253,7 +253,7 @@ func (s *snapshot) ModWhy(ctx context.Context, fh source.FileHandle) (map[string } // Await result. 
- v, err := entry.(*memoize.Handle).Get(ctx, s.generation, s) + v, err := s.awaitHandle(ctx, entry.(*memoize.Handle)) if err != nil { return nil, err } diff --git a/internal/lsp/cache/mod_tidy.go b/internal/lsp/cache/mod_tidy.go index 84f369ef3d4..b59b4fd8832 100644 --- a/internal/lsp/cache/mod_tidy.go +++ b/internal/lsp/cache/mod_tidy.go @@ -29,9 +29,6 @@ import ( ) // modTidyImpl runs "go mod tidy" on a go.mod file, using a cache. -// -// REVIEWERS: what does it mean to cache an operation that has side effects? -// Or are we de-duplicating operations in flight on the same file? func (s *snapshot) ModTidy(ctx context.Context, pm *source.ParsedModule) (*source.TidiedModule, error) { uri := pm.URI if pm.File == nil { @@ -77,6 +74,8 @@ func (s *snapshot) ModTidy(ctx context.Context, pm *source.ParsedModule) (*sourc // There's little reason at to use the shared cache for mod // tidy (and mod why) as their key includes the view and session. + // Its only real value is to de-dup requests in flight, for + // which a singleflight in the View would suffice. // TODO(adonovan): use a simpler cache of promises that // is shared across snapshots. type modTidyKey struct { @@ -96,7 +95,7 @@ func (s *snapshot) ModTidy(ctx context.Context, pm *source.ParsedModule) (*sourc gomod: fh.FileIdentity(), env: hashEnv(s), } - handle, release := s.generation.GetHandle(key, func(ctx context.Context, arg memoize.Arg) interface{} { + handle, release := s.store.Handle(key, func(ctx context.Context, arg interface{}) interface{} { tidied, err := modTidyImpl(ctx, arg.(*snapshot), fh, pm, workspacePkgs) return modTidyResult{tidied, err} }) @@ -108,7 +107,7 @@ func (s *snapshot) ModTidy(ctx context.Context, pm *source.ParsedModule) (*sourc } // Await result. - v, err := entry.(*memoize.Handle).Get(ctx, s.generation, s) + v, err := s.awaitHandle(ctx, entry.(*memoize.Handle)) if err != nil { return nil, err } diff --git a/internal/lsp/cache/parse.go b/internal/lsp/cache/parse.go index c8c751f0b2e..ef588c60597 100644 --- a/internal/lsp/cache/parse.go +++ b/internal/lsp/cache/parse.go @@ -59,7 +59,7 @@ func (s *snapshot) ParseGo(ctx context.Context, fh source.FileHandle, mode sourc // cache miss? if !hit { - handle, release := s.generation.GetHandle(key, func(ctx context.Context, arg memoize.Arg) interface{} { + handle, release := s.store.Handle(key, func(ctx context.Context, arg interface{}) interface{} { parsed, err := parseGoImpl(ctx, arg.(*snapshot).FileSet(), fh, mode) return parseGoResult{parsed, err} }) @@ -77,7 +77,7 @@ func (s *snapshot) ParseGo(ctx context.Context, fh source.FileHandle, mode sourc } // Await result. - v, err := entry.(*memoize.Handle).Get(ctx, s.generation, s) + v, err := s.awaitHandle(ctx, entry.(*memoize.Handle)) if err != nil { return nil, err } @@ -93,7 +93,7 @@ func (s *snapshot) peekParseGoLocked(fh source.FileHandle, mode source.ParseMode if !hit { return nil, nil // no-one has requested this file } - v := entry.(*memoize.Handle).Cached(s.generation) + v := entry.(*memoize.Handle).Cached() if v == nil { return nil, nil // parsing is still in progress } @@ -147,12 +147,12 @@ func (s *snapshot) astCacheData(ctx context.Context, spkg source.Package, pos to // the search Pos.) // // A representative benchmark would help. 
- astHandle, release := s.generation.GetHandle(astCacheKey{pkgHandle.key, pgf.URI}, func(ctx context.Context, arg memoize.Arg) interface{} { + astHandle, release := s.store.Handle(astCacheKey{pkgHandle.key, pgf.URI}, func(ctx context.Context, arg interface{}) interface{} { return buildASTCache(pgf) }) defer release() - d, err := astHandle.Get(ctx, s.generation, s) + d, err := s.awaitHandle(ctx, astHandle) if err != nil { return nil, err } diff --git a/internal/lsp/cache/session.go b/internal/lsp/cache/session.go index 9ea612a3ab4..a46b7928c78 100644 --- a/internal/lsp/cache/session.go +++ b/internal/lsp/cache/session.go @@ -231,7 +231,7 @@ func (s *Session) createView(ctx context.Context, name string, folder span.URI, backgroundCtx: backgroundCtx, cancel: cancel, initializeOnce: &sync.Once{}, - generation: s.cache.store.Generation(generationName(v, 0)), + store: &s.cache.store, packages: newPackagesMap(), meta: &metadataGraph{}, files: newFilesMap(), @@ -254,12 +254,28 @@ func (s *Session) createView(ctx context.Context, name string, folder span.URI, initCtx, initCancel := context.WithCancel(xcontext.Detach(ctx)) v.initCancelFirstAttempt = initCancel snapshot := v.snapshot - release := snapshot.generation.Acquire() + + // Acquire both references before the possibility + // of releasing either one, to avoid premature + // destruction if initialize returns quickly. + // + // TODO(adonovan): our reference counting discipline is not sound: + // the count is initially zero and incremented/decremented by + // acquire/release, but there is a race between object birth + // and the first call to acquire during which the snapshot may be + // destroyed. + // + // In most systems, an object is born with a count of 1 and + // destroyed by any decref that brings the count to zero. + // We should do that too. + release1 := snapshot.Acquire() + release2 := snapshot.Acquire() go func() { - defer release() + defer release2() snapshot.initialize(initCtx, true) }() - return v, snapshot, snapshot.generation.Acquire(), nil + + return v, snapshot, release1, nil } // View returns the view by name. @@ -539,6 +555,8 @@ func (s *Session) ExpandModificationsToDirectories(ctx context.Context, changes defer release() snapshots = append(snapshots, snapshot) } + // TODO(adonovan): opt: release lock here. + knownDirs := knownDirectories(ctx, snapshots) defer knownDirs.Destroy() diff --git a/internal/lsp/cache/snapshot.go b/internal/lsp/cache/snapshot.go index c228db9655b..fa71fbd8a80 100644 --- a/internal/lsp/cache/snapshot.go +++ b/internal/lsp/cache/snapshot.go @@ -14,6 +14,7 @@ import ( "go/types" "io" "io/ioutil" + "log" "os" "path/filepath" "regexp" @@ -22,6 +23,8 @@ import ( "strconv" "strings" "sync" + "sync/atomic" + "unsafe" "golang.org/x/mod/modfile" "golang.org/x/mod/module" @@ -42,16 +45,16 @@ import ( ) type snapshot struct { - memoize.Arg // allow as a memoize.Function arg - id uint64 view *View cancel func() backgroundCtx context.Context - // the cache generation that contains the data for this snapshot. - generation *memoize.Generation + store *memoize.Store // cache of handles shared by all snapshots + + refcount sync.WaitGroup // number of references + destroyedBy *string // atomically set to non-nil in Destroy once refcount = 0 // The snapshot's initialization state is controlled by the fields below. 
// @@ -148,6 +151,22 @@ type snapshot struct { unprocessedSubdirChanges []*fileChange } +var _ memoize.RefCounted = (*snapshot)(nil) // snapshots are reference-counted + +// Acquire prevents the snapshot from being destroyed until the returned function is called. +func (s *snapshot) Acquire() func() { + type uP = unsafe.Pointer + if destroyedBy := atomic.LoadPointer((*uP)(uP(&s.destroyedBy))); destroyedBy != nil { + log.Panicf("%d: acquire() after Destroy(%q)", s.id, *(*string)(destroyedBy)) + } + s.refcount.Add(1) + return s.refcount.Done +} + +func (s *snapshot) awaitHandle(ctx context.Context, h *memoize.Handle) (interface{}, error) { + return h.Get(ctx, s) +} + type packageKey struct { mode source.ParseMode id PackageID @@ -159,7 +178,16 @@ type actionKey struct { } func (s *snapshot) Destroy(destroyedBy string) { - s.generation.Destroy(destroyedBy) + // Wait for all leases to end before commencing destruction. + s.refcount.Wait() + + // Report bad state as a debugging aid. + // Not foolproof: another thread could acquire() at this moment. + type uP = unsafe.Pointer // looking forward to generics... + if old := atomic.SwapPointer((*uP)(uP(&s.destroyedBy)), uP(&destroyedBy)); old != nil { + log.Panicf("%d: Destroy(%q) after Destroy(%q)", s.id, destroyedBy, *(*string)(old)) + } + s.packages.Destroy() s.isActivePackageCache.Destroy() s.actions.Destroy() @@ -355,6 +383,7 @@ func (s *snapshot) RunGoCommands(ctx context.Context, allowNetwork bool, wd stri return true, modBytes, sumBytes, nil } +// TODO(adonovan): remove unused cleanup mechanism. func (s *snapshot) goCommandInvocation(ctx context.Context, flags source.InvocationFlags, inv *gocommand.Invocation) (tmpURI span.URI, updatedInv *gocommand.Invocation, cleanup func(), err error) { s.view.optionsMu.Lock() allowModfileModificationOption := s.view.options.AllowModfileModifications @@ -1092,7 +1121,7 @@ func (s *snapshot) CachedImportPaths(ctx context.Context) (map[string]source.Pac results := map[string]source.Package{} s.packages.Range(func(key packageKey, ph *packageHandle) { - cachedPkg, err := ph.cached(s.generation) + cachedPkg, err := ph.cached() if err != nil { return } @@ -1645,10 +1674,6 @@ func inVendor(uri span.URI) bool { return strings.Contains(split[1], "/") } -func generationName(v *View, snapshotID uint64) string { - return fmt.Sprintf("v%v/%v", v.id, snapshotID) -} - // unappliedChanges is a file source that handles an uncloned snapshot. 
type unappliedChanges struct { originalSnapshot *snapshot @@ -1675,11 +1700,10 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC s.mu.Lock() defer s.mu.Unlock() - newGen := s.view.session.cache.store.Generation(generationName(s.view, s.id+1)) bgCtx, cancel := context.WithCancel(bgCtx) result := &snapshot{ id: s.id + 1, - generation: newGen, + store: s.store, view: s.view, backgroundCtx: bgCtx, cancel: cancel, diff --git a/internal/lsp/cache/symbols.go b/internal/lsp/cache/symbols.go index 4cbf8589025..b562d5bbdd8 100644 --- a/internal/lsp/cache/symbols.go +++ b/internal/lsp/cache/symbols.go @@ -35,7 +35,7 @@ func (s *snapshot) symbolize(ctx context.Context, fh source.FileHandle) ([]sourc if !hit { type symbolHandleKey source.Hash key := symbolHandleKey(fh.FileIdentity().Hash) - handle, release := s.generation.GetHandle(key, func(_ context.Context, arg memoize.Arg) interface{} { + handle, release := s.store.Handle(key, func(_ context.Context, arg interface{}) interface{} { symbols, err := symbolizeImpl(arg.(*snapshot), fh) return symbolizeResult{symbols, err} }) @@ -48,7 +48,7 @@ func (s *snapshot) symbolize(ctx context.Context, fh source.FileHandle) ([]sourc } // Await result. - v, err := entry.(*memoize.Handle).Get(ctx, s.generation, s) + v, err := s.awaitHandle(ctx, entry.(*memoize.Handle)) if err != nil { return nil, err } diff --git a/internal/lsp/cache/view.go b/internal/lsp/cache/view.go index 1810f6e641d..f95c4759219 100644 --- a/internal/lsp/cache/view.go +++ b/internal/lsp/cache/view.go @@ -594,7 +594,7 @@ func (v *View) getSnapshot() (*snapshot, func()) { if v.snapshot == nil { panic("getSnapshot called after shutdown") } - return v.snapshot, v.snapshot.generation.Acquire() + return v.snapshot, v.snapshot.Acquire() } func (s *snapshot) initialize(ctx context.Context, firstAttempt bool) { @@ -734,7 +734,7 @@ func (v *View) invalidateContent(ctx context.Context, changes map[span.URI]*file v.snapshot = oldSnapshot.clone(ctx, v.baseCtx, changes, forceReloadMetadata) go oldSnapshot.Destroy("View.invalidateContent") - return v.snapshot, v.snapshot.generation.Acquire() + return v.snapshot, v.snapshot.Acquire() } func (s *Session) getWorkspaceInformation(ctx context.Context, folder span.URI, options *source.Options) (*workspaceInformation, error) { diff --git a/internal/lsp/command.go b/internal/lsp/command.go index 862af6088ec..cd4c7273101 100644 --- a/internal/lsp/command.go +++ b/internal/lsp/command.go @@ -691,7 +691,7 @@ func (c *commandHandler) GenerateGoplsMod(ctx context.Context, args command.URIA if err != nil { return fmt.Errorf("formatting mod file: %w", err) } - filename := filepath.Join(snapshot.View().Folder().Filename(), "gopls.mod") + filename := filepath.Join(v.Folder().Filename(), "gopls.mod") if err := ioutil.WriteFile(filename, content, 0644); err != nil { return fmt.Errorf("writing mod file: %w", err) } diff --git a/internal/lsp/general.go b/internal/lsp/general.go index 385a04a25fd..06633acb0c4 100644 --- a/internal/lsp/general.go +++ b/internal/lsp/general.go @@ -474,8 +474,7 @@ func (s *Server) beginFileRequest(ctx context.Context, pURI protocol.DocumentURI release() return nil, nil, false, func() {}, err } - kind := snapshot.View().FileKind(fh) - if expectKind != source.UnknownKind && kind != expectKind { + if expectKind != source.UnknownKind && view.FileKind(fh) != expectKind { // Wrong kind of file. Nothing to do. 
release() return nil, nil, false, func() {}, nil diff --git a/internal/lsp/source/view.go b/internal/lsp/source/view.go index caf18505856..d7e212a121d 100644 --- a/internal/lsp/source/view.go +++ b/internal/lsp/source/view.go @@ -260,10 +260,14 @@ type View interface { // original one will be. SetOptions(context.Context, *Options) (View, error) - // Snapshot returns the current snapshot for the view. + // Snapshot returns the current snapshot for the view, and a + // release function that must be called when the Snapshot is + // no longer needed. Snapshot(ctx context.Context) (Snapshot, func()) - // Rebuild rebuilds the current view, replacing the original view in its session. + // Rebuild rebuilds the current view, replacing the original + // view in its session. It returns a Snapshot and a release + // function that must be called when the Snapshot is no longer needed. Rebuild(ctx context.Context) (Snapshot, func(), error) // IsGoPrivatePath reports whether target is a private import path, as identified @@ -348,7 +352,8 @@ type Session interface { // NewView creates a new View, returning it and its first snapshot. If a // non-empty tempWorkspace directory is provided, the View will record a copy // of its gopls workspace module in that directory, so that client tooling - // can execute in the same main module. + // can execute in the same main module. It returns a release + // function that must be called when the Snapshot is no longer needed. NewView(ctx context.Context, name string, folder span.URI, options *Options) (View, Snapshot, func(), error) // Cache returns the cache that created this session, for debugging only. @@ -372,6 +377,8 @@ type Session interface { // DidModifyFile reports a file modification to the session. It returns // the new snapshots after the modifications have been applied, paired with // the affected file URIs for those snapshots. + // On success, it returns a list of release functions that + // must be called when the snapshots are no longer needed. DidModifyFiles(ctx context.Context, changes []FileModification) (map[Snapshot][]span.URI, []func(), error) // ExpandModificationsToDirectories returns the set of changes with the diff --git a/internal/memoize/memoize.go b/internal/memoize/memoize.go index 4b84410d506..2db7945428f 100644 --- a/internal/memoize/memoize.go +++ b/internal/memoize/memoize.go @@ -5,13 +5,14 @@ // Package memoize supports memoizing the return values of functions with // idempotent results that are expensive to compute. // -// To use this package, build a store and use it to acquire handles with the -// Bind method. +// To use this package, create a Store, call its Handle method to +// acquire a handle to (aka a "promise" of) the future result of a +// function, and call Handle.Get to obtain the result. Get may block +// if the function has not finished (or started). package memoize import ( "context" - "flag" "fmt" "reflect" "runtime/trace" @@ -21,107 +22,44 @@ import ( "golang.org/x/tools/internal/xcontext" ) -var ( - panicOnDestroyed = flag.Bool("memoize_panic_on_destroyed", false, - "Panic when a destroyed generation is read rather than returning an error. "+ - "Panicking may make it easier to debug lifetime errors, especially when "+ - "used with GOTRACEBACK=crash to see all running goroutines.") -) - // Store binds keys to functions, returning handles that can be used to access // the functions results. 
type Store struct { handlesMu sync.Mutex // lock ordering: Store.handlesMu before Handle.mu handles map[interface{}]*Handle - // handles which are bound to generations for GC purposes. - // (It is the subset of values of 'handles' with trackGenerations enabled.) - boundHandles map[*Handle]struct{} -} - -// Generation creates a new Generation associated with s. Destroy must be -// called on the returned Generation once it is no longer in use. name is -// for debugging purposes only. -func (s *Store) Generation(name string) *Generation { - return &Generation{store: s, name: name} -} - -// A Generation is a logical point in time of the cache life-cycle. Cache -// entries associated with a Generation will not be removed until the -// Generation is destroyed. -type Generation struct { - // destroyed is 1 after the generation is destroyed. Atomic. - destroyed uint32 - store *Store - name string - // destroyedBy describes the caller that togged destroyed from 0 to 1. - destroyedBy string - // wg tracks the reference count of this generation. - wg sync.WaitGroup } -// Destroy waits for all operations referencing g to complete, then removes -// all references to g from cache entries. Cache entries that no longer -// reference any non-destroyed generation are removed. Destroy must be called -// exactly once for each generation, and destroyedBy describes the caller. -func (g *Generation) Destroy(destroyedBy string) { - g.wg.Wait() - - prevDestroyedBy := g.destroyedBy - g.destroyedBy = destroyedBy - if ok := atomic.CompareAndSwapUint32(&g.destroyed, 0, 1); !ok { - panic("Destroy on generation " + g.name + " already destroyed by " + prevDestroyedBy) - } - - g.store.handlesMu.Lock() - defer g.store.handlesMu.Unlock() - for h := range g.store.boundHandles { - h.mu.Lock() - if _, ok := h.generations[g]; ok { - delete(h.generations, g) // delete even if it's dead, in case of dangling references to the entry. - if len(h.generations) == 0 { - h.state = stateDestroyed - delete(g.store.handles, h.key) - if h.trackGenerations { - delete(g.store.boundHandles, h) - } - } - } - h.mu.Unlock() - } -} - -// Acquire creates a new reference to g, and returns a func to release that -// reference. -func (g *Generation) Acquire() func() { - destroyed := atomic.LoadUint32(&g.destroyed) - if destroyed != 0 { - panic("acquire on generation " + g.name + " destroyed by " + g.destroyedBy) - } - g.wg.Add(1) - return g.wg.Done +// A RefCounted is a value whose functional lifetime is determined by +// reference counting. +// +// Its Acquire method is called before the Function is invoked, and +// the corresponding release is called when the Function returns. +// Usually both events happen within a single call to Get, so Get +// would be fine with a "borrowed" reference, but if the context is +// cancelled, Get may return before the Function is complete, causing +// the argument to escape, and potential premature destruction of the +// value. For a reference-counted type, this requires a pair of +// increment/decrement operations to extend its life. +type RefCounted interface { + // Acquire prevents the value from being destroyed until the + // returned function is called. + Acquire() func() } -// Arg is a marker interface that can be embedded to indicate a type is -// intended for use as a Function argument. -type Arg interface{ memoizeArg() } - // Function is the type for functions that can be memoized. -// The result must be a pointer. 
-type Function func(ctx context.Context, arg Arg) interface{} +// +// If the arg is a RefCounted, its Acquire/Release operations are called. +type Function func(ctx context.Context, arg interface{}) interface{} type state int -// TODO(rfindley): remove stateDestroyed; Handles should not need to know -// whether or not they have been destroyed. -// -// TODO(rfindley): also consider removing stateIdle. Why create a handle if you +// TODO(rfindley): consider removing stateIdle. Why create a handle if you // aren't certain you're going to need its result? And if you know you need its // result, why wait to begin computing it? const ( stateIdle = iota stateRunning stateCompleted - stateDestroyed ) // Handle is returned from a store when a key is bound to a function. @@ -136,19 +74,10 @@ const ( // they decrement waiters. If it drops to zero, the inner context is cancelled, // computation is abandoned, and state resets to idle to start the process over // again. -// -// Handles may be tracked by generations, or directly reference counted, as -// determined by the trackGenerations field. See the field comments for more -// information about the differences between these two forms. -// -// TODO(rfindley): eliminate generational handles. type Handle struct { key interface{} mu sync.Mutex // lock ordering: Store.handlesMu before Handle.mu - // generations is the set of generations in which this handle is valid. - generations map[*Generation]struct{} - state state // done is set in running state, and closed when exiting it. done chan struct{} @@ -161,88 +90,49 @@ type Handle struct { // value is set in completed state. value interface{} - // If trackGenerations is set, this handle tracks generations in which it - // is valid, via the generations field. Otherwise, it is explicitly reference - // counted via the refCounter field. - trackGenerations bool - refCounter int32 + refcount int32 // accessed using atomic load/store } -// Bind returns a "generational" handle for the given key and function. +// Handle returns a reference-counted handle for the future result of +// calling the specified function. Calls to Handle with the same key +// return the same handle, and all calls to Handle.Get on a given +// handle return the same result but the function is called at most once. // -// Each call to bind will return the same handle if it is already bound. Bind -// will always return a valid handle, creating one if needed. Each key can -// only have one handle at any given time. The value will be held at least -// until the associated generation is destroyed. Bind does not cause the value -// to be generated. -// -// It is responsibility of the caller to call Inherit on the handler whenever -// it should still be accessible by a next generation. -func (g *Generation) Bind(key interface{}, function Function) *Handle { - return g.getHandle(key, function, true) -} - -// GetHandle returns a "reference-counted" handle for the given key -// and function with similar properties and behavior as Bind. Unlike -// Bind, it returns a release callback which must be called once the -// handle is no longer needed. 
-func (g *Generation) GetHandle(key interface{}, function Function) (*Handle, func()) { - h := g.getHandle(key, function, false) - store := g.store - release := func() { - // Acquire store.handlesMu before mutating refCounter - store.handlesMu.Lock() - defer store.handlesMu.Unlock() - - h.mu.Lock() - defer h.mu.Unlock() - - h.refCounter-- - if h.refCounter == 0 { - // Don't mark destroyed: for reference counted handles we can't know when - // they are no longer reachable from runnable goroutines. For example, - // gopls could have a current operation that is using a packageHandle. - // Destroying the handle here would cause that operation to hang. - delete(store.handles, h.key) - } - } - return h, release -} - -func (g *Generation) getHandle(key interface{}, function Function, trackGenerations bool) *Handle { - // panic early if the function is nil - // it would panic later anyway, but in a way that was much harder to debug +// The caller must call the returned function to decrement the +// handle's reference count when it is no longer needed. +func (store *Store) Handle(key interface{}, function Function) (*Handle, func()) { if function == nil { - panic("the function passed to bind must not be nil") + panic("nil function") } - if atomic.LoadUint32(&g.destroyed) != 0 { - panic("operation on generation " + g.name + " destroyed by " + g.destroyedBy) - } - g.store.handlesMu.Lock() - defer g.store.handlesMu.Unlock() - h, ok := g.store.handles[key] + + store.handlesMu.Lock() + h, ok := store.handles[key] if !ok { + // new handle h = &Handle{ - key: key, - function: function, - trackGenerations: trackGenerations, - } - if trackGenerations { - if g.store.boundHandles == nil { - g.store.boundHandles = map[*Handle]struct{}{} - } - h.generations = make(map[*Generation]struct{}, 1) - g.store.boundHandles[h] = struct{}{} + key: key, + function: function, + refcount: 1, } - if g.store.handles == nil { - g.store.handles = map[interface{}]*Handle{} + if store.handles == nil { + store.handles = map[interface{}]*Handle{} } - g.store.handles[key] = h + store.handles[key] = h + } else { + // existing handle + atomic.AddInt32(&h.refcount, 1) } + store.handlesMu.Unlock() - h.incrementRef(g) - return h + release := func() { + if atomic.AddInt32(&h.refcount, -1) == 0 { + store.handlesMu.Lock() + delete(store.handles, h.key) + store.handlesMu.Unlock() + } + } + return h, release } // Stats returns the number of each type of value in the store. @@ -278,53 +168,13 @@ func (s *Store) DebugOnlyIterate(f func(k, v interface{})) { } } -// Inherit makes h valid in generation g. It is concurrency-safe. -func (g *Generation) Inherit(h *Handle) { - if atomic.LoadUint32(&g.destroyed) != 0 { - panic("inherit on generation " + g.name + " destroyed by " + g.destroyedBy) - } - if !h.trackGenerations { - panic("called Inherit on handle not created by Generation.Bind") - } - - h.incrementRef(g) -} - -func (h *Handle) incrementRef(g *Generation) { - h.mu.Lock() - defer h.mu.Unlock() - - if h.state == stateDestroyed { - panic(fmt.Sprintf("inheriting destroyed handle %#v (type %T) into generation %v", h.key, h.key, g.name)) - } - - if h.trackGenerations { - h.generations[g] = struct{}{} - } else { - h.refCounter++ - } -} - -// hasRefLocked reports whether h is valid in generation g. h.mu must be held. -func (h *Handle) hasRefLocked(g *Generation) bool { - if !h.trackGenerations { - return true - } - - _, ok := h.generations[g] - return ok -} - // Cached returns the value associated with a handle. 
// // It will never cause the value to be generated. // It will return the cached value, if present. -func (h *Handle) Cached(g *Generation) interface{} { +func (h *Handle) Cached() interface{} { h.mu.Lock() defer h.mu.Unlock() - if !h.hasRefLocked(g) { - return nil - } if h.state == stateCompleted { return h.value } @@ -334,54 +184,39 @@ func (h *Handle) Cached(g *Generation) interface{} { // Get returns the value associated with a handle. // // If the value is not yet ready, the underlying function will be invoked. -// If ctx is cancelled, Get returns nil. -func (h *Handle) Get(ctx context.Context, g *Generation, arg Arg) (interface{}, error) { - release := g.Acquire() - defer release() - +// If ctx is cancelled, Get returns (nil, Canceled). +func (h *Handle) Get(ctx context.Context, arg interface{}) (interface{}, error) { if ctx.Err() != nil { return nil, ctx.Err() } h.mu.Lock() - if !h.hasRefLocked(g) { - h.mu.Unlock() - - err := fmt.Errorf("reading key %#v: generation %v is not known", h.key, g.name) - if *panicOnDestroyed && ctx.Err() != nil { - panic(err) - } - return nil, err - } switch h.state { case stateIdle: - return h.run(ctx, g, arg) + return h.run(ctx, arg) case stateRunning: return h.wait(ctx) case stateCompleted: defer h.mu.Unlock() return h.value, nil - case stateDestroyed: - h.mu.Unlock() - err := fmt.Errorf("Get on destroyed entry %#v (type %T) in generation %v", h.key, h.key, g.name) - if *panicOnDestroyed { - panic(err) - } - return nil, err default: panic("unknown state") } } // run starts h.function and returns the result. h.mu must be locked. -func (h *Handle) run(ctx context.Context, g *Generation, arg Arg) (interface{}, error) { +func (h *Handle) run(ctx context.Context, arg interface{}) (interface{}, error) { childCtx, cancel := context.WithCancel(xcontext.Detach(ctx)) h.cancel = cancel h.state = stateRunning h.done = make(chan struct{}) function := h.function // Read under the lock - // Make sure that the generation isn't destroyed while we're running in it. - release := g.Acquire() + // Make sure that the argument isn't destroyed while we're running in it. 
+ release := func() {} + if rc, ok := arg.(RefCounted); ok { + release = rc.Acquire() + } + go func() { trace.WithRegion(childCtx, fmt.Sprintf("Handle.run %T", h.key), func() { defer release() diff --git a/internal/memoize/memoize_test.go b/internal/memoize/memoize_test.go index 48bb181173e..bde02bf6136 100644 --- a/internal/memoize/memoize_test.go +++ b/internal/memoize/memoize_test.go @@ -6,7 +6,6 @@ package memoize_test import ( "context" - "strings" "sync" "testing" "time" @@ -15,90 +14,53 @@ import ( ) func TestGet(t *testing.T) { - s := &memoize.Store{} - g := s.Generation("x") + var store memoize.Store evaled := 0 - h := g.Bind("key", func(context.Context, memoize.Arg) interface{} { + h, release := store.Handle("key", func(context.Context, interface{}) interface{} { evaled++ return "res" }) - expectGet(t, h, g, "res") - expectGet(t, h, g, "res") + defer release() + expectGet(t, h, "res") + expectGet(t, h, "res") if evaled != 1 { t.Errorf("got %v calls to function, wanted 1", evaled) } } -func expectGet(t *testing.T, h *memoize.Handle, g *memoize.Generation, wantV interface{}) { +func expectGet(t *testing.T, h *memoize.Handle, wantV interface{}) { t.Helper() - gotV, gotErr := h.Get(context.Background(), g, nil) + gotV, gotErr := h.Get(context.Background(), nil) if gotV != wantV || gotErr != nil { t.Fatalf("Get() = %v, %v, wanted %v, nil", gotV, gotErr, wantV) } } -func expectGetError(t *testing.T, h *memoize.Handle, g *memoize.Generation, substr string) { - gotV, gotErr := h.Get(context.Background(), g, nil) - if gotErr == nil || !strings.Contains(gotErr.Error(), substr) { - t.Fatalf("Get() = %v, %v, wanted err %q", gotV, gotErr, substr) - } -} - -func TestGenerations(t *testing.T) { - s := &memoize.Store{} - // Evaluate key in g1. - g1 := s.Generation("g1") - h1 := g1.Bind("key", func(context.Context, memoize.Arg) interface{} { return "res" }) - expectGet(t, h1, g1, "res") - - // Get key in g2. It should inherit the value from g1. - g2 := s.Generation("g2") - h2 := g2.Bind("key", func(context.Context, memoize.Arg) interface{} { - t.Fatal("h2 should not need evaluation") - return "error" - }) - expectGet(t, h2, g2, "res") - - // With g1 destroyed, g2 should still work. - g1.Destroy("TestGenerations") - expectGet(t, h2, g2, "res") - - // With all generations destroyed, key should be re-evaluated. 
- g2.Destroy("TestGenerations") - g3 := s.Generation("g3") - h3 := g3.Bind("key", func(context.Context, memoize.Arg) interface{} { return "new res" }) - expectGet(t, h3, g3, "new res") -} - func TestHandleRefCounting(t *testing.T) { - s := &memoize.Store{} - g1 := s.Generation("g1") + var store memoize.Store v1 := false v2 := false - h1, release1 := g1.GetHandle("key1", func(context.Context, memoize.Arg) interface{} { + h1, release1 := store.Handle("key1", func(context.Context, interface{}) interface{} { return &v1 }) - h2, release2 := g1.GetHandle("key2", func(context.Context, memoize.Arg) interface{} { + h2, release2 := store.Handle("key2", func(context.Context, interface{}) interface{} { return &v2 }) - expectGet(t, h1, g1, &v1) - expectGet(t, h2, g1, &v2) + expectGet(t, h1, &v1) + expectGet(t, h2, &v2) - g2 := s.Generation("g2") - expectGet(t, h1, g2, &v1) - g1.Destroy("by test") - expectGet(t, h2, g2, &v2) + expectGet(t, h1, &v1) + expectGet(t, h2, &v2) - h2Copy, release2Copy := g2.GetHandle("key2", func(context.Context, memoize.Arg) interface{} { + h2Copy, release2Copy := store.Handle("key2", func(context.Context, interface{}) interface{} { return &v1 }) if h2 != h2Copy { t.Error("NewHandle returned a new value while old is not destroyed yet") } - expectGet(t, h2Copy, g2, &v2) - g2.Destroy("by test") + expectGet(t, h2Copy, &v2) release2() if got, want := v2, false; got != want { @@ -110,27 +72,23 @@ func TestHandleRefCounting(t *testing.T) { } release1() - g3 := s.Generation("g3") - h2Copy, release2Copy = g3.GetHandle("key2", func(context.Context, memoize.Arg) interface{} { + h2Copy, release2Copy = store.Handle("key2", func(context.Context, interface{}) interface{} { return &v2 }) if h2 == h2Copy { t.Error("NewHandle returned previously destroyed value") } release2Copy() - g3.Destroy("by test") } func TestHandleDestroyedWhileRunning(t *testing.T) { - // Test that calls to Handle.Get return even if the handle is destroyed while - // running. + // Test that calls to Handle.Get return even if the handle is destroyed while running. - s := &memoize.Store{} - g := s.Generation("g") + var store memoize.Store c := make(chan int) var v int - h, release := g.GetHandle("key", func(ctx context.Context, _ memoize.Arg) interface{} { + h, release := store.Handle("key", func(ctx context.Context, _ interface{}) interface{} { <-c <-c if err := ctx.Err(); err != nil { @@ -147,7 +105,7 @@ func TestHandleDestroyedWhileRunning(t *testing.T) { var got interface{} var err error go func() { - got, err = h.Get(ctx, g, nil) + got, err = h.Get(ctx, nil) wg.Done() }() From d6c099e3c1a39214a49a5b7c5c5faa604650e581 Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Tue, 5 Jul 2022 16:10:16 -0400 Subject: [PATCH 087/136] internal/memoize: document stateIdle, RefCounted ...now that I have understood why they are not in fact inessential. 
Change-Id: I1ab881a7d24cd71ee183bc8c6a1a058dbda641e2 Reviewed-on: https://go-review.googlesource.com/c/tools/+/416077 TryBot-Result: Gopher Robot Auto-Submit: Alan Donovan Run-TryBot: Alan Donovan gopls-CI: kokoro Reviewed-by: Robert Findley --- internal/lsp/cache/check.go | 4 +-- internal/memoize/memoize.go | 68 +++++++++++++++++++------------------ 2 files changed, 37 insertions(+), 35 deletions(-) diff --git a/internal/lsp/cache/check.go b/internal/lsp/cache/check.go index 4680c6e7285..a2599f930c2 100644 --- a/internal/lsp/cache/check.go +++ b/internal/lsp/cache/check.go @@ -167,9 +167,9 @@ func (s *snapshot) buildPackageHandle(ctx context.Context, id PackageID, mode so // Create a handle for the result of type checking. experimentalKey := s.View().Options().ExperimentalPackageCacheKey key := computePackageKey(m.ID, compiledGoFiles, m, depKeys, mode, experimentalKey) + // TODO(adonovan): extract lambda into a standalone function to + // avoid implicit lexical dependencies. handle, release := s.store.Handle(key, func(ctx context.Context, arg interface{}) interface{} { - // TODO(adonovan): eliminate use of arg with this handle. - // (In all cases snapshot is equal to the enclosing s.) snapshot := arg.(*snapshot) // Start type checking of direct dependencies, diff --git a/internal/memoize/memoize.go b/internal/memoize/memoize.go index 2db7945428f..6d62ebb0d96 100644 --- a/internal/memoize/memoize.go +++ b/internal/memoize/memoize.go @@ -29,6 +29,21 @@ type Store struct { handles map[interface{}]*Handle } +// Function is the type of a function that can be memoized. +// +// If the arg is a RefCounted, its Acquire/Release operations are called. +// +// The argument must not materially affect the result of the function +// in ways that are not captured by the handle's key, since if +// Handle.Get is called twice concurrently, with the same (implicit) +// key but different arguments, the Function is called only once but +// its result must be suitable for both callers. +// +// The main purpose of the argument is to avoid the Function closure +// needing to retain large objects (in practice: the snapshot) in +// memory that can be supplied at call time by any caller. +type Function func(ctx context.Context, arg interface{}) interface{} + // A RefCounted is a value whose functional lifetime is determined by // reference counting. // @@ -46,38 +61,32 @@ type RefCounted interface { Acquire() func() } -// Function is the type for functions that can be memoized. -// -// If the arg is a RefCounted, its Acquire/Release operations are called. -type Function func(ctx context.Context, arg interface{}) interface{} - type state int -// TODO(rfindley): consider removing stateIdle. Why create a handle if you -// aren't certain you're going to need its result? And if you know you need its -// result, why wait to begin computing it? const ( - stateIdle = iota - stateRunning - stateCompleted + stateIdle = iota // newly constructed, or last waiter was cancelled + stateRunning // start was called and not cancelled + stateCompleted // function call ran to completion ) -// Handle is returned from a store when a key is bound to a function. -// It is then used to access the results of that function. -// -// A Handle starts out in idle state, waiting for something to demand its -// evaluation. It then transitions into running state. While it's running, -// waiters tracks the number of Get calls waiting for a result, and the done -// channel is used to notify waiters of the next state transition. 
Once the -// evaluation finishes, value is set, state changes to completed, and done -// is closed, unblocking waiters. Alternatively, as Get calls are cancelled, -// they decrement waiters. If it drops to zero, the inner context is cancelled, -// computation is abandoned, and state resets to idle to start the process over -// again. +// A Handle represents the future result of a call to a function. type Handle struct { key interface{} mu sync.Mutex // lock ordering: Store.handlesMu before Handle.mu + // A Handle starts out IDLE, waiting for something to demand + // its evaluation. It then transitions into RUNNING state. + // + // While RUNNING, waiters tracks the number of Get calls + // waiting for a result, and the done channel is used to + // notify waiters of the next state transition. Once + // evaluation finishes, value is set, state changes to + // COMPLETED, and done is closed, unblocking waiters. + // + // Alternatively, as Get calls are cancelled, they decrement + // waiters. If it drops to zero, the inner context is + // cancelled, computation is abandoned, and state resets to + // IDLE to start the process over again. state state // done is set in running state, and closed when exiting it. done chan struct{} @@ -155,16 +164,9 @@ func (s *Store) DebugOnlyIterate(f func(k, v interface{})) { defer s.handlesMu.Unlock() for k, h := range s.handles { - var v interface{} - h.mu.Lock() - if h.state == stateCompleted { - v = h.value - } - h.mu.Unlock() - if v == nil { - continue + if v := h.Cached(); v != nil { + f(k, v) } - f(k, v) } } @@ -241,7 +243,7 @@ func (h *Handle) run(ctx context.Context, arg interface{}) (interface{}, error) } h.value = v - h.function = nil + h.function = nil // aid GC h.state = stateCompleted close(h.done) }) From 42457a544a678826371e9ee4f874257a54314320 Mon Sep 17 00:00:00 2001 From: Robert Findley Date: Sun, 10 Jul 2022 12:41:17 -0400 Subject: [PATCH 088/136] internal/lsp/cache: don't pin a snapshot to view.importsState Whenever a view.importsState value is created, we use snapshot.goCommandInvocation to extract information used to run the go command in the imports.ProcessEnv. Before this CL, that process acquired the current snapshot (whatever it was), and held onto it for the purpose of sharing a workspace directory. As a consequence all the memory in that snapshot was pinned for the lifecycle of the importsState, which can be the entire editing session. This results in a memory leak as information in the session is invalidated. Fix this by creating a copy of the workspace directory to be owned by the importsState. 
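A rough sketch of the retention bug described above, using hypothetical types rather than the actual gopls code: capturing the whole snapshot in something long-lived pins all of its memory, while copying out the one piece of state that is actually needed lets the snapshot be released.

package main

import "fmt"

// snapshot stands in for a large, frequently-invalidated object; workDir is
// the only field the long-lived consumer needs.
type snapshot struct {
	workDir string
	caches  [1 << 20]byte // stands in for parsed files, type information, etc.
}

// pinned captures the whole snapshot in its closure, keeping caches
// reachable for as long as the returned function lives (the leak pattern).
func pinned(s *snapshot) func() string {
	return func() string { return s.workDir }
}

// copied extracts the working directory up front, so the snapshot itself can
// be released and collected (the shape of the fix).
func copied(s *snapshot) func() string {
	dir := s.workDir
	return func() string { return dir }
}

func main() {
	s := &snapshot{workDir: "/tmp/gopls-workspace"}
	getDir := copied(s)
	s = nil // nothing keeps the snapshot alive through getDir
	fmt.Println(getDir())
	_ = pinned // the alternative would have kept s (and its caches) reachable
}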
Also: - Add some TODOs - Clean up some stale comments Fixes golang/go#53780 Change-Id: I2c55cc26b2d46c9320c6c7cd86b3e24971cd5073 Reviewed-on: https://go-review.googlesource.com/c/tools/+/416874 TryBot-Result: Gopher Robot Reviewed-by: Alan Donovan gopls-CI: kokoro Run-TryBot: Robert Findley --- internal/lsp/cache/imports.go | 44 ++++++++++++++++++++++++---------- internal/lsp/cache/snapshot.go | 40 ++++++++++++++++++++++++++----- 2 files changed, 66 insertions(+), 18 deletions(-) diff --git a/internal/lsp/cache/imports.go b/internal/lsp/cache/imports.go index 710a1f3407a..7877c4f074d 100644 --- a/internal/lsp/cache/imports.go +++ b/internal/lsp/cache/imports.go @@ -7,6 +7,7 @@ package cache import ( "context" "fmt" + "os" "reflect" "strings" "sync" @@ -141,21 +142,20 @@ func (s *importsState) populateProcessEnv(ctx context.Context, snapshot *snapsho pe.Logf = nil } - // Take an extra reference to the snapshot so that its workspace directory - // (if any) isn't destroyed while we're using it. - release := snapshot.Acquire() + // Extract invocation details from the snapshot to use with goimports. + // + // TODO(rfindley): refactor to extract the necessary invocation logic into + // separate functions. Using goCommandInvocation is unnecessarily indirect, + // and has led to memory leaks in the past, when the snapshot was + // unintentionally held past its lifetime. _, inv, cleanupInvocation, err := snapshot.goCommandInvocation(ctx, source.LoadWorkspace, &gocommand.Invocation{ WorkingDir: snapshot.view.rootURI.Filename(), }) if err != nil { - release() return nil, err } - pe.WorkingDir = inv.WorkingDir + pe.BuildFlags = inv.BuildFlags - pe.WorkingDir = inv.WorkingDir - pe.ModFile = inv.ModFile - pe.ModFlag = inv.ModFlag pe.Env = map[string]string{} for _, kv := range inv.Env { split := strings.SplitN(kv, "=", 2) @@ -164,11 +164,31 @@ func (s *importsState) populateProcessEnv(ctx context.Context, snapshot *snapsho } pe.Env[split[0]] = split[1] } + // We don't actually use the invocation, so clean it up now. + cleanupInvocation() + + // If the snapshot uses a synthetic workspace directory, create a copy for + // the lifecycle of the importsState. + // + // Notably, we cannot use the snapshot invocation working directory, as that + // is tied to the lifecycle of the snapshot. + // + // Otherwise return a no-op cleanup function. + cleanup = func() {} + if snapshot.usesWorkspaceDir() { + tmpDir, err := makeWorkspaceDir(ctx, snapshot.workspace, snapshot) + if err != nil { + return nil, err + } + pe.WorkingDir = tmpDir + cleanup = func() { + os.RemoveAll(tmpDir) // ignore error + } + } else { + pe.WorkingDir = snapshot.view.rootURI.Filename() + } - return func() { - cleanupInvocation() - release() - }, nil + return cleanup, nil } func (s *importsState) refreshProcessEnv() { diff --git a/internal/lsp/cache/snapshot.go b/internal/lsp/cache/snapshot.go index fa71fbd8a80..fe56d67acdd 100644 --- a/internal/lsp/cache/snapshot.go +++ b/internal/lsp/cache/snapshot.go @@ -383,6 +383,11 @@ func (s *snapshot) RunGoCommands(ctx context.Context, allowNetwork bool, wd stri return true, modBytes, sumBytes, nil } +// goCommandInvocation populates inv with configuration for running go commands on the snapshot. +// +// TODO(rfindley): refactor this function to compose the required configuration +// explicitly, rather than implicitly deriving it from flags and inv. +// // TODO(adonovan): remove unused cleanup mechanism. 
func (s *snapshot) goCommandInvocation(ctx context.Context, flags source.InvocationFlags, inv *gocommand.Invocation) (tmpURI span.URI, updatedInv *gocommand.Invocation, cleanup func(), err error) { s.view.optionsMu.Lock() @@ -412,7 +417,6 @@ func (s *snapshot) goCommandInvocation(ctx context.Context, flags source.Invocat // - the working directory. // - the -mod flag // - the -modfile flag - // - the -workfile flag // // These are dependent on a number of factors: whether we need to run in a // synthetic workspace, whether flags are supported at the current go @@ -463,6 +467,9 @@ func (s *snapshot) goCommandInvocation(ctx context.Context, flags source.Invocat } } + // TODO(rfindley): in the case of go.work mode, modURI is empty and we fall + // back on the default behavior of vendorEnabled with an empty modURI. Figure + // out what is correct here and implement it explicitly. vendorEnabled, err := s.vendorEnabled(ctx, modURI, modContent) if err != nil { return "", nil, cleanup, err @@ -498,13 +505,15 @@ func (s *snapshot) goCommandInvocation(ctx context.Context, flags source.Invocat return "", nil, cleanup, source.ErrTmpModfileUnsupported } - // We should use -workfile if: - // 1. We're not actively trying to mutate a modfile. - // 2. We have an active go.work file. - // 3. We're using at least Go 1.18. + // We should use -modfile if: + // - the workspace mode supports it + // - we're using a go.work file on go1.18+, or we need a temp mod file (for + // example, if running go mod tidy in a go.work workspace) + // + // TODO(rfindley): this is very hard to follow. Refactor. useWorkFile := !needTempMod && s.workspace.moduleSource == goWorkWorkspace && s.view.goversion >= 18 if useWorkFile { - // TODO(#51215): build a temp workfile and set GOWORK in the environment. + // Since we're running in the workspace root, the go command will resolve GOWORK automatically. } else if useTempMod { if modURI == "" { return "", nil, cleanup, fmt.Errorf("no go.mod file found in %s", inv.WorkingDir) @@ -525,6 +534,25 @@ func (s *snapshot) goCommandInvocation(ctx context.Context, flags source.Invocat return tmpURI, inv, cleanup, nil } +// usesWorkspaceDir reports whether the snapshot should use a synthetic +// workspace directory for running workspace go commands such as go list. +// +// TODO(rfindley): this logic is duplicated with goCommandInvocation. Clean up +// the latter, and deduplicate. +func (s *snapshot) usesWorkspaceDir() bool { + switch s.workspace.moduleSource { + case legacyWorkspace: + return false + case goWorkWorkspace: + if s.view.goversion >= 18 { + return false + } + // Before go 1.18, the Go command did not natively support go.work files, + // so we 'fake' them with a workspace module. + } + return true +} + func (s *snapshot) buildOverlay() map[string][]byte { s.mu.Lock() defer s.mu.Unlock() From 71dc5e295f34038a34374651070f7039a4682133 Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Fri, 8 Jul 2022 16:01:25 -0400 Subject: [PATCH 089/136] internal/lsp/cache: make snapshot reference counting uniform Previously, snapshots were born with a reference count of zero. Although leases were created and returned by functions that return a snapshot, and correctly disposed of by the caller, the view did not hold a lease for its snapshot. The view's lease on the snapshot was implicit, and for this reason the view made explicit calls to Destroy when these implicit leases ended, which would then wait for all explicit leases to end. 
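
For illustration, a minimal sketch (a toy type, not the snapshot itself) of the leasing discipline this change establishes: every holder takes an explicit lease via Acquire, and destruction waits for every lease to be released.

    package cache // toy sketch of the leasing discipline

    import "sync"

    // resource stands in for a snapshot: anyone using it must hold a lease.
    type resource struct {
        leases sync.WaitGroup
    }

    // Acquire takes a lease; the returned function must be called exactly
    // once, when the holder no longer needs the resource.
    func (r *resource) Acquire() func() {
        r.leases.Add(1)
        return r.leases.Done
    }

    // destroy blocks until all leases have been released, then frees the
    // resource (compare refcount.Wait in snapshot.destroy below).
    func (r *resource) destroy() {
        r.leases.Wait()
        // ... release file handles, decref dependencies, etc.
    }

Making the view's lease explicit means every reference appears as a visible Acquire/release pair, which makes a leaked reference much easier to spot.
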
Now that the view holds an explicit lease to its snapshot, it is safe (as a follow-up change) to move the destroy logic into the call of release() that brings the refcount to zero. Also: - clarify that release functions should be called when and only when err is nil. - in View: remove unused View.cancel field. - in tempModFile: clarify tricky return + defer + named-result-var statement. - in DidModifyFiles: simplify type of releases from []func() to func(). - in Server.addFolders: fix reference leak (looks minor). Change-Id: Ibf61f4ce109e91060e2ccd854c32214b119f2f0a Reviewed-on: https://go-review.googlesource.com/c/tools/+/416875 Reviewed-by: Robert Findley Auto-Submit: Alan Donovan gopls-CI: kokoro Run-TryBot: Alan Donovan TryBot-Result: Gopher Robot --- internal/lsp/cache/session.go | 45 +++++++++++------------ internal/lsp/cache/snapshot.go | 29 ++++++++++++--- internal/lsp/cache/view.go | 53 ++++++++++++++-------------- internal/lsp/general.go | 11 ++++-- internal/lsp/mod/mod_test.go | 2 +- internal/lsp/source/source_test.go | 2 +- internal/lsp/source/view.go | 6 ++-- internal/lsp/text_synchronization.go | 6 ++-- internal/lsp/workspace.go | 6 ++-- 9 files changed, 92 insertions(+), 68 deletions(-) diff --git a/internal/lsp/cache/session.go b/internal/lsp/cache/session.go index a46b7928c78..8c90c77ea8d 100644 --- a/internal/lsp/cache/session.go +++ b/internal/lsp/cache/session.go @@ -165,7 +165,7 @@ func (s *Session) NewView(ctx context.Context, name string, folder span.URI, opt } view, snapshot, release, err := s.createView(ctx, name, folder, options, 0) if err != nil { - return nil, nil, func() {}, err + return nil, nil, nil, err } s.views = append(s.views, view) // we always need to drop the view map @@ -249,33 +249,23 @@ func (s *Session) createView(ctx context.Context, name string, folder span.URI, knownSubdirs: newKnownDirsSet(), workspace: workspace, } + // Save one reference in the view. + v.releaseSnapshot = v.snapshot.Acquire() // Initialize the view without blocking. initCtx, initCancel := context.WithCancel(xcontext.Detach(ctx)) v.initCancelFirstAttempt = initCancel snapshot := v.snapshot - // Acquire both references before the possibility - // of releasing either one, to avoid premature - // destruction if initialize returns quickly. - // - // TODO(adonovan): our reference counting discipline is not sound: - // the count is initially zero and incremented/decremented by - // acquire/release, but there is a race between object birth - // and the first call to acquire during which the snapshot may be - // destroyed. - // - // In most systems, an object is born with a count of 1 and - // destroyed by any decref that brings the count to zero. - // We should do that too. - release1 := snapshot.Acquire() - release2 := snapshot.Acquire() + // Pass a second reference to the background goroutine. + bgRelease := snapshot.Acquire() go func() { - defer release2() + defer bgRelease() snapshot.initialize(initCtx, true) }() - return v, snapshot, release1, nil + // Return a third reference to the caller. + return v, snapshot, snapshot.Acquire(), nil } // View returns the view by name. 
@@ -427,10 +417,8 @@ func (s *Session) dropView(ctx context.Context, v *View) (int, error) { } func (s *Session) ModifyFiles(ctx context.Context, changes []source.FileModification) error { - _, releases, err := s.DidModifyFiles(ctx, changes) - for _, release := range releases { - release() - } + _, release, err := s.DidModifyFiles(ctx, changes) + release() return err } @@ -445,7 +433,7 @@ type fileChange struct { isUnchanged bool } -func (s *Session) DidModifyFiles(ctx context.Context, changes []source.FileModification) (map[source.Snapshot][]span.URI, []func(), error) { +func (s *Session) DidModifyFiles(ctx context.Context, changes []source.FileModification) (map[source.Snapshot][]span.URI, func(), error) { s.viewMu.RLock() defer s.viewMu.RUnlock() views := make(map[*View]map[span.URI]*fileChange) @@ -526,6 +514,14 @@ func (s *Session) DidModifyFiles(ctx context.Context, changes []source.FileModif viewToSnapshot[view] = snapshot } + // The release function is called when the + // returned URIs no longer need to be valid. + release := func() { + for _, release := range releases { + release() + } + } + // We only want to diagnose each changed file once, in the view to which // it "most" belongs. We do this by picking the best view for each URI, // and then aggregating the set of snapshots and their URIs (to avoid @@ -543,7 +539,8 @@ func (s *Session) DidModifyFiles(ctx context.Context, changes []source.FileModif } snapshotURIs[snapshot] = append(snapshotURIs[snapshot], mod.URI) } - return snapshotURIs, releases, nil + + return snapshotURIs, release, nil } func (s *Session) ExpandModificationsToDirectories(ctx context.Context, changes []source.FileModification) []source.FileModification { diff --git a/internal/lsp/cache/snapshot.go b/internal/lsp/cache/snapshot.go index fe56d67acdd..c2438cdf208 100644 --- a/internal/lsp/cache/snapshot.go +++ b/internal/lsp/cache/snapshot.go @@ -154,6 +154,14 @@ type snapshot struct { var _ memoize.RefCounted = (*snapshot)(nil) // snapshots are reference-counted // Acquire prevents the snapshot from being destroyed until the returned function is called. +// +// (s.Acquire().release() could instead be expressed as a pair of +// method calls s.IncRef(); s.DecRef(). The latter has the advantage +// that the DecRefs are fungible and don't require holding anything in +// addition to the refcounted object s, but paradoxically that is also +// an advantage of the current approach, which forces the caller to +// consider the release function at every stage, making a reference +// leak more obvious.) func (s *snapshot) Acquire() func() { type uP = unsafe.Pointer if destroyedBy := atomic.LoadPointer((*uP)(uP(&s.destroyedBy))); destroyedBy != nil { @@ -177,7 +185,14 @@ type actionKey struct { analyzer *analysis.Analyzer } -func (s *snapshot) Destroy(destroyedBy string) { +// destroy waits for all leases on the snapshot to expire then releases +// any resources (reference counts and files) associated with it. +// +// TODO(adonovan): move this logic into the release function returned +// by Acquire when the reference count becomes zero. (This would cost +// us the destroyedBy debug info, unless we add it to the signature of +// memoize.RefCounted.Acquire.) +func (s *snapshot) destroy(destroyedBy string) { // Wait for all leases to end before commencing destruction. 
s.refcount.Wait() @@ -388,7 +403,9 @@ func (s *snapshot) RunGoCommands(ctx context.Context, allowNetwork bool, wd stri // TODO(rfindley): refactor this function to compose the required configuration // explicitly, rather than implicitly deriving it from flags and inv. // -// TODO(adonovan): remove unused cleanup mechanism. +// TODO(adonovan): simplify cleanup mechanism. It's hard to see, but +// it used only after call to tempModFile. Clarify that it is only +// non-nil on success. func (s *snapshot) goCommandInvocation(ctx context.Context, flags source.InvocationFlags, inv *gocommand.Invocation) (tmpURI span.URI, updatedInv *gocommand.Invocation, cleanup func(), err error) { s.view.optionsMu.Lock() allowModfileModificationOption := s.view.options.AllowModfileModifications @@ -1715,7 +1732,7 @@ func (ac *unappliedChanges) GetFile(ctx context.Context, uri span.URI) (source.F return ac.originalSnapshot.GetFile(ctx, uri) } -func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileChange, forceReloadMetadata bool) *snapshot { +func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileChange, forceReloadMetadata bool) (*snapshot, func()) { ctx, done := event.Start(ctx, "snapshot.clone") defer done() @@ -1754,6 +1771,10 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC knownSubdirs: s.knownSubdirs.Clone(), workspace: newWorkspace, } + // Create a lease on the new snapshot. + // (Best to do this early in case the code below hides an + // incref/decref operation that might destroy it prematurely.) + release := result.Acquire() // Copy the set of unloadable files. for k, v := range s.unloadableFiles { @@ -2011,7 +2032,7 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC } } result.dumpWorkspace("clone") - return result + return result, release } // invalidatedPackageIDs returns all packages invalidated by a change to uri. diff --git a/internal/lsp/cache/view.go b/internal/lsp/cache/view.go index f95c4759219..8ed9ecd0d16 100644 --- a/internal/lsp/cache/view.go +++ b/internal/lsp/cache/view.go @@ -47,10 +47,6 @@ type View struct { // background contexts created for this view. baseCtx context.Context - // cancel is called when all action being performed by the current view - // should be stopped. - cancel context.CancelFunc - // name is the user visible name of this view. name string @@ -71,8 +67,9 @@ type View struct { // attempt at initialization. initCancelFirstAttempt context.CancelFunc - snapshotMu sync.Mutex - snapshot *snapshot // nil after shutdown has been called + snapshotMu sync.Mutex + snapshot *snapshot // nil after shutdown has been called + releaseSnapshot func() // called when snapshot is no longer needed // initialWorkspaceLoad is closed when the first workspace initialization has // completed. If we failed to load, we only retry if the go.mod file changes, @@ -161,9 +158,10 @@ func (f *fileBase) addURI(uri span.URI) int { func (v *View) ID() string { return v.id } -// tempModFile creates a temporary go.mod file based on the contents of the -// given go.mod file. It is the caller's responsibility to clean up the files -// when they are done using them. +// tempModFile creates a temporary go.mod file based on the contents +// of the given go.mod file. On success, it is the caller's +// responsibility to call the cleanup function when the file is no +// longer needed. 
func tempModFile(modFh source.FileHandle, gosum []byte) (tmpURI span.URI, cleanup func(), err error) { filenameHash := source.Hashf("%s", modFh.URI().Filename()) tmpMod, err := ioutil.TempFile("", fmt.Sprintf("go.%s.*.mod", filenameHash)) @@ -184,7 +182,9 @@ func tempModFile(modFh source.FileHandle, gosum []byte) (tmpURI span.URI, cleanu return "", nil, err } - cleanup = func() { + // We use a distinct name here to avoid subtlety around the fact + // that both 'return' and 'defer' update the "cleanup" variable. + doCleanup := func() { _ = os.Remove(tmpSumName) _ = os.Remove(tmpURI.Filename()) } @@ -192,7 +192,7 @@ func tempModFile(modFh source.FileHandle, gosum []byte) (tmpURI span.URI, cleanu // Be careful to clean up if we return an error from this function. defer func() { if err != nil { - cleanup() + doCleanup() cleanup = nil } }() @@ -200,11 +200,11 @@ func tempModFile(modFh source.FileHandle, gosum []byte) (tmpURI span.URI, cleanu // Create an analogous go.sum, if one exists. if gosum != nil { if err := ioutil.WriteFile(tmpSumName, gosum, 0655); err != nil { - return "", cleanup, err + return "", nil, err } } - return tmpURI, cleanup, nil + return tmpURI, doCleanup, nil } // Name returns the user visible name of this view. @@ -527,18 +527,15 @@ func (v *View) shutdown(ctx context.Context) { // Cancel the initial workspace load if it is still running. v.initCancelFirstAttempt() - v.mu.Lock() - if v.cancel != nil { - v.cancel() - v.cancel = nil - } - v.mu.Unlock() v.snapshotMu.Lock() if v.snapshot != nil { - go v.snapshot.Destroy("View.shutdown") + v.releaseSnapshot() + go v.snapshot.destroy("View.shutdown") v.snapshot = nil + v.releaseSnapshot = nil } v.snapshotMu.Unlock() + v.importsState.destroy() } @@ -718,22 +715,26 @@ func (v *View) invalidateContent(ctx context.Context, changes map[span.URI]*file v.snapshotMu.Lock() defer v.snapshotMu.Unlock() - if v.snapshot == nil { + prevSnapshot, prevReleaseSnapshot := v.snapshot, v.releaseSnapshot + + if prevSnapshot == nil { panic("invalidateContent called after shutdown") } // Cancel all still-running previous requests, since they would be // operating on stale data. - v.snapshot.cancel() + prevSnapshot.cancel() // Do not clone a snapshot until its view has finished initializing. - v.snapshot.AwaitInitialized(ctx) + prevSnapshot.AwaitInitialized(ctx) - oldSnapshot := v.snapshot + // Save one lease of the cloned snapshot in the view. + v.snapshot, v.releaseSnapshot = prevSnapshot.clone(ctx, v.baseCtx, changes, forceReloadMetadata) - v.snapshot = oldSnapshot.clone(ctx, v.baseCtx, changes, forceReloadMetadata) - go oldSnapshot.Destroy("View.invalidateContent") + prevReleaseSnapshot() + go prevSnapshot.destroy("View.invalidateContent") + // Return a second lease to the caller. return v.snapshot, v.snapshot.Acquire() } diff --git a/internal/lsp/general.go b/internal/lsp/general.go index 06633acb0c4..fbb9692ea82 100644 --- a/internal/lsp/general.go +++ b/internal/lsp/general.go @@ -249,17 +249,21 @@ func (s *Server) addFolders(ctx context.Context, folders []protocol.WorkspaceFol } work := s.progress.Start(ctx, "Setting up workspace", "Loading packages...", nil, nil) snapshot, release, err := s.addView(ctx, folder.Name, uri) - if err == source.ErrViewExists { - continue - } if err != nil { + if err == source.ErrViewExists { + continue + } viewErrors[uri] = err work.End(ctx, fmt.Sprintf("Error loading packages: %s", err)) continue } + // Inv: release() must be called once. 
+ var swg sync.WaitGroup swg.Add(1) allFoldersWg.Add(1) + // TODO(adonovan): this looks fishy. Is AwaitInitialized + // supposed to be called once per folder? go func() { defer swg.Done() defer allFoldersWg.Done() @@ -271,6 +275,7 @@ func (s *Server) addFolders(ctx context.Context, folders []protocol.WorkspaceFol buf := &bytes.Buffer{} if err := snapshot.WriteEnv(ctx, buf); err != nil { viewErrors[uri] = err + release() continue } event.Log(ctx, buf.String()) diff --git a/internal/lsp/mod/mod_test.go b/internal/lsp/mod/mod_test.go index b2d257caeeb..09a182d16d7 100644 --- a/internal/lsp/mod/mod_test.go +++ b/internal/lsp/mod/mod_test.go @@ -46,10 +46,10 @@ func TestModfileRemainsUnchanged(t *testing.T) { t.Fatal(err) } _, _, release, err := session.NewView(ctx, "diagnostics_test", span.URIFromPath(folder), options) - release() if err != nil { t.Fatal(err) } + release() after, err := ioutil.ReadFile(filepath.Join(folder, "go.mod")) if err != nil { t.Fatal(err) diff --git a/internal/lsp/source/source_test.go b/internal/lsp/source/source_test.go index 9218f9ddc1a..8beb8a5dde0 100644 --- a/internal/lsp/source/source_test.go +++ b/internal/lsp/source/source_test.go @@ -55,10 +55,10 @@ func testSource(t *testing.T, datum *tests.Data) { tests.DefaultOptions(options) options.SetEnvSlice(datum.Config.Env) view, _, release, err := session.NewView(ctx, "source_test", span.URIFromPath(datum.Config.Dir), options) - release() if err != nil { t.Fatal(err) } + release() defer view.Shutdown(ctx) // Enable type error analyses for tests. diff --git a/internal/lsp/source/view.go b/internal/lsp/source/view.go index d7e212a121d..004d830dd07 100644 --- a/internal/lsp/source/view.go +++ b/internal/lsp/source/view.go @@ -352,7 +352,7 @@ type Session interface { // NewView creates a new View, returning it and its first snapshot. If a // non-empty tempWorkspace directory is provided, the View will record a copy // of its gopls workspace module in that directory, so that client tooling - // can execute in the same main module. It returns a release + // can execute in the same main module. On success it also returns a release // function that must be called when the Snapshot is no longer needed. NewView(ctx context.Context, name string, folder span.URI, options *Options) (View, Snapshot, func(), error) @@ -377,9 +377,9 @@ type Session interface { // DidModifyFile reports a file modification to the session. It returns // the new snapshots after the modifications have been applied, paired with // the affected file URIs for those snapshots. - // On success, it returns a list of release functions that + // On success, it returns a release function that // must be called when the snapshots are no longer needed. - DidModifyFiles(ctx context.Context, changes []FileModification) (map[Snapshot][]span.URI, []func(), error) + DidModifyFiles(ctx context.Context, changes []FileModification) (map[Snapshot][]span.URI, func(), error) // ExpandModificationsToDirectories returns the set of changes with the // directory changes removed and expanded to include all of the files in diff --git a/internal/lsp/text_synchronization.go b/internal/lsp/text_synchronization.go index 3276a47bf99..514834b077c 100644 --- a/internal/lsp/text_synchronization.go +++ b/internal/lsp/text_synchronization.go @@ -290,7 +290,7 @@ func (s *Server) processModifications(ctx context.Context, modifications []sourc // to their files. 
modifications = s.session.ExpandModificationsToDirectories(ctx, modifications) - snapshots, releases, err := s.session.DidModifyFiles(ctx, modifications) + snapshots, release, err := s.session.DidModifyFiles(ctx, modifications) if err != nil { close(diagnoseDone) return err @@ -298,9 +298,7 @@ func (s *Server) processModifications(ctx context.Context, modifications []sourc go func() { s.diagnoseSnapshots(snapshots, onDisk) - for _, release := range releases { - release() - } + release() close(diagnoseDone) }() diff --git a/internal/lsp/workspace.go b/internal/lsp/workspace.go index a1f837e2309..b41406db5dc 100644 --- a/internal/lsp/workspace.go +++ b/internal/lsp/workspace.go @@ -26,16 +26,18 @@ func (s *Server) didChangeWorkspaceFolders(ctx context.Context, params *protocol return s.addFolders(ctx, event.Added) } +// addView returns a Snapshot and a release function that must be +// called when it is no longer needed. func (s *Server) addView(ctx context.Context, name string, uri span.URI) (source.Snapshot, func(), error) { s.stateMu.Lock() state := s.state s.stateMu.Unlock() if state < serverInitialized { - return nil, func() {}, fmt.Errorf("addView called before server initialized") + return nil, nil, fmt.Errorf("addView called before server initialized") } options := s.session.Options().Clone() if err := s.fetchConfig(ctx, name, uri, options); err != nil { - return nil, func() {}, err + return nil, nil, err } _, snapshot, release, err := s.session.NewView(ctx, name, uri, options) return snapshot, release, err From b6e495100ec74eb3127d0b0af75ec441e2979077 Mon Sep 17 00:00:00 2001 From: Robert Findley Date: Mon, 11 Jul 2022 21:58:26 +0000 Subject: [PATCH 090/136] Revert "internal/lsp/cache: don't pin a snapshot to view.importsState" This reverts commit 42457a544a678826371e9ee4f874257a54314320. Reason for revert: test flakes (golang/go#53796) Change-Id: I9d7061220b43f9de88060a0bba5c5635d92fe3d9 Reviewed-on: https://go-review.googlesource.com/c/tools/+/416879 Reviewed-by: Alan Donovan Auto-Submit: Robert Findley Run-TryBot: Robert Findley gopls-CI: kokoro TryBot-Result: Gopher Robot --- internal/lsp/cache/imports.go | 44 ++++++++++------------------------ internal/lsp/cache/snapshot.go | 40 +++++-------------------------- 2 files changed, 18 insertions(+), 66 deletions(-) diff --git a/internal/lsp/cache/imports.go b/internal/lsp/cache/imports.go index 7877c4f074d..710a1f3407a 100644 --- a/internal/lsp/cache/imports.go +++ b/internal/lsp/cache/imports.go @@ -7,7 +7,6 @@ package cache import ( "context" "fmt" - "os" "reflect" "strings" "sync" @@ -142,20 +141,21 @@ func (s *importsState) populateProcessEnv(ctx context.Context, snapshot *snapsho pe.Logf = nil } - // Extract invocation details from the snapshot to use with goimports. - // - // TODO(rfindley): refactor to extract the necessary invocation logic into - // separate functions. Using goCommandInvocation is unnecessarily indirect, - // and has led to memory leaks in the past, when the snapshot was - // unintentionally held past its lifetime. + // Take an extra reference to the snapshot so that its workspace directory + // (if any) isn't destroyed while we're using it. 
+ release := snapshot.Acquire() _, inv, cleanupInvocation, err := snapshot.goCommandInvocation(ctx, source.LoadWorkspace, &gocommand.Invocation{ WorkingDir: snapshot.view.rootURI.Filename(), }) if err != nil { + release() return nil, err } - + pe.WorkingDir = inv.WorkingDir pe.BuildFlags = inv.BuildFlags + pe.WorkingDir = inv.WorkingDir + pe.ModFile = inv.ModFile + pe.ModFlag = inv.ModFlag pe.Env = map[string]string{} for _, kv := range inv.Env { split := strings.SplitN(kv, "=", 2) @@ -164,31 +164,11 @@ func (s *importsState) populateProcessEnv(ctx context.Context, snapshot *snapsho } pe.Env[split[0]] = split[1] } - // We don't actually use the invocation, so clean it up now. - cleanupInvocation() - - // If the snapshot uses a synthetic workspace directory, create a copy for - // the lifecycle of the importsState. - // - // Notably, we cannot use the snapshot invocation working directory, as that - // is tied to the lifecycle of the snapshot. - // - // Otherwise return a no-op cleanup function. - cleanup = func() {} - if snapshot.usesWorkspaceDir() { - tmpDir, err := makeWorkspaceDir(ctx, snapshot.workspace, snapshot) - if err != nil { - return nil, err - } - pe.WorkingDir = tmpDir - cleanup = func() { - os.RemoveAll(tmpDir) // ignore error - } - } else { - pe.WorkingDir = snapshot.view.rootURI.Filename() - } - return cleanup, nil + return func() { + cleanupInvocation() + release() + }, nil } func (s *importsState) refreshProcessEnv() { diff --git a/internal/lsp/cache/snapshot.go b/internal/lsp/cache/snapshot.go index c2438cdf208..a5cb74355e7 100644 --- a/internal/lsp/cache/snapshot.go +++ b/internal/lsp/cache/snapshot.go @@ -398,11 +398,6 @@ func (s *snapshot) RunGoCommands(ctx context.Context, allowNetwork bool, wd stri return true, modBytes, sumBytes, nil } -// goCommandInvocation populates inv with configuration for running go commands on the snapshot. -// -// TODO(rfindley): refactor this function to compose the required configuration -// explicitly, rather than implicitly deriving it from flags and inv. -// // TODO(adonovan): simplify cleanup mechanism. It's hard to see, but // it used only after call to tempModFile. Clarify that it is only // non-nil on success. @@ -434,6 +429,7 @@ func (s *snapshot) goCommandInvocation(ctx context.Context, flags source.Invocat // - the working directory. // - the -mod flag // - the -modfile flag + // - the -workfile flag // // These are dependent on a number of factors: whether we need to run in a // synthetic workspace, whether flags are supported at the current go @@ -484,9 +480,6 @@ func (s *snapshot) goCommandInvocation(ctx context.Context, flags source.Invocat } } - // TODO(rfindley): in the case of go.work mode, modURI is empty and we fall - // back on the default behavior of vendorEnabled with an empty modURI. Figure - // out what is correct here and implement it explicitly. vendorEnabled, err := s.vendorEnabled(ctx, modURI, modContent) if err != nil { return "", nil, cleanup, err @@ -522,15 +515,13 @@ func (s *snapshot) goCommandInvocation(ctx context.Context, flags source.Invocat return "", nil, cleanup, source.ErrTmpModfileUnsupported } - // We should use -modfile if: - // - the workspace mode supports it - // - we're using a go.work file on go1.18+, or we need a temp mod file (for - // example, if running go mod tidy in a go.work workspace) - // - // TODO(rfindley): this is very hard to follow. Refactor. + // We should use -workfile if: + // 1. We're not actively trying to mutate a modfile. + // 2. We have an active go.work file. + // 3. 
We're using at least Go 1.18. useWorkFile := !needTempMod && s.workspace.moduleSource == goWorkWorkspace && s.view.goversion >= 18 if useWorkFile { - // Since we're running in the workspace root, the go command will resolve GOWORK automatically. + // TODO(#51215): build a temp workfile and set GOWORK in the environment. } else if useTempMod { if modURI == "" { return "", nil, cleanup, fmt.Errorf("no go.mod file found in %s", inv.WorkingDir) @@ -551,25 +542,6 @@ func (s *snapshot) goCommandInvocation(ctx context.Context, flags source.Invocat return tmpURI, inv, cleanup, nil } -// usesWorkspaceDir reports whether the snapshot should use a synthetic -// workspace directory for running workspace go commands such as go list. -// -// TODO(rfindley): this logic is duplicated with goCommandInvocation. Clean up -// the latter, and deduplicate. -func (s *snapshot) usesWorkspaceDir() bool { - switch s.workspace.moduleSource { - case legacyWorkspace: - return false - case goWorkWorkspace: - if s.view.goversion >= 18 { - return false - } - // Before go 1.18, the Go command did not natively support go.work files, - // so we 'fake' them with a workspace module. - } - return true -} - func (s *snapshot) buildOverlay() map[string][]byte { s.mu.Lock() defer s.mu.Unlock() From bc957ec62f029f49308c06e257f078b6c74e9d2f Mon Sep 17 00:00:00 2001 From: Robert Findley Date: Mon, 11 Jul 2022 20:42:38 -0400 Subject: [PATCH 091/136] internal/lsp/source: use token.File-agnostic positions to dedupe refs We were already using a token.File-agnostic position for object positions in our references algorithm, but still relied on token.Pos for identifying duplicate references. This breaks down when a file may have multiple parsed representations in different packages. While we should endeavor to eliminate duplicate parsing, our algorithms should not rely on this for correctness. Update the reference de-duplication to use the same position key as object search. For golang/go#53796 Change-Id: Ic2e6c23380ea4e6b2747e4e5b45d7bfa6e656f0f Reviewed-on: https://go-review.googlesource.com/c/tools/+/416881 Run-TryBot: Robert Findley TryBot-Result: Gopher Robot Reviewed-by: Alan Donovan --- internal/lsp/source/implementation.go | 42 +++++++++++++++------------ internal/lsp/source/references.go | 12 ++++++-- 2 files changed, 33 insertions(+), 21 deletions(-) diff --git a/internal/lsp/source/implementation.go b/internal/lsp/source/implementation.go index 6666605a99a..39a9289d1d6 100644 --- a/internal/lsp/source/implementation.go +++ b/internal/lsp/source/implementation.go @@ -235,17 +235,23 @@ func qualifiedObjsAtProtocolPos(ctx context.Context, s Snapshot, uri span.URI, p if err != nil { return nil, err } - return qualifiedObjsAtLocation(ctx, s, objSearchKey{uri, offset}, map[objSearchKey]bool{}) + return qualifiedObjsAtLocation(ctx, s, positionKey{uri, offset}, map[positionKey]bool{}) } -type objSearchKey struct { +// A positionKey identifies a byte offset within a file (URI). +// +// When a file has been parsed multiple times in the same FileSet, +// there may be multiple token.Pos values denoting the same logical +// position. In such situations, a positionKey may be used for +// de-duplication. +type positionKey struct { uri span.URI offset int } // qualifiedObjsAtLocation finds all objects referenced at offset in uri, across // all packages in the snapshot. 
-func qualifiedObjsAtLocation(ctx context.Context, s Snapshot, key objSearchKey, seen map[objSearchKey]bool) ([]qualifiedObject, error) { +func qualifiedObjsAtLocation(ctx context.Context, s Snapshot, key positionKey, seen map[positionKey]bool) ([]qualifiedObject, error) { if seen[key] { return nil, nil } @@ -343,21 +349,8 @@ func qualifiedObjsAtLocation(ctx context.Context, s Snapshot, key objSearchKey, // is in another package, but this should be good enough to find all // uses. - pos := obj.Pos() - var uri span.URI - offset := -1 - for _, pgf := range pkg.CompiledGoFiles() { - if pgf.Tok.Base() <= int(pos) && int(pos) <= pgf.Tok.Base()+pgf.Tok.Size() { - var err error - offset, err = safetoken.Offset(pgf.Tok, pos) - if err != nil { - return nil, err - } - uri = pgf.URI - } - } - if offset >= 0 { - otherObjs, err := qualifiedObjsAtLocation(ctx, s, objSearchKey{uri, offset}, seen) + if key, found := packagePositionKey(pkg, obj.Pos()); found { + otherObjs, err := qualifiedObjsAtLocation(ctx, s, key, seen) if err != nil { return nil, err } @@ -380,6 +373,19 @@ func qualifiedObjsAtLocation(ctx context.Context, s Snapshot, key objSearchKey, return qualifiedObjs, nil } +// packagePositionKey finds the positionKey for the given pos. +// +// The second result reports whether the position was found. +func packagePositionKey(pkg Package, pos token.Pos) (positionKey, bool) { + for _, pgf := range pkg.CompiledGoFiles() { + offset, err := safetoken.Offset(pgf.Tok, pos) + if err == nil { + return positionKey{pgf.URI, offset}, true + } + } + return positionKey{}, false +} + // pathEnclosingObjNode returns the AST path to the object-defining // node associated with pos. "Object-defining" means either an // *ast.Ident mapped directly to a types.Object or an ast.Node mapped diff --git a/internal/lsp/source/references.go b/internal/lsp/source/references.go index a3d32a6d717..a1643fbec6c 100644 --- a/internal/lsp/source/references.go +++ b/internal/lsp/source/references.go @@ -16,6 +16,7 @@ import ( "strconv" "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/lsp/bug" "golang.org/x/tools/internal/lsp/protocol" "golang.org/x/tools/internal/lsp/safetoken" "golang.org/x/tools/internal/span" @@ -127,7 +128,7 @@ func References(ctx context.Context, s Snapshot, f FileHandle, pp protocol.Posit func references(ctx context.Context, snapshot Snapshot, qos []qualifiedObject, includeDeclaration, includeInterfaceRefs, includeEmbeddedRefs bool) ([]*ReferenceInfo, error) { var ( references []*ReferenceInfo - seen = make(map[token.Pos]bool) + seen = make(map[positionKey]bool) ) pos := qos[0].obj.Pos() @@ -189,10 +190,15 @@ func references(ctx context.Context, snapshot Snapshot, qos []qualifiedObject, i continue } } - if seen[ident.Pos()] { + key, found := packagePositionKey(pkg, ident.Pos()) + if !found { + bug.Reportf("ident %v (pos: %v) not found in package %v", ident.Name, ident.Pos(), pkg.Name()) continue } - seen[ident.Pos()] = true + if seen[key] { + continue + } + seen[key] = true rng, err := posToMappedRange(snapshot, pkg, ident.Pos(), ident.End()) if err != nil { return nil, err From a79ee0f0f02a90ce00bfd721fae9d2d154cb7568 Mon Sep 17 00:00:00 2001 From: Robert Findley Date: Tue, 12 Jul 2022 01:24:52 +0000 Subject: [PATCH 092/136] Revert "Revert "internal/lsp/cache: don't pin a snapshot to view.importsState" This reverts CL 416879, which itself was a revert of CL 416874. 
Reason for revert: failure is understood now, as described in https://github.com/golang/go/issues/53796#issuecomment-1181157704 and fixed in CL 416881. Change-Id: I1d6a4ae46fbb1bf78e2f23656de7885b439f43fb Reviewed-on: https://go-review.googlesource.com/c/tools/+/416882 Reviewed-by: Alan Donovan Run-TryBot: Robert Findley TryBot-Result: Gopher Robot --- internal/lsp/cache/imports.go | 44 ++++++++++++++++++++++++---------- internal/lsp/cache/snapshot.go | 40 ++++++++++++++++++++++++++----- 2 files changed, 66 insertions(+), 18 deletions(-) diff --git a/internal/lsp/cache/imports.go b/internal/lsp/cache/imports.go index 710a1f3407a..7877c4f074d 100644 --- a/internal/lsp/cache/imports.go +++ b/internal/lsp/cache/imports.go @@ -7,6 +7,7 @@ package cache import ( "context" "fmt" + "os" "reflect" "strings" "sync" @@ -141,21 +142,20 @@ func (s *importsState) populateProcessEnv(ctx context.Context, snapshot *snapsho pe.Logf = nil } - // Take an extra reference to the snapshot so that its workspace directory - // (if any) isn't destroyed while we're using it. - release := snapshot.Acquire() + // Extract invocation details from the snapshot to use with goimports. + // + // TODO(rfindley): refactor to extract the necessary invocation logic into + // separate functions. Using goCommandInvocation is unnecessarily indirect, + // and has led to memory leaks in the past, when the snapshot was + // unintentionally held past its lifetime. _, inv, cleanupInvocation, err := snapshot.goCommandInvocation(ctx, source.LoadWorkspace, &gocommand.Invocation{ WorkingDir: snapshot.view.rootURI.Filename(), }) if err != nil { - release() return nil, err } - pe.WorkingDir = inv.WorkingDir + pe.BuildFlags = inv.BuildFlags - pe.WorkingDir = inv.WorkingDir - pe.ModFile = inv.ModFile - pe.ModFlag = inv.ModFlag pe.Env = map[string]string{} for _, kv := range inv.Env { split := strings.SplitN(kv, "=", 2) @@ -164,11 +164,31 @@ func (s *importsState) populateProcessEnv(ctx context.Context, snapshot *snapsho } pe.Env[split[0]] = split[1] } + // We don't actually use the invocation, so clean it up now. + cleanupInvocation() + + // If the snapshot uses a synthetic workspace directory, create a copy for + // the lifecycle of the importsState. + // + // Notably, we cannot use the snapshot invocation working directory, as that + // is tied to the lifecycle of the snapshot. + // + // Otherwise return a no-op cleanup function. + cleanup = func() {} + if snapshot.usesWorkspaceDir() { + tmpDir, err := makeWorkspaceDir(ctx, snapshot.workspace, snapshot) + if err != nil { + return nil, err + } + pe.WorkingDir = tmpDir + cleanup = func() { + os.RemoveAll(tmpDir) // ignore error + } + } else { + pe.WorkingDir = snapshot.view.rootURI.Filename() + } - return func() { - cleanupInvocation() - release() - }, nil + return cleanup, nil } func (s *importsState) refreshProcessEnv() { diff --git a/internal/lsp/cache/snapshot.go b/internal/lsp/cache/snapshot.go index a5cb74355e7..c2438cdf208 100644 --- a/internal/lsp/cache/snapshot.go +++ b/internal/lsp/cache/snapshot.go @@ -398,6 +398,11 @@ func (s *snapshot) RunGoCommands(ctx context.Context, allowNetwork bool, wd stri return true, modBytes, sumBytes, nil } +// goCommandInvocation populates inv with configuration for running go commands on the snapshot. +// +// TODO(rfindley): refactor this function to compose the required configuration +// explicitly, rather than implicitly deriving it from flags and inv. +// // TODO(adonovan): simplify cleanup mechanism. 
It's hard to see, but // it used only after call to tempModFile. Clarify that it is only // non-nil on success. @@ -429,7 +434,6 @@ func (s *snapshot) goCommandInvocation(ctx context.Context, flags source.Invocat // - the working directory. // - the -mod flag // - the -modfile flag - // - the -workfile flag // // These are dependent on a number of factors: whether we need to run in a // synthetic workspace, whether flags are supported at the current go @@ -480,6 +484,9 @@ func (s *snapshot) goCommandInvocation(ctx context.Context, flags source.Invocat } } + // TODO(rfindley): in the case of go.work mode, modURI is empty and we fall + // back on the default behavior of vendorEnabled with an empty modURI. Figure + // out what is correct here and implement it explicitly. vendorEnabled, err := s.vendorEnabled(ctx, modURI, modContent) if err != nil { return "", nil, cleanup, err @@ -515,13 +522,15 @@ func (s *snapshot) goCommandInvocation(ctx context.Context, flags source.Invocat return "", nil, cleanup, source.ErrTmpModfileUnsupported } - // We should use -workfile if: - // 1. We're not actively trying to mutate a modfile. - // 2. We have an active go.work file. - // 3. We're using at least Go 1.18. + // We should use -modfile if: + // - the workspace mode supports it + // - we're using a go.work file on go1.18+, or we need a temp mod file (for + // example, if running go mod tidy in a go.work workspace) + // + // TODO(rfindley): this is very hard to follow. Refactor. useWorkFile := !needTempMod && s.workspace.moduleSource == goWorkWorkspace && s.view.goversion >= 18 if useWorkFile { - // TODO(#51215): build a temp workfile and set GOWORK in the environment. + // Since we're running in the workspace root, the go command will resolve GOWORK automatically. } else if useTempMod { if modURI == "" { return "", nil, cleanup, fmt.Errorf("no go.mod file found in %s", inv.WorkingDir) @@ -542,6 +551,25 @@ func (s *snapshot) goCommandInvocation(ctx context.Context, flags source.Invocat return tmpURI, inv, cleanup, nil } +// usesWorkspaceDir reports whether the snapshot should use a synthetic +// workspace directory for running workspace go commands such as go list. +// +// TODO(rfindley): this logic is duplicated with goCommandInvocation. Clean up +// the latter, and deduplicate. +func (s *snapshot) usesWorkspaceDir() bool { + switch s.workspace.moduleSource { + case legacyWorkspace: + return false + case goWorkWorkspace: + if s.view.goversion >= 18 { + return false + } + // Before go 1.18, the Go command did not natively support go.work files, + // so we 'fake' them with a workspace module. + } + return true +} + func (s *snapshot) buildOverlay() map[string][]byte { s.mu.Lock() defer s.mu.Unlock() From a5adb0f2c2ab868ee0a7594bfd8432fc55545285 Mon Sep 17 00:00:00 2001 From: Robert Findley Date: Mon, 11 Jul 2022 16:55:34 -0400 Subject: [PATCH 093/136] internal/lsp/cache: use mod=readonly for process env funcs CL 416874 changed the logic of populateProcessEnv and incorrectly removed a write of ProcessEnv.ModFlag. We should explicitly set -mod=readonly. 
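
A compile-only sketch of the invariant being restored (the field names come from the imports.ProcessEnv usage in this diff; the helper function itself is hypothetical):

    package cache // sketch; the actual fix is the single ModFlag assignment below

    import "golang.org/x/tools/internal/imports"

    // configureProcessEnv shows the intent: go commands run on behalf of
    // goimports must use -mod=readonly so that resolving imports can never
    // rewrite the user's go.mod.
    func configureProcessEnv(pe *imports.ProcessEnv, buildFlags []string, env map[string]string) {
        pe.BuildFlags = buildFlags
        pe.ModFlag = "readonly" // processEnv operations should not mutate the modfile
        pe.Env = env
    }
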
Change-Id: Ibacf3d4b4c0c978d65fde345741945d6136db159 Reviewed-on: https://go-review.googlesource.com/c/tools/+/416877 Reviewed-by: Alan Donovan TryBot-Result: Gopher Robot Run-TryBot: Robert Findley --- internal/lsp/cache/imports.go | 1 + 1 file changed, 1 insertion(+) diff --git a/internal/lsp/cache/imports.go b/internal/lsp/cache/imports.go index 7877c4f074d..a08953db646 100644 --- a/internal/lsp/cache/imports.go +++ b/internal/lsp/cache/imports.go @@ -156,6 +156,7 @@ func (s *importsState) populateProcessEnv(ctx context.Context, snapshot *snapsho } pe.BuildFlags = inv.BuildFlags + pe.ModFlag = "readonly" // processEnv operations should not mutate the modfile pe.Env = map[string]string{} for _, kv := range inv.Env { split := strings.SplitN(kv, "=", 2) From 3db2cdc06058a734d8038db36aa889f80a7d8c5a Mon Sep 17 00:00:00 2001 From: Robert Findley Date: Fri, 8 Jul 2022 15:06:14 -0400 Subject: [PATCH 094/136] internal/lsp: wait for ongoing work to complete during server shutdown Add a new WaitGroup to the View that allows waiting for all snapshot destroy operations to complete. This helps ensure that the server properly cleans up resources when shutting down, and lets us remove work-arounds in the gopls regtests intended to avoid races during shutdown. Also: - re-enable postfix completion tests that had to be disabled due to being too racy - rename the inlayHints regtest package to follow lower-cased naming conventions - add several TODOs Fixes golang/go#50707 Fixes golang/go#53735 Change-Id: If216763fb7a32f487f6116459e3dc45f4c903b8a Reviewed-on: https://go-review.googlesource.com/c/tools/+/416594 Run-TryBot: Robert Findley TryBot-Result: Gopher Robot Reviewed-by: Alan Donovan --- .../completion/postfix_snippet_test.go | 6 +-- .../inlayhints_test.go} | 3 +- gopls/internal/vulncheck/command_test.go | 9 ++++- internal/lsp/cache/session.go | 13 ------- internal/lsp/cache/snapshot.go | 16 +++++++- internal/lsp/cache/view.go | 26 +++++++++++-- internal/lsp/general.go | 2 + internal/lsp/regtest/runner.go | 39 ++++++++----------- 8 files changed, 66 insertions(+), 48 deletions(-) rename gopls/internal/regtest/{inlayHints/inlayHints_test.go => inlayhints/inlayhints_test.go} (98%) diff --git a/gopls/internal/regtest/completion/postfix_snippet_test.go b/gopls/internal/regtest/completion/postfix_snippet_test.go index 7e595aaad1e..5a7ffb80d26 100644 --- a/gopls/internal/regtest/completion/postfix_snippet_test.go +++ b/gopls/internal/regtest/completion/postfix_snippet_test.go @@ -13,8 +13,6 @@ import ( ) func TestPostfixSnippetCompletion(t *testing.T) { - t.Skipf("skipping test due to suspected synchronization bug; see https://go.dev/issue/50707") - const mod = ` -- go.mod -- module mod.com @@ -400,7 +398,7 @@ func _() { before: ` package foo -func foo() []string { +func foo() []string { x := "test" return x.split }`, @@ -409,7 +407,7 @@ package foo import "strings" -func foo() []string { +func foo() []string { x := "test" return strings.Split(x, "$0") }`, diff --git a/gopls/internal/regtest/inlayHints/inlayHints_test.go b/gopls/internal/regtest/inlayhints/inlayhints_test.go similarity index 98% rename from gopls/internal/regtest/inlayHints/inlayHints_test.go rename to gopls/internal/regtest/inlayhints/inlayhints_test.go index 67931fbdc83..a7cbe65731d 100644 --- a/gopls/internal/regtest/inlayHints/inlayHints_test.go +++ b/gopls/internal/regtest/inlayhints/inlayhints_test.go @@ -1,7 +1,7 @@ // Copyright 2022 The Go Authors. All rights reserved. 
// Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package inlayHint +package inlayhint import ( "testing" @@ -17,6 +17,7 @@ func TestMain(m *testing.M) { bug.PanicOnBugs = true Main(m, hooks.Options) } + func TestEnablingInlayHints(t *testing.T) { testenv.NeedsGo1Point(t, 14) // Test fails on 1.13. const workspace = ` diff --git a/gopls/internal/vulncheck/command_test.go b/gopls/internal/vulncheck/command_test.go index f6e2d1b7612..e7bf7085f88 100644 --- a/gopls/internal/vulncheck/command_test.go +++ b/gopls/internal/vulncheck/command_test.go @@ -309,8 +309,13 @@ func runTest(t *testing.T, workspaceData, proxyData string, test func(context.Co if err != nil { t.Fatal(err) } - defer release() - defer view.Shutdown(ctx) + + defer func() { + // The snapshot must be released before calling view.Shutdown, to avoid a + // deadlock. + release() + view.Shutdown(ctx) + }() test(ctx, snapshot) } diff --git a/internal/lsp/cache/session.go b/internal/lsp/cache/session.go index 8c90c77ea8d..9c1505850c2 100644 --- a/internal/lsp/cache/session.go +++ b/internal/lsp/cache/session.go @@ -301,19 +301,6 @@ func (s *Session) viewOf(uri span.URI) (*View, error) { return s.viewMap[uri], nil } -func (s *Session) viewsOf(uri span.URI) []*View { - s.viewMu.RLock() - defer s.viewMu.RUnlock() - - var views []*View - for _, view := range s.views { - if source.InDir(view.folder.Filename(), uri.Filename()) { - views = append(views, view) - } - } - return views -} - func (s *Session) Views() []source.View { s.viewMu.RLock() defer s.viewMu.RUnlock() diff --git a/internal/lsp/cache/snapshot.go b/internal/lsp/cache/snapshot.go index c2438cdf208..a9dd1dfb935 100644 --- a/internal/lsp/cache/snapshot.go +++ b/internal/lsp/cache/snapshot.go @@ -187,11 +187,25 @@ type actionKey struct { // destroy waits for all leases on the snapshot to expire then releases // any resources (reference counts and files) associated with it. +// Snapshots being destroyed can be awaited using v.destroyWG. // // TODO(adonovan): move this logic into the release function returned // by Acquire when the reference count becomes zero. (This would cost // us the destroyedBy debug info, unless we add it to the signature of // memoize.RefCounted.Acquire.) +// +// The destroyedBy argument is used for debugging. +// +// v.snapshotMu must be held while calling this function, in order to preserve +// the invariants described by the the docstring for v.snapshot. +func (v *View) destroy(s *snapshot, destroyedBy string) { + v.snapshotWG.Add(1) + go func() { + defer v.snapshotWG.Done() + s.destroy(destroyedBy) + }() +} + func (s *snapshot) destroy(destroyedBy string) { // Wait for all leases to end before commencing destruction. s.refcount.Wait() @@ -1678,7 +1692,7 @@ func (s *snapshot) orphanedFiles() []source.VersionedFileHandle { } // If the URI doesn't belong to this view, then it's not in a workspace // package and should not be reloaded directly. - if !contains(s.view.session.viewsOf(uri), s.view) { + if !source.InDir(s.view.folder.Filename(), uri.Filename()) { return } // If the file is not open and is in a vendor directory, don't treat it diff --git a/internal/lsp/cache/view.go b/internal/lsp/cache/view.go index 8ed9ecd0d16..0991797b8e4 100644 --- a/internal/lsp/cache/view.go +++ b/internal/lsp/cache/view.go @@ -67,9 +67,18 @@ type View struct { // attempt at initialization. initCancelFirstAttempt context.CancelFunc + // Track the latest snapshot via the snapshot field, guarded by snapshotMu. 
+ // + // Invariant: whenever the snapshot field is overwritten, destroy(snapshot) + // is called on the previous (overwritten) snapshot while snapshotMu is held, + // incrementing snapshotWG. During shutdown the final snapshot is + // overwritten with nil and destroyed, guaranteeing that all observed + // snapshots have been destroyed via the destroy method, and snapshotWG may + // be waited upon to let these destroy operations complete. snapshotMu sync.Mutex - snapshot *snapshot // nil after shutdown has been called - releaseSnapshot func() // called when snapshot is no longer needed + snapshot *snapshot // latest snapshot + releaseSnapshot func() // called when snapshot is no longer needed + snapshotWG sync.WaitGroup // refcount for pending destroy operations // initialWorkspaceLoad is closed when the first workspace initialization has // completed. If we failed to load, we only retry if the go.mod file changes, @@ -125,6 +134,11 @@ type environmentVariables struct { gocache, gopath, goroot, goprivate, gomodcache, go111module string } +// workspaceMode holds various flags defining how the gopls workspace should +// behave. They may be derived from the environment, user configuration, or +// depend on the Go version. +// +// TODO(rfindley): remove workspace mode, in favor of explicit checks. type workspaceMode int const ( @@ -521,6 +535,9 @@ func (v *View) Shutdown(ctx context.Context) { v.session.removeView(ctx, v) } +// shutdown releases resources associated with the view, and waits for ongoing +// work to complete. +// // TODO(rFindley): probably some of this should also be one in View.Shutdown // above? func (v *View) shutdown(ctx context.Context) { @@ -530,13 +547,14 @@ func (v *View) shutdown(ctx context.Context) { v.snapshotMu.Lock() if v.snapshot != nil { v.releaseSnapshot() - go v.snapshot.destroy("View.shutdown") + v.destroy(v.snapshot, "View.shutdown") v.snapshot = nil v.releaseSnapshot = nil } v.snapshotMu.Unlock() v.importsState.destroy() + v.snapshotWG.Wait() } func (v *View) Session() *Session { @@ -732,7 +750,7 @@ func (v *View) invalidateContent(ctx context.Context, changes map[span.URI]*file v.snapshot, v.releaseSnapshot = prevSnapshot.clone(ctx, v.baseCtx, changes, forceReloadMetadata) prevReleaseSnapshot() - go prevSnapshot.destroy("View.invalidateContent") + v.destroy(prevSnapshot, "View.invalidateContent") // Return a second lease to the caller. return v.snapshot, v.snapshot.Acquire() diff --git a/internal/lsp/general.go b/internal/lsp/general.go index fbb9692ea82..8ea4d7f5fa3 100644 --- a/internal/lsp/general.go +++ b/internal/lsp/general.go @@ -487,6 +487,8 @@ func (s *Server) beginFileRequest(ctx context.Context, pURI protocol.DocumentURI return snapshot, fh, true, release, nil } +// shutdown implements the 'shutdown' LSP handler. It releases resources +// associated with the server and waits for all ongoing work to complete. func (s *Server) shutdown(ctx context.Context) error { s.stateMu.Lock() defer s.stateMu.Unlock() diff --git a/internal/lsp/regtest/runner.go b/internal/lsp/regtest/runner.go index bebec53c527..5726f88ea0c 100644 --- a/internal/lsp/regtest/runner.go +++ b/internal/lsp/regtest/runner.go @@ -66,9 +66,6 @@ type Runner struct { mu sync.Mutex ts *servertest.TCPServer socketDir string - // closers is a queue of clean-up functions to run at the end of the entire - // test suite. - closers []io.Closer } type runConfig struct { @@ -228,6 +225,8 @@ type TestFunc func(t *testing.T, env *Env) // modes. 
For each a test run, a new workspace is created containing the // un-txtared files specified by filedata. func (r *Runner) Run(t *testing.T, files string, test TestFunc, opts ...RunOption) { + // TODO(rfindley): this function has gotten overly complicated, and warrants + // refactoring. t.Helper() checkBuilder(t) @@ -259,6 +258,10 @@ func (r *Runner) Run(t *testing.T, files string, test TestFunc, opts ...RunOptio } t.Run(tc.name, func(t *testing.T) { + // TODO(rfindley): once jsonrpc2 shutdown is fixed, we should not leak + // goroutines in this test function. + // stacktest.NoLeak(t) + ctx := context.Background() if r.Timeout != 0 && !config.noDefaultTimeout { var cancel context.CancelFunc @@ -282,6 +285,7 @@ func (r *Runner) Run(t *testing.T, files string, test TestFunc, opts ...RunOptio if err := os.MkdirAll(rootDir, 0755); err != nil { t.Fatal(err) } + files := fake.UnpackTxt(files) if config.editor.WindowsLineEndings { for name, data := range files { @@ -294,13 +298,14 @@ func (r *Runner) Run(t *testing.T, files string, test TestFunc, opts ...RunOptio if err != nil { t.Fatal(err) } - // Deferring the closure of ws until the end of the entire test suite - // has, in testing, given the LSP server time to properly shutdown and - // release any file locks held in workspace, which is a problem on - // Windows. This may still be flaky however, and in the future we need a - // better solution to ensure that all Go processes started by gopls have - // exited before we clean up. - r.AddCloser(sandbox) + defer func() { + if !r.SkipCleanup { + if err := sandbox.Close(); err != nil { + pprof.Lookup("goroutine").WriteTo(os.Stderr, 1) + t.Errorf("closing the sandbox: %v", err) + } + } + }() ss := tc.getServer(t, config.optionsHook) framer := jsonrpc2.NewRawStream ls := &loggingFramer{} @@ -322,6 +327,7 @@ func (r *Runner) Run(t *testing.T, files string, test TestFunc, opts ...RunOptio closeCtx, cancel := context.WithTimeout(xcontext.Detach(ctx), 5*time.Second) defer cancel() if err := env.Editor.Close(closeCtx); err != nil { + pprof.Lookup("goroutine").WriteTo(os.Stderr, 1) t.Errorf("closing editor: %v", err) } }() @@ -493,14 +499,6 @@ func (r *Runner) getRemoteSocket(t *testing.T) string { return socket } -// AddCloser schedules a closer to be closed at the end of the test run. This -// is useful for Windows in particular, as -func (r *Runner) AddCloser(closer io.Closer) { - r.mu.Lock() - defer r.mu.Unlock() - r.closers = append(r.closers, closer) -} - // Close cleans up resource that have been allocated to this workspace. func (r *Runner) Close() error { r.mu.Lock() @@ -518,11 +516,6 @@ func (r *Runner) Close() error { } } if !r.SkipCleanup { - for _, closer := range r.closers { - if err := closer.Close(); err != nil { - errmsgs = append(errmsgs, err.Error()) - } - } if err := os.RemoveAll(r.TempDir); err != nil { errmsgs = append(errmsgs, err.Error()) } From 6e6f3131ec43d80282f625583006adee3f335b78 Mon Sep 17 00:00:00 2001 From: Robert Findley Date: Mon, 11 Jul 2022 14:01:19 -0400 Subject: [PATCH 095/136] internal/lsp/regtest: simplify, consolidate, and document settings Configuration of LSP settings within the regression test runner had become a bit of a grab-bag: some were configured via explicit fields on EditorConfig, some via the catch-all EditorConfig.Settings field, and others via custom RunOption implementations. Consolidate these fields as follows: - Add an EnvVars and Settings field, for configuring environment and LSP settings. - Eliminate the EditorConfig RunOption wrapper. 
RunOptions help build the config. - Remove RunOptions that just wrap a key-value settings pair. By definition settings are user-facing and cannot change without breaking compatibility. Therefore, our tests can and should set the exact string keys they are using. - Eliminate the unused SendPID option. Also clean up some logic to change configuration. For golang/go#39384 Change-Id: Id5d1614f139550cbc62db2bab1d1e1f545ad9393 Reviewed-on: https://go-review.googlesource.com/c/tools/+/416876 TryBot-Result: Gopher Robot gopls-CI: kokoro Run-TryBot: Robert Findley Reviewed-by: Alan Donovan --- gopls/internal/regtest/bench/bench_test.go | 15 +- .../regtest/codelens/codelens_test.go | 11 +- .../regtest/completion/completion_test.go | 2 +- gopls/internal/regtest/debug/debug_test.go | 6 +- .../regtest/diagnostics/diagnostics_test.go | 70 +++----- .../regtest/inlayhints/inlayhints_test.go | 6 +- .../regtest/misc/configuration_test.go | 14 +- .../internal/regtest/misc/definition_test.go | 4 +- .../internal/regtest/misc/formatting_test.go | 6 +- gopls/internal/regtest/misc/imports_test.go | 3 +- gopls/internal/regtest/misc/link_test.go | 4 +- .../regtest/misc/semantictokens_test.go | 4 +- gopls/internal/regtest/misc/settings_test.go | 6 +- gopls/internal/regtest/misc/shared_test.go | 2 +- .../internal/regtest/misc/staticcheck_test.go | 8 +- .../regtest/misc/workspace_symbol_test.go | 8 +- .../internal/regtest/modfile/modfile_test.go | 10 +- .../regtest/template/template_test.go | 30 ++-- gopls/internal/regtest/watch/watch_test.go | 20 +-- .../regtest/workspace/workspace_test.go | 28 ++- internal/lsp/fake/client.go | 2 +- internal/lsp/fake/editor.go | 166 ++++++++---------- internal/lsp/regtest/env.go | 6 +- internal/lsp/regtest/runner.go | 43 +++-- internal/lsp/regtest/wrappers.go | 28 +-- 25 files changed, 201 insertions(+), 301 deletions(-) diff --git a/gopls/internal/regtest/bench/bench_test.go b/gopls/internal/regtest/bench/bench_test.go index 22f157f4719..dfe41f65b1d 100644 --- a/gopls/internal/regtest/bench/bench_test.go +++ b/gopls/internal/regtest/bench/bench_test.go @@ -93,14 +93,14 @@ func TestBenchmarkSymbols(t *testing.T) { } opts := benchmarkOptions(symbolOptions.workdir) - conf := EditorConfig{} + settings := make(Settings) if symbolOptions.matcher != "" { - conf.SymbolMatcher = &symbolOptions.matcher + settings["symbolMatcher"] = symbolOptions.matcher } if symbolOptions.style != "" { - conf.SymbolStyle = &symbolOptions.style + settings["symbolStyle"] = symbolOptions.style } - opts = append(opts, conf) + opts = append(opts, settings) WithOptions(opts...).Run(t, "", func(t *testing.T, env *Env) { // We can't Await in this test, since we have disabled hooks. Instead, run @@ -200,9 +200,10 @@ func TestBenchmarkDidChange(t *testing.T) { // Always run it in isolation since it measures global heap usage. // // Kubernetes example: -// $ go test -run=TestPrintMemStats -didchange_dir=$HOME/w/kubernetes -// TotalAlloc: 5766 MB -// HeapAlloc: 1984 MB +// +// $ go test -run=TestPrintMemStats -didchange_dir=$HOME/w/kubernetes +// TotalAlloc: 5766 MB +// HeapAlloc: 1984 MB // // Both figures exhibit variance of less than 1%. 
func TestPrintMemStats(t *testing.T) { diff --git a/gopls/internal/regtest/codelens/codelens_test.go b/gopls/internal/regtest/codelens/codelens_test.go index a64f9c480ae..12a75754152 100644 --- a/gopls/internal/regtest/codelens/codelens_test.go +++ b/gopls/internal/regtest/codelens/codelens_test.go @@ -63,9 +63,7 @@ const ( for _, test := range tests { t.Run(test.label, func(t *testing.T) { WithOptions( - EditorConfig{ - CodeLenses: test.enabled, - }, + Settings{"codelenses": test.enabled}, ).Run(t, workspace, func(t *testing.T, env *Env) { env.OpenFile("lib.go") lens := env.CodeLens("lib.go") @@ -308,10 +306,11 @@ func main() { } ` WithOptions( - EditorConfig{ - CodeLenses: map[string]bool{ + Settings{ + "codelenses": map[string]bool{ "gc_details": true, - }}, + }, + }, ).Run(t, mod, func(t *testing.T, env *Env) { env.OpenFile("main.go") env.ExecuteCodeLensCommand("main.go", command.GCDetails) diff --git a/gopls/internal/regtest/completion/completion_test.go b/gopls/internal/regtest/completion/completion_test.go index 1ffb0000d3b..51a54c49737 100644 --- a/gopls/internal/regtest/completion/completion_test.go +++ b/gopls/internal/regtest/completion/completion_test.go @@ -529,7 +529,7 @@ func main() { } ` WithOptions( - EditorConfig{WindowsLineEndings: true}, + WindowsLineEndings(), ).Run(t, src, func(t *testing.T, env *Env) { // Trigger unimported completions for the example.com/blah package. env.OpenFile("main.go") diff --git a/gopls/internal/regtest/debug/debug_test.go b/gopls/internal/regtest/debug/debug_test.go index d60b3f780d7..d01d44ed980 100644 --- a/gopls/internal/regtest/debug/debug_test.go +++ b/gopls/internal/regtest/debug/debug_test.go @@ -21,11 +21,7 @@ func TestBugNotification(t *testing.T) { // server. WithOptions( Modes(Singleton), // must be in-process to receive the bug report below - EditorConfig{ - Settings: map[string]interface{}{ - "showBugReports": true, - }, - }, + Settings{"showBugReports": true}, ).Run(t, "", func(t *testing.T, env *Env) { const desc = "got a bug" bug.Report(desc, nil) diff --git a/gopls/internal/regtest/diagnostics/diagnostics_test.go b/gopls/internal/regtest/diagnostics/diagnostics_test.go index 6f5db4cd419..b9dc2d434b2 100644 --- a/gopls/internal/regtest/diagnostics/diagnostics_test.go +++ b/gopls/internal/regtest/diagnostics/diagnostics_test.go @@ -471,12 +471,11 @@ func _() { } ` WithOptions( - EditorConfig{ - Env: map[string]string{ - "GOPATH": "", - "GO111MODULE": "off", - }, - }).Run(t, files, func(t *testing.T, env *Env) { + EnvVars{ + "GOPATH": "", + "GO111MODULE": "off", + }, + ).Run(t, files, func(t *testing.T, env *Env) { env.OpenFile("main.go") env.Await(env.DiagnosticAtRegexp("main.go", "fmt")) env.SaveBuffer("main.go") @@ -500,8 +499,9 @@ package x var X = 0 ` - editorConfig := EditorConfig{Env: map[string]string{"GOFLAGS": "-tags=foo"}} - WithOptions(editorConfig).Run(t, files, func(t *testing.T, env *Env) { + WithOptions( + EnvVars{"GOFLAGS": "-tags=foo"}, + ).Run(t, files, func(t *testing.T, env *Env) { env.OpenFile("main.go") env.OrganizeImports("main.go") env.Await(EmptyDiagnostics("main.go")) @@ -573,9 +573,9 @@ hi mom ` for _, go111module := range []string{"on", "off", ""} { t.Run(fmt.Sprintf("GO111MODULE_%v", go111module), func(t *testing.T) { - WithOptions(EditorConfig{ - Env: map[string]string{"GO111MODULE": go111module}, - }).Run(t, files, func(t *testing.T, env *Env) { + WithOptions( + EnvVars{"GO111MODULE": go111module}, + ).Run(t, files, func(t *testing.T, env *Env) { env.Await( NoOutstandingWork(), ) @@ -605,11 
+605,7 @@ func main() { ` WithOptions( InGOPATH(), - EditorConfig{ - Env: map[string]string{ - "GO111MODULE": "off", - }, - }, + EnvVars{"GO111MODULE": "off"}, ).Run(t, collision, func(t *testing.T, env *Env) { env.OpenFile("x/x.go") env.Await( @@ -1236,7 +1232,7 @@ func main() { }) WithOptions( WorkspaceFolders("a"), - LimitWorkspaceScope(), + Settings{"expandWorkspaceToModule": false}, ).Run(t, mod, func(t *testing.T, env *Env) { env.OpenFile("a/main.go") env.Await( @@ -1267,11 +1263,7 @@ func main() { ` WithOptions( - EditorConfig{ - Settings: map[string]interface{}{ - "staticcheck": true, - }, - }, + Settings{"staticcheck": true}, ).Run(t, files, func(t *testing.T, env *Env) { env.OpenFile("main.go") var d protocol.PublishDiagnosticsParams @@ -1381,9 +1373,7 @@ func b(c bytes.Buffer) { } ` WithOptions( - EditorConfig{ - AllExperiments: true, - }, + Settings{"allExperiments": true}, ).Run(t, mod, func(t *testing.T, env *Env) { // Confirm that the setting doesn't cause any warnings. env.Await(NoShowMessage()) @@ -1495,11 +1485,7 @@ package foo_ WithOptions( ProxyFiles(proxy), InGOPATH(), - EditorConfig{ - Env: map[string]string{ - "GO111MODULE": "off", - }, - }, + EnvVars{"GO111MODULE": "off"}, ).Run(t, contents, func(t *testing.T, env *Env) { // Simulate typing character by character. env.OpenFile("foo/foo_test.go") @@ -1698,9 +1684,7 @@ import ( t.Run("GOPATH", func(t *testing.T) { WithOptions( InGOPATH(), - EditorConfig{ - Env: map[string]string{"GO111MODULE": "off"}, - }, + EnvVars{"GO111MODULE": "off"}, Modes(Singleton), ).Run(t, mod, func(t *testing.T, env *Env) { env.Await( @@ -1729,11 +1713,7 @@ package b t.Run("GO111MODULE="+go111module, func(t *testing.T) { WithOptions( Modes(Singleton), - EditorConfig{ - Env: map[string]string{ - "GO111MODULE": go111module, - }, - }, + EnvVars{"GO111MODULE": go111module}, ).Run(t, modules, func(t *testing.T, env *Env) { env.OpenFile("a/a.go") env.OpenFile("b/go.mod") @@ -1750,11 +1730,7 @@ package b t.Run("GOPATH_GO111MODULE_auto", func(t *testing.T) { WithOptions( Modes(Singleton), - EditorConfig{ - Env: map[string]string{ - "GO111MODULE": "auto", - }, - }, + EnvVars{"GO111MODULE": "auto"}, InGOPATH(), ).Run(t, modules, func(t *testing.T, env *Env) { env.OpenFile("a/a.go") @@ -2026,9 +2002,7 @@ package a func Hello() {} ` WithOptions( - EditorConfig{ - ExperimentalUseInvalidMetadata: true, - }, + Settings{"experimentalUseInvalidMetadata": true}, Modes(Singleton), ).Run(t, mod, func(t *testing.T, env *Env) { env.OpenFile("go.mod") @@ -2082,9 +2056,7 @@ package main func _() {} ` WithOptions( - EditorConfig{ - ExperimentalUseInvalidMetadata: true, - }, + Settings{"experimentalUseInvalidMetadata": true}, // ExperimentalWorkspaceModule has a different failure mode for this // case. 
Modes(Singleton), diff --git a/gopls/internal/regtest/inlayhints/inlayhints_test.go b/gopls/internal/regtest/inlayhints/inlayhints_test.go index a7cbe65731d..1ca1dfbc09e 100644 --- a/gopls/internal/regtest/inlayhints/inlayhints_test.go +++ b/gopls/internal/regtest/inlayhints/inlayhints_test.go @@ -56,10 +56,8 @@ const ( for _, test := range tests { t.Run(test.label, func(t *testing.T) { WithOptions( - EditorConfig{ - Settings: map[string]interface{}{ - "hints": test.enabled, - }, + Settings{ + "hints": test.enabled, }, ).Run(t, workspace, func(t *testing.T, env *Env) { env.OpenFile("lib.go") diff --git a/gopls/internal/regtest/misc/configuration_test.go b/gopls/internal/regtest/misc/configuration_test.go index d9cce96a43e..9629a2382ee 100644 --- a/gopls/internal/regtest/misc/configuration_test.go +++ b/gopls/internal/regtest/misc/configuration_test.go @@ -9,7 +9,6 @@ import ( . "golang.org/x/tools/internal/lsp/regtest" - "golang.org/x/tools/internal/lsp/fake" "golang.org/x/tools/internal/testenv" ) @@ -40,12 +39,11 @@ var FooErr = errors.New("foo") env.DoneWithOpen(), NoDiagnostics("a/a.go"), ) - cfg := &fake.EditorConfig{} - *cfg = env.Editor.Config + cfg := env.Editor.Config() cfg.Settings = map[string]interface{}{ "staticcheck": true, } - env.ChangeConfiguration(t, cfg) + env.ChangeConfiguration(cfg) env.Await( DiagnosticAt("a/a.go", 5, 4), ) @@ -70,11 +68,9 @@ import "errors" var FooErr = errors.New("foo") ` - WithOptions(EditorConfig{ - Settings: map[string]interface{}{ - "staticcheck": true, - }, - }).Run(t, files, func(t *testing.T, env *Env) { + WithOptions( + Settings{"staticcheck": true}, + ).Run(t, files, func(t *testing.T, env *Env) { env.Await(ShownMessage("staticcheck is not supported")) }) } diff --git a/gopls/internal/regtest/misc/definition_test.go b/gopls/internal/regtest/misc/definition_test.go index 2f5a54820d0..b71cf231079 100644 --- a/gopls/internal/regtest/misc/definition_test.go +++ b/gopls/internal/regtest/misc/definition_test.go @@ -162,9 +162,7 @@ func main() {} } { t.Run(tt.importShortcut, func(t *testing.T) { WithOptions( - EditorConfig{ - ImportShortcut: tt.importShortcut, - }, + Settings{"importShortcut": tt.importShortcut}, ).Run(t, mod, func(t *testing.T, env *Env) { env.OpenFile("main.go") file, pos := env.GoToDefinition("main.go", env.RegexpSearch("main.go", `"fmt"`)) diff --git a/gopls/internal/regtest/misc/formatting_test.go b/gopls/internal/regtest/misc/formatting_test.go index 75d8f622458..71b8cadab40 100644 --- a/gopls/internal/regtest/misc/formatting_test.go +++ b/gopls/internal/regtest/misc/formatting_test.go @@ -352,10 +352,8 @@ const Bar = 42 ` WithOptions( - EditorConfig{ - Settings: map[string]interface{}{ - "gofumpt": true, - }, + Settings{ + "gofumpt": true, }, ).Run(t, input, func(t *testing.T, env *Env) { env.OpenFile("foo.go") diff --git a/gopls/internal/regtest/misc/imports_test.go b/gopls/internal/regtest/misc/imports_test.go index 1250e78e776..c0e213e9aec 100644 --- a/gopls/internal/regtest/misc/imports_test.go +++ b/gopls/internal/regtest/misc/imports_test.go @@ -153,9 +153,8 @@ var _, _ = x.X, y.Y t.Fatal(err) } defer os.RemoveAll(modcache) - editorConfig := EditorConfig{Env: map[string]string{"GOMODCACHE": modcache}} WithOptions( - editorConfig, + EnvVars{"GOMODCACHE": modcache}, ProxyFiles(proxy), ).Run(t, files, func(t *testing.T, env *Env) { env.OpenFile("main.go") diff --git a/gopls/internal/regtest/misc/link_test.go b/gopls/internal/regtest/misc/link_test.go index e84f6377eeb..1005de9a24f 100644 --- 
a/gopls/internal/regtest/misc/link_test.go +++ b/gopls/internal/regtest/misc/link_test.go @@ -75,7 +75,9 @@ const Hello = "Hello" } // Then change the environment to make these links private. - env.ChangeEnv(map[string]string{"GOPRIVATE": "import.test"}) + cfg := env.Editor.Config() + cfg.Env = map[string]string{"GOPRIVATE": "import.test"} + env.ChangeConfiguration(cfg) // Finally, verify that the links are gone. content, _ = env.Hover("main.go", env.RegexpSearch("main.go", "pkg.Hello")) diff --git a/gopls/internal/regtest/misc/semantictokens_test.go b/gopls/internal/regtest/misc/semantictokens_test.go index 79507876a64..dca2b8e7514 100644 --- a/gopls/internal/regtest/misc/semantictokens_test.go +++ b/gopls/internal/regtest/misc/semantictokens_test.go @@ -26,9 +26,7 @@ func main() {} ` WithOptions( Modes(Singleton), - EditorConfig{ - AllExperiments: true, - }, + Settings{"allExperiments": true}, ).Run(t, src, func(t *testing.T, env *Env) { params := &protocol.SemanticTokensParams{} const badURI = "http://foo" diff --git a/gopls/internal/regtest/misc/settings_test.go b/gopls/internal/regtest/misc/settings_test.go index 7704c3c043e..62d3d903160 100644 --- a/gopls/internal/regtest/misc/settings_test.go +++ b/gopls/internal/regtest/misc/settings_test.go @@ -24,11 +24,7 @@ func main() { ` WithOptions( - EditorConfig{ - Settings: map[string]interface{}{ - "directoryFilters": []string{""}, - }, - }, + Settings{"directoryFilters": []string{""}}, ).Run(t, src, func(t *testing.T, env *Env) { // No need to do anything. Issue golang/go#51843 is triggered by the empty // directory filter above. diff --git a/gopls/internal/regtest/misc/shared_test.go b/gopls/internal/regtest/misc/shared_test.go index a6b0cd87ef1..6b5acd02f71 100644 --- a/gopls/internal/regtest/misc/shared_test.go +++ b/gopls/internal/regtest/misc/shared_test.go @@ -30,7 +30,7 @@ func runShared(t *testing.T, testFunc func(env1 *Env, env2 *Env)) { WithOptions(Modes(modes)).Run(t, sharedProgram, func(t *testing.T, env1 *Env) { // Create a second test session connected to the same workspace and server // as the first. 
- env2, cleanup := NewEnv(env1.Ctx, t, env1.Sandbox, env1.Server, env1.Editor.Config, true) + env2, cleanup := NewEnv(env1.Ctx, t, env1.Sandbox, env1.Server, env1.Editor.Config(), true) defer cleanup() env2.Await(InitialWorkspaceLoad) testFunc(env1, env2) diff --git a/gopls/internal/regtest/misc/staticcheck_test.go b/gopls/internal/regtest/misc/staticcheck_test.go index 94bb39903a5..6f1bda35068 100644 --- a/gopls/internal/regtest/misc/staticcheck_test.go +++ b/gopls/internal/regtest/misc/staticcheck_test.go @@ -60,11 +60,9 @@ func testGenerics[P *T, T any](p P) { var FooErr error = errors.New("foo") ` - WithOptions(EditorConfig{ - Settings: map[string]interface{}{ - "staticcheck": true, - }, - }).Run(t, files, func(t *testing.T, env *Env) { + WithOptions( + Settings{"staticcheck": true}, + ).Run(t, files, func(t *testing.T, env *Env) { env.OpenFile("a/a.go") env.Await( env.DiagnosticAtRegexpFromSource("a/a.go", "sort.Slice", "sortslice"), diff --git a/gopls/internal/regtest/misc/workspace_symbol_test.go b/gopls/internal/regtest/misc/workspace_symbol_test.go index a21d47312dd..2dc3a1b10d0 100644 --- a/gopls/internal/regtest/misc/workspace_symbol_test.go +++ b/gopls/internal/regtest/misc/workspace_symbol_test.go @@ -72,9 +72,7 @@ const ( var symbolMatcher = string(source.SymbolFastFuzzy) WithOptions( - EditorConfig{ - SymbolMatcher: &symbolMatcher, - }, + Settings{"symbolMatcher": symbolMatcher}, ).Run(t, files, func(t *testing.T, env *Env) { want := []string{ "Foo", // prefer exact segment matches first @@ -105,9 +103,7 @@ const ( var symbolMatcher = string(source.SymbolFastFuzzy) WithOptions( - EditorConfig{ - SymbolMatcher: &symbolMatcher, - }, + Settings{"symbolMatcher": symbolMatcher}, ).Run(t, files, func(t *testing.T, env *Env) { compareSymbols(t, env.WorkspaceSymbol("ABC"), []string{"ABC", "AxxBxxCxx"}) compareSymbols(t, env.WorkspaceSymbol("'ABC"), []string{"ABC"}) diff --git a/gopls/internal/regtest/modfile/modfile_test.go b/gopls/internal/regtest/modfile/modfile_test.go index 93d43253044..c0bef833f44 100644 --- a/gopls/internal/regtest/modfile/modfile_test.go +++ b/gopls/internal/regtest/modfile/modfile_test.go @@ -740,11 +740,7 @@ func main() { } ` WithOptions( - EditorConfig{ - Env: map[string]string{ - "GOFLAGS": "-mod=readonly", - }, - }, + EnvVars{"GOFLAGS": "-mod=readonly"}, ProxyFiles(proxy), Modes(Singleton), ).Run(t, mod, func(t *testing.T, env *Env) { @@ -830,9 +826,7 @@ func main() { ` WithOptions( ProxyFiles(workspaceProxy), - EditorConfig{ - BuildFlags: []string{"-tags", "bob"}, - }, + Settings{"buildFlags": []string{"-tags", "bob"}}, ).Run(t, mod, func(t *testing.T, env *Env) { env.Await( env.DiagnosticAtRegexp("main.go", `"example.com/blah"`), diff --git a/gopls/internal/regtest/template/template_test.go b/gopls/internal/regtest/template/template_test.go index 9489e9bf7fe..0fdc3bda6ff 100644 --- a/gopls/internal/regtest/template/template_test.go +++ b/gopls/internal/regtest/template/template_test.go @@ -35,11 +35,9 @@ go 1.17 {{end}} ` WithOptions( - EditorConfig{ - Settings: map[string]interface{}{ - "templateExtensions": []string{"tmpl"}, - "semanticTokens": true, - }, + Settings{ + "templateExtensions": []string{"tmpl"}, + "semanticTokens": true, }, ).Run(t, files, func(t *testing.T, env *Env) { var p protocol.SemanticTokensParams @@ -66,11 +64,9 @@ Hello {{}} <-- missing body {{end}} ` WithOptions( - EditorConfig{ - Settings: map[string]interface{}{ - "templateExtensions": []string{"tmpl"}, - "semanticTokens": true, - }, + Settings{ + "templateExtensions": 
[]string{"tmpl"}, + "semanticTokens": true, }, ).Run(t, files, func(t *testing.T, env *Env) { // TODO: can we move this diagnostic onto {{}}? @@ -112,11 +108,9 @@ B {{}} <-- missing body ` WithOptions( - EditorConfig{ - Settings: map[string]interface{}{ - "templateExtensions": []string{"tmpl"}, - }, - DirectoryFilters: []string{"-b"}, + Settings{ + "directoryFilters": []string{"-b"}, + "templateExtensions": []string{"tmpl"}, }, ).Run(t, files, func(t *testing.T, env *Env) { env.Await( @@ -184,10 +178,8 @@ go 1.12 ` WithOptions( - EditorConfig{ - Settings: map[string]interface{}{ - "templateExtensions": []string{"tmpl", "gotmpl"}, - }, + Settings{ + "templateExtensions": []string{"tmpl", "gotmpl"}, }, ).Run(t, files, func(t *testing.T, env *Env) { env.OpenFile("a.tmpl") diff --git a/gopls/internal/regtest/watch/watch_test.go b/gopls/internal/regtest/watch/watch_test.go index e66d08ab125..04414f6b744 100644 --- a/gopls/internal/regtest/watch/watch_test.go +++ b/gopls/internal/regtest/watch/watch_test.go @@ -389,9 +389,9 @@ func _() { package a ` t.Run("close then delete", func(t *testing.T) { - WithOptions(EditorConfig{ - VerboseOutput: true, - }).Run(t, pkg, func(t *testing.T, env *Env) { + WithOptions( + Settings{"verboseOutput": true}, + ).Run(t, pkg, func(t *testing.T, env *Env) { env.OpenFile("a/a.go") env.OpenFile("a/a_unneeded.go") env.Await( @@ -424,7 +424,7 @@ package a t.Run("delete then close", func(t *testing.T) { WithOptions( - EditorConfig{VerboseOutput: true}, + Settings{"verboseOutput": true}, ).Run(t, pkg, func(t *testing.T, env *Env) { env.OpenFile("a/a.go") env.OpenFile("a/a_unneeded.go") @@ -620,11 +620,7 @@ func main() { ` WithOptions( InGOPATH(), - EditorConfig{ - Env: map[string]string{ - "GO111MODULE": "auto", - }, - }, + EnvVars{"GO111MODULE": "auto"}, Modes(Experimental), // module is in a subdirectory ).Run(t, files, func(t *testing.T, env *Env) { env.OpenFile("foo/main.go") @@ -663,11 +659,7 @@ func main() { ` WithOptions( InGOPATH(), - EditorConfig{ - Env: map[string]string{ - "GO111MODULE": "auto", - }, - }, + EnvVars{"GO111MODULE": "auto"}, ).Run(t, files, func(t *testing.T, env *Env) { env.OpenFile("foo/main.go") env.RemoveWorkspaceFile("foo/go.mod") diff --git a/gopls/internal/regtest/workspace/workspace_test.go b/gopls/internal/regtest/workspace/workspace_test.go index 9e4b85fced8..7eafaf191dd 100644 --- a/gopls/internal/regtest/workspace/workspace_test.go +++ b/gopls/internal/regtest/workspace/workspace_test.go @@ -1036,10 +1036,10 @@ package exclude const _ = Nonexistant ` - cfg := EditorConfig{ - DirectoryFilters: []string{"-exclude"}, - } - WithOptions(cfg).Run(t, files, func(t *testing.T, env *Env) { + + WithOptions( + Settings{"directoryFilters": []string{"-exclude"}}, + ).Run(t, files, func(t *testing.T, env *Env) { env.Await(NoDiagnostics("exclude/x.go")) }) } @@ -1064,10 +1064,9 @@ const _ = Nonexistant // should be ignored, since this is a non-workspace packag const X = 1 ` - cfg := EditorConfig{ - DirectoryFilters: []string{"-exclude"}, - } - WithOptions(cfg).Run(t, files, func(t *testing.T, env *Env) { + WithOptions( + Settings{"directoryFilters": []string{"-exclude"}}, + ).Run(t, files, func(t *testing.T, env *Env) { env.Await( NoDiagnostics("exclude/exclude.go"), // filtered out NoDiagnostics("include/include.go"), // successfully builds @@ -1114,10 +1113,11 @@ go 1.12 -- exclude.com@v1.0.0/exclude.go -- package exclude ` - cfg := EditorConfig{ - DirectoryFilters: []string{"-exclude"}, - } - WithOptions(cfg, Modes(Experimental), 
ProxyFiles(proxy)).Run(t, files, func(t *testing.T, env *Env) { + WithOptions( + Modes(Experimental), + ProxyFiles(proxy), + Settings{"directoryFilters": []string{"-exclude"}}, + ).Run(t, files, func(t *testing.T, env *Env) { env.Await(env.DiagnosticAtRegexp("include/include.go", `exclude.(X)`)) }) } @@ -1204,9 +1204,7 @@ go 1.12 package main ` WithOptions( - EditorConfig{Env: map[string]string{ - "GOPATH": filepath.FromSlash("$SANDBOX_WORKDIR/gopath"), - }}, + EnvVars{"GOPATH": filepath.FromSlash("$SANDBOX_WORKDIR/gopath")}, Modes(Singleton), ).Run(t, mod, func(t *testing.T, env *Env) { env.Await( diff --git a/internal/lsp/fake/client.go b/internal/lsp/fake/client.go index fdc67a6cc64..4c5f2a2e1bd 100644 --- a/internal/lsp/fake/client.go +++ b/internal/lsp/fake/client.go @@ -77,7 +77,7 @@ func (c *Client) Configuration(_ context.Context, p *protocol.ParamConfiguration if item.Section != "gopls" { continue } - results[i] = c.editor.configuration() + results[i] = c.editor.settings() } return results, nil } diff --git a/internal/lsp/fake/editor.go b/internal/lsp/fake/editor.go index 0fc99a04982..240e35cffa3 100644 --- a/internal/lsp/fake/editor.go +++ b/internal/lsp/fake/editor.go @@ -25,7 +25,6 @@ import ( // Editor is a fake editor client. It keeps track of client state and can be // used for writing LSP tests. type Editor struct { - Config EditorConfig // Server, client, and sandbox are concurrency safe and written only // at construction time, so do not require synchronization. @@ -35,13 +34,10 @@ type Editor struct { sandbox *Sandbox defaultEnv map[string]string - // Since this editor is intended just for testing, we use very coarse - // locking. - mu sync.Mutex - // Editor state. - buffers map[string]buffer - // Capabilities / Options - serverCapabilities protocol.ServerCapabilities + mu sync.Mutex // guards config, buffers, serverCapabilities + config EditorConfig // editor configuration + buffers map[string]buffer // open buffers + serverCapabilities protocol.ServerCapabilities // capabilities / options // Call metrics for the purpose of expectations. This is done in an ad-hoc // manner for now. Perhaps in the future we should do something more @@ -77,21 +73,11 @@ func (b buffer) text() string { // // The zero value for EditorConfig should correspond to its defaults. type EditorConfig struct { - Env map[string]string - BuildFlags []string - - // CodeLenses is a map defining whether codelens are enabled, keyed by the - // codeLens command. CodeLenses which are not present in this map are left in - // their default state. - CodeLenses map[string]bool - - // SymbolMatcher is the config associated with the "symbolMatcher" gopls - // config option. - SymbolMatcher, SymbolStyle *string - - // LimitWorkspaceScope is true if the user does not want to expand their - // workspace scope to the entire module. - LimitWorkspaceScope bool + // Env holds environment variables to apply on top of the default editor + // environment. When applying these variables, the special string + // $SANDBOX_WORKDIR is replaced by the absolute path to the sandbox working + // directory. + Env map[string]string // WorkspaceFolders is the workspace folders to configure on the LSP server, // relative to the sandbox workdir. @@ -101,14 +87,6 @@ type EditorConfig struct { // To explicitly send no workspace folders, use an empty (non-nil) slice. WorkspaceFolders []string - // AllExperiments sets the "allExperiments" configuration, which enables - // all of gopls's opt-in settings. 
- AllExperiments bool - - // Whether to send the current process ID, for testing data that is joined to - // the PID. This can only be set by one test. - SendPID bool - // Whether to edit files with windows line endings. WindowsLineEndings bool @@ -120,14 +98,8 @@ type EditorConfig struct { // "gotmpl" -> ".*tmpl" FileAssociations map[string]string - // Settings holds arbitrary additional settings to apply to the gopls config. - // TODO(rfindley): replace existing EditorConfig fields with Settings. + // Settings holds user-provided configuration for the LSP server. Settings map[string]interface{} - - ImportShortcut string - DirectoryFilters []string - VerboseOutput bool - ExperimentalUseInvalidMetadata bool } // NewEditor Creates a new Editor. @@ -136,7 +108,7 @@ func NewEditor(sandbox *Sandbox, config EditorConfig) *Editor { buffers: make(map[string]buffer), sandbox: sandbox, defaultEnv: sandbox.GoEnv(), - Config: config, + config: config, } } @@ -155,7 +127,7 @@ func (e *Editor) Connect(ctx context.Context, conn jsonrpc2.Conn, hooks ClientHo protocol.Handlers( protocol.ClientHandler(e.client, jsonrpc2.MethodNotFound))) - if err := e.initialize(ctx, e.Config.WorkspaceFolders); err != nil { + if err := e.initialize(ctx, e.config.WorkspaceFolders); err != nil { return nil, err } e.sandbox.Workdir.AddWatcher(e.onFileChanges) @@ -213,65 +185,47 @@ func (e *Editor) Client() *Client { return e.client } -func (e *Editor) overlayEnv() map[string]string { +// settings builds the settings map for use in LSP settings +// RPCs. +func (e *Editor) settings() map[string]interface{} { + e.mu.Lock() + defer e.mu.Unlock() env := make(map[string]string) for k, v := range e.defaultEnv { - v = strings.ReplaceAll(v, "$SANDBOX_WORKDIR", e.sandbox.Workdir.RootURI().SpanURI().Filename()) env[k] = v } - for k, v := range e.Config.Env { + for k, v := range e.config.Env { + env[k] = v + } + for k, v := range env { v = strings.ReplaceAll(v, "$SANDBOX_WORKDIR", e.sandbox.Workdir.RootURI().SpanURI().Filename()) env[k] = v } - return env -} -func (e *Editor) configuration() map[string]interface{} { - config := map[string]interface{}{ - "verboseWorkDoneProgress": true, - "env": e.overlayEnv(), - "expandWorkspaceToModule": !e.Config.LimitWorkspaceScope, - "completionBudget": "10s", - } + settings := map[string]interface{}{ + "env": env, - for k, v := range e.Config.Settings { - config[k] = v - } + // Use verbose progress reporting so that regtests can assert on + // asynchronous operations being completed (such as diagnosing a snapshot). + "verboseWorkDoneProgress": true, - if e.Config.BuildFlags != nil { - config["buildFlags"] = e.Config.BuildFlags - } - if e.Config.DirectoryFilters != nil { - config["directoryFilters"] = e.Config.DirectoryFilters - } - if e.Config.ExperimentalUseInvalidMetadata { - config["experimentalUseInvalidMetadata"] = true - } - if e.Config.CodeLenses != nil { - config["codelenses"] = e.Config.CodeLenses - } - if e.Config.SymbolMatcher != nil { - config["symbolMatcher"] = *e.Config.SymbolMatcher - } - if e.Config.SymbolStyle != nil { - config["symbolStyle"] = *e.Config.SymbolStyle - } - if e.Config.AllExperiments { - config["allExperiments"] = true - } + // Set a generous completion budget, so that tests don't flake because + // completions are too slow. 
+ "completionBudget": "10s", - if e.Config.VerboseOutput { - config["verboseOutput"] = true + // Shorten the diagnostic delay to speed up test execution (else we'd add + // the default delay to each assertion about diagnostics) + "diagnosticsDelay": "10ms", } - if e.Config.ImportShortcut != "" { - config["importShortcut"] = e.Config.ImportShortcut + for k, v := range e.config.Settings { + if k == "env" { + panic("must not provide env via the EditorConfig.Settings field: use the EditorConfig.Env field instead") + } + settings[k] = v } - config["diagnosticsDelay"] = "10ms" - - // ExperimentalWorkspaceModule is only set as a mode, not a configuration. - return config + return settings } func (e *Editor) initialize(ctx context.Context, workspaceFolders []string) error { @@ -293,10 +247,7 @@ func (e *Editor) initialize(ctx context.Context, workspaceFolders []string) erro params.Capabilities.Window.WorkDoneProgress = true // TODO: set client capabilities params.Capabilities.TextDocument.Completion.CompletionItem.TagSupport.ValueSet = []protocol.CompletionItemTag{protocol.ComplDeprecated} - params.InitializationOptions = e.configuration() - if e.Config.SendPID { - params.ProcessID = int32(os.Getpid()) - } + params.InitializationOptions = e.settings() params.Capabilities.TextDocument.Completion.CompletionItem.SnippetSupport = true params.Capabilities.TextDocument.SemanticTokens.Requests.Full = true @@ -397,20 +348,21 @@ func (e *Editor) CreateBuffer(ctx context.Context, path, content string) error { } func (e *Editor) createBuffer(ctx context.Context, path string, dirty bool, content string) error { + e.mu.Lock() + defer e.mu.Unlock() + buf := buffer{ - windowsLineEndings: e.Config.WindowsLineEndings, + windowsLineEndings: e.config.WindowsLineEndings, version: 1, path: path, lines: lines(content), dirty: dirty, } - e.mu.Lock() - defer e.mu.Unlock() e.buffers[path] = buf item := protocol.TextDocumentItem{ URI: e.sandbox.Workdir.URI(buf.path), - LanguageID: e.languageID(buf.path), + LanguageID: languageID(buf.path, e.config.FileAssociations), Version: int32(buf.version), Text: buf.text(), } @@ -436,9 +388,11 @@ var defaultFileAssociations = map[string]*regexp.Regexp{ "gotmpl": regexp.MustCompile(`^.*tmpl$`), } -func (e *Editor) languageID(p string) string { +// languageID returns the language identifier for the path p given the user +// configured fileAssociations. +func languageID(p string, fileAssociations map[string]string) string { base := path.Base(p) - for lang, re := range e.Config.FileAssociations { + for lang, re := range fileAssociations { re := regexp.MustCompile(re) if re.MatchString(base) { return lang @@ -1205,6 +1159,30 @@ func (e *Editor) applyProtocolEdit(ctx context.Context, change protocol.TextDocu return e.EditBuffer(ctx, path, fakeEdits) } +// Config returns the current editor configuration. +func (e *Editor) Config() EditorConfig { + e.mu.Lock() + defer e.mu.Unlock() + return e.config +} + +// ChangeConfiguration sets the new editor configuration, and if applicable +// sends a didChangeConfiguration notification. +// +// An error is returned if the change notification failed to send. 
+func (e *Editor) ChangeConfiguration(ctx context.Context, newConfig EditorConfig) error { + e.mu.Lock() + e.config = newConfig + e.mu.Unlock() // don't hold e.mu during server calls + if e.Server != nil { + var params protocol.DidChangeConfigurationParams // empty: gopls ignores the Settings field + if err := e.Server.DidChangeConfiguration(ctx, ¶ms); err != nil { + return err + } + } + return nil +} + // CodeAction executes a codeAction request on the server. func (e *Editor) CodeAction(ctx context.Context, path string, rng *protocol.Range, diagnostics []protocol.Diagnostic) ([]protocol.CodeAction, error) { if e.Server == nil { diff --git a/internal/lsp/regtest/env.go b/internal/lsp/regtest/env.go index a37cbf66611..8960a0dc913 100644 --- a/internal/lsp/regtest/env.go +++ b/internal/lsp/regtest/env.go @@ -111,7 +111,11 @@ type condition struct { // NewEnv creates a new test environment using the given scratch environment // and gopls server. // -// The resulting func must be called to close the jsonrpc2 connection. +// The resulting cleanup func must be called to close the jsonrpc2 connection. +// +// TODO(rfindley): this function provides questionable value. Consider +// refactoring to move things like creating the server outside of this +// constructor. func NewEnv(ctx context.Context, tb testing.TB, sandbox *fake.Sandbox, ts servertest.Connector, editorConfig fake.EditorConfig, withHooks bool) (_ *Env, cleanup func()) { tb.Helper() diff --git a/internal/lsp/regtest/runner.go b/internal/lsp/regtest/runner.go index 5726f88ea0c..0640e452fb1 100644 --- a/internal/lsp/regtest/runner.go +++ b/internal/lsp/regtest/runner.go @@ -133,17 +133,27 @@ func Options(hook func(*source.Options)) RunOption { }) } -func SendPID() RunOption { +// WindowsLineEndings configures the editor to use windows line endings. +func WindowsLineEndings() RunOption { return optionSetter(func(opts *runConfig) { - opts.editor.SendPID = true + opts.editor.WindowsLineEndings = true }) } -// EditorConfig is a RunOption option that configured the regtest editor. -type EditorConfig fake.EditorConfig +// Settings is a RunOption that sets user-provided configuration for the LSP +// server. +// +// As a special case, the env setting must not be provided via Settings: use +// EnvVars instead. +type Settings map[string]interface{} -func (c EditorConfig) set(opts *runConfig) { - opts.editor = fake.EditorConfig(c) +func (s Settings) set(opts *runConfig) { + if opts.editor.Settings == nil { + opts.editor.Settings = make(map[string]interface{}) + } + for k, v := range s { + opts.editor.Settings[k] = v + } } // WorkspaceFolders configures the workdir-relative workspace folders to send @@ -160,6 +170,20 @@ func WorkspaceFolders(relFolders ...string) RunOption { }) } +// EnvVars sets environment variables for the LSP session. When applying these +// variables to the session, the special string $SANDBOX_WORKDIR is replaced by +// the absolute path to the sandbox working directory. +type EnvVars map[string]string + +func (e EnvVars) set(opts *runConfig) { + if opts.editor.Env == nil { + opts.editor.Env = make(map[string]string) + } + for k, v := range e { + opts.editor.Env[k] = v + } +} + // InGOPATH configures the workspace working directory to be GOPATH, rather // than a separate working directory for use with modules. func InGOPATH() RunOption { @@ -212,13 +236,6 @@ func GOPROXY(goproxy string) RunOption { }) } -// LimitWorkspaceScope sets the LimitWorkspaceScope configuration. 
-func LimitWorkspaceScope() RunOption { - return optionSetter(func(opts *runConfig) { - opts.editor.LimitWorkspaceScope = true - }) -} - type TestFunc func(t *testing.T, env *Env) // Run executes the test function in the default configured gopls execution diff --git a/internal/lsp/regtest/wrappers.go b/internal/lsp/regtest/wrappers.go index 96e2de96271..d8c080c0fec 100644 --- a/internal/lsp/regtest/wrappers.go +++ b/internal/lsp/regtest/wrappers.go @@ -7,7 +7,6 @@ package regtest import ( "encoding/json" "path" - "testing" "golang.org/x/tools/internal/lsp/command" "golang.org/x/tools/internal/lsp/fake" @@ -427,31 +426,10 @@ func (e *Env) CodeAction(path string, diagnostics []protocol.Diagnostic) []proto return actions } -func (e *Env) ChangeConfiguration(t *testing.T, config *fake.EditorConfig) { - e.Editor.Config = *config - if err := e.Editor.Server.DidChangeConfiguration(e.Ctx, &protocol.DidChangeConfigurationParams{ - // gopls currently ignores the Settings field - }); err != nil { - t.Fatal(err) - } -} - -// ChangeEnv modifies the editor environment and reconfigures the LSP client. -// TODO: extend this to "ChangeConfiguration", once we refactor the way editor -// configuration is defined. -func (e *Env) ChangeEnv(overlay map[string]string) { +// ChangeConfiguration updates the editor config, calling t.Fatal on any error. +func (e *Env) ChangeConfiguration(newConfig fake.EditorConfig) { e.T.Helper() - // TODO: to be correct, this should probably be synchronized, but right now - // configuration is only ever modified synchronously in a regtest, so this - // correctness can wait for the previously mentioned refactoring. - if e.Editor.Config.Env == nil { - e.Editor.Config.Env = make(map[string]string) - } - for k, v := range overlay { - e.Editor.Config.Env[k] = v - } - var params protocol.DidChangeConfigurationParams - if err := e.Editor.Server.DidChangeConfiguration(e.Ctx, ¶ms); err != nil { + if err := e.Editor.ChangeConfiguration(e.Ctx, newConfig); err != nil { e.T.Fatal(err) } } From 7c06b01db6ec68c2ffb216019a21bf59ac40cb35 Mon Sep 17 00:00:00 2001 From: Zvonimir Pavlinovic Date: Fri, 8 Jul 2022 12:55:50 -0700 Subject: [PATCH 096/136] go/callgraph/vta: remove interprocedural flows for receiver objects Suppose a call i.Foo(j) and suppose that the initial call graph resolves this call to just a.Foo(x). Here, i is an interface and a is concrete type. VTA then creates flows between j and x. However, it also tries to create flows between i and a. The latter flows are not needed. The flow from i to a will not be registered in the type flow graph as it does not make sense. The flow from a to i would bake in the information from the initial call graph which would defy the purpose of VTA. Note that the flow a -> i doesn't occur in practice as that flow is only created when a and i can alias. 
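For readers skimming this change, a minimal sketch of the call shape
discussed above (illustrative only; the identifiers I, A, Foo, i, j and
x stand in for the example in this message and are not code from this CL):

    package main

    // I is the interface type of the receiver at the call site.
    type I interface{ Foo(int) }

    // A is the concrete type that the initial call graph resolves i.Foo to.
    type A struct{}

    func (A) Foo(x int) {}

    func main() {
        var i I = A{}
        j := 42
        // VTA adds a flow from the argument j to the parameter x,
        // but no longer adds any flow between i and the A receiver.
        i.Foo(j)
    }
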
Change-Id: Ia4087651c72a14b94d83d07bb5e6d77603842362 Reviewed-on: https://go-review.googlesource.com/c/tools/+/416517 TryBot-Result: Gopher Robot Run-TryBot: Zvonimir Pavlinovic gopls-CI: kokoro Reviewed-by: Tim King --- go/callgraph/vta/graph.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/go/callgraph/vta/graph.go b/go/callgraph/vta/graph.go index 365d7a5b0f7..48547a52527 100644 --- a/go/callgraph/vta/graph.go +++ b/go/callgraph/vta/graph.go @@ -586,14 +586,14 @@ func addArgumentFlows(b *builder, c ssa.CallInstruction, f *ssa.Function) { return } cc := c.Common() - // When c is an unresolved method call (cc.Method != nil), cc.Value contains - // the receiver object rather than cc.Args[0]. - if cc.Method != nil { - b.addInFlowAliasEdges(b.nodeFromVal(f.Params[0]), b.nodeFromVal(cc.Value)) - } offset := 0 if cc.Method != nil { + // We don't add interprocedural flows for receiver objects. + // At a call site, the receiver object is interface while the + // callee object is concrete. The flow from interface to + // concrete type does not make sense. The flow other way around + // would bake in information from the initial call graph. offset = 1 } for i, v := range cc.Args { From 459e2b88fc869374454543f70b377f13599756f7 Mon Sep 17 00:00:00 2001 From: Robert Findley Date: Tue, 12 Jul 2022 17:13:32 -0400 Subject: [PATCH 097/136] internal/lsp/progress: actually close over Context in WorkDoneWriter CL 409936 eliminated cases where we close over a Context during progress reporting, except in one instance where it wasn't possible: the WorkDoneWriter that must implement the io.Writer interface. Unfortunately it contained a glaring bug that the ctx field was never set, and the regression test for progress reporting during `go generate` was disabled due to flakiness (golang/go#49901). Incidentally, the fundamental problem that CL 409936 addressed may also fix the flakiness of TestGenerateProgress. Fix the bug, and re-enable the test. Fixes golang/go#53781 Change-Id: Ideb99a5525667e45d2e41fcc5078699ba1e0f1a3 Reviewed-on: https://go-review.googlesource.com/c/tools/+/417115 gopls-CI: kokoro TryBot-Result: Gopher Robot Run-TryBot: Robert Findley Auto-Submit: Robert Findley Reviewed-by: Alan Donovan --- gopls/internal/regtest/misc/generate_test.go | 2 -- internal/lsp/command.go | 4 ++-- internal/lsp/progress/progress.go | 4 ++-- 3 files changed, 4 insertions(+), 6 deletions(-) diff --git a/gopls/internal/regtest/misc/generate_test.go b/gopls/internal/regtest/misc/generate_test.go index 1dc22d737ba..44789514f40 100644 --- a/gopls/internal/regtest/misc/generate_test.go +++ b/gopls/internal/regtest/misc/generate_test.go @@ -16,8 +16,6 @@ import ( ) func TestGenerateProgress(t *testing.T) { - t.Skipf("skipping flaky test: https://golang.org/issue/49901") - const generatedWorkspace = ` -- go.mod -- module fake.test diff --git a/internal/lsp/command.go b/internal/lsp/command.go index cd4c7273101..c173ef23543 100644 --- a/internal/lsp/command.go +++ b/internal/lsp/command.go @@ -404,7 +404,7 @@ func (c *commandHandler) runTests(ctx context.Context, snapshot source.Snapshot, // create output buf := &bytes.Buffer{} ew := progress.NewEventWriter(ctx, "test") - out := io.MultiWriter(ew, progress.NewWorkDoneWriter(work), buf) + out := io.MultiWriter(ew, progress.NewWorkDoneWriter(ctx, work), buf) // Run `go test -run Func` on each test. 
var failedTests int @@ -487,7 +487,7 @@ func (c *commandHandler) Generate(ctx context.Context, args command.GenerateArgs Args: []string{"-x", pattern}, WorkingDir: args.Dir.SpanURI().Filename(), } - stderr := io.MultiWriter(er, progress.NewWorkDoneWriter(deps.work)) + stderr := io.MultiWriter(er, progress.NewWorkDoneWriter(ctx, deps.work)) if err := deps.snapshot.RunGoCommandPiped(ctx, source.Normal, inv, er, stderr); err != nil { return err } diff --git a/internal/lsp/progress/progress.go b/internal/lsp/progress/progress.go index d6794cf338b..8b0d1c6a224 100644 --- a/internal/lsp/progress/progress.go +++ b/internal/lsp/progress/progress.go @@ -260,8 +260,8 @@ type WorkDoneWriter struct { wd *WorkDone } -func NewWorkDoneWriter(wd *WorkDone) *WorkDoneWriter { - return &WorkDoneWriter{wd: wd} +func NewWorkDoneWriter(ctx context.Context, wd *WorkDone) *WorkDoneWriter { + return &WorkDoneWriter{ctx: ctx, wd: wd} } func (wdw *WorkDoneWriter) Write(p []byte) (n int, err error) { From 8730184efb0c83f831a94eb2456ac28d3511f8ae Mon Sep 17 00:00:00 2001 From: Robert Findley Date: Tue, 12 Jul 2022 17:56:04 -0400 Subject: [PATCH 098/136] internal/lsp/fake: retry spurious file lock errors on windows Cleaning the regtest sandbox sometimes fails on windows with errors that may be spurious. Copy logic from the testing package to retry these errors. For golang/go#53819 Change-Id: I059fbb5e023af1cd52a5d231cd11a7c2ae72bc92 Reviewed-on: https://go-review.googlesource.com/c/tools/+/417117 Run-TryBot: Robert Findley Reviewed-by: Bryan Mills TryBot-Result: Gopher Robot gopls-CI: kokoro --- internal/lsp/fake/sandbox.go | 31 +++++++++++++++++++++++++++- internal/lsp/fake/workdir.go | 4 ++++ internal/lsp/fake/workdir_windows.go | 25 ++++++++++++++++++++-- 3 files changed, 57 insertions(+), 3 deletions(-) diff --git a/internal/lsp/fake/sandbox.go b/internal/lsp/fake/sandbox.go index b4395646bc6..72b01217ae7 100644 --- a/internal/lsp/fake/sandbox.go +++ b/internal/lsp/fake/sandbox.go @@ -9,9 +9,11 @@ import ( "errors" "fmt" "io/ioutil" + "math/rand" "os" "path/filepath" "strings" + "time" "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/testenv" @@ -266,9 +268,36 @@ func (sb *Sandbox) Close() error { if sb.gopath != "" { goCleanErr = sb.RunGoCommand(context.Background(), "", "clean", []string{"-modcache"}, false) } - err := os.RemoveAll(sb.rootdir) + err := removeAll(sb.rootdir) if err != nil || goCleanErr != nil { return fmt.Errorf("error(s) cleaning sandbox: cleaning modcache: %v; removing files: %v", goCleanErr, err) } return nil } + +// removeAll is copied from GOROOT/src/testing/testing.go +// +// removeAll is like os.RemoveAll, but retries Windows "Access is denied." +// errors up to an arbitrary timeout. +// +// See https://go.dev/issue/50051 for additional context. +func removeAll(path string) error { + const arbitraryTimeout = 2 * time.Second + var ( + start time.Time + nextSleep = 1 * time.Millisecond + ) + for { + err := os.RemoveAll(path) + if !isWindowsRetryable(err) { + return err + } + if start.IsZero() { + start = time.Now() + } else if d := time.Since(start) + nextSleep; d >= arbitraryTimeout { + return err + } + time.Sleep(nextSleep) + nextSleep += time.Duration(rand.Int63n(int64(nextSleep))) + } +} diff --git a/internal/lsp/fake/workdir.go b/internal/lsp/fake/workdir.go index 734f5fd8197..0a72083bf2f 100644 --- a/internal/lsp/fake/workdir.go +++ b/internal/lsp/fake/workdir.go @@ -77,6 +77,10 @@ func WriteFileData(path string, content []byte, rel RelativeTo) error { // on Windows. 
var isWindowsErrLockViolation = func(err error) bool { return false } +// isWindowsRetryable reports whether err is a Windows error code +// that may be fixed by retrying a failed filesystem operation. +var isWindowsRetryable = func(err error) bool { return false } + // Workdir is a temporary working directory for tests. It exposes file // operations in terms of relative paths, and fakes file watching by triggering // events on file operations. diff --git a/internal/lsp/fake/workdir_windows.go b/internal/lsp/fake/workdir_windows.go index bcd18b7a226..fc5ad1a89af 100644 --- a/internal/lsp/fake/workdir_windows.go +++ b/internal/lsp/fake/workdir_windows.go @@ -10,10 +10,31 @@ import ( ) func init() { - // from https://docs.microsoft.com/en-us/windows/win32/debug/system-error-codes--0-499- - const ERROR_LOCK_VIOLATION syscall.Errno = 33 + // constants copied from GOROOT/src/internal/syscall/windows/syscall_windows.go + const ( + ERROR_SHARING_VIOLATION syscall.Errno = 32 + ERROR_LOCK_VIOLATION syscall.Errno = 33 + ) isWindowsErrLockViolation = func(err error) bool { return errors.Is(err, ERROR_LOCK_VIOLATION) } + + // Copied from GOROOT/src/testing/testing_windows.go + isWindowsRetryable = func(err error) bool { + for { + unwrapped := errors.Unwrap(err) + if unwrapped == nil { + break + } + err = unwrapped + } + if err == syscall.ERROR_ACCESS_DENIED { + return true // Observed in https://go.dev/issue/50051. + } + if err == ERROR_SHARING_VIOLATION { + return true // Observed in https://go.dev/issue/51442. + } + return false + } } From b230791f2dc806191a96162c83840236aef3990e Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Mon, 11 Jul 2022 14:39:26 -0400 Subject: [PATCH 099/136] internal/lsp/cache: move PosTo{Decl,Field} out of cache Before, these methods of the Source interface used to use a cache of buildASTCache, which built a Pos-keyed map for a whole file, but the necessary algorithm is essentially a binary search which is plenty fast enough to avoid the need for cache. This change implements that algorithm and moves both methods out of the interface into a single function, source.FindDeclAndField. -- I measured the duration of all calls to astCacheData (before) and FindDeclAndField (after) occurring within this command: $ go test -bench=TestBenchmarkConfiguredCompletion -v ./gopls/internal/regtest/bench -completion_workdir=$HOME/w/kubernetes -completion_file=../kubernetes/pkg/generated/openapi/zz_generated.openapi.go -completion_regexp=Get (The numbers reported by this benchmark are problematic, which is why I measured call times directly; see https://github.com/golang/go/issues/53798.) Results: before (n=4727) max = 21ms, 90% = 4.4ms, median = 19us after (n=6282) max = 2.3ms, 90% = 25us, median = 14us The increased number of calls to the function after the change is due to a longstanding bug in the benchmark: each iteration of the b.N loop doesn't do a fixed amount of work, it does as much as it can in 10s. Thus making the code faster simply causes the benchmark to spend the same amount of time on other parts of the program--such as the loop that calls FindDeclAndField. See https://go-review.googlesource.com/c/tools/+/221021 for background on the previous implementation. 
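To make the traversal strategy concrete, here is a stand-alone sketch
(not the code added by this CL; the real implementation is
source.FindDeclAndField in the diff below, and innermostAt is a made-up
helper name) of the position-pruned walk that keeps each query cheap:
any subtree whose range cannot contain the search position is skipped,
so a lookup follows a single path from the file root to the target node.

    package main

    import (
        "fmt"
        "go/ast"
        "go/parser"
        "go/token"
    )

    // innermostAt returns the innermost node whose source range contains pos.
    func innermostAt(f *ast.File, pos token.Pos) ast.Node {
        var found ast.Node
        ast.Inspect(f, func(n ast.Node) bool {
            if n == nil {
                return false
            }
            // Prune: don't descend into subtrees that cannot contain pos.
            if !(n.Pos() <= pos && pos < n.End()) {
                return false
            }
            found = n // children visited later are narrower, so the last hit wins
            return true
        })
        return found
    }

    func main() {
        fset := token.NewFileSet()
        f, err := parser.ParseFile(fset, "p.go", "package p; var x = 1", 0)
        if err != nil {
            panic(err)
        }
        fmt.Printf("%T\n", innermostAt(f, f.Name.Pos())) // *ast.Ident (the package name)
    }
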
Change-Id: I745ecc4e65378fbe97f456228cafba84105b7e49 Reviewed-on: https://go-review.googlesource.com/c/tools/+/416880 Auto-Submit: Alan Donovan Run-TryBot: Alan Donovan TryBot-Result: Gopher Robot gopls-CI: kokoro Reviewed-by: Robert Findley --- internal/lsp/cache/parse.go | 166 ----------------------- internal/lsp/source/completion/format.go | 5 +- internal/lsp/source/hover.go | 102 +++++++++++++- internal/lsp/source/identifier.go | 5 +- internal/lsp/source/signature_help.go | 5 +- internal/lsp/source/types_format.go | 4 +- internal/lsp/source/view.go | 15 -- 7 files changed, 103 insertions(+), 199 deletions(-) diff --git a/internal/lsp/cache/parse.go b/internal/lsp/cache/parse.go index ef588c60597..11075195330 100644 --- a/internal/lsp/cache/parse.go +++ b/internal/lsp/cache/parse.go @@ -26,7 +26,6 @@ import ( "golang.org/x/tools/internal/lsp/safetoken" "golang.org/x/tools/internal/lsp/source" "golang.org/x/tools/internal/memoize" - "golang.org/x/tools/internal/span" ) // parseKey uniquely identifies a parsed Go file. @@ -107,171 +106,6 @@ type parseGoResult struct { err error } -type astCacheKey struct { - pkg packageHandleKey - uri span.URI -} - -func (s *snapshot) astCacheData(ctx context.Context, spkg source.Package, pos token.Pos) (*astCacheData, error) { - pkg := spkg.(*pkg) - pkgHandle := s.getPackage(pkg.m.ID, pkg.mode) - if pkgHandle == nil { - return nil, fmt.Errorf("could not reconstruct package handle for %v", pkg.m.ID) - } - tok := s.FileSet().File(pos) - if tok == nil { - return nil, fmt.Errorf("no file for pos %v", pos) - } - pgf, err := pkg.File(span.URIFromPath(tok.Name())) - if err != nil { - return nil, err - } - - // TODO(adonovan): opt: is it necessary to cache this operation? - // - // I expect the main benefit of CL 221021, which introduced it, - // was the replacement of PathEnclosingInterval, whose - // traversal is allocation-intensive, by buildASTCache. - // - // When run on the largest file in k8s, buildASTCache took - // ~6ms, but I expect most of that cost could be eliminated by - // using a stripped-down version of PathEnclosingInterval that - // cares only about syntax trees and not tokens. A stateless - // utility function that is cheap enough to call for each Pos - // would be a nice simplification. - // - // (The basic approach would be to use ast.Inspect, compare - // each node with the search Pos, and bail out as soon - // as a match is found. The pre-order hook would return false - // to avoid descending into any tree whose End is before - // the search Pos.) - // - // A representative benchmark would help. - astHandle, release := s.store.Handle(astCacheKey{pkgHandle.key, pgf.URI}, func(ctx context.Context, arg interface{}) interface{} { - return buildASTCache(pgf) - }) - defer release() - - d, err := s.awaitHandle(ctx, astHandle) - if err != nil { - return nil, err - } - return d.(*astCacheData), nil -} - -func (s *snapshot) PosToDecl(ctx context.Context, spkg source.Package, pos token.Pos) (ast.Decl, error) { - data, err := s.astCacheData(ctx, spkg, pos) - if err != nil { - return nil, err - } - return data.posToDecl[pos], nil -} - -func (s *snapshot) PosToField(ctx context.Context, spkg source.Package, pos token.Pos) (*ast.Field, error) { - data, err := s.astCacheData(ctx, spkg, pos) - if err != nil { - return nil, err - } - return data.posToField[pos], nil -} - -// An astCacheData maps object positions to syntax nodes for a single Go file. 
-type astCacheData struct { - // Maps the position of each name declared by a func/var/const/type - // Decl to the Decl node. Also maps the name and type of each field - // (broadly defined) to its innermost enclosing Decl. - posToDecl map[token.Pos]ast.Decl - - // Maps the position of the Name and Type of each field - // (broadly defined) to the Field node. - posToField map[token.Pos]*ast.Field -} - -// buildASTCache builds caches to aid in quickly going from the typed -// world to the syntactic world. -func buildASTCache(pgf *source.ParsedGoFile) *astCacheData { - var ( - // path contains all ancestors, including n. - path []ast.Node - // decls contains all ancestors that are decls. - decls []ast.Decl - ) - - data := &astCacheData{ - posToDecl: make(map[token.Pos]ast.Decl), - posToField: make(map[token.Pos]*ast.Field), - } - - ast.Inspect(pgf.File, func(n ast.Node) bool { - if n == nil { - lastP := path[len(path)-1] - path = path[:len(path)-1] - if len(decls) > 0 && decls[len(decls)-1] == lastP { - decls = decls[:len(decls)-1] - } - return false - } - - path = append(path, n) - - switch n := n.(type) { - case *ast.Field: - addField := func(f ast.Node) { - if f.Pos().IsValid() { - data.posToField[f.Pos()] = n - if len(decls) > 0 { - data.posToDecl[f.Pos()] = decls[len(decls)-1] - } - } - } - - // Add mapping for *ast.Field itself. This handles embedded - // fields which have no associated *ast.Ident name. - addField(n) - - // Add mapping for each field name since you can have - // multiple names for the same type expression. - for _, name := range n.Names { - addField(name) - } - - // Also map "X" in "...X" to the containing *ast.Field. This - // makes it easy to format variadic signature params - // properly. - if elips, ok := n.Type.(*ast.Ellipsis); ok && elips.Elt != nil { - addField(elips.Elt) - } - case *ast.FuncDecl: - decls = append(decls, n) - - if n.Name != nil && n.Name.Pos().IsValid() { - data.posToDecl[n.Name.Pos()] = n - } - case *ast.GenDecl: - decls = append(decls, n) - - for _, spec := range n.Specs { - switch spec := spec.(type) { - case *ast.TypeSpec: - if spec.Name != nil && spec.Name.Pos().IsValid() { - data.posToDecl[spec.Name.Pos()] = n - } - case *ast.ValueSpec: - for _, id := range spec.Names { - if id != nil && id.Pos().IsValid() { - data.posToDecl[id.Pos()] = n - } - } - } - } - } - - return true - }) - - return data -} - // parseGoImpl parses the Go source file whose content is provided by fh. 
func parseGoImpl(ctx context.Context, fset *token.FileSet, fh source.FileHandle, mode source.ParseMode) (*source.ParsedGoFile, error) { ctx, done := event.Start(ctx, "cache.parseGo", tag.File.Of(fh.URI().Filename())) diff --git a/internal/lsp/source/completion/format.go b/internal/lsp/source/completion/format.go index 72498cc6874..d34cee22ad2 100644 --- a/internal/lsp/source/completion/format.go +++ b/internal/lsp/source/completion/format.go @@ -242,10 +242,7 @@ Suffixes: return item, nil } - decl, err := c.snapshot.PosToDecl(ctx, pkg, obj.Pos()) - if err != nil { - return CompletionItem{}, err - } + decl, _ := source.FindDeclAndField(pkg.GetSyntax(), obj.Pos()) // may be nil hover, err := source.FindHoverContext(ctx, c.snapshot, pkg, obj, decl, nil) if err != nil { event.Error(ctx, "failed to find Hover", err, tag.URI.Of(uri)) diff --git a/internal/lsp/source/hover.go b/internal/lsp/source/hover.go index 58ea9696203..b2524c499e4 100644 --- a/internal/lsp/source/hover.go +++ b/internal/lsp/source/hover.go @@ -610,11 +610,7 @@ func FindHoverContext(ctx context.Context, s Snapshot, pkg Package, obj types.Ob break } - field, err := s.PosToField(ctx, pkg, obj.Pos()) - if err != nil { - return nil, err - } - + _, field := FindDeclAndField(pkg.GetSyntax(), obj.Pos()) if field != nil { comment := field.Doc if comment.Text() == "" { @@ -876,3 +872,99 @@ func anyNonEmpty(x []string) bool { } return false } + +// FindDeclAndField returns the var/func/type/const Decl that declares +// the identifier at pos, searching the given list of file syntax +// trees. If pos is the position of an ast.Field or one of its Names +// or Ellipsis.Elt, the field is returned, along with the innermost +// enclosing Decl, which could be only loosely related---consider: +// +// var decl = f( func(field int) {} ) +// +// It returns (nil, nil) if no Field or Decl is found at pos. +func FindDeclAndField(files []*ast.File, pos token.Pos) (decl ast.Decl, field *ast.Field) { + // panic(nil) breaks off the traversal and + // causes the function to return normally. + defer func() { + if x := recover(); x != nil { + panic(x) + } + }() + + // Visit the files in search of the node at pos. + var stack []ast.Node + for _, file := range files { + ast.Inspect(file, func(n ast.Node) bool { + if n != nil { + stack = append(stack, n) // push + } else { + stack = stack[:len(stack)-1] // pop + return false + } + + // Skip subtrees (incl. files) that don't contain the search point. + if !(n.Pos() <= pos && pos < n.End()) { + return false + } + + switch n := n.(type) { + case *ast.Field: + checkField := func(f ast.Node) { + if f.Pos() == pos { + field = n + for i := len(stack) - 1; i >= 0; i-- { + if d, ok := stack[i].(ast.Decl); ok { + decl = d // innermost enclosing decl + break + } + } + panic(nil) // found + } + } + + // Check *ast.Field itself. This handles embedded + // fields which have no associated *ast.Ident name. + checkField(n) + + // Check each field name since you can have + // multiple names for the same type expression. + for _, name := range n.Names { + checkField(name) + } + + // Also check "X" in "...X". This makes it easy + // to format variadic signature params properly. 
+ if ell, ok := n.Type.(*ast.Ellipsis); ok && ell.Elt != nil { + checkField(ell.Elt) + } + + case *ast.FuncDecl: + if n.Name.Pos() == pos { + decl = n + panic(nil) // found + } + + case *ast.GenDecl: + for _, spec := range n.Specs { + switch spec := spec.(type) { + case *ast.TypeSpec: + if spec.Name.Pos() == pos { + decl = n + panic(nil) // found + } + case *ast.ValueSpec: + for _, id := range spec.Names { + if id.Pos() == pos { + decl = n + panic(nil) // found + } + } + } + } + } + return true + }) + } + + return nil, nil +} diff --git a/internal/lsp/source/identifier.go b/internal/lsp/source/identifier.go index c87725c4854..5378ae840ed 100644 --- a/internal/lsp/source/identifier.go +++ b/internal/lsp/source/identifier.go @@ -292,9 +292,8 @@ func findIdentifier(ctx context.Context, snapshot Snapshot, pkg Package, pgf *Pa if err != nil { return nil, err } - if result.Declaration.node, err = snapshot.PosToDecl(ctx, declPkg, result.Declaration.obj.Pos()); err != nil { - return nil, err - } + result.Declaration.node, _ = FindDeclAndField(declPkg.GetSyntax(), result.Declaration.obj.Pos()) // may be nil + // Ensure that we have the full declaration, in case the declaration was // parsed in ParseExported and therefore could be missing information. if result.Declaration.fullDecl, err = fullNode(snapshot, result.Declaration.obj, declPkg); err != nil { diff --git a/internal/lsp/source/signature_help.go b/internal/lsp/source/signature_help.go index 12e359008fe..5b087e83769 100644 --- a/internal/lsp/source/signature_help.go +++ b/internal/lsp/source/signature_help.go @@ -98,10 +98,7 @@ FindCall: if err != nil { return nil, 0, err } - node, err := snapshot.PosToDecl(ctx, declPkg, obj.Pos()) - if err != nil { - return nil, 0, err - } + node, _ := FindDeclAndField(declPkg.GetSyntax(), obj.Pos()) // may be nil d, err := FindHoverContext(ctx, snapshot, pkg, obj, node, nil) if err != nil { return nil, 0, err diff --git a/internal/lsp/source/types_format.go b/internal/lsp/source/types_format.go index 5e10a509005..756d02de22a 100644 --- a/internal/lsp/source/types_format.go +++ b/internal/lsp/source/types_format.go @@ -259,8 +259,8 @@ func FormatVarType(ctx context.Context, snapshot Snapshot, srcpkg Package, obj * return types.TypeString(obj.Type(), qf) } - field, err := snapshot.PosToField(ctx, pkg, obj.Pos()) - if err != nil || field == nil { + _, field := FindDeclAndField(pkg.GetSyntax(), obj.Pos()) + if field == nil { return types.TypeString(obj.Type(), qf) } expr := field.Type diff --git a/internal/lsp/source/view.go b/internal/lsp/source/view.go index 004d830dd07..c067fe5a4f6 100644 --- a/internal/lsp/source/view.go +++ b/internal/lsp/source/view.go @@ -79,21 +79,6 @@ type Snapshot interface { // If the file is not available, returns nil and an error. ParseGo(ctx context.Context, fh FileHandle, mode ParseMode) (*ParsedGoFile, error) - // PosToField is a cache of *ast.Fields by token.Pos. This allows us - // to quickly find corresponding *ast.Field node given a *types.Var. - // We must refer to the AST to render type aliases properly when - // formatting signatures and other types. - // May return (nil, nil) if the file didn't declare an object at that position. - // TODO(adonovan): seems like a bug? - PosToField(ctx context.Context, pkg Package, pos token.Pos) (*ast.Field, error) - - // PosToDecl maps certain objects' positions to their surrounding - // ast.Decl. This mapping is used when building the documentation - // string for the objects. 
- // May return (nil, nil) if the file didn't declare an object at that position. - // TODO(adonovan): seems like a bug? - PosToDecl(ctx context.Context, pkg Package, pos token.Pos) (ast.Decl, error) - // DiagnosePackage returns basic diagnostics, including list, parse, and type errors // for pkg, grouped by file. DiagnosePackage(ctx context.Context, pkg Package) (map[span.URI][]*Diagnostic, error) From dcb576d3b6e02adc9dadcbaafedf16b67034edda Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Tue, 12 Jul 2022 16:48:37 -0400 Subject: [PATCH 100/136] internal/lsp/cache: simplify modtidy Previously, the modtidy operation used a persistent map of handles in the central store that cached the result of a parsing the go.mod file after running 'go mod tidy'. The key was complex, including the session, view, imports of all dependencies, and the names of all unsaved overlays. The fine-grained key prevented spurious cache hits for invalid inputs by (we suspect) preventing nearly all cache hits. The existing snapshot invalidation mechanism should be sufficient to solve this problem, as the map entry is evicted whenever the metadata or overlays change. So, this change avoids keeping handles in the central store, so they are never shared across views. Also, modtidy exploited the fact that a packageHandle used to include a copy of all the Go source files of each package, to avoid having to read the files itself. As a result it would entail lots of unnecessary work building package handles and reading dependencies when it has no business even thinking about type checking. This change: - extracts the logic to read Metadata.{GoFiles,CompiledGo}Files so that it can be shared by modtidy and buildPackageHandle. - packageHandle.imports has moved into mod_tidy. One call (to compute the hash key) has gone away, as have various other hashing operations. - removes the packagesMap typed persistent.Map wrapper. - analysis: check cache before calling buildPackageHandle. - decouple Handle from Store so that unstored handles may be used. - adds various TODO comments for further simplification. Change-Id: Ibdc086ca76d6483b094ef48aac5b1dd0cdd04973 Reviewed-on: https://go-review.googlesource.com/c/tools/+/417116 TryBot-Result: Gopher Robot Reviewed-by: Robert Findley Run-TryBot: Alan Donovan gopls-CI: kokoro Auto-Submit: Alan Donovan --- internal/lsp/cache/analysis.go | 22 ++++-- internal/lsp/cache/check.go | 90 ++++++++++++------------- internal/lsp/cache/maps.go | 46 +------------ internal/lsp/cache/mod.go | 1 + internal/lsp/cache/mod_tidy.go | 119 ++++++++++++++++----------------- internal/lsp/cache/session.go | 2 +- internal/lsp/cache/snapshot.go | 49 ++++++-------- internal/lsp/cache/view.go | 2 +- internal/memoize/memoize.go | 29 ++++++-- 9 files changed, 160 insertions(+), 200 deletions(-) diff --git a/internal/lsp/cache/analysis.go b/internal/lsp/cache/analysis.go index e196d1c4a35..a5e34b406da 100644 --- a/internal/lsp/cache/analysis.go +++ b/internal/lsp/cache/analysis.go @@ -23,6 +23,11 @@ import ( ) func (s *snapshot) Analyze(ctx context.Context, id string, analyzers []*source.Analyzer) ([]*source.Diagnostic, error) { + // TODO(adonovan): merge these two loops. There's no need to + // construct all the root action handles before beginning + // analysis. Operations should be concurrent (though that first + // requires buildPackageHandle not to be inefficient when + // called in parallel.) 
var roots []*actionHandle for _, a := range analyzers { if !a.IsEnabled(s.view) { @@ -95,15 +100,18 @@ func (s *snapshot) actionHandle(ctx context.Context, id PackageID, a *analysis.A // use of concurrency would lead to an exponential amount of duplicated // work. We should instead use an atomically updated future cache // and a parallel graph traversal. - ph, err := s.buildPackageHandle(ctx, id, source.ParseFull) - if err != nil { - return nil, err - } - if act := s.getActionHandle(id, ph.mode, a); act != nil { + + // TODO(adonovan): in the code below, follow the structure of + // the other handle-map accessors. + + const mode = source.ParseFull + if act := s.getActionHandle(id, mode, a); act != nil { return act, nil } - if len(ph.key) == 0 { - return nil, fmt.Errorf("actionHandle: no key for package %s", id) + + ph, err := s.buildPackageHandle(ctx, id, mode) + if err != nil { + return nil, err } pkg, err := ph.check(ctx, s) if err != nil { diff --git a/internal/lsp/cache/check.go b/internal/lsp/cache/check.go index a2599f930c2..a830cc7f59d 100644 --- a/internal/lsp/cache/check.go +++ b/internal/lsp/cache/check.go @@ -14,8 +14,6 @@ import ( "path" "path/filepath" "regexp" - "sort" - "strconv" "strings" "sync" @@ -34,23 +32,32 @@ import ( "golang.org/x/tools/internal/typesinternal" ) +// A packageKey identifies a packageHandle in the snapshot.packages map. +type packageKey struct { + mode source.ParseMode + id PackageID +} + type packageHandleKey source.Hash +// A packageHandle is a handle to the future result of type-checking a package. +// The resulting package is obtained from the check() method. type packageHandle struct { handle *memoize.Handle - // goFiles and compiledGoFiles are the lists of files in the package. - // The latter is the list of files seen by the type checker (in which - // those that import "C" have been replaced by generated code). - goFiles, compiledGoFiles []source.FileHandle - - // mode is the mode the files were parsed in. + // mode is the mode the files will be parsed in. mode source.ParseMode // m is the metadata associated with the package. m *KnownMetadata // key is the hashed key for the package. + // + // It includes the all bits of the transitive closure of + // dependencies's sources. This is more than type checking + // really depends on: export data of direct deps should be + // enough. (The key for analysis actions could similarly + // hash only Facts of direct dependencies.) key packageHandleKey } @@ -61,26 +68,6 @@ func (ph *packageHandle) packageKey() packageKey { } } -func (ph *packageHandle) imports(ctx context.Context, s source.Snapshot) (result []string) { - for _, goFile := range ph.goFiles { - f, err := s.ParseGo(ctx, goFile, source.ParseHeader) - if err != nil { - continue - } - seen := map[string]struct{}{} - for _, impSpec := range f.File.Imports { - imp, _ := strconv.Unquote(impSpec.Path.Value) - if _, ok := seen[imp]; !ok { - seen[imp] = struct{}{} - result = append(result, imp) - } - } - } - - sort.Strings(result) - return result -} - // packageData contains the data produced by type-checking a package. type packageData struct { pkg *pkg @@ -145,21 +132,8 @@ func (s *snapshot) buildPackageHandle(ctx context.Context, id PackageID, mode so } // Read both lists of files of this package, in parallel. 
- var group errgroup.Group - getFileHandles := func(files []span.URI) []source.FileHandle { - fhs := make([]source.FileHandle, len(files)) - for i, uri := range files { - i, uri := i, uri - group.Go(func() (err error) { - fhs[i], err = s.GetFile(ctx, uri) // ~25us - return - }) - } - return fhs - } - goFiles := getFileHandles(m.GoFiles) - compiledGoFiles := getFileHandles(m.CompiledGoFiles) - if err := group.Wait(); err != nil { + goFiles, compiledGoFiles, err := readGoFiles(ctx, s, m.Metadata) + if err != nil { return nil, err } @@ -197,12 +171,10 @@ func (s *snapshot) buildPackageHandle(ctx context.Context, id PackageID, mode so }) ph := &packageHandle{ - handle: handle, - goFiles: goFiles, - compiledGoFiles: compiledGoFiles, - mode: mode, - m: m, - key: key, + handle: handle, + mode: mode, + m: m, + key: key, } // Cache the handle in the snapshot. If a package handle has already @@ -212,6 +184,26 @@ func (s *snapshot) buildPackageHandle(ctx context.Context, id PackageID, mode so return s.addPackageHandle(ph, release) } +// readGoFiles reads the content of Metadata.GoFiles and +// Metadata.CompiledGoFiles, in parallel. +func readGoFiles(ctx context.Context, s *snapshot, m *Metadata) (goFiles, compiledGoFiles []source.FileHandle, err error) { + var group errgroup.Group + getFileHandles := func(files []span.URI) []source.FileHandle { + fhs := make([]source.FileHandle, len(files)) + for i, uri := range files { + i, uri := i, uri + group.Go(func() (err error) { + fhs[i], err = s.GetFile(ctx, uri) // ~25us + return + }) + } + return fhs + } + return getFileHandles(m.GoFiles), + getFileHandles(m.CompiledGoFiles), + group.Wait() +} + func (s *snapshot) workspaceParseMode(id PackageID) source.ParseMode { s.mu.Lock() defer s.mu.Unlock() diff --git a/internal/lsp/cache/maps.go b/internal/lsp/cache/maps.go index 4bb3b3b2689..eef918843e8 100644 --- a/internal/lsp/cache/maps.go +++ b/internal/lsp/cache/maps.go @@ -149,16 +149,8 @@ func (m parseKeysByURIMap) Delete(key span.URI) { m.impl.Delete(key) } -type packagesMap struct { - impl *persistent.Map -} - -func newPackagesMap() packagesMap { - return packagesMap{ - impl: persistent.NewMap(func(a, b interface{}) bool { - return packageKeyLess(a.(packageKey), b.(packageKey)) - }), - } +func packageKeyLessInterface(x, y interface{}) bool { + return packageKeyLess(x.(packageKey), y.(packageKey)) } func packageKeyLess(x, y packageKey) bool { @@ -168,40 +160,6 @@ func packageKeyLess(x, y packageKey) bool { return x.id < y.id } -func (m packagesMap) Clone() packagesMap { - return packagesMap{ - impl: m.impl.Clone(), - } -} - -func (m packagesMap) Destroy() { - m.impl.Destroy() -} - -func (m packagesMap) Get(key packageKey) (*packageHandle, bool) { - value, ok := m.impl.Get(key) - if !ok { - return nil, false - } - return value.(*packageHandle), true -} - -func (m packagesMap) Range(do func(key packageKey, value *packageHandle)) { - m.impl.Range(func(key, value interface{}) { - do(key.(packageKey), value.(*packageHandle)) - }) -} - -func (m packagesMap) Set(key packageKey, value *packageHandle, release func()) { - m.impl.Set(key, value, func(key, value interface{}) { - release() - }) -} - -func (m packagesMap) Delete(key packageKey) { - m.impl.Delete(key) -} - type knownDirsSet struct { impl *persistent.Map } diff --git a/internal/lsp/cache/mod.go b/internal/lsp/cache/mod.go index 79b3fd016d6..8e6017648cc 100644 --- a/internal/lsp/cache/mod.go +++ b/internal/lsp/cache/mod.go @@ -225,6 +225,7 @@ func (s *snapshot) ModWhy(ctx context.Context, fh 
source.FileHandle) (map[string if !hit { // TODO(adonovan): use a simpler cache of promises that // is shared across snapshots. See comment at modTidyKey. + // We can then delete hashEnv too. type modWhyKey struct { // TODO(rfindley): is sessionID used to identify overlays because modWhy // looks at overlay state? In that case, I am not sure that this key diff --git a/internal/lsp/cache/mod_tidy.go b/internal/lsp/cache/mod_tidy.go index b59b4fd8832..a04bacf8ee2 100644 --- a/internal/lsp/cache/mod_tidy.go +++ b/internal/lsp/cache/mod_tidy.go @@ -12,7 +12,6 @@ import ( "io/ioutil" "os" "path/filepath" - "sort" "strconv" "strings" @@ -28,7 +27,8 @@ import ( "golang.org/x/tools/internal/span" ) -// modTidyImpl runs "go mod tidy" on a go.mod file, using a cache. +// ModTidy returns the go.mod file that would be obtained by running +// "go mod tidy". Concurrent requests are combined into a single command. func (s *snapshot) ModTidy(ctx context.Context, pm *source.ParsedModule) (*source.TidiedModule, error) { uri := pm.URI if pm.File == nil { @@ -46,63 +46,38 @@ func (s *snapshot) ModTidy(ctx context.Context, pm *source.ParsedModule) (*sourc // Cache miss? if !hit { + // If the file handle is an overlay, it may not be written to disk. + // The go.mod file has to be on disk for `go mod tidy` to work. + // TODO(rfindley): is this still true with Go 1.16 overlay support? fh, err := s.GetFile(ctx, pm.URI) if err != nil { return nil, err } - // If the file handle is an overlay, it may not be written to disk. - // The go.mod file has to be on disk for `go mod tidy` to work. - // TODO(rfindley): is this still true with Go 1.16 overlay support? if _, ok := fh.(*overlay); ok { - if info, _ := os.Stat(fh.URI().Filename()); info == nil { + if info, _ := os.Stat(uri.Filename()); info == nil { return nil, source.ErrNoModOnDisk } } + if criticalErr := s.GetCriticalError(ctx); criticalErr != nil { return &source.TidiedModule{ Diagnostics: criticalErr.DiagList, }, nil } - workspacePkgs, err := s.workspacePackageHandles(ctx) - if err != nil { + + if err := s.awaitLoaded(ctx); err != nil { return nil, err } - s.mu.Lock() - overlayHash := hashUnsavedOverlays(s.files) - s.mu.Unlock() + handle := memoize.NewHandle("modTidy", func(ctx context.Context, arg interface{}) interface{} { - // There's little reason at to use the shared cache for mod - // tidy (and mod why) as their key includes the view and session. - // Its only real value is to de-dup requests in flight, for - // which a singleflight in the View would suffice. - // TODO(adonovan): use a simpler cache of promises that - // is shared across snapshots. - type modTidyKey struct { - // TODO(rfindley): this key is also suspicious (see modWhyKey). 
- sessionID string - env source.Hash - gomod source.FileIdentity - imports source.Hash - unsavedOverlays source.Hash - view string - } - key := modTidyKey{ - sessionID: s.view.session.id, - view: s.view.folder.Filename(), - imports: s.hashImports(ctx, workspacePkgs), - unsavedOverlays: overlayHash, - gomod: fh.FileIdentity(), - env: hashEnv(s), - } - handle, release := s.store.Handle(key, func(ctx context.Context, arg interface{}) interface{} { - tidied, err := modTidyImpl(ctx, arg.(*snapshot), fh, pm, workspacePkgs) + tidied, err := modTidyImpl(ctx, arg.(*snapshot), uri.Filename(), pm) return modTidyResult{tidied, err} }) entry = handle s.mu.Lock() - s.modTidyHandles.Set(uri, entry, func(_, _ interface{}) { release() }) + s.modTidyHandles.Set(uri, entry, nil) s.mu.Unlock() } @@ -116,15 +91,16 @@ func (s *snapshot) ModTidy(ctx context.Context, pm *source.ParsedModule) (*sourc } // modTidyImpl runs "go mod tidy" on a go.mod file. -func modTidyImpl(ctx context.Context, snapshot *snapshot, fh source.FileHandle, pm *source.ParsedModule, workspacePkgs []*packageHandle) (*source.TidiedModule, error) { - ctx, done := event.Start(ctx, "cache.ModTidy", tag.URI.Of(fh.URI())) +func modTidyImpl(ctx context.Context, snapshot *snapshot, filename string, pm *source.ParsedModule) (*source.TidiedModule, error) { + ctx, done := event.Start(ctx, "cache.ModTidy", tag.URI.Of(filename)) defer done() inv := &gocommand.Invocation{ Verb: "mod", Args: []string{"tidy"}, - WorkingDir: filepath.Dir(fh.URI().Filename()), + WorkingDir: filepath.Dir(filename), } + // TODO(adonovan): ensure that unsaved overlays are passed through to 'go'. tmpURI, inv, cleanup, err := snapshot.goCommandInvocation(ctx, source.WriteTemporaryModFile, inv) if err != nil { return nil, err @@ -151,7 +127,7 @@ func modTidyImpl(ctx context.Context, snapshot *snapshot, fh source.FileHandle, // Compare the original and tidied go.mod files to compute errors and // suggested fixes. - diagnostics, err := modTidyDiagnostics(ctx, snapshot, pm, ideal, workspacePkgs) + diagnostics, err := modTidyDiagnostics(ctx, snapshot, pm, ideal) if err != nil { return nil, err } @@ -162,25 +138,10 @@ func modTidyImpl(ctx context.Context, snapshot *snapshot, fh source.FileHandle, }, nil } -func (s *snapshot) hashImports(ctx context.Context, wsPackages []*packageHandle) source.Hash { - seen := map[string]struct{}{} - var imports []string - for _, ph := range wsPackages { - for _, imp := range ph.imports(ctx, s) { - if _, ok := seen[imp]; !ok { - imports = append(imports, imp) - seen[imp] = struct{}{} - } - } - } - sort.Strings(imports) - return source.Hashf("%s", imports) -} - // modTidyDiagnostics computes the differences between the original and tidied // go.mod files to produce diagnostic and suggested fixes. Some diagnostics // may appear on the Go files that import packages from missing modules. -func modTidyDiagnostics(ctx context.Context, snapshot source.Snapshot, pm *source.ParsedModule, ideal *modfile.File, workspacePkgs []*packageHandle) (diagnostics []*source.Diagnostic, err error) { +func modTidyDiagnostics(ctx context.Context, snapshot *snapshot, pm *source.ParsedModule, ideal *modfile.File) (diagnostics []*source.Diagnostic, err error) { // First, determine which modules are unused and which are missing from the // original go.mod file. var ( @@ -229,15 +190,25 @@ func modTidyDiagnostics(ctx context.Context, snapshot source.Snapshot, pm *sourc } // Add diagnostics for missing modules anywhere they are imported in the // workspace. 
- for _, ph := range workspacePkgs { + // TODO(adonovan): opt: opportunities for parallelism abound. + for _, id := range snapshot.workspacePackageIDs() { + m := snapshot.getMetadata(id) + if m == nil { + return nil, fmt.Errorf("no metadata for %s", id) + } + + // Read both lists of files of this package, in parallel. + goFiles, compiledGoFiles, err := readGoFiles(ctx, snapshot, m.Metadata) + if err != nil { + return nil, err + } + missingImports := map[string]*modfile.Require{} // If -mod=readonly is not set we may have successfully imported // packages from missing modules. Otherwise they'll be in // MissingDependencies. Combine both. - importedPkgs := ph.imports(ctx, snapshot) - - for _, imp := range importedPkgs { + for imp := range parseImports(ctx, snapshot, goFiles) { if req, ok := missing[imp]; ok { missingImports[imp] = req break @@ -266,7 +237,7 @@ func modTidyDiagnostics(ctx context.Context, snapshot source.Snapshot, pm *sourc if len(missingImports) == 0 { continue } - for _, goFile := range ph.compiledGoFiles { + for _, goFile := range compiledGoFiles { pgf, err := snapshot.ParseGo(ctx, goFile, source.ParseHeader) if err != nil { continue @@ -507,3 +478,27 @@ func spanFromPositions(m *protocol.ColumnMapper, s, e modfile.Position) (span.Sp } return span.New(m.URI, start, end), nil } + +// parseImports parses the headers of the specified files and returns +// the set of strings that appear in import declarations within +// GoFiles. Errors are ignored. +// +// (We can't simply use ph.m.Metadata.Deps because it contains +// PackageIDs--not import paths--and is based on CompiledGoFiles, +// after cgo processing.) +func parseImports(ctx context.Context, s *snapshot, files []source.FileHandle) map[string]bool { + s.mu.Lock() // peekOrParse requires a locked snapshot (!) + defer s.mu.Unlock() + seen := make(map[string]bool) + for _, file := range files { + f, err := peekOrParse(ctx, s, file, source.ParseHeader) + if err != nil { + continue + } + for _, spec := range f.File.Imports { + path, _ := strconv.Unquote(spec.Path.Value) + seen[path] = true + } + } + return seen +} diff --git a/internal/lsp/cache/session.go b/internal/lsp/cache/session.go index 9c1505850c2..2374a528757 100644 --- a/internal/lsp/cache/session.go +++ b/internal/lsp/cache/session.go @@ -232,7 +232,7 @@ func (s *Session) createView(ctx context.Context, name string, folder span.URI, cancel: cancel, initializeOnce: &sync.Once{}, store: &s.cache.store, - packages: newPackagesMap(), + packages: persistent.NewMap(packageKeyLessInterface), meta: &metadataGraph{}, files: newFilesMap(), isActivePackageCache: newIsActivePackageCacheMap(), diff --git a/internal/lsp/cache/snapshot.go b/internal/lsp/cache/snapshot.go index a9dd1dfb935..1ba945cf0ed 100644 --- a/internal/lsp/cache/snapshot.go +++ b/internal/lsp/cache/snapshot.go @@ -85,7 +85,7 @@ type snapshot struct { files filesMap // parsedGoFiles maps a parseKey to the handle of the future result of parsing it. - parsedGoFiles *persistent.Map // from parseKey to *memoize.Handle + parsedGoFiles *persistent.Map // from parseKey to *memoize.Handle[parseGoResult] // parseKeysByURI records the set of keys of parsedGoFiles that // need to be invalidated for each URI. @@ -95,7 +95,7 @@ type snapshot struct { // symbolizeHandles maps each file URI to a handle for the future // result of computing the symbols declared in that file. 
- symbolizeHandles *persistent.Map // from span.URI to *memoize.Handle + symbolizeHandles *persistent.Map // from span.URI to *memoize.Handle[symbolizeResult] // packages maps a packageKey to a *packageHandle. // It may be invalidated when a file's content changes. @@ -104,7 +104,7 @@ type snapshot struct { // - packages.Get(id).m.Metadata == meta.metadata[id].Metadata for all ids // - if a package is in packages, then all of its dependencies should also // be in packages, unless there is a missing import - packages packagesMap + packages *persistent.Map // from packageKey to *memoize.Handle[*packageHandle] // isActivePackageCache maps package ID to the cached value if it is active or not. // It may be invalidated when metadata changes or a new file is opened or closed. @@ -123,17 +123,17 @@ type snapshot struct { // parseModHandles keeps track of any parseModHandles for the snapshot. // The handles need not refer to only the view's go.mod file. - parseModHandles *persistent.Map // from span.URI to *memoize.Handle + parseModHandles *persistent.Map // from span.URI to *memoize.Handle[parseModResult] // parseWorkHandles keeps track of any parseWorkHandles for the snapshot. // The handles need not refer to only the view's go.work file. - parseWorkHandles *persistent.Map // from span.URI to *memoize.Handle + parseWorkHandles *persistent.Map // from span.URI to *memoize.Handle[parseWorkResult] // Preserve go.mod-related handles to avoid garbage-collecting the results // of various calls to the go command. The handles need not refer to only // the view's go.mod file. - modTidyHandles *persistent.Map // from span.URI to *memoize.Handle - modWhyHandles *persistent.Map // from span.URI to *memoize.Handle + modTidyHandles *persistent.Map // from span.URI to *memoize.Handle[modTidyResult] + modWhyHandles *persistent.Map // from span.URI to *memoize.Handle[modWhyResult] workspace *workspace // (not guarded by mu) @@ -175,11 +175,6 @@ func (s *snapshot) awaitHandle(ctx context.Context, h *memoize.Handle) (interfac return h.Get(ctx, s) } -type packageKey struct { - mode source.ParseMode - id PackageID -} - type actionKey struct { pkg packageKey analyzer *analysis.Analyzer @@ -603,17 +598,6 @@ func (s *snapshot) buildOverlay() map[string][]byte { return overlays } -func hashUnsavedOverlays(files filesMap) source.Hash { - var unsaved []string - files.Range(func(uri span.URI, fh source.VersionedFileHandle) { - if overlay, ok := fh.(*overlay); ok && !overlay.saved { - unsaved = append(unsaved, uri.Filename()) - } - }) - sort.Strings(unsaved) - return source.Hashf("%s", unsaved) -} - func (s *snapshot) PackagesForFile(ctx context.Context, uri span.URI, mode source.TypecheckMode, includeTestVariants bool) ([]source.Package, error) { ctx = event.Label(ctx, tag.URI.Of(uri)) @@ -791,6 +775,8 @@ func (s *snapshot) getImportedBy(id PackageID) []PackageID { // for the given package key, if it exists. // // An error is returned if the metadata used to build ph is no longer relevant. +// +// TODO(adonovan): inline sole use in buildPackageHandle. func (s *snapshot) addPackageHandle(ph *packageHandle, release func()) (*packageHandle, error) { s.mu.Lock() defer s.mu.Unlock() @@ -801,14 +787,15 @@ func (s *snapshot) addPackageHandle(ph *packageHandle, release func()) (*package // If the package handle has already been cached, // return the cached handle instead of overriding it. 
- if result, ok := s.packages.Get(ph.packageKey()); ok { + if v, ok := s.packages.Get(ph.packageKey()); ok { + result := v.(*packageHandle) release() if result.m.Metadata != ph.m.Metadata { return nil, bug.Errorf("existing package handle does not match for %s", ph.m.ID) } return result, nil } - s.packages.Set(ph.packageKey(), ph, release) + s.packages.Set(ph.packageKey(), ph, func(_, _ interface{}) { release() }) return ph, nil } @@ -1179,8 +1166,8 @@ func (s *snapshot) CachedImportPaths(ctx context.Context) (map[string]source.Pac defer s.mu.Unlock() results := map[string]source.Package{} - s.packages.Range(func(key packageKey, ph *packageHandle) { - cachedPkg, err := ph.cached() + s.packages.Range(func(_, v interface{}) { + cachedPkg, err := v.(*packageHandle).cached() if err != nil { return } @@ -1215,6 +1202,7 @@ func moduleForURI(modFiles map[span.URI]struct{}, uri span.URI) span.URI { return match } +// TODO(adonovan): inline sole use in buildPackageHandle. func (s *snapshot) getPackage(id PackageID, mode source.ParseMode) *packageHandle { s.mu.Lock() defer s.mu.Unlock() @@ -1223,8 +1211,11 @@ func (s *snapshot) getPackage(id PackageID, mode source.ParseMode) *packageHandl id: id, mode: mode, } - ph, _ := s.packages.Get(key) - return ph + v, ok := s.packages.Get(key) + if !ok { + return nil + } + return v.(*packageHandle) } func (s *snapshot) getActionHandle(id PackageID, m source.ParseMode, a *analysis.Analyzer) *actionHandle { diff --git a/internal/lsp/cache/view.go b/internal/lsp/cache/view.go index 0991797b8e4..15a2f90da57 100644 --- a/internal/lsp/cache/view.go +++ b/internal/lsp/cache/view.go @@ -76,7 +76,7 @@ type View struct { // snapshots have been destroyed via the destroy method, and snapshotWG may // be waited upon to let these destroy operations complete. snapshotMu sync.Mutex - snapshot *snapshot // latest snapshot + snapshot *snapshot // latest snapshot; nil after shutdown has been called releaseSnapshot func() // called when snapshot is no longer needed snapshotWG sync.WaitGroup // refcount for pending destroy operations diff --git a/internal/memoize/memoize.go b/internal/memoize/memoize.go index 6d62ebb0d96..8c921c7e169 100644 --- a/internal/memoize/memoize.go +++ b/internal/memoize/memoize.go @@ -22,10 +22,12 @@ import ( "golang.org/x/tools/internal/xcontext" ) +// TODO(adonovan): rename Handle to Promise, and present it before Store. + // Store binds keys to functions, returning handles that can be used to access -// the functions results. +// the function's result. type Store struct { - handlesMu sync.Mutex // lock ordering: Store.handlesMu before Handle.mu + handlesMu sync.Mutex handles map[interface{}]*Handle } @@ -71,8 +73,9 @@ const ( // A Handle represents the future result of a call to a function. type Handle struct { - key interface{} - mu sync.Mutex // lock ordering: Store.handlesMu before Handle.mu + debug string // for observability + + mu sync.Mutex // lock ordering: Store.handlesMu before Handle.mu // A Handle starts out IDLE, waiting for something to demand // its evaluation. It then transitions into RUNNING state. 
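For readers less familiar with the promise/future pattern that memoize provides, the following standalone sketch shows the essential behaviour these CLs rely on: the wrapped function runs at most once, and every caller shares its result. It is illustrative only and is not the memoize implementation, which additionally handles reference counting, context cancellation, and an optional shared Store.

package main

import (
	"fmt"
	"sync"
)

// promise is a toy stand-in for a memoize handle: fn is evaluated at
// most once, and all callers of get observe the same cached value.
type promise struct {
	once  sync.Once
	fn    func() interface{}
	value interface{}
}

func (p *promise) get() interface{} {
	p.once.Do(func() { p.value = p.fn() })
	return p.value
}

func main() {
	p := &promise{fn: func() interface{} {
		fmt.Println("computed once")
		return 42
	}}
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println(p.get()) // every goroutine prints 42
		}()
	}
	wg.Wait()
}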
@@ -119,9 +122,9 @@ func (store *Store) Handle(key interface{}, function Function) (*Handle, func()) if !ok { // new handle h = &Handle{ - key: key, function: function, refcount: 1, + debug: reflect.TypeOf(key).String(), } if store.handles == nil { @@ -137,7 +140,7 @@ func (store *Store) Handle(key interface{}, function Function) (*Handle, func()) release := func() { if atomic.AddInt32(&h.refcount, -1) == 0 { store.handlesMu.Lock() - delete(store.handles, h.key) + delete(store.handles, key) store.handlesMu.Unlock() } } @@ -170,6 +173,18 @@ func (s *Store) DebugOnlyIterate(f func(k, v interface{})) { } } +// NewHandle returns a handle for the future result of calling the +// specified function. +// +// The debug string is used to classify handles in logs and metrics. +// It should be drawn from a small set. +func NewHandle(debug string, function Function) *Handle { + return &Handle{ + debug: debug, + function: function, + } +} + // Cached returns the value associated with a handle. // // It will never cause the value to be generated. @@ -220,7 +235,7 @@ func (h *Handle) run(ctx context.Context, arg interface{}) (interface{}, error) } go func() { - trace.WithRegion(childCtx, fmt.Sprintf("Handle.run %T", h.key), func() { + trace.WithRegion(childCtx, fmt.Sprintf("Handle.run %s", h.debug), func() { defer release() // Just in case the function does something expensive without checking // the context, double-check we're still alive. From b2eae762671e8ccd6d473aca4239e46ff3a0f108 Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Wed, 13 Jul 2022 15:05:28 -0400 Subject: [PATCH 101/136] internal/lsp/cache: simplify modwhy cache As with mod tidy in CL 417116, we can rely on invalidation rather than cache keys to avoid reusing stale results, and there's no need to save these handles in the global store. Change-Id: I3763c01fa21c6114248c1d541e3c168fc6a128c9 Reviewed-on: https://go-review.googlesource.com/c/tools/+/417416 Reviewed-by: Robert Findley Auto-Submit: Alan Donovan gopls-CI: kokoro Run-TryBot: Alan Donovan TryBot-Result: Gopher Robot --- internal/lsp/cache/check.go | 9 --------- internal/lsp/cache/mod.go | 23 ++--------------------- 2 files changed, 2 insertions(+), 30 deletions(-) diff --git a/internal/lsp/cache/check.go b/internal/lsp/cache/check.go index a830cc7f59d..366a7afe133 100644 --- a/internal/lsp/cache/check.go +++ b/internal/lsp/cache/check.go @@ -249,15 +249,6 @@ func computePackageKey(id PackageID, files []source.FileHandle, m *KnownMetadata return packageHandleKey(source.HashOf(b.Bytes())) } -// hashEnv returns a hash of the snapshot's configuration. -func hashEnv(s *snapshot) source.Hash { - s.view.optionsMu.Lock() - env := s.view.options.EnvSlice() - s.view.optionsMu.Unlock() - - return source.Hashf("%s", env) -} - // hashConfig returns the hash for the *packages.Config. func hashConfig(config *packages.Config) source.Hash { // TODO(adonovan): opt: don't materialize the bytes; hash them directly. diff --git a/internal/lsp/cache/mod.go b/internal/lsp/cache/mod.go index 8e6017648cc..f9d148b7379 100644 --- a/internal/lsp/cache/mod.go +++ b/internal/lsp/cache/mod.go @@ -223,33 +223,14 @@ func (s *snapshot) ModWhy(ctx context.Context, fh source.FileHandle) (map[string // cache miss? if !hit { - // TODO(adonovan): use a simpler cache of promises that - // is shared across snapshots. See comment at modTidyKey. - // We can then delete hashEnv too. - type modWhyKey struct { - // TODO(rfindley): is sessionID used to identify overlays because modWhy - // looks at overlay state? 
In that case, I am not sure that this key - // is actually correct. The key should probably just be URI, and - // invalidated in clone when any import changes. - sessionID string - env source.Hash - view string - mod source.FileIdentity - } - key := modWhyKey{ - sessionID: s.view.session.id, - env: hashEnv(s), - mod: fh.FileIdentity(), - view: s.view.rootURI.Filename(), - } - handle, release := s.store.Handle(key, func(ctx context.Context, arg interface{}) interface{} { + handle := memoize.NewHandle("modWhy", func(ctx context.Context, arg interface{}) interface{} { why, err := modWhyImpl(ctx, arg.(*snapshot), fh) return modWhyResult{why, err} }) entry = handle s.mu.Lock() - s.modWhyHandles.Set(uri, entry, func(_, _ interface{}) { release() }) + s.modWhyHandles.Set(uri, entry, nil) s.mu.Unlock() } From 85173cc4bdf8d8d786177e25534065c10b85c56c Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Wed, 13 Jul 2022 13:46:53 -0400 Subject: [PATCH 102/136] internal/lsp/cache: follow usual structure for packages, analysis maps All Get/Set operations on the maps now happen within a single function (buildPackageKey, actionHandle). No behavior change. Change-Id: I347dfda578c28657a28538e228ecfb6f0871b94b Reviewed-on: https://go-review.googlesource.com/c/tools/+/417415 Run-TryBot: Alan Donovan TryBot-Result: Gopher Robot gopls-CI: kokoro Reviewed-by: Robert Findley Auto-Submit: Alan Donovan --- internal/lsp/cache/analysis.go | 45 ++++++++++++----- internal/lsp/cache/cache.go | 2 +- internal/lsp/cache/check.go | 78 ++++++++++++++++++----------- internal/lsp/cache/snapshot.go | 91 ---------------------------------- 4 files changed, 83 insertions(+), 133 deletions(-) diff --git a/internal/lsp/cache/analysis.go b/internal/lsp/cache/analysis.go index a5e34b406da..b277324a0ae 100644 --- a/internal/lsp/cache/analysis.go +++ b/internal/lsp/cache/analysis.go @@ -58,6 +58,11 @@ func (s *snapshot) Analyze(ctx context.Context, id string, analyzers []*source.A return results, nil } +type actionKey struct { + pkg packageKey + analyzer *analysis.Analyzer +} + type actionHandleKey source.Hash // An action represents one unit of analysis work: the application of @@ -90,6 +95,20 @@ type packageFactKey struct { } func (s *snapshot) actionHandle(ctx context.Context, id PackageID, a *analysis.Analyzer) (*actionHandle, error) { + const mode = source.ParseFull + key := actionKey{ + pkg: packageKey{id: id, mode: mode}, + analyzer: a, + } + + s.mu.Lock() + entry, hit := s.actions.Get(key) + s.mu.Unlock() + + if hit { + return entry.(*actionHandle), nil + } + // TODO(adonovan): opt: this block of code sequentially loads a package // (and all its dependencies), then sequentially creates action handles // for the direct dependencies (whose packages have by then been loaded @@ -100,15 +119,6 @@ func (s *snapshot) actionHandle(ctx context.Context, id PackageID, a *analysis.A // use of concurrency would lead to an exponential amount of duplicated // work. We should instead use an atomically updated future cache // and a parallel graph traversal. - - // TODO(adonovan): in the code below, follow the structure of - // the other handle-map accessors. 
- - const mode = source.ParseFull - if act := s.getActionHandle(id, mode, a); act != nil { - return act, nil - } - ph, err := s.buildPackageHandle(ctx, id, mode) if err != nil { return nil, err @@ -157,13 +167,24 @@ func (s *snapshot) actionHandle(ctx context.Context, id PackageID, a *analysis.A return runAnalysis(ctx, snapshot, a, pkg, results) }) - act := &actionHandle{ + ah := &actionHandle{ analyzer: a, pkg: pkg, handle: handle, } - act = s.addActionHandle(act, release) - return act, nil + + s.mu.Lock() + defer s.mu.Unlock() + + // Check cache again in case another thread got there first. + if result, ok := s.actions.Get(key); ok { + release() + return result.(*actionHandle), nil + } + + s.actions.Set(key, ah, func(_, _ interface{}) { release() }) + + return ah, nil } func (act *actionHandle) analyze(ctx context.Context, snapshot *snapshot) ([]*source.Diagnostic, interface{}, error) { diff --git a/internal/lsp/cache/cache.go b/internal/lsp/cache/cache.go index 8ac8b851a25..a59c8908d5a 100644 --- a/internal/lsp/cache/cache.go +++ b/internal/lsp/cache/cache.go @@ -199,7 +199,7 @@ func (c *Cache) PackageStats(withNames bool) template.HTML { c.store.DebugOnlyIterate(func(k, v interface{}) { switch k.(type) { case packageHandleKey: - v := v.(*packageData) + v := v.(typeCheckResult) if v.pkg == nil { break } diff --git a/internal/lsp/cache/check.go b/internal/lsp/cache/check.go index 366a7afe133..e51f86e5d9e 100644 --- a/internal/lsp/cache/check.go +++ b/internal/lsp/cache/check.go @@ -22,6 +22,7 @@ import ( "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/go/packages" "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/lsp/bug" "golang.org/x/tools/internal/lsp/debug/tag" "golang.org/x/tools/internal/lsp/protocol" "golang.org/x/tools/internal/lsp/source" @@ -43,10 +44,7 @@ type packageHandleKey source.Hash // A packageHandle is a handle to the future result of type-checking a package. // The resulting package is obtained from the check() method. type packageHandle struct { - handle *memoize.Handle - - // mode is the mode the files will be parsed in. - mode source.ParseMode + handle *memoize.Handle // [typeCheckResult] // m is the metadata associated with the package. m *KnownMetadata @@ -61,15 +59,9 @@ type packageHandle struct { key packageHandleKey } -func (ph *packageHandle) packageKey() packageKey { - return packageKey{ - id: ph.m.ID, - mode: ph.mode, - } -} - -// packageData contains the data produced by type-checking a package. -type packageData struct { +// typeCheckResult contains the result of a call to +// typeCheck, which type-checks a package. +type typeCheckResult struct { pkg *pkg err error } @@ -80,15 +72,21 @@ type packageData struct { // attempt to reload missing or invalid metadata. The caller must reload // metadata if needed. func (s *snapshot) buildPackageHandle(ctx context.Context, id PackageID, mode source.ParseMode) (*packageHandle, error) { - if ph := s.getPackage(id, mode); ph != nil { - return ph, nil - } + packageKey := packageKey{id: id, mode: mode} + + s.mu.Lock() + entry, hit := s.packages.Get(packageKey) + m := s.meta.metadata[id] + s.mu.Unlock() - m := s.getMetadata(id) if m == nil { return nil, fmt.Errorf("no metadata for %s", id) } + if hit { + return entry.(*packageHandle), nil + } + // Begin computing the key by getting the depKeys for all dependencies. // This requires reading the transitive closure of dependencies' source files. 
// @@ -140,10 +138,10 @@ func (s *snapshot) buildPackageHandle(ctx context.Context, id PackageID, mode so // All the file reading has now been done. // Create a handle for the result of type checking. experimentalKey := s.View().Options().ExperimentalPackageCacheKey - key := computePackageKey(m.ID, compiledGoFiles, m, depKeys, mode, experimentalKey) + phKey := computePackageKey(m.ID, compiledGoFiles, m, depKeys, mode, experimentalKey) // TODO(adonovan): extract lambda into a standalone function to // avoid implicit lexical dependencies. - handle, release := s.store.Handle(key, func(ctx context.Context, arg interface{}) interface{} { + handle, release := s.store.Handle(phKey, func(ctx context.Context, arg interface{}) interface{} { snapshot := arg.(*snapshot) // Start type checking of direct dependencies, @@ -167,21 +165,43 @@ func (s *snapshot) buildPackageHandle(ctx context.Context, id PackageID, mode so defer wg.Wait() pkg, err := typeCheck(ctx, snapshot, goFiles, compiledGoFiles, m.Metadata, mode, deps) - return &packageData{pkg, err} + return typeCheckResult{pkg, err} }) ph := &packageHandle{ handle: handle, - mode: mode, m: m, - key: key, + key: phKey, } - // Cache the handle in the snapshot. If a package handle has already - // been cached, addPackage will return the cached value. This is fine, - // since the original package handle above will have no references and be - // garbage collected. - return s.addPackageHandle(ph, release) + s.mu.Lock() + defer s.mu.Unlock() + + // Check that the metadata has not changed + // (which should invalidate this handle). + // + // (In future, handles should form a graph with edges from a + // packageHandle to the handles for parsing its files and the + // handles for type-checking its immediate deps, at which + // point there will be no need to even access s.meta.) + if s.meta.metadata[ph.m.ID].Metadata != ph.m.Metadata { + return nil, fmt.Errorf("stale metadata for %s", ph.m.ID) + } + + // Check cache again in case another thread got there first. + if prev, ok := s.packages.Get(packageKey); ok { + prevPH := prev.(*packageHandle) + release() + if prevPH.m.Metadata != ph.m.Metadata { + return nil, bug.Errorf("existing package handle does not match for %s", ph.m.ID) + } + return prevPH, nil + } + + // Update the map. 
+ s.packages.Set(packageKey, ph, func(_, _ interface{}) { release() }) + + return ph, nil } // readGoFiles reads the content of Metadata.GoFiles and @@ -273,7 +293,7 @@ func (ph *packageHandle) check(ctx context.Context, s *snapshot) (*pkg, error) { if err != nil { return nil, err } - data := v.(*packageData) + data := v.(typeCheckResult) return data.pkg, data.err } @@ -290,7 +310,7 @@ func (ph *packageHandle) cached() (*pkg, error) { if v == nil { return nil, fmt.Errorf("no cached type information for %s", ph.m.PkgPath) } - data := v.(*packageData) + data := v.(typeCheckResult) return data.pkg, data.err } diff --git a/internal/lsp/cache/snapshot.go b/internal/lsp/cache/snapshot.go index 1ba945cf0ed..9f26b1e59cc 100644 --- a/internal/lsp/cache/snapshot.go +++ b/internal/lsp/cache/snapshot.go @@ -30,7 +30,6 @@ import ( "golang.org/x/mod/module" "golang.org/x/mod/semver" "golang.org/x/sync/errgroup" - "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/packages" "golang.org/x/tools/internal/event" "golang.org/x/tools/internal/gocommand" @@ -175,11 +174,6 @@ func (s *snapshot) awaitHandle(ctx context.Context, h *memoize.Handle) (interfac return h.Get(ctx, s) } -type actionKey struct { - pkg packageKey - analyzer *analysis.Analyzer -} - // destroy waits for all leases on the snapshot to expire then releases // any resources (reference counts and files) associated with it. // Snapshots being destroyed can be awaited using v.destroyWG. @@ -771,34 +765,6 @@ func (s *snapshot) getImportedBy(id PackageID) []PackageID { return s.meta.importedBy[id] } -// addPackageHandle stores ph in the snapshot, or returns a pre-existing handle -// for the given package key, if it exists. -// -// An error is returned if the metadata used to build ph is no longer relevant. -// -// TODO(adonovan): inline sole use in buildPackageHandle. -func (s *snapshot) addPackageHandle(ph *packageHandle, release func()) (*packageHandle, error) { - s.mu.Lock() - defer s.mu.Unlock() - - if s.meta.metadata[ph.m.ID].Metadata != ph.m.Metadata { - return nil, fmt.Errorf("stale metadata for %s", ph.m.ID) - } - - // If the package handle has already been cached, - // return the cached handle instead of overriding it. - if v, ok := s.packages.Get(ph.packageKey()); ok { - result := v.(*packageHandle) - release() - if result.m.Metadata != ph.m.Metadata { - return nil, bug.Errorf("existing package handle does not match for %s", ph.m.ID) - } - return result, nil - } - s.packages.Set(ph.packageKey(), ph, func(_, _ interface{}) { release() }) - return ph, nil -} - func (s *snapshot) workspacePackageIDs() (ids []PackageID) { s.mu.Lock() defer s.mu.Unlock() @@ -1202,63 +1168,6 @@ func moduleForURI(modFiles map[span.URI]struct{}, uri span.URI) span.URI { return match } -// TODO(adonovan): inline sole use in buildPackageHandle. 
-func (s *snapshot) getPackage(id PackageID, mode source.ParseMode) *packageHandle { - s.mu.Lock() - defer s.mu.Unlock() - - key := packageKey{ - id: id, - mode: mode, - } - v, ok := s.packages.Get(key) - if !ok { - return nil - } - return v.(*packageHandle) -} - -func (s *snapshot) getActionHandle(id PackageID, m source.ParseMode, a *analysis.Analyzer) *actionHandle { - key := actionKey{ - pkg: packageKey{ - id: id, - mode: m, - }, - analyzer: a, - } - - s.mu.Lock() - defer s.mu.Unlock() - - ah, ok := s.actions.Get(key) - if !ok { - return nil - } - return ah.(*actionHandle) -} - -func (s *snapshot) addActionHandle(ah *actionHandle, release func()) *actionHandle { - key := actionKey{ - analyzer: ah.analyzer, - pkg: packageKey{ - id: ah.pkg.m.ID, - mode: ah.pkg.mode, - }, - } - - s.mu.Lock() - defer s.mu.Unlock() - - // If another thread since cached a different handle, - // return it instead of overriding it. - if result, ok := s.actions.Get(key); ok { - release() - return result.(*actionHandle) - } - s.actions.Set(key, ah, func(_, _ interface{}) { release() }) - return ah -} - func (s *snapshot) getIDsForURI(uri span.URI) []PackageID { s.mu.Lock() defer s.mu.Unlock() From 9b6c01892a361929559f87c2ba1b87825744596c Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Fri, 1 Jul 2022 13:31:27 -0400 Subject: [PATCH 103/136] internal/lsp/cache: don't trim unexported struct fields The trimming optimization deletes parts of the syntax tree that don't affect the type checking of package-level declarations. It used to remove unexported struct fields, but this had observable consequences: it would affect the offset of later fields, and the size and aligment of structs, causing the 'fieldalignment' analyzer to report incorrect findings. Also, it required a complex workaround in the UI element for hovering over a type to account for the missing parts. This change restores unexported fields. The logic of recordFieldsUses has been inlined and specialized for each case (params+results, struct fields, interface methods) as they are more different than alike. BenchmarkMemStats on k8s shows +4% HeapAlloc: a lot, but a small part of the 32% saving of the trimming optimization as a whole. Also: - trimAST: delete func bodies without visiting them. - minor clarifications. 
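To see concretely why trimming unexported struct fields misled the 'fieldalignment' analyzer, consider a minimal standalone sketch (illustrative only, not part of this CL): once an unexported field is dropped, the sizes and offsets reported during type checking no longer match the struct the user actually wrote. On a 64-bit platform:

package main

import (
	"fmt"
	"unsafe"
)

// Full is the struct as the user wrote it.
type Full struct {
	A   bool
	pad int64 // unexported; formerly removed in ParseExported mode
	B   bool
}

// Trimmed is what type checking effectively saw after trimming.
type Trimmed struct {
	A bool
	B bool
}

func main() {
	var f Full
	var t Trimmed
	fmt.Println(unsafe.Sizeof(f), unsafe.Offsetof(f.B)) // 24 16
	fmt.Println(unsafe.Sizeof(t), unsafe.Offsetof(t.B)) // 2 1
	// Diagnostics computed from the trimmed layout therefore described
	// a struct layout that does not exist in the user's source.
}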
Updates golang/go#51016 Change-Id: Ifae15564a8fb86af3ea186af351a2a92eb9deb22 Reviewed-on: https://go-review.googlesource.com/c/tools/+/415503 gopls-CI: kokoro Run-TryBot: Alan Donovan Auto-Submit: Alan Donovan TryBot-Result: Gopher Robot Reviewed-by: Robert Findley --- gopls/internal/regtest/bench/bench_test.go | 2 +- internal/lsp/cache/check.go | 28 ++++--- internal/lsp/cache/parse.go | 96 ++++++++++++---------- internal/lsp/cache/parse_test.go | 2 +- 4 files changed, 71 insertions(+), 57 deletions(-) diff --git a/gopls/internal/regtest/bench/bench_test.go b/gopls/internal/regtest/bench/bench_test.go index dfe41f65b1d..7f0da83fb37 100644 --- a/gopls/internal/regtest/bench/bench_test.go +++ b/gopls/internal/regtest/bench/bench_test.go @@ -201,7 +201,7 @@ func TestBenchmarkDidChange(t *testing.T) { // // Kubernetes example: // -// $ go test -run=TestPrintMemStats -didchange_dir=$HOME/w/kubernetes +// $ go test -v -run=TestPrintMemStats -didchange_dir=$HOME/w/kubernetes // TotalAlloc: 5766 MB // HeapAlloc: 1984 MB // diff --git a/internal/lsp/cache/check.go b/internal/lsp/cache/check.go index e51f86e5d9e..79a6ff3eeb6 100644 --- a/internal/lsp/cache/check.go +++ b/internal/lsp/cache/check.go @@ -590,20 +590,24 @@ func parseCompiledGoFiles(ctx context.Context, compiledGoFiles []source.FileHand // errors, as they may be completely nonsensical. pkg.hasFixedFiles = pkg.hasFixedFiles || pgf.Fixed } - if mode != source.ParseExported { - return nil - } - if astFilter != nil { - var files []*ast.File - for _, cgf := range pkg.compiledGoFiles { - files = append(files, cgf.File) - } - astFilter.Filter(files) - } else { - for _, cgf := range pkg.compiledGoFiles { - trimAST(cgf.File) + + // Optionally remove parts that don't affect the exported API. + if mode == source.ParseExported { + if astFilter != nil { + // aggressive pruning based on reachability + var files []*ast.File + for _, cgf := range pkg.compiledGoFiles { + files = append(files, cgf.File) + } + astFilter.Filter(files) + } else { + // simple trimming of function bodies + for _, cgf := range pkg.compiledGoFiles { + trimAST(cgf.File) + } } } + return nil } diff --git a/internal/lsp/cache/parse.go b/internal/lsp/cache/parse.go index 11075195330..62aea2229b4 100644 --- a/internal/lsp/cache/parse.go +++ b/internal/lsp/cache/parse.go @@ -279,6 +279,8 @@ func (f *unexportedFilter) filterSpec(spec ast.Spec) bool { } switch typ := spec.Type.(type) { case *ast.StructType: + // In practice this no longer filters anything; + // see comment at StructType case in recordUses. f.filterFieldList(typ.Fields) case *ast.InterfaceType: f.filterFieldList(typ.Methods) @@ -334,9 +336,19 @@ func (f *unexportedFilter) recordUses(file *ast.File) { case *ast.TypeSpec: switch typ := spec.Type.(type) { case *ast.StructType: - f.recordFieldUses(false, typ.Fields) + // We used to trim unexported fields but this + // had observable consequences. For example, + // the 'fieldalignment' analyzer would compute + // incorrect diagnostics from the size and + // offsets, and the UI hover information for + // types was inaccurate. So now we keep them. + if typ.Fields != nil { + for _, field := range typ.Fields.List { + f.recordIdents(field.Type) + } + } case *ast.InterfaceType: - f.recordFieldUses(false, typ.Methods) + f.recordInterfaceMethodUses(typ.Methods) } } } @@ -385,37 +397,32 @@ func (f *unexportedFilter) recordIdents(x ast.Expr) { } // recordFuncType records the types mentioned by a function type. 
-func (f *unexportedFilter) recordFuncType(x *ast.FuncType) { - f.recordFieldUses(true, x.Params) - f.recordFieldUses(true, x.Results) -} - -// recordFieldUses records unexported identifiers used in fields, which may be -// struct members, interface members, or function parameter/results. -func (f *unexportedFilter) recordFieldUses(isParams bool, fields *ast.FieldList) { - if fields == nil { - return - } - for _, field := range fields.List { - if isParams { - // Parameter types of retained functions need to be retained. +func (f *unexportedFilter) recordFuncType(fn *ast.FuncType) { + // Parameter and result types of retained functions need to be retained. + if fn.Params != nil { + for _, field := range fn.Params.List { f.recordIdents(field.Type) - continue - } - if ft, ok := field.Type.(*ast.FuncType); ok { - // Function declarations in interfaces need all their types retained. - f.recordFuncType(ft) - continue } - if len(field.Names) == 0 { - // Embedded fields might contribute exported names. + } + if fn.Results != nil { + for _, field := range fn.Results.List { f.recordIdents(field.Type) } - for _, name := range field.Names { - // We only need normal fields if they're exported. - if ast.IsExported(name.Name) { - f.recordIdents(field.Type) - break + } +} + +// recordInterfaceMethodUses records unexported identifiers used in interface methods. +func (f *unexportedFilter) recordInterfaceMethodUses(methods *ast.FieldList) { + if methods != nil { + for _, method := range methods.List { + if len(method.Names) == 0 { + // I, pkg.I, I[T] -- embedded interface: + // may contribute exported names. + f.recordIdents(method.Type) + } else if ft, ok := method.Type.(*ast.FuncType); ok { + // f(T) -- ordinary interface method: + // needs all its types retained. + f.recordFuncType(ft) } } } @@ -442,32 +449,35 @@ func (f *unexportedFilter) ProcessErrors(errors []types.Error) (map[string]bool, } // trimAST clears any part of the AST not relevant to type checking -// expressions at pos. +// the package-level declarations. func trimAST(file *ast.File) { - ast.Inspect(file, func(n ast.Node) bool { - if n == nil { - return false + // Eliminate bodies of top-level functions, methods, inits. + for _, decl := range file.Decls { + if fn, ok := decl.(*ast.FuncDecl); ok { + fn.Body = nil } + } + + // Simplify remaining declarations. + ast.Inspect(file, func(n ast.Node) bool { switch n := n.(type) { - case *ast.FuncDecl: - n.Body = nil - case *ast.BlockStmt: - n.List = nil - case *ast.CaseClause: - n.Body = nil - case *ast.CommClause: - n.Body = nil + case *ast.FuncLit: + // Eliminate bodies of literal functions. + // func() { ... } => func() {} + n.Body.List = nil case *ast.CompositeLit: // types.Info.Types for long slice/array literals are particularly - // expensive. Try to clear them out. + // expensive. Try to clear them out: T{e, ..., e} => T{} at, ok := n.Type.(*ast.ArrayType) if !ok { - // Composite literal. No harm removing all its fields. + // Map or struct literal: no harm removing all its fields. n.Elts = nil break } + // Removing the elements from an ellipsis array changes its type. // Try to set the length explicitly so we can continue. 
+ // [...]T{e, ..., e} => [3]T[]{} if _, ok := at.Len.(*ast.Ellipsis); ok { length, ok := arrayLength(n) if !ok { diff --git a/internal/lsp/cache/parse_test.go b/internal/lsp/cache/parse_test.go index cb620f27432..e8db64530e6 100644 --- a/internal/lsp/cache/parse_test.go +++ b/internal/lsp/cache/parse_test.go @@ -149,7 +149,7 @@ type Exported struct { } var Var = Exported{foo:1} `, - kept: []string{"Exported", "Var"}, + kept: []string{"Exported", "Var", "x"}, }, { name: "drop_function_literals", From a7c53b59a64e59d60bbd08f4d0bd086aaa90b32e Mon Sep 17 00:00:00 2001 From: "Hana (Hyang-Ah) Kim" Date: Wed, 13 Jul 2022 17:10:18 -0400 Subject: [PATCH 104/136] internal/analysisinternal: move FindBestMatch to internal/lsp/fuzzy This is used by internal/lsp/analysis/fillreturns and internal/lsp/analysis/fillstruct. This doesn't need to be in this analysisinternal package. This removes the dependency path go/analysis/internal/checker -> internal/analysisinternal -> internal/lsp/fuzzy Change-Id: I5db674ca30eb06ae6ce7021397cf5530a695af4e Reviewed-on: https://go-review.googlesource.com/c/tools/+/417418 gopls-CI: kokoro TryBot-Result: Gopher Robot Reviewed-by: Robert Findley Run-TryBot: Hyang-Ah Hana Kim --- internal/analysisinternal/analysis.go | 29 ------------------- .../lsp/analysis/fillreturns/fillreturns.go | 3 +- .../lsp/analysis/fillstruct/fillstruct.go | 3 +- internal/lsp/fuzzy/matcher.go | 28 ++++++++++++++++++ 4 files changed, 32 insertions(+), 31 deletions(-) diff --git a/internal/analysisinternal/analysis.go b/internal/analysisinternal/analysis.go index 3f1e573342f..e32152ac223 100644 --- a/internal/analysisinternal/analysis.go +++ b/internal/analysisinternal/analysis.go @@ -12,8 +12,6 @@ import ( "go/token" "go/types" "strconv" - - "golang.org/x/tools/internal/lsp/fuzzy" ) // Flag to gate diagnostics for fuzz tests in 1.18. @@ -397,30 +395,3 @@ func equivalentTypes(want, got types.Type) bool { } return types.AssignableTo(want, got) } - -// FindBestMatch employs fuzzy matching to evaluate the similarity of each given identifier to the -// given pattern. We return the identifier whose name is most similar to the pattern. -func FindBestMatch(pattern string, idents []*ast.Ident) ast.Expr { - fuzz := fuzzy.NewMatcher(pattern) - var bestFuzz ast.Expr - highScore := float32(0) // minimum score is 0 (no match) - for _, ident := range idents { - // TODO: Improve scoring algorithm. - score := fuzz.Score(ident.Name) - if score > highScore { - highScore = score - bestFuzz = ident - } else if score == 0 { - // Order matters in the fuzzy matching algorithm. If we find no match - // when matching the target to the identifier, try matching the identifier - // to the target. - revFuzz := fuzzy.NewMatcher(ident.Name) - revScore := revFuzz.Score(pattern) - if revScore > highScore { - highScore = revScore - bestFuzz = ident - } - } - } - return bestFuzz -} diff --git a/internal/lsp/analysis/fillreturns/fillreturns.go b/internal/lsp/analysis/fillreturns/fillreturns.go index 72fe65d79ca..4a30934c63c 100644 --- a/internal/lsp/analysis/fillreturns/fillreturns.go +++ b/internal/lsp/analysis/fillreturns/fillreturns.go @@ -19,6 +19,7 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/lsp/fuzzy" "golang.org/x/tools/internal/typeparams" ) @@ -191,7 +192,7 @@ outer: // Find the identifier whose name is most similar to the return type. 
// If we do not find any identifier that matches the pattern, // generate a zero value. - value := analysisinternal.FindBestMatch(retTyp.String(), idents) + value := fuzzy.FindBestMatch(retTyp.String(), idents) if value == nil { value = analysisinternal.ZeroValue(file, pass.Pkg, retTyp) } diff --git a/internal/lsp/analysis/fillstruct/fillstruct.go b/internal/lsp/analysis/fillstruct/fillstruct.go index f160d4422ae..2103a55879e 100644 --- a/internal/lsp/analysis/fillstruct/fillstruct.go +++ b/internal/lsp/analysis/fillstruct/fillstruct.go @@ -21,6 +21,7 @@ import ( "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/lsp/fuzzy" "golang.org/x/tools/internal/span" "golang.org/x/tools/internal/typeparams" ) @@ -254,7 +255,7 @@ func SuggestedFix(fset *token.FileSet, rng span.Range, content []byte, file *ast // Find the identifier whose name is most similar to the name of the field's key. // If we do not find any identifier that matches the pattern, generate a new value. // NOTE: We currently match on the name of the field key rather than the field type. - value := analysisinternal.FindBestMatch(obj.Field(i).Name(), idents) + value := fuzzy.FindBestMatch(obj.Field(i).Name(), idents) if value == nil { value = populateValue(file, pkg, fieldTyp) } diff --git a/internal/lsp/fuzzy/matcher.go b/internal/lsp/fuzzy/matcher.go index 265cdcf1604..92e1001fd12 100644 --- a/internal/lsp/fuzzy/matcher.go +++ b/internal/lsp/fuzzy/matcher.go @@ -8,6 +8,7 @@ package fuzzy import ( "bytes" "fmt" + "go/ast" ) const ( @@ -405,3 +406,30 @@ func (m *Matcher) poorMatch() bool { } return false } + +// FindBestMatch employs fuzzy matching to evaluate the similarity of each given identifier to the +// given pattern. We return the identifier whose name is most similar to the pattern. +func FindBestMatch(pattern string, idents []*ast.Ident) ast.Expr { + fuzz := NewMatcher(pattern) + var bestFuzz ast.Expr + highScore := float32(0) // minimum score is 0 (no match) + for _, ident := range idents { + // TODO: Improve scoring algorithm. + score := fuzz.Score(ident.Name) + if score > highScore { + highScore = score + bestFuzz = ident + } else if score == 0 { + // Order matters in the fuzzy matching algorithm. If we find no match + // when matching the target to the identifier, try matching the identifier + // to the target. 
+ revFuzz := NewMatcher(ident.Name) + revScore := revFuzz.Score(pattern) + if revScore > highScore { + highScore = revScore + bestFuzz = ident + } + } + } + return bestFuzz +} From db8f89b397771c885c6218de3f383d800d72e62a Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Wed, 13 Jul 2022 16:34:32 -0400 Subject: [PATCH 105/136] internal/memoize: rename Handle to Promise Also: - add test of NewHandle - update package doc and other doc comments - factor Store.Handle with NewHandle - declare Handle before Store Change-Id: I4bcea2c9debf1e77f973ef7ea9dbe2fd7a373996 Reviewed-on: https://go-review.googlesource.com/c/tools/+/417417 Auto-Submit: Alan Donovan gopls-CI: kokoro Reviewed-by: Robert Findley Run-TryBot: Alan Donovan TryBot-Result: Gopher Robot --- internal/lsp/cache/analysis.go | 10 +- internal/lsp/cache/check.go | 14 +- internal/lsp/cache/mod.go | 14 +- internal/lsp/cache/mod_tidy.go | 4 +- internal/lsp/cache/parse.go | 6 +- internal/lsp/cache/snapshot.go | 18 +- internal/lsp/cache/symbols.go | 6 +- internal/memoize/memoize.go | 300 ++++++++++++++++--------------- internal/memoize/memoize_test.go | 65 ++++--- 9 files changed, 231 insertions(+), 206 deletions(-) diff --git a/internal/lsp/cache/analysis.go b/internal/lsp/cache/analysis.go index b277324a0ae..ee80bbcd529 100644 --- a/internal/lsp/cache/analysis.go +++ b/internal/lsp/cache/analysis.go @@ -70,7 +70,7 @@ type actionHandleKey source.Hash // package (as different analyzers are applied, either in sequence or // parallel), and across packages (as dependencies are analyzed). type actionHandle struct { - handle *memoize.Handle + promise *memoize.Promise analyzer *analysis.Analyzer pkg *pkg @@ -155,7 +155,7 @@ func (s *snapshot) actionHandle(ctx context.Context, id PackageID, a *analysis.A } } - handle, release := s.store.Handle(buildActionKey(a, ph), func(ctx context.Context, arg interface{}) interface{} { + promise, release := s.store.Promise(buildActionKey(a, ph), func(ctx context.Context, arg interface{}) interface{} { snapshot := arg.(*snapshot) // Analyze dependencies first. results, err := execAll(ctx, snapshot, deps) @@ -170,7 +170,7 @@ func (s *snapshot) actionHandle(ctx context.Context, id PackageID, a *analysis.A ah := &actionHandle{ analyzer: a, pkg: pkg, - handle: handle, + promise: promise, } s.mu.Lock() @@ -188,7 +188,7 @@ func (s *snapshot) actionHandle(ctx context.Context, id PackageID, a *analysis.A } func (act *actionHandle) analyze(ctx context.Context, snapshot *snapshot) ([]*source.Diagnostic, interface{}, error) { - d, err := snapshot.awaitHandle(ctx, act.handle) + d, err := snapshot.awaitPromise(ctx, act.promise) if err != nil { return nil, nil, err } @@ -218,7 +218,7 @@ func execAll(ctx context.Context, snapshot *snapshot, actions []*actionHandle) ( for _, act := range actions { act := act g.Go(func() error { - v, err := snapshot.awaitHandle(ctx, act.handle) + v, err := snapshot.awaitPromise(ctx, act.promise) if err != nil { return err } diff --git a/internal/lsp/cache/check.go b/internal/lsp/cache/check.go index 79a6ff3eeb6..abc17245726 100644 --- a/internal/lsp/cache/check.go +++ b/internal/lsp/cache/check.go @@ -44,7 +44,7 @@ type packageHandleKey source.Hash // A packageHandle is a handle to the future result of type-checking a package. // The resulting package is obtained from the check() method. type packageHandle struct { - handle *memoize.Handle // [typeCheckResult] + promise *memoize.Promise // [typeCheckResult] // m is the metadata associated with the package. 
m *KnownMetadata @@ -141,7 +141,7 @@ func (s *snapshot) buildPackageHandle(ctx context.Context, id PackageID, mode so phKey := computePackageKey(m.ID, compiledGoFiles, m, depKeys, mode, experimentalKey) // TODO(adonovan): extract lambda into a standalone function to // avoid implicit lexical dependencies. - handle, release := s.store.Handle(phKey, func(ctx context.Context, arg interface{}) interface{} { + promise, release := s.store.Promise(phKey, func(ctx context.Context, arg interface{}) interface{} { snapshot := arg.(*snapshot) // Start type checking of direct dependencies, @@ -169,9 +169,9 @@ func (s *snapshot) buildPackageHandle(ctx context.Context, id PackageID, mode so }) ph := &packageHandle{ - handle: handle, - m: m, - key: phKey, + promise: promise, + m: m, + key: phKey, } s.mu.Lock() @@ -289,7 +289,7 @@ func hashConfig(config *packages.Config) source.Hash { } func (ph *packageHandle) check(ctx context.Context, s *snapshot) (*pkg, error) { - v, err := s.awaitHandle(ctx, ph.handle) + v, err := s.awaitPromise(ctx, ph.promise) if err != nil { return nil, err } @@ -306,7 +306,7 @@ func (ph *packageHandle) ID() string { } func (ph *packageHandle) cached() (*pkg, error) { - v := ph.handle.Cached() + v := ph.promise.Cached() if v == nil { return nil, fmt.Errorf("no cached type information for %s", ph.m.PkgPath) } diff --git a/internal/lsp/cache/mod.go b/internal/lsp/cache/mod.go index f9d148b7379..57fa1e2d0aa 100644 --- a/internal/lsp/cache/mod.go +++ b/internal/lsp/cache/mod.go @@ -39,19 +39,19 @@ func (s *snapshot) ParseMod(ctx context.Context, fh source.FileHandle) (*source. // cache miss? if !hit { - handle, release := s.store.Handle(fh.FileIdentity(), func(ctx context.Context, _ interface{}) interface{} { + promise, release := s.store.Promise(fh.FileIdentity(), func(ctx context.Context, _ interface{}) interface{} { parsed, err := parseModImpl(ctx, fh) return parseModResult{parsed, err} }) - entry = handle + entry = promise s.mu.Lock() s.parseModHandles.Set(uri, entry, func(_, _ interface{}) { release() }) s.mu.Unlock() } // Await result. - v, err := s.awaitHandle(ctx, entry.(*memoize.Handle)) + v, err := s.awaitPromise(ctx, entry.(*memoize.Promise)) if err != nil { return nil, err } @@ -116,7 +116,7 @@ func (s *snapshot) ParseWork(ctx context.Context, fh source.FileHandle) (*source // cache miss? if !hit { - handle, release := s.store.Handle(fh.FileIdentity(), func(ctx context.Context, _ interface{}) interface{} { + handle, release := s.store.Promise(fh.FileIdentity(), func(ctx context.Context, _ interface{}) interface{} { parsed, err := parseWorkImpl(ctx, fh) return parseWorkResult{parsed, err} }) @@ -128,7 +128,7 @@ func (s *snapshot) ParseWork(ctx context.Context, fh source.FileHandle) (*source } // Await result. - v, err := s.awaitHandle(ctx, entry.(*memoize.Handle)) + v, err := s.awaitPromise(ctx, entry.(*memoize.Promise)) if err != nil { return nil, err } @@ -223,7 +223,7 @@ func (s *snapshot) ModWhy(ctx context.Context, fh source.FileHandle) (map[string // cache miss? if !hit { - handle := memoize.NewHandle("modWhy", func(ctx context.Context, arg interface{}) interface{} { + handle := memoize.NewPromise("modWhy", func(ctx context.Context, arg interface{}) interface{} { why, err := modWhyImpl(ctx, arg.(*snapshot), fh) return modWhyResult{why, err} }) @@ -235,7 +235,7 @@ func (s *snapshot) ModWhy(ctx context.Context, fh source.FileHandle) (map[string } // Await result. 
- v, err := s.awaitHandle(ctx, entry.(*memoize.Handle)) + v, err := s.awaitPromise(ctx, entry.(*memoize.Promise)) if err != nil { return nil, err } diff --git a/internal/lsp/cache/mod_tidy.go b/internal/lsp/cache/mod_tidy.go index a04bacf8ee2..361f526ddfb 100644 --- a/internal/lsp/cache/mod_tidy.go +++ b/internal/lsp/cache/mod_tidy.go @@ -69,7 +69,7 @@ func (s *snapshot) ModTidy(ctx context.Context, pm *source.ParsedModule) (*sourc return nil, err } - handle := memoize.NewHandle("modTidy", func(ctx context.Context, arg interface{}) interface{} { + handle := memoize.NewPromise("modTidy", func(ctx context.Context, arg interface{}) interface{} { tidied, err := modTidyImpl(ctx, arg.(*snapshot), uri.Filename(), pm) return modTidyResult{tidied, err} @@ -82,7 +82,7 @@ func (s *snapshot) ModTidy(ctx context.Context, pm *source.ParsedModule) (*sourc } // Await result. - v, err := s.awaitHandle(ctx, entry.(*memoize.Handle)) + v, err := s.awaitPromise(ctx, entry.(*memoize.Promise)) if err != nil { return nil, err } diff --git a/internal/lsp/cache/parse.go b/internal/lsp/cache/parse.go index 62aea2229b4..77e893a6681 100644 --- a/internal/lsp/cache/parse.go +++ b/internal/lsp/cache/parse.go @@ -58,7 +58,7 @@ func (s *snapshot) ParseGo(ctx context.Context, fh source.FileHandle, mode sourc // cache miss? if !hit { - handle, release := s.store.Handle(key, func(ctx context.Context, arg interface{}) interface{} { + handle, release := s.store.Promise(key, func(ctx context.Context, arg interface{}) interface{} { parsed, err := parseGoImpl(ctx, arg.(*snapshot).FileSet(), fh, mode) return parseGoResult{parsed, err} }) @@ -76,7 +76,7 @@ func (s *snapshot) ParseGo(ctx context.Context, fh source.FileHandle, mode sourc } // Await result. - v, err := s.awaitHandle(ctx, entry.(*memoize.Handle)) + v, err := s.awaitPromise(ctx, entry.(*memoize.Promise)) if err != nil { return nil, err } @@ -92,7 +92,7 @@ func (s *snapshot) peekParseGoLocked(fh source.FileHandle, mode source.ParseMode if !hit { return nil, nil // no-one has requested this file } - v := entry.(*memoize.Handle).Cached() + v := entry.(*memoize.Promise).Cached() if v == nil { return nil, nil // parsing is still in progress } diff --git a/internal/lsp/cache/snapshot.go b/internal/lsp/cache/snapshot.go index 9f26b1e59cc..9e52cda5aff 100644 --- a/internal/lsp/cache/snapshot.go +++ b/internal/lsp/cache/snapshot.go @@ -84,7 +84,7 @@ type snapshot struct { files filesMap // parsedGoFiles maps a parseKey to the handle of the future result of parsing it. - parsedGoFiles *persistent.Map // from parseKey to *memoize.Handle[parseGoResult] + parsedGoFiles *persistent.Map // from parseKey to *memoize.Promise[parseGoResult] // parseKeysByURI records the set of keys of parsedGoFiles that // need to be invalidated for each URI. @@ -94,7 +94,7 @@ type snapshot struct { // symbolizeHandles maps each file URI to a handle for the future // result of computing the symbols declared in that file. - symbolizeHandles *persistent.Map // from span.URI to *memoize.Handle[symbolizeResult] + symbolizeHandles *persistent.Map // from span.URI to *memoize.Promise[symbolizeResult] // packages maps a packageKey to a *packageHandle. // It may be invalidated when a file's content changes. 
@@ -103,7 +103,7 @@ type snapshot struct { // - packages.Get(id).m.Metadata == meta.metadata[id].Metadata for all ids // - if a package is in packages, then all of its dependencies should also // be in packages, unless there is a missing import - packages *persistent.Map // from packageKey to *memoize.Handle[*packageHandle] + packages *persistent.Map // from packageKey to *memoize.Promise[*packageHandle] // isActivePackageCache maps package ID to the cached value if it is active or not. // It may be invalidated when metadata changes or a new file is opened or closed. @@ -122,17 +122,17 @@ type snapshot struct { // parseModHandles keeps track of any parseModHandles for the snapshot. // The handles need not refer to only the view's go.mod file. - parseModHandles *persistent.Map // from span.URI to *memoize.Handle[parseModResult] + parseModHandles *persistent.Map // from span.URI to *memoize.Promise[parseModResult] // parseWorkHandles keeps track of any parseWorkHandles for the snapshot. // The handles need not refer to only the view's go.work file. - parseWorkHandles *persistent.Map // from span.URI to *memoize.Handle[parseWorkResult] + parseWorkHandles *persistent.Map // from span.URI to *memoize.Promise[parseWorkResult] // Preserve go.mod-related handles to avoid garbage-collecting the results // of various calls to the go command. The handles need not refer to only // the view's go.mod file. - modTidyHandles *persistent.Map // from span.URI to *memoize.Handle[modTidyResult] - modWhyHandles *persistent.Map // from span.URI to *memoize.Handle[modWhyResult] + modTidyHandles *persistent.Map // from span.URI to *memoize.Promise[modTidyResult] + modWhyHandles *persistent.Map // from span.URI to *memoize.Promise[modWhyResult] workspace *workspace // (not guarded by mu) @@ -170,8 +170,8 @@ func (s *snapshot) Acquire() func() { return s.refcount.Done } -func (s *snapshot) awaitHandle(ctx context.Context, h *memoize.Handle) (interface{}, error) { - return h.Get(ctx, s) +func (s *snapshot) awaitPromise(ctx context.Context, p *memoize.Promise) (interface{}, error) { + return p.Get(ctx, s) } // destroy waits for all leases on the snapshot to expire then releases diff --git a/internal/lsp/cache/symbols.go b/internal/lsp/cache/symbols.go index b562d5bbdd8..e98f554969c 100644 --- a/internal/lsp/cache/symbols.go +++ b/internal/lsp/cache/symbols.go @@ -35,12 +35,12 @@ func (s *snapshot) symbolize(ctx context.Context, fh source.FileHandle) ([]sourc if !hit { type symbolHandleKey source.Hash key := symbolHandleKey(fh.FileIdentity().Hash) - handle, release := s.store.Handle(key, func(_ context.Context, arg interface{}) interface{} { + promise, release := s.store.Promise(key, func(_ context.Context, arg interface{}) interface{} { symbols, err := symbolizeImpl(arg.(*snapshot), fh) return symbolizeResult{symbols, err} }) - entry = handle + entry = promise s.mu.Lock() s.symbolizeHandles.Set(uri, entry, func(_, _ interface{}) { release() }) @@ -48,7 +48,7 @@ func (s *snapshot) symbolize(ctx context.Context, fh source.FileHandle) ([]sourc } // Await result. - v, err := s.awaitHandle(ctx, entry.(*memoize.Handle)) + v, err := s.awaitPromise(ctx, entry.(*memoize.Promise)) if err != nil { return nil, err } diff --git a/internal/memoize/memoize.go b/internal/memoize/memoize.go index 8c921c7e169..aa4d58d2f26 100644 --- a/internal/memoize/memoize.go +++ b/internal/memoize/memoize.go @@ -2,13 +2,20 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// Package memoize supports memoizing the return values of functions with -// idempotent results that are expensive to compute. +// Package memoize defines a "promise" abstraction that enables +// memoization of the result of calling an expensive but idempotent +// function. // -// To use this package, create a Store, call its Handle method to -// acquire a handle to (aka a "promise" of) the future result of a -// function, and call Handle.Get to obtain the result. Get may block -// if the function has not finished (or started). +// Call p = NewPromise(f) to obtain a promise for the future result of +// calling f(), and call p.Get() to obtain that result. All calls to +// p.Get return the result of a single call of f(). +// Get blocks if the function has not finished (or started). +// +// A Store is a map of arbitrary keys to promises. Use Store.Promise +// to create a promise in the store. All calls to Handle(k) return the +// same promise as long as it is in the store. These promises are +// reference-counted and must be explicitly released. Once the last +// reference is released, the promise is removed from the store. package memoize import ( @@ -22,22 +29,13 @@ import ( "golang.org/x/tools/internal/xcontext" ) -// TODO(adonovan): rename Handle to Promise, and present it before Store. - -// Store binds keys to functions, returning handles that can be used to access -// the function's result. -type Store struct { - handlesMu sync.Mutex - handles map[interface{}]*Handle -} - // Function is the type of a function that can be memoized. // // If the arg is a RefCounted, its Acquire/Release operations are called. // // The argument must not materially affect the result of the function -// in ways that are not captured by the handle's key, since if -// Handle.Get is called twice concurrently, with the same (implicit) +// in ways that are not captured by the promise's key, since if +// Promise.Get is called twice concurrently, with the same (implicit) // key but different arguments, the Function is called only once but // its result must be suitable for both callers. // @@ -63,21 +61,13 @@ type RefCounted interface { Acquire() func() } -type state int - -const ( - stateIdle = iota // newly constructed, or last waiter was cancelled - stateRunning // start was called and not cancelled - stateCompleted // function call ran to completion -) - -// A Handle represents the future result of a call to a function. -type Handle struct { +// A Promise represents the future result of a call to a function. +type Promise struct { debug string // for observability - mu sync.Mutex // lock ordering: Store.handlesMu before Handle.mu + mu sync.Mutex - // A Handle starts out IDLE, waiting for something to demand + // A Promise starts out IDLE, waiting for something to demand // its evaluation. It then transitions into RUNNING state. // // While RUNNING, waiters tracks the number of Get calls @@ -105,128 +95,78 @@ type Handle struct { refcount int32 // accessed using atomic load/store } -// Handle returns a reference-counted handle for the future result of -// calling the specified function. Calls to Handle with the same key -// return the same handle, and all calls to Handle.Get on a given -// handle return the same result but the function is called at most once. +// NewPromise returns a promise for the future result of calling the +// specified function. // -// The caller must call the returned function to decrement the -// handle's reference count when it is no longer needed. 
-func (store *Store) Handle(key interface{}, function Function) (*Handle, func()) { +// The debug string is used to classify promises in logs and metrics. +// It should be drawn from a small set. +func NewPromise(debug string, function Function) *Promise { if function == nil { panic("nil function") } - - store.handlesMu.Lock() - h, ok := store.handles[key] - if !ok { - // new handle - h = &Handle{ - function: function, - refcount: 1, - debug: reflect.TypeOf(key).String(), - } - - if store.handles == nil { - store.handles = map[interface{}]*Handle{} - } - store.handles[key] = h - } else { - // existing handle - atomic.AddInt32(&h.refcount, 1) - } - store.handlesMu.Unlock() - - release := func() { - if atomic.AddInt32(&h.refcount, -1) == 0 { - store.handlesMu.Lock() - delete(store.handles, key) - store.handlesMu.Unlock() - } - } - return h, release -} - -// Stats returns the number of each type of value in the store. -func (s *Store) Stats() map[reflect.Type]int { - result := map[reflect.Type]int{} - - s.handlesMu.Lock() - defer s.handlesMu.Unlock() - - for k := range s.handles { - result[reflect.TypeOf(k)]++ - } - return result -} - -// DebugOnlyIterate iterates through all live cache entries and calls f on them. -// It should only be used for debugging purposes. -func (s *Store) DebugOnlyIterate(f func(k, v interface{})) { - s.handlesMu.Lock() - defer s.handlesMu.Unlock() - - for k, h := range s.handles { - if v := h.Cached(); v != nil { - f(k, v) - } - } -} - -// NewHandle returns a handle for the future result of calling the -// specified function. -// -// The debug string is used to classify handles in logs and metrics. -// It should be drawn from a small set. -func NewHandle(debug string, function Function) *Handle { - return &Handle{ + return &Promise{ debug: debug, function: function, } } -// Cached returns the value associated with a handle. +type state int + +const ( + stateIdle = iota // newly constructed, or last waiter was cancelled + stateRunning // start was called and not cancelled + stateCompleted // function call ran to completion +) + +// Cached returns the value associated with a promise. // // It will never cause the value to be generated. // It will return the cached value, if present. -func (h *Handle) Cached() interface{} { - h.mu.Lock() - defer h.mu.Unlock() - if h.state == stateCompleted { - return h.value +func (p *Promise) Cached() interface{} { + p.mu.Lock() + defer p.mu.Unlock() + if p.state == stateCompleted { + return p.value } return nil } -// Get returns the value associated with a handle. +// Get returns the value associated with a promise. +// +// All calls to Promise.Get on a given promise return the +// same result but the function is called (to completion) at most once. // // If the value is not yet ready, the underlying function will be invoked. +// // If ctx is cancelled, Get returns (nil, Canceled). -func (h *Handle) Get(ctx context.Context, arg interface{}) (interface{}, error) { +// If all concurrent calls to Get are cancelled, the context provided +// to the function is cancelled. A later call to Get may attempt to +// call the function again. 
+func (p *Promise) Get(ctx context.Context, arg interface{}) (interface{}, error) { if ctx.Err() != nil { return nil, ctx.Err() } - h.mu.Lock() - switch h.state { + p.mu.Lock() + switch p.state { case stateIdle: - return h.run(ctx, arg) + return p.run(ctx, arg) case stateRunning: - return h.wait(ctx) + return p.wait(ctx) case stateCompleted: - defer h.mu.Unlock() - return h.value, nil + defer p.mu.Unlock() + return p.value, nil default: panic("unknown state") } } -// run starts h.function and returns the result. h.mu must be locked. -func (h *Handle) run(ctx context.Context, arg interface{}) (interface{}, error) { +// run starts p.function and returns the result. p.mu must be locked. +func (p *Promise) run(ctx context.Context, arg interface{}) (interface{}, error) { childCtx, cancel := context.WithCancel(xcontext.Detach(ctx)) - h.cancel = cancel - h.state = stateRunning - h.done = make(chan struct{}) - function := h.function // Read under the lock + p.cancel = cancel + p.state = stateRunning + p.done = make(chan struct{}) + function := p.function // Read under the lock // Make sure that the argument isn't destroyed while we're running in it. release := func() {} @@ -235,7 +175,7 @@ func (h *Handle) run(ctx context.Context, arg interface{}) (interface{}, error) } go func() { - trace.WithRegion(childCtx, fmt.Sprintf("Handle.run %s", h.debug), func() { + trace.WithRegion(childCtx, fmt.Sprintf("Promise.run %s", p.debug), func() { defer release() // Just in case the function does something expensive without checking // the context, double-check we're still alive. @@ -247,51 +187,115 @@ func (h *Handle) run(ctx context.Context, arg interface{}) (interface{}, error) return } - h.mu.Lock() - defer h.mu.Unlock() - // It's theoretically possible that the handle has been cancelled out + p.mu.Lock() + defer p.mu.Unlock() + // It's theoretically possible that the promise has been cancelled out // of the run that started us, and then started running again since we // checked childCtx above. Even so, that should be harmless, since each // run should produce the same results. - if h.state != stateRunning { + if p.state != stateRunning { return } - h.value = v - h.function = nil // aid GC - h.state = stateCompleted - close(h.done) + p.value = v + p.function = nil // aid GC + p.state = stateCompleted + close(p.done) }) }() - return h.wait(ctx) + return p.wait(ctx) } -// wait waits for the value to be computed, or ctx to be cancelled. h.mu must be locked. -func (h *Handle) wait(ctx context.Context) (interface{}, error) { - h.waiters++ - done := h.done - h.mu.Unlock() +// wait waits for the value to be computed, or ctx to be cancelled. p.mu must be locked. +func (p *Promise) wait(ctx context.Context) (interface{}, error) { + p.waiters++ + done := p.done + p.mu.Unlock() select { case <-done: - h.mu.Lock() - defer h.mu.Unlock() - if h.state == stateCompleted { - return h.value, nil + p.mu.Lock() + defer p.mu.Unlock() + if p.state == stateCompleted { + return p.value, nil } return nil, nil case <-ctx.Done(): - h.mu.Lock() - defer h.mu.Unlock() - h.waiters-- - if h.waiters == 0 && h.state == stateRunning { - h.cancel() - close(h.done) - h.state = stateIdle - h.done = nil - h.cancel = nil + p.mu.Lock() + defer p.mu.Unlock() + p.waiters-- + if p.waiters == 0 && p.state == stateRunning { + p.cancel() + close(p.done) + p.state = stateIdle + p.done = nil + p.cancel = nil } return nil, ctx.Err() } } + +// A Store maps arbitrary keys to reference-counted promises. 
+type Store struct { + promisesMu sync.Mutex + promises map[interface{}]*Promise +} + +// Promise returns a reference-counted promise for the future result of +// calling the specified function. +// +// Calls to Promise with the same key return the same promise, +// incrementing its reference count. The caller must call the +// returned function to decrement the promise's reference count when +// it is no longer needed. Once the last reference has been released, +// the promise is removed from the store. +func (store *Store) Promise(key interface{}, function Function) (*Promise, func()) { + store.promisesMu.Lock() + p, ok := store.promises[key] + if !ok { + p = NewPromise(reflect.TypeOf(key).String(), function) + if store.promises == nil { + store.promises = map[interface{}]*Promise{} + } + store.promises[key] = p + } + atomic.AddInt32(&p.refcount, 1) + store.promisesMu.Unlock() + + release := func() { + if atomic.AddInt32(&p.refcount, -1) == 0 { + store.promisesMu.Lock() + delete(store.promises, key) + store.promisesMu.Unlock() + } + } + return p, release +} + +// Stats returns the number of each type of key in the store. +func (s *Store) Stats() map[reflect.Type]int { + result := map[reflect.Type]int{} + + s.promisesMu.Lock() + defer s.promisesMu.Unlock() + + for k := range s.promises { + result[reflect.TypeOf(k)]++ + } + return result +} + +// DebugOnlyIterate iterates through the store and, for each completed +// promise, calls f(k, v) for the map key k and function result v. It +// should only be used for debugging purposes. +func (s *Store) DebugOnlyIterate(f func(k, v interface{})) { + s.promisesMu.Lock() + defer s.promisesMu.Unlock() + + for k, p := range s.promises { + if v := p.Cached(); v != nil { + f(k, v) + } + } +} diff --git a/internal/memoize/memoize_test.go b/internal/memoize/memoize_test.go index bde02bf6136..3550f1eb144 100644 --- a/internal/memoize/memoize_test.go +++ b/internal/memoize/memoize_test.go @@ -18,7 +18,7 @@ func TestGet(t *testing.T) { evaled := 0 - h, release := store.Handle("key", func(context.Context, interface{}) interface{} { + h, release := store.Promise("key", func(context.Context, interface{}) interface{} { evaled++ return "res" }) @@ -30,7 +30,7 @@ func TestGet(t *testing.T) { } } -func expectGet(t *testing.T, h *memoize.Handle, wantV interface{}) { +func expectGet(t *testing.T, h *memoize.Promise, wantV interface{}) { t.Helper() gotV, gotErr := h.Get(context.Background(), nil) if gotV != wantV || gotErr != nil { @@ -38,29 +38,50 @@ func expectGet(t *testing.T, h *memoize.Handle, wantV interface{}) { } } -func TestHandleRefCounting(t *testing.T) { +func TestNewPromise(t *testing.T) { + calls := 0 + f := func(context.Context, interface{}) interface{} { + calls++ + return calls + } + + // All calls to Get on the same promise return the same result. + p1 := memoize.NewPromise("debug", f) + expectGet(t, p1, 1) + expectGet(t, p1, 1) + + // A new promise calls the function again. + p2 := memoize.NewPromise("debug", f) + expectGet(t, p2, 2) + expectGet(t, p2, 2) + + // The original promise is unchanged. 
+ expectGet(t, p1, 1) +} + +func TestStoredPromiseRefCounting(t *testing.T) { var store memoize.Store v1 := false v2 := false - h1, release1 := store.Handle("key1", func(context.Context, interface{}) interface{} { + p1, release1 := store.Promise("key1", func(context.Context, interface{}) interface{} { return &v1 }) - h2, release2 := store.Handle("key2", func(context.Context, interface{}) interface{} { + p2, release2 := store.Promise("key2", func(context.Context, interface{}) interface{} { return &v2 }) - expectGet(t, h1, &v1) - expectGet(t, h2, &v2) + expectGet(t, p1, &v1) + expectGet(t, p2, &v2) - expectGet(t, h1, &v1) - expectGet(t, h2, &v2) + expectGet(t, p1, &v1) + expectGet(t, p2, &v2) - h2Copy, release2Copy := store.Handle("key2", func(context.Context, interface{}) interface{} { + p2Copy, release2Copy := store.Promise("key2", func(context.Context, interface{}) interface{} { return &v1 }) - if h2 != h2Copy { - t.Error("NewHandle returned a new value while old is not destroyed yet") + if p2 != p2Copy { + t.Error("Promise returned a new value while old is not destroyed yet") } - expectGet(t, h2Copy, &v2) + expectGet(t, p2Copy, &v2) release2() if got, want := v2, false; got != want { @@ -72,23 +93,23 @@ func TestHandleRefCounting(t *testing.T) { } release1() - h2Copy, release2Copy = store.Handle("key2", func(context.Context, interface{}) interface{} { + p2Copy, release2Copy = store.Promise("key2", func(context.Context, interface{}) interface{} { return &v2 }) - if h2 == h2Copy { - t.Error("NewHandle returned previously destroyed value") + if p2 == p2Copy { + t.Error("Promise returned previously destroyed value") } release2Copy() } -func TestHandleDestroyedWhileRunning(t *testing.T) { - // Test that calls to Handle.Get return even if the handle is destroyed while running. +func TestPromiseDestroyedWhileRunning(t *testing.T) { + // Test that calls to Promise.Get return even if the promise is destroyed while running. var store memoize.Store c := make(chan int) var v int - h, release := store.Handle("key", func(ctx context.Context, _ interface{}) interface{} { + h, release := store.Promise("key", func(ctx context.Context, _ interface{}) interface{} { <-c <-c if err := ctx.Err(); err != nil { @@ -109,9 +130,9 @@ func TestHandleDestroyedWhileRunning(t *testing.T) { wg.Done() }() - c <- 0 // send once to enter the handle function - release() // release before the handle function returns - c <- 0 // let the handle function proceed + c <- 0 // send once to enter the promise function + release() // release before the promise function returns + c <- 0 // let the promise function proceed wg.Wait() From 1a4e02fee4d3f223c0b460ad15d69e630b25c0ec Mon Sep 17 00:00:00 2001 From: Francesco Renzi Date: Wed, 23 Mar 2022 01:19:43 +0100 Subject: [PATCH 106/136] internal/lsp/analysis/unusedvariable: add analyzer This analyzer suggests fixes for unused variable errors. In declarations it will remove the whole statement if the offending variable is the only one declared in that statement, otherwise it will just delete the offending variable. In assignments it will remove the whole statement if the offending variable is the only one assigned in that statement, otherwise it will rename the offending variable to `_`. If the assignment RHS contains a statement that can cause a side effect (a function call or reading from a channel), the assignment will be removed but RHS will be preserved. 
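
As a rough illustration of those rules (this snippet is not part of the
change; the names `example`, `ch` and `g` are made up, and the code
deliberately fails to compile with "declared but not used" errors,
which is exactly the situation the analyzer fixes):

    func example(ch chan int, g func() (int, error)) {
        v := "s"      // only variable assigned, RHS has no side effect:
                      // the suggested fix deletes the whole statement
        n := <-ch     // RHS is a channel receive (side effect):
                      // the fix keeps `<-ch` and drops `n :=`
        f, err := g() // f is one of several variables assigned:
                      // the fix renames it, giving `_, err := g()`
        println(err)
    }
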
Fixes golang/go#48975 Change-Id: I3850f1b0340cd5ae63249931df3a5403d8617080 Reviewed-on: https://go-review.googlesource.com/c/tools/+/394934 Reviewed-by: Alan Donovan Reviewed-by: Robert Findley --- gopls/doc/analyzers.md | 9 + .../unusedvariable/testdata/src/assign/a.go | 74 +++++ .../testdata/src/assign/a.go.golden | 59 ++++ .../unusedvariable/testdata/src/decl/a.go | 30 ++ .../testdata/src/decl/a.go.golden | 24 ++ .../analysis/unusedvariable/unusedvariable.go | 300 ++++++++++++++++++ .../unusedvariable/unusedvariable_test.go | 24 ++ internal/lsp/source/api_json.go | 9 + internal/lsp/source/options.go | 8 + 9 files changed, 537 insertions(+) create mode 100644 internal/lsp/analysis/unusedvariable/testdata/src/assign/a.go create mode 100644 internal/lsp/analysis/unusedvariable/testdata/src/assign/a.go.golden create mode 100644 internal/lsp/analysis/unusedvariable/testdata/src/decl/a.go create mode 100644 internal/lsp/analysis/unusedvariable/testdata/src/decl/a.go.golden create mode 100644 internal/lsp/analysis/unusedvariable/unusedvariable.go create mode 100644 internal/lsp/analysis/unusedvariable/unusedvariable_test.go diff --git a/gopls/doc/analyzers.md b/gopls/doc/analyzers.md index fd65c3a2a9d..861be1ca62c 100644 --- a/gopls/doc/analyzers.md +++ b/gopls/doc/analyzers.md @@ -657,6 +657,15 @@ func <>(inferred parameters) { **Enabled by default.** +## **unusedvariable** + +check for unused variables + +The unusedvariable analyzer suggests fixes for unused variables errors. + + +**Disabled by default. Enable it by setting `"analyses": {"unusedvariable": true}`.** + ## **fillstruct** note incomplete struct initializations diff --git a/internal/lsp/analysis/unusedvariable/testdata/src/assign/a.go b/internal/lsp/analysis/unusedvariable/testdata/src/assign/a.go new file mode 100644 index 00000000000..eccfe14c3aa --- /dev/null +++ b/internal/lsp/analysis/unusedvariable/testdata/src/assign/a.go @@ -0,0 +1,74 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package a + +import ( + "fmt" + "os" +) + +type A struct { + b int +} + +func singleAssignment() { + v := "s" // want `v declared but not used` + + s := []int{ // want `s declared but not used` + 1, + 2, + } + + a := func(s string) bool { // want `a declared but not used` + return false + } + + if 1 == 1 { + s := "v" // want `s declared but not used` + } + + panic("I should survive") +} + +func noOtherStmtsInBlock() { + v := "s" // want `v declared but not used` +} + +func partOfMultiAssignment() { + f, err := os.Open("file") // want `f declared but not used` + panic(err) +} + +func sideEffects(cBool chan bool, cInt chan int) { + b := <-c // want `b declared but not used` + s := fmt.Sprint("") // want `s declared but not used` + a := A{ // want `a declared but not used` + b: func() int { + return 1 + }(), + } + c := A{<-cInt} // want `c declared but not used` + d := fInt() + <-cInt // want `d declared but not used` + e := fBool() && <-cBool // want `e declared but not used` + f := map[int]int{ // want `f declared but not used` + fInt(): <-cInt, + } + g := []int{<-cInt} // want `g declared but not used` + h := func(s string) {} // want `h declared but not used` + i := func(s string) {}() // want `i declared but not used` +} + +func commentAbove() { + // v is a variable + v := "s" // want `v declared but not used` +} + +func fBool() bool { + return true +} + +func fInt() int { + return 1 +} diff --git a/internal/lsp/analysis/unusedvariable/testdata/src/assign/a.go.golden b/internal/lsp/analysis/unusedvariable/testdata/src/assign/a.go.golden new file mode 100644 index 00000000000..8d6e561fa60 --- /dev/null +++ b/internal/lsp/analysis/unusedvariable/testdata/src/assign/a.go.golden @@ -0,0 +1,59 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package a + +import ( + "fmt" + "os" +) + +type A struct { + b int +} + +func singleAssignment() { + if 1 == 1 { + } + + panic("I should survive") +} + +func noOtherStmtsInBlock() { +} + +func partOfMultiAssignment() { + _, err := os.Open("file") // want `f declared but not used` + panic(err) +} + +func sideEffects(cBool chan bool, cInt chan int) { + <-c // want `b declared but not used` + fmt.Sprint("") // want `s declared but not used` + A{ // want `a declared but not used` + b: func() int { + return 1 + }(), + } + A{<-cInt} // want `c declared but not used` + fInt() + <-cInt // want `d declared but not used` + fBool() && <-cBool // want `e declared but not used` + map[int]int{ // want `f declared but not used` + fInt(): <-cInt, + } + []int{<-cInt} // want `g declared but not used` + func(s string) {}() // want `i declared but not used` +} + +func commentAbove() { + // v is a variable +} + +func fBool() bool { + return true +} + +func fInt() int { + return 1 +} diff --git a/internal/lsp/analysis/unusedvariable/testdata/src/decl/a.go b/internal/lsp/analysis/unusedvariable/testdata/src/decl/a.go new file mode 100644 index 00000000000..024e49db9c4 --- /dev/null +++ b/internal/lsp/analysis/unusedvariable/testdata/src/decl/a.go @@ -0,0 +1,30 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package decl + +func a() { + var b, c bool // want `b declared but not used` + panic(c) + + if 1 == 1 { + var s string // want `s declared but not used` + } +} + +func b() { + // b is a variable + var b bool // want `b declared but not used` +} + +func c() { + var ( + d string + + // some comment for c + c bool // want `c declared but not used` + ) + + panic(d) +} diff --git a/internal/lsp/analysis/unusedvariable/testdata/src/decl/a.go.golden b/internal/lsp/analysis/unusedvariable/testdata/src/decl/a.go.golden new file mode 100644 index 00000000000..a589a47af1f --- /dev/null +++ b/internal/lsp/analysis/unusedvariable/testdata/src/decl/a.go.golden @@ -0,0 +1,24 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package decl + +func a() { + var c bool // want `b declared but not used` + panic(c) + + if 1 == 1 { + } +} + +func b() { + // b is a variable +} + +func c() { + var ( + d string + ) + panic(d) +} diff --git a/internal/lsp/analysis/unusedvariable/unusedvariable.go b/internal/lsp/analysis/unusedvariable/unusedvariable.go new file mode 100644 index 00000000000..47564f1f154 --- /dev/null +++ b/internal/lsp/analysis/unusedvariable/unusedvariable.go @@ -0,0 +1,300 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package unusedvariable defines an analyzer that checks for unused variables. +package unusedvariable + +import ( + "bytes" + "fmt" + "go/ast" + "go/format" + "go/token" + "go/types" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/internal/analysisinternal" +) + +const Doc = `check for unused variables + +The unusedvariable analyzer suggests fixes for unused variables errors. +` + +var Analyzer = &analysis.Analyzer{ + Name: "unusedvariable", + Doc: Doc, + Requires: []*analysis.Analyzer{}, + Run: run, + RunDespiteErrors: true, // an unusedvariable diagnostic is a compile error +} + +type fixesForError map[types.Error][]analysis.SuggestedFix + +const unusedVariableSuffix = " declared but not used" + +func run(pass *analysis.Pass) (interface{}, error) { + for _, typeErr := range analysisinternal.GetTypeErrors(pass) { + if strings.HasSuffix(typeErr.Msg, unusedVariableSuffix) { + varName := strings.TrimSuffix(typeErr.Msg, unusedVariableSuffix) + err := runForError(pass, typeErr, varName) + if err != nil { + return nil, err + } + } + } + + return nil, nil +} + +func runForError(pass *analysis.Pass, err types.Error, name string) error { + var file *ast.File + for _, f := range pass.Files { + if f.Pos() <= err.Pos && err.Pos < f.End() { + file = f + break + } + } + if file == nil { + return nil + } + + path, _ := astutil.PathEnclosingInterval(file, err.Pos, err.Pos) + if len(path) < 2 { + return nil + } + + ident, ok := path[0].(*ast.Ident) + if !ok || ident.Name != name { + return nil + } + + diag := analysis.Diagnostic{ + Pos: ident.Pos(), + End: ident.End(), + Message: err.Msg, + } + + for i := range path { + switch stmt := path[i].(type) { + case *ast.ValueSpec: + // Find GenDecl to which offending ValueSpec belongs. 
+ if decl, ok := path[i+1].(*ast.GenDecl); ok { + fixes := removeVariableFromSpec(pass, path, stmt, decl, ident) + // fixes may be nil + if len(fixes) > 0 { + diag.SuggestedFixes = fixes + pass.Report(diag) + } + } + + case *ast.AssignStmt: + if stmt.Tok != token.DEFINE { + continue + } + + containsIdent := false + for _, expr := range stmt.Lhs { + if expr == ident { + containsIdent = true + } + } + if !containsIdent { + continue + } + + fixes := removeVariableFromAssignment(pass, path, stmt, ident) + // fixes may be nil + if len(fixes) > 0 { + diag.SuggestedFixes = fixes + pass.Report(diag) + } + } + } + + return nil +} + +func removeVariableFromSpec(pass *analysis.Pass, path []ast.Node, stmt *ast.ValueSpec, decl *ast.GenDecl, ident *ast.Ident) []analysis.SuggestedFix { + newDecl := new(ast.GenDecl) + *newDecl = *decl + newDecl.Specs = nil + + for _, spec := range decl.Specs { + if spec != stmt { + newDecl.Specs = append(newDecl.Specs, spec) + continue + } + + newSpec := new(ast.ValueSpec) + *newSpec = *stmt + newSpec.Names = nil + + for _, n := range stmt.Names { + if n != ident { + newSpec.Names = append(newSpec.Names, n) + } + } + + if len(newSpec.Names) > 0 { + newDecl.Specs = append(newDecl.Specs, newSpec) + } + } + + // decl.End() does not include any comments, so if a comment is present we + // need to account for it when we delete the statement + end := decl.End() + if stmt.Comment != nil && stmt.Comment.End() > end { + end = stmt.Comment.End() + } + + // There are no other specs left in the declaration, the whole statement can + // be deleted + if len(newDecl.Specs) == 0 { + // Find parent DeclStmt and delete it + for _, node := range path { + if declStmt, ok := node.(*ast.DeclStmt); ok { + return []analysis.SuggestedFix{ + { + Message: suggestedFixMessage(ident.Name), + TextEdits: deleteStmtFromBlock(path, declStmt), + }, + } + } + } + } + + var b bytes.Buffer + if err := format.Node(&b, pass.Fset, newDecl); err != nil { + return nil + } + + return []analysis.SuggestedFix{ + { + Message: suggestedFixMessage(ident.Name), + TextEdits: []analysis.TextEdit{ + { + Pos: decl.Pos(), + // Avoid adding a new empty line + End: end + 1, + NewText: b.Bytes(), + }, + }, + }, + } +} + +func removeVariableFromAssignment(pass *analysis.Pass, path []ast.Node, stmt *ast.AssignStmt, ident *ast.Ident) []analysis.SuggestedFix { + // The only variable in the assignment is unused + if len(stmt.Lhs) == 1 { + // If LHS has only one expression to be valid it has to have 1 expression + // on RHS + // + // RHS may have side effects, preserve RHS + if exprMayHaveSideEffects(stmt.Rhs[0]) { + // Delete until RHS + return []analysis.SuggestedFix{ + { + Message: suggestedFixMessage(ident.Name), + TextEdits: []analysis.TextEdit{ + { + Pos: ident.Pos(), + End: stmt.Rhs[0].Pos(), + }, + }, + }, + } + } + + // RHS does not have any side effects, delete the whole statement + return []analysis.SuggestedFix{ + { + Message: suggestedFixMessage(ident.Name), + TextEdits: deleteStmtFromBlock(path, stmt), + }, + } + } + + // Otherwise replace ident with `_` + return []analysis.SuggestedFix{ + { + Message: suggestedFixMessage(ident.Name), + TextEdits: []analysis.TextEdit{ + { + Pos: ident.Pos(), + End: ident.End(), + NewText: []byte("_"), + }, + }, + }, + } +} + +func suggestedFixMessage(name string) string { + return fmt.Sprintf("Remove variable %s", name) +} + +func deleteStmtFromBlock(path []ast.Node, stmt ast.Stmt) []analysis.TextEdit { + // Find innermost enclosing BlockStmt. 
+ var block *ast.BlockStmt + for i := range path { + if blockStmt, ok := path[i].(*ast.BlockStmt); ok { + block = blockStmt + break + } + } + + nodeIndex := -1 + for i, blockStmt := range block.List { + if blockStmt == stmt { + nodeIndex = i + break + } + } + + // The statement we need to delete was not found in BlockStmt + if nodeIndex == -1 { + return nil + } + + // Delete until the end of the block unless there is another statement after + // the one we are trying to delete + end := block.Rbrace + if nodeIndex < len(block.List)-1 { + end = block.List[nodeIndex+1].Pos() + } + + return []analysis.TextEdit{ + { + Pos: stmt.Pos(), + End: end, + }, + } +} + +// exprMayHaveSideEffects reports whether the expression may have side effects +// (because it contains a function call or channel receive). We disregard +// runtime panics as well written programs should not encounter them. +func exprMayHaveSideEffects(expr ast.Expr) bool { + var mayHaveSideEffects bool + ast.Inspect(expr, func(n ast.Node) bool { + switch n := n.(type) { + case *ast.CallExpr: // possible function call + mayHaveSideEffects = true + return false + case *ast.UnaryExpr: + if n.Op == token.ARROW { // channel receive + mayHaveSideEffects = true + return false + } + case *ast.FuncLit: + return false // evaluating what's inside a FuncLit has no effect + } + return true + }) + + return mayHaveSideEffects +} diff --git a/internal/lsp/analysis/unusedvariable/unusedvariable_test.go b/internal/lsp/analysis/unusedvariable/unusedvariable_test.go new file mode 100644 index 00000000000..e6d7c020f04 --- /dev/null +++ b/internal/lsp/analysis/unusedvariable/unusedvariable_test.go @@ -0,0 +1,24 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package unusedvariable_test + +import ( + "testing" + + "golang.org/x/tools/go/analysis/analysistest" + "golang.org/x/tools/internal/lsp/analysis/unusedvariable" +) + +func Test(t *testing.T) { + testdata := analysistest.TestData() + + t.Run("decl", func(t *testing.T) { + analysistest.RunWithSuggestedFixes(t, testdata, unusedvariable.Analyzer, "decl") + }) + + t.Run("assign", func(t *testing.T) { + analysistest.RunWithSuggestedFixes(t, testdata, unusedvariable.Analyzer, "assign") + }) +} diff --git a/internal/lsp/source/api_json.go b/internal/lsp/source/api_json.go index 4e2183cf4e6..2493da25d8c 100755 --- a/internal/lsp/source/api_json.go +++ b/internal/lsp/source/api_json.go @@ -433,6 +433,11 @@ var GeneratedAPIJSON = &APIJSON{ Doc: "suggested fixes for \"undeclared name: <>\"\n\nThis checker provides suggested fixes for type errors of the\ntype \"undeclared name: <>\". It will either insert a new statement,\nsuch as:\n\n\"<> := \"\n\nor a new function declaration, such as:\n\nfunc <>(inferred parameters) {\n\tpanic(\"implement me!\")\n}\n", Default: "true", }, + { + Name: "\"unusedvariable\"", + Doc: "check for unused variables\n\nThe unusedvariable analyzer suggests fixes for unused variables errors.\n", + Default: "false", + }, { Name: "\"fillstruct\"", Doc: "note incomplete struct initializations\n\nThis analyzer provides diagnostics for any struct literals that do not have\nany fields initialized. 
Because the suggested fix for this analysis is\nexpensive to compute, callers should compute it separately, using the\nSuggestedFix function below.\n", @@ -1013,6 +1018,10 @@ var GeneratedAPIJSON = &APIJSON{ Doc: "suggested fixes for \"undeclared name: <>\"\n\nThis checker provides suggested fixes for type errors of the\ntype \"undeclared name: <>\". It will either insert a new statement,\nsuch as:\n\n\"<> := \"\n\nor a new function declaration, such as:\n\nfunc <>(inferred parameters) {\n\tpanic(\"implement me!\")\n}\n", Default: true, }, + { + Name: "unusedvariable", + Doc: "check for unused variables\n\nThe unusedvariable analyzer suggests fixes for unused variables errors.\n", + }, { Name: "fillstruct", Doc: "note incomplete struct initializations\n\nThis analyzer provides diagnostics for any struct literals that do not have\nany fields initialized. Because the suggested fix for this analysis is\nexpensive to compute, callers should compute it separately, using the\nSuggestedFix function below.\n", diff --git a/internal/lsp/source/options.go b/internal/lsp/source/options.go index 5da14ebfe92..09401bc6ef8 100644 --- a/internal/lsp/source/options.go +++ b/internal/lsp/source/options.go @@ -61,6 +61,7 @@ import ( "golang.org/x/tools/internal/lsp/analysis/stubmethods" "golang.org/x/tools/internal/lsp/analysis/undeclaredname" "golang.org/x/tools/internal/lsp/analysis/unusedparams" + "golang.org/x/tools/internal/lsp/analysis/unusedvariable" "golang.org/x/tools/internal/lsp/analysis/useany" "golang.org/x/tools/internal/lsp/command" "golang.org/x/tools/internal/lsp/diff" @@ -800,6 +801,9 @@ func (o *Options) enableAllExperimentMaps() { if _, ok := o.Analyses[unusedparams.Analyzer.Name]; !ok { o.Analyses[unusedparams.Analyzer.Name] = true } + if _, ok := o.Analyses[unusedvariable.Analyzer.Name]; !ok { + o.Analyses[unusedvariable.Analyzer.Name] = true + } } func (o *Options) set(name string, value interface{}, seen map[string]struct{}) OptionResult { @@ -1270,6 +1274,10 @@ func typeErrorAnalyzers() map[string]*Analyzer { Fix: UndeclaredName, Enabled: true, }, + unusedvariable.Analyzer.Name: { + Analyzer: unusedvariable.Analyzer, + Enabled: false, + }, } } From c3af7c2fa9473d5bc80d0f62e3067027afd7ccd7 Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Fri, 15 Jul 2022 08:31:52 -0400 Subject: [PATCH 107/136] internal/lsp/cache: delete workspacePackageHandles (dead code) This should have been in CL 417116. Also: - (related to CL 417415), rename packageHandle.check to await to indicate its blocking nature. - rename typeCheck to typeCheckImpl, following the pattern. - move "prefetch" parallel loop into typeCheckImpl. - add some comments. 
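
The "prefetch" loop mentioned above amounts to the following pattern
(a simplified sketch, not the actual gopls code; `prefetch`, `deps` and
`arg` are placeholder names, and the usual context/sync imports plus
the internal/memoize package are assumed):

    // prefetch kicks off evaluation of every dependency promise in
    // parallel. Type checking will block only on the promises it
    // actually imports; the returned function is deferred so that all
    // goroutines finish before the enclosing function returns and the
    // snapshot may be destroyed.
    func prefetch(ctx context.Context, arg interface{}, deps []*memoize.Promise) (wait func()) {
        var wg sync.WaitGroup
        for _, dep := range deps {
            dep := dep
            wg.Add(1)
            go func() {
                defer wg.Done()
                dep.Get(ctx, arg) // result is cached in the promise; ignored here
            }()
        }
        return wg.Wait
    }
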
Change-Id: Iea2c8e1f1f74fb65afd0759b493509147d87a4bb Reviewed-on: https://go-review.googlesource.com/c/tools/+/417581 Run-TryBot: Alan Donovan Auto-Submit: Alan Donovan TryBot-Result: Gopher Robot Reviewed-by: Robert Findley gopls-CI: kokoro --- internal/lsp/cache/analysis.go | 2 +- internal/lsp/cache/check.go | 66 ++++++++++++++++++---------------- internal/lsp/cache/snapshot.go | 23 +++--------- 3 files changed, 40 insertions(+), 51 deletions(-) diff --git a/internal/lsp/cache/analysis.go b/internal/lsp/cache/analysis.go index ee80bbcd529..ca0e04d64b0 100644 --- a/internal/lsp/cache/analysis.go +++ b/internal/lsp/cache/analysis.go @@ -123,7 +123,7 @@ func (s *snapshot) actionHandle(ctx context.Context, id PackageID, a *analysis.A if err != nil { return nil, err } - pkg, err := ph.check(ctx, s) + pkg, err := ph.await(ctx, s) if err != nil { return nil, err } diff --git a/internal/lsp/cache/check.go b/internal/lsp/cache/check.go index abc17245726..4caf4ba6fa7 100644 --- a/internal/lsp/cache/check.go +++ b/internal/lsp/cache/check.go @@ -42,7 +42,7 @@ type packageKey struct { type packageHandleKey source.Hash // A packageHandle is a handle to the future result of type-checking a package. -// The resulting package is obtained from the check() method. +// The resulting package is obtained from the await() method. type packageHandle struct { promise *memoize.Promise // [typeCheckResult] @@ -60,7 +60,7 @@ type packageHandle struct { } // typeCheckResult contains the result of a call to -// typeCheck, which type-checks a package. +// typeCheckImpl, which type-checks a package. type typeCheckResult struct { pkg *pkg err error @@ -130,6 +130,12 @@ func (s *snapshot) buildPackageHandle(ctx context.Context, id PackageID, mode so } // Read both lists of files of this package, in parallel. + // + // goFiles aren't presented to the type checker--nor + // are they included in the key, unsoundly--but their + // syntax trees are available from (*pkg).File(URI). + // TODO(adonovan): consider parsing them on demand? + // The need should be rare. goFiles, compiledGoFiles, err := readGoFiles(ctx, s, m.Metadata) if err != nil { return nil, err @@ -139,32 +145,9 @@ func (s *snapshot) buildPackageHandle(ctx context.Context, id PackageID, mode so // Create a handle for the result of type checking. experimentalKey := s.View().Options().ExperimentalPackageCacheKey phKey := computePackageKey(m.ID, compiledGoFiles, m, depKeys, mode, experimentalKey) - // TODO(adonovan): extract lambda into a standalone function to - // avoid implicit lexical dependencies. promise, release := s.store.Promise(phKey, func(ctx context.Context, arg interface{}) interface{} { - snapshot := arg.(*snapshot) - - // Start type checking of direct dependencies, - // in parallel and asynchronously. - // As the type checker imports each of these - // packages, it will wait for its completion. - var wg sync.WaitGroup - for _, dep := range deps { - wg.Add(1) - go func(dep *packageHandle) { - dep.check(ctx, snapshot) // ignore result - wg.Done() - }(dep) - } - // The 'defer' below is unusual but intentional: - // it is not necessary that each call to dep.check - // complete before type checking begins, as the type - // checker will wait for those it needs. But they do - // need to complete before this function returns and - // the snapshot is possibly destroyed. 
- defer wg.Wait() - - pkg, err := typeCheck(ctx, snapshot, goFiles, compiledGoFiles, m.Metadata, mode, deps) + + pkg, err := typeCheckImpl(ctx, arg.(*snapshot), goFiles, compiledGoFiles, m.Metadata, mode, deps) return typeCheckResult{pkg, err} }) @@ -288,7 +271,8 @@ func hashConfig(config *packages.Config) source.Hash { return source.HashOf(b.Bytes()) } -func (ph *packageHandle) check(ctx context.Context, s *snapshot) (*pkg, error) { +// await waits for typeCheckImpl to complete and returns its result. +func (ph *packageHandle) await(ctx context.Context, s *snapshot) (*pkg, error) { v, err := s.awaitPromise(ctx, ph.promise) if err != nil { return nil, err @@ -314,10 +298,30 @@ func (ph *packageHandle) cached() (*pkg, error) { return data.pkg, data.err } -// typeCheck type checks the parsed source files in compiledGoFiles. +// typeCheckImpl type checks the parsed source files in compiledGoFiles. // (The resulting pkg also holds the parsed but not type-checked goFiles.) // deps holds the future results of type-checking the direct dependencies. -func typeCheck(ctx context.Context, snapshot *snapshot, goFiles, compiledGoFiles []source.FileHandle, m *Metadata, mode source.ParseMode, deps map[PackagePath]*packageHandle) (*pkg, error) { +func typeCheckImpl(ctx context.Context, snapshot *snapshot, goFiles, compiledGoFiles []source.FileHandle, m *Metadata, mode source.ParseMode, deps map[PackagePath]*packageHandle) (*pkg, error) { + // Start type checking of direct dependencies, + // in parallel and asynchronously. + // As the type checker imports each of these + // packages, it will wait for its completion. + var wg sync.WaitGroup + for _, dep := range deps { + wg.Add(1) + go func(dep *packageHandle) { + dep.await(ctx, snapshot) // ignore result + wg.Done() + }(dep) + } + // The 'defer' below is unusual but intentional: + // it is not necessary that each call to dep.check + // complete before type checking begins, as the type + // checker will wait for those it needs. But they do + // need to complete before this function returns and + // the snapshot is possibly destroyed. 
+ defer wg.Wait() + var filter *unexportedFilter if mode == source.ParseExported { filter = &unexportedFilter{uses: map[string]bool{}} @@ -522,7 +526,7 @@ func doTypeCheck(ctx context.Context, snapshot *snapshot, goFiles, compiledGoFil if !source.IsValidImport(string(m.PkgPath), string(dep.m.PkgPath)) { return nil, fmt.Errorf("invalid use of internal package %s", pkgPath) } - depPkg, err := dep.check(ctx, snapshot) + depPkg, err := dep.await(ctx, snapshot) if err != nil { return nil, err } diff --git a/internal/lsp/cache/snapshot.go b/internal/lsp/cache/snapshot.go index 9e52cda5aff..a516860aafc 100644 --- a/internal/lsp/cache/snapshot.go +++ b/internal/lsp/cache/snapshot.go @@ -601,7 +601,7 @@ func (s *snapshot) PackagesForFile(ctx context.Context, uri span.URI, mode sourc } var pkgs []source.Package for _, ph := range phs { - pkg, err := ph.check(ctx, s) + pkg, err := ph.await(ctx, s) if err != nil { return nil, err } @@ -639,7 +639,7 @@ func (s *snapshot) PackageForFile(ctx context.Context, uri span.URI, mode source return nil, fmt.Errorf("no packages in input") } - return ph.check(ctx, s) + return ph.await(ctx, s) } func (s *snapshot) packageHandlesForFile(ctx context.Context, uri span.URI, mode source.TypecheckMode, includeTestVariants bool) ([]*packageHandle, error) { @@ -756,7 +756,7 @@ func (s *snapshot) checkedPackage(ctx context.Context, id PackageID, mode source if err != nil { return nil, err } - return ph.check(ctx, s) + return ph.await(ctx, s) } func (s *snapshot) getImportedBy(id PackageID) []PackageID { @@ -996,21 +996,6 @@ func (s *snapshot) knownFilesInDir(ctx context.Context, dir span.URI) []span.URI return files } -func (s *snapshot) workspacePackageHandles(ctx context.Context) ([]*packageHandle, error) { - if err := s.awaitLoaded(ctx); err != nil { - return nil, err - } - var phs []*packageHandle - for _, pkgID := range s.workspacePackageIDs() { - ph, err := s.buildPackageHandle(ctx, pkgID, s.workspaceParseMode(pkgID)) - if err != nil { - return nil, err - } - phs = append(phs, ph) - } - return phs, nil -} - func (s *snapshot) ActivePackages(ctx context.Context) ([]source.Package, error) { phs, err := s.activePackageHandles(ctx) if err != nil { @@ -1018,7 +1003,7 @@ func (s *snapshot) ActivePackages(ctx context.Context) ([]source.Package, error) } var pkgs []source.Package for _, ph := range phs { - pkg, err := ph.check(ctx, s) + pkg, err := ph.await(ctx, s) if err != nil { return nil, err } From 22d149443a6474462b18eb49cbc26bf9b21b87f2 Mon Sep 17 00:00:00 2001 From: David Chase Date: Thu, 16 Jun 2022 16:30:48 -0400 Subject: [PATCH 108/136] internal/gcimporter: add support for reading unified IR export data This does not include writing export data in the unified IR format. Most of this change is an import of code from the Go implementation, with minor tweaks and gaskets added. This does not (yet) address the registry issue mentioned in golang/go#52163. Updates golang/go#52163. 
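
From a caller's point of view nothing should change: Read still
auto-detects the format from the first byte of the export data ('i'
for indexed, 'v'/'c'/'d' for the old binary format, and now 'u' for
unified IR). A minimal, hypothetical use of the public API (the
export-file and import-path arguments are placeholders):

    package main

    import (
        "fmt"
        "go/token"
        "go/types"
        "log"
        "os"

        "golang.org/x/tools/go/gcexportdata"
    )

    // Usage: readexport <export-file (.a or .o)> <import path>
    func main() {
        f, err := os.Open(os.Args[1])
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()

        // NewReader skips any archive header preceding the export data.
        r, err := gcexportdata.NewReader(f)
        if err != nil {
            log.Fatal(err)
        }

        fset := token.NewFileSet()
        imports := make(map[string]*types.Package)
        pkg, err := gcexportdata.Read(r, fset, imports, os.Args[2])
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("loaded %s with %d package-level names\n",
            pkg.Path(), len(pkg.Scope().Names()))
    }
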
Change-Id: I98030e6c9ff35c6ff678b8a7ce9b653b18e65e17 Reviewed-on: https://go-review.googlesource.com/c/tools/+/412821 TryBot-Result: Gopher Robot Run-TryBot: David Chase Reviewed-by: Matthew Dempsky gopls-CI: kokoro --- go/gcexportdata/gcexportdata.go | 28 +- go/internal/gcimporter/gcimporter.go | 28 +- go/internal/gcimporter/gcimporter_test.go | 39 +- go/internal/gcimporter/unified_no.go | 10 + go/internal/gcimporter/unified_yes.go | 10 + go/internal/gcimporter/ureader_no.go | 19 + go/internal/gcimporter/ureader_yes.go | 612 ++++++++++++++++++++++ go/internal/pkgbits/codes.go | 77 +++ go/internal/pkgbits/decoder.go | 433 +++++++++++++++ go/internal/pkgbits/doc.go | 32 ++ go/internal/pkgbits/encoder.go | 379 ++++++++++++++ go/internal/pkgbits/flags.go | 9 + go/internal/pkgbits/frames_go1.go | 21 + go/internal/pkgbits/frames_go17.go | 28 + go/internal/pkgbits/reloc.go | 42 ++ go/internal/pkgbits/support.go | 17 + go/internal/pkgbits/sync.go | 113 ++++ go/internal/pkgbits/syncmarker_string.go | 89 ++++ 18 files changed, 1968 insertions(+), 18 deletions(-) create mode 100644 go/internal/gcimporter/unified_no.go create mode 100644 go/internal/gcimporter/unified_yes.go create mode 100644 go/internal/gcimporter/ureader_no.go create mode 100644 go/internal/gcimporter/ureader_yes.go create mode 100644 go/internal/pkgbits/codes.go create mode 100644 go/internal/pkgbits/decoder.go create mode 100644 go/internal/pkgbits/doc.go create mode 100644 go/internal/pkgbits/encoder.go create mode 100644 go/internal/pkgbits/flags.go create mode 100644 go/internal/pkgbits/frames_go1.go create mode 100644 go/internal/pkgbits/frames_go17.go create mode 100644 go/internal/pkgbits/reloc.go create mode 100644 go/internal/pkgbits/support.go create mode 100644 go/internal/pkgbits/sync.go create mode 100644 go/internal/pkgbits/syncmarker_string.go diff --git a/go/gcexportdata/gcexportdata.go b/go/gcexportdata/gcexportdata.go index ddc276cfbcb..2ed25a75024 100644 --- a/go/gcexportdata/gcexportdata.go +++ b/go/gcexportdata/gcexportdata.go @@ -116,13 +116,29 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, // The indexed export format starts with an 'i'; the older // binary export format starts with a 'c', 'd', or 'v' // (from "version"). Select appropriate importer. - if len(data) > 0 && data[0] == 'i' { - _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) - return pkg, err - } + if len(data) > 0 { + switch data[0] { + case 'i': + _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) + return pkg, err + + case 'v', 'c', 'd': + _, pkg, err := gcimporter.BImportData(fset, imports, data, path) + return pkg, err - _, pkg, err := gcimporter.BImportData(fset, imports, data, path) - return pkg, err + case 'u': + _, pkg, err := gcimporter.UImportData(fset, imports, data[1:], path) + return pkg, err + + default: + l := len(data) + if l > 10 { + l = 10 + } + return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), path) + } + } + return nil, fmt.Errorf("empty export data for %s", path) } // Write writes encoded type information for the specified package to out. 
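The prefix dispatch added above can be exercised on its own. The following
sketch is illustrative only (exportFormat is a made-up helper, not part of
the x/tools API); it mirrors how Read now selects an importer from the
first byte of the export data:

    package main

    import "fmt"

    // exportFormat reports which importer the first byte of an export
    // data payload selects: 'i' for the indexed format, 'v'/'c'/'d' for
    // the older binary format, and 'u' for unified IR.
    func exportFormat(data []byte) string {
        if len(data) == 0 {
            return "empty export data"
        }
        switch data[0] {
        case 'i':
            return "indexed format (IImportData)"
        case 'v', 'c', 'd':
            return "binary format (BImportData)"
        case 'u':
            return "unified IR format (UImportData)"
        default:
            prefix := data
            if len(prefix) > 10 {
                prefix = prefix[:10]
            }
            return fmt.Sprintf("unexpected export data with prefix %q", prefix)
        }
    }

    func main() {
        fmt.Println(exportFormat([]byte("i..."))) // indexed format (IImportData)
        fmt.Println(exportFormat([]byte("u..."))) // unified IR format (UImportData)
        fmt.Println(exportFormat([]byte("x..."))) // unexpected export data with prefix "x..."
    }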
diff --git a/go/internal/gcimporter/gcimporter.go b/go/internal/gcimporter/gcimporter.go index 493bfa03b0f..e96c39600d1 100644 --- a/go/internal/gcimporter/gcimporter.go +++ b/go/internal/gcimporter/gcimporter.go @@ -181,8 +181,9 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func defer rc.Close() var hdr string + var size int64 buf := bufio.NewReader(rc) - if hdr, _, err = FindExportData(buf); err != nil { + if hdr, size, err = FindExportData(buf); err != nil { return } @@ -210,10 +211,27 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func // The indexed export format starts with an 'i'; the older // binary export format starts with a 'c', 'd', or 'v' // (from "version"). Select appropriate importer. - if len(data) > 0 && data[0] == 'i' { - _, pkg, err = IImportData(fset, packages, data[1:], id) - } else { - _, pkg, err = BImportData(fset, packages, data, id) + if len(data) > 0 { + switch data[0] { + case 'i': + _, pkg, err := IImportData(fset, packages, data[1:], id) + return pkg, err + + case 'v', 'c', 'd': + _, pkg, err := BImportData(fset, packages, data, id) + return pkg, err + + case 'u': + _, pkg, err := UImportData(fset, packages, data[1:size], id) + return pkg, err + + default: + l := len(data) + if l > 10 { + l = 10 + } + return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), id) + } } default: diff --git a/go/internal/gcimporter/gcimporter_test.go b/go/internal/gcimporter/gcimporter_test.go index 4e992af76b3..66c09269d88 100644 --- a/go/internal/gcimporter/gcimporter_test.go +++ b/go/internal/gcimporter/gcimporter_test.go @@ -45,6 +45,10 @@ func needsCompiler(t *testing.T, compiler string) { // compile runs the compiler on filename, with dirname as the working directory, // and writes the output file to outdirname. func compile(t *testing.T, dirname, filename, outdirname string) string { + return compilePkg(t, dirname, filename, outdirname, "p") +} + +func compilePkg(t *testing.T, dirname, filename, outdirname, pkg string) string { testenv.NeedsGoBuild(t) // filename must end with ".go" @@ -53,12 +57,12 @@ func compile(t *testing.T, dirname, filename, outdirname string) string { } basename := filepath.Base(filename) outname := filepath.Join(outdirname, basename[:len(basename)-2]+"o") - cmd := exec.Command("go", "tool", "compile", "-p=p", "-o", outname, filename) + cmd := exec.Command("go", "tool", "compile", "-p="+pkg, "-o", outname, filename) cmd.Dir = dirname out, err := cmd.CombinedOutput() if err != nil { t.Logf("%s", out) - t.Fatalf("go tool compile %s failed: %s", filename, err) + t.Fatalf("(cd %v && %v) failed: %s", cmd.Dir, cmd, err) } return outname } @@ -140,7 +144,11 @@ func TestImportTestdata(t *testing.T) { // For now, we just test the presence of a few packages // that we know are there for sure. got := fmt.Sprint(pkg.Imports()) - for _, want := range []string{"go/ast", "go/token"} { + wants := []string{"go/ast", "go/token"} + if unifiedIR { + wants = []string{"go/ast"} + } + for _, want := range wants { if !strings.Contains(got, want) { t.Errorf(`Package("exports").Imports() = %s, does not contain %s`, got, want) } @@ -364,6 +372,14 @@ func verifyInterfaceMethodRecvs(t *testing.T, named *types.Named, level int) { return // not an interface } + // The unified IR importer always sets interface method receiver + // parameters to point to the Interface type, rather than the Named. + // See #49906. 
+ var want types.Type = named + if unifiedIR { + want = iface + } + // check explicitly declared methods for i := 0; i < iface.NumExplicitMethods(); i++ { m := iface.ExplicitMethod(i) @@ -372,8 +388,8 @@ func verifyInterfaceMethodRecvs(t *testing.T, named *types.Named, level int) { t.Errorf("%s: missing receiver type", m) continue } - if recv.Type() != named { - t.Errorf("%s: got recv type %s; want %s", m, recv.Type(), named) + if recv.Type() != want { + t.Errorf("%s: got recv type %s; want %s", m, recv.Type(), want) } } @@ -451,7 +467,7 @@ func TestIssue13566(t *testing.T) { if err != nil { t.Fatal(err) } - compile(t, "testdata", "a.go", testoutdir) + compilePkg(t, "testdata", "a.go", testoutdir, apkg(testoutdir)) compile(t, testoutdir, bpath, testoutdir) // import must succeed (test for issue at hand) @@ -611,13 +627,22 @@ func TestIssue51836(t *testing.T) { if err != nil { t.Fatal(err) } - compile(t, dir, "a.go", testoutdir) + compilePkg(t, dir, "a.go", testoutdir, apkg(testoutdir)) compile(t, testoutdir, bpath, testoutdir) // import must succeed (test for issue at hand) _ = importPkg(t, "./testdata/aa", tmpdir) } +// apkg returns the package "a" prefixed by (as a package) testoutdir +func apkg(testoutdir string) string { + apkg := testoutdir + "/a" + if os.PathSeparator != '/' { + apkg = strings.ReplaceAll(apkg, string(os.PathSeparator), "/") + } + return apkg +} + func importPkg(t *testing.T, path, srcDir string) *types.Package { pkg, err := Import(make(map[string]*types.Package), path, srcDir, nil) if err != nil { diff --git a/go/internal/gcimporter/unified_no.go b/go/internal/gcimporter/unified_no.go new file mode 100644 index 00000000000..286bf445483 --- /dev/null +++ b/go/internal/gcimporter/unified_no.go @@ -0,0 +1,10 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !(go1.18 && goexperiment.unified) +// +build !go1.18 !goexperiment.unified + +package gcimporter + +const unifiedIR = false diff --git a/go/internal/gcimporter/unified_yes.go b/go/internal/gcimporter/unified_yes.go new file mode 100644 index 00000000000..b5d69ffbe68 --- /dev/null +++ b/go/internal/gcimporter/unified_yes.go @@ -0,0 +1,10 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.18 && goexperiment.unified +// +build go1.18,goexperiment.unified + +package gcimporter + +const unifiedIR = true diff --git a/go/internal/gcimporter/ureader_no.go b/go/internal/gcimporter/ureader_no.go new file mode 100644 index 00000000000..8eb20729c2a --- /dev/null +++ b/go/internal/gcimporter/ureader_no.go @@ -0,0 +1,19 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !go1.18 +// +build !go1.18 + +package gcimporter + +import ( + "fmt" + "go/token" + "go/types" +) + +func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { + err = fmt.Errorf("go/tools compiled with a Go version earlier than 1.18 cannot read unified IR export data") + return +} diff --git a/go/internal/gcimporter/ureader_yes.go b/go/internal/gcimporter/ureader_yes.go new file mode 100644 index 00000000000..3c1a4375435 --- /dev/null +++ b/go/internal/gcimporter/ureader_yes.go @@ -0,0 +1,612 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Derived from go/internal/gcimporter/ureader.go + +//go:build go1.18 +// +build go1.18 + +package gcimporter + +import ( + "go/token" + "go/types" + "strings" + + "golang.org/x/tools/go/internal/pkgbits" +) + +// A pkgReader holds the shared state for reading a unified IR package +// description. +type pkgReader struct { + pkgbits.PkgDecoder + + fake fakeFileSet + + ctxt *types.Context + imports map[string]*types.Package // previously imported packages, indexed by path + + // lazily initialized arrays corresponding to the unified IR + // PosBase, Pkg, and Type sections, respectively. + posBases []string // position bases (i.e., file names) + pkgs []*types.Package + typs []types.Type + + // laterFns holds functions that need to be invoked at the end of + // import reading. + laterFns []func() +} + +// later adds a function to be invoked at the end of import reading. +func (pr *pkgReader) later(fn func()) { + pr.laterFns = append(pr.laterFns, fn) +} + +// See cmd/compile/internal/noder.derivedInfo. +type derivedInfo struct { + idx pkgbits.Index + needed bool +} + +// See cmd/compile/internal/noder.typeInfo. +type typeInfo struct { + idx pkgbits.Index + derived bool +} + +func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { + s := string(data) + s = s[:strings.LastIndex(s, "\n$$\n")] + input := pkgbits.NewPkgDecoder(path, s) + pkg = readUnifiedPackage(fset, nil, imports, input) + return +} + +// readUnifiedPackage reads a package description from the given +// unified IR export data decoder. +func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[string]*types.Package, input pkgbits.PkgDecoder) *types.Package { + pr := pkgReader{ + PkgDecoder: input, + + fake: fakeFileSet{ + fset: fset, + files: make(map[string]*fileInfo), + }, + + ctxt: ctxt, + imports: imports, + + posBases: make([]string, input.NumElems(pkgbits.RelocPosBase)), + pkgs: make([]*types.Package, input.NumElems(pkgbits.RelocPkg)), + typs: make([]types.Type, input.NumElems(pkgbits.RelocType)), + } + defer pr.fake.setLines() + + r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic) + pkg := r.pkg() + r.Bool() // has init + + for i, n := 0, r.Len(); i < n; i++ { + // As if r.obj(), but avoiding the Scope.Lookup call, + // to avoid eager loading of imports. + r.Sync(pkgbits.SyncObject) + assert(!r.Bool()) + r.p.objIdx(r.Reloc(pkgbits.RelocObj)) + assert(r.Len() == 0) + } + + r.Sync(pkgbits.SyncEOF) + + for _, fn := range pr.laterFns { + fn() + } + + pkg.MarkComplete() + return pkg +} + +// A reader holds the state for reading a single unified IR element +// within a package. 
+type reader struct { + pkgbits.Decoder + + p *pkgReader + + dict *readerDict +} + +// A readerDict holds the state for type parameters that parameterize +// the current unified IR element. +type readerDict struct { + // bounds is a slice of typeInfos corresponding to the underlying + // bounds of the element's type parameters. + bounds []typeInfo + + // tparams is a slice of the constructed TypeParams for the element. + tparams []*types.TypeParam + + // devived is a slice of types derived from tparams, which may be + // instantiated while reading the current element. + derived []derivedInfo + derivedTypes []types.Type // lazily instantiated from derived +} + +func (pr *pkgReader) newReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pkgbits.SyncMarker) *reader { + return &reader{ + Decoder: pr.NewDecoder(k, idx, marker), + p: pr, + } +} + +// @@@ Positions + +func (r *reader) pos() token.Pos { + r.Sync(pkgbits.SyncPos) + if !r.Bool() { + return token.NoPos + } + + // TODO(mdempsky): Delta encoding. + posBase := r.posBase() + line := r.Uint() + col := r.Uint() + return r.p.fake.pos(posBase, int(line), int(col)) +} + +func (r *reader) posBase() string { + return r.p.posBaseIdx(r.Reloc(pkgbits.RelocPosBase)) +} + +func (pr *pkgReader) posBaseIdx(idx pkgbits.Index) string { + if b := pr.posBases[idx]; b != "" { + return b + } + + r := pr.newReader(pkgbits.RelocPosBase, idx, pkgbits.SyncPosBase) + + // Within types2, position bases have a lot more details (e.g., + // keeping track of where //line directives appeared exactly). + // + // For go/types, we just track the file name. + + filename := r.String() + + if r.Bool() { // file base + // Was: "b = token.NewTrimmedFileBase(filename, true)" + } else { // line base + pos := r.pos() + line := r.Uint() + col := r.Uint() + + // Was: "b = token.NewLineBase(pos, filename, true, line, col)" + _, _, _ = pos, line, col + } + + b := filename + pr.posBases[idx] = b + return b +} + +// @@@ Packages + +func (r *reader) pkg() *types.Package { + r.Sync(pkgbits.SyncPkg) + return r.p.pkgIdx(r.Reloc(pkgbits.RelocPkg)) +} + +func (pr *pkgReader) pkgIdx(idx pkgbits.Index) *types.Package { + // TODO(mdempsky): Consider using some non-nil pointer to indicate + // the universe scope, so we don't need to keep re-reading it. 
+ if pkg := pr.pkgs[idx]; pkg != nil { + return pkg + } + + pkg := pr.newReader(pkgbits.RelocPkg, idx, pkgbits.SyncPkgDef).doPkg() + pr.pkgs[idx] = pkg + return pkg +} + +func (r *reader) doPkg() *types.Package { + path := r.String() + switch path { + case "": + path = r.p.PkgPath() + case "builtin": + return nil // universe + case "unsafe": + return types.Unsafe + } + + if pkg := r.p.imports[path]; pkg != nil { + return pkg + } + + name := r.String() + + pkg := types.NewPackage(path, name) + r.p.imports[path] = pkg + + imports := make([]*types.Package, r.Len()) + for i := range imports { + imports[i] = r.pkg() + } + pkg.SetImports(imports) + + return pkg +} + +// @@@ Types + +func (r *reader) typ() types.Type { + return r.p.typIdx(r.typInfo(), r.dict) +} + +func (r *reader) typInfo() typeInfo { + r.Sync(pkgbits.SyncType) + if r.Bool() { + return typeInfo{idx: pkgbits.Index(r.Len()), derived: true} + } + return typeInfo{idx: r.Reloc(pkgbits.RelocType), derived: false} +} + +func (pr *pkgReader) typIdx(info typeInfo, dict *readerDict) types.Type { + idx := info.idx + var where *types.Type + if info.derived { + where = &dict.derivedTypes[idx] + idx = dict.derived[idx].idx + } else { + where = &pr.typs[idx] + } + + if typ := *where; typ != nil { + return typ + } + + r := pr.newReader(pkgbits.RelocType, idx, pkgbits.SyncTypeIdx) + r.dict = dict + + typ := r.doTyp() + assert(typ != nil) + + // See comment in pkgReader.typIdx explaining how this happens. + if prev := *where; prev != nil { + return prev + } + + *where = typ + return typ +} + +func (r *reader) doTyp() (res types.Type) { + switch tag := pkgbits.CodeType(r.Code(pkgbits.SyncType)); tag { + default: + errorf("unhandled type tag: %v", tag) + panic("unreachable") + + case pkgbits.TypeBasic: + return types.Typ[r.Len()] + + case pkgbits.TypeNamed: + obj, targs := r.obj() + name := obj.(*types.TypeName) + if len(targs) != 0 { + t, _ := types.Instantiate(r.p.ctxt, name.Type(), targs, false) + return t + } + return name.Type() + + case pkgbits.TypeTypeParam: + return r.dict.tparams[r.Len()] + + case pkgbits.TypeArray: + len := int64(r.Uint64()) + return types.NewArray(r.typ(), len) + case pkgbits.TypeChan: + dir := types.ChanDir(r.Len()) + return types.NewChan(dir, r.typ()) + case pkgbits.TypeMap: + return types.NewMap(r.typ(), r.typ()) + case pkgbits.TypePointer: + return types.NewPointer(r.typ()) + case pkgbits.TypeSignature: + return r.signature(nil, nil, nil) + case pkgbits.TypeSlice: + return types.NewSlice(r.typ()) + case pkgbits.TypeStruct: + return r.structType() + case pkgbits.TypeInterface: + return r.interfaceType() + case pkgbits.TypeUnion: + return r.unionType() + } +} + +func (r *reader) structType() *types.Struct { + fields := make([]*types.Var, r.Len()) + var tags []string + for i := range fields { + pos := r.pos() + pkg, name := r.selector() + ftyp := r.typ() + tag := r.String() + embedded := r.Bool() + + fields[i] = types.NewField(pos, pkg, name, ftyp, embedded) + if tag != "" { + for len(tags) < i { + tags = append(tags, "") + } + tags = append(tags, tag) + } + } + return types.NewStruct(fields, tags) +} + +func (r *reader) unionType() *types.Union { + terms := make([]*types.Term, r.Len()) + for i := range terms { + terms[i] = types.NewTerm(r.Bool(), r.typ()) + } + return types.NewUnion(terms) +} + +func (r *reader) interfaceType() *types.Interface { + methods := make([]*types.Func, r.Len()) + embeddeds := make([]types.Type, r.Len()) + implicit := len(methods) == 0 && len(embeddeds) == 1 && r.Bool() + + for i := range 
methods { + pos := r.pos() + pkg, name := r.selector() + mtyp := r.signature(nil, nil, nil) + methods[i] = types.NewFunc(pos, pkg, name, mtyp) + } + + for i := range embeddeds { + embeddeds[i] = r.typ() + } + + iface := types.NewInterfaceType(methods, embeddeds) + if implicit { + iface.MarkImplicit() + } + return iface +} + +func (r *reader) signature(recv *types.Var, rtparams, tparams []*types.TypeParam) *types.Signature { + r.Sync(pkgbits.SyncSignature) + + params := r.params() + results := r.params() + variadic := r.Bool() + + return types.NewSignatureType(recv, rtparams, tparams, params, results, variadic) +} + +func (r *reader) params() *types.Tuple { + r.Sync(pkgbits.SyncParams) + + params := make([]*types.Var, r.Len()) + for i := range params { + params[i] = r.param() + } + + return types.NewTuple(params...) +} + +func (r *reader) param() *types.Var { + r.Sync(pkgbits.SyncParam) + + pos := r.pos() + pkg, name := r.localIdent() + typ := r.typ() + + return types.NewParam(pos, pkg, name, typ) +} + +// @@@ Objects + +func (r *reader) obj() (types.Object, []types.Type) { + r.Sync(pkgbits.SyncObject) + + assert(!r.Bool()) + + pkg, name := r.p.objIdx(r.Reloc(pkgbits.RelocObj)) + obj := pkgScope(pkg).Lookup(name) + + targs := make([]types.Type, r.Len()) + for i := range targs { + targs[i] = r.typ() + } + + return obj, targs +} + +func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { + rname := pr.newReader(pkgbits.RelocName, idx, pkgbits.SyncObject1) + + objPkg, objName := rname.qualifiedIdent() + assert(objName != "") + + tag := pkgbits.CodeObj(rname.Code(pkgbits.SyncCodeObj)) + + if tag == pkgbits.ObjStub { + assert(objPkg == nil || objPkg == types.Unsafe) + return objPkg, objName + } + + if objPkg.Scope().Lookup(objName) == nil { + dict := pr.objDictIdx(idx) + + r := pr.newReader(pkgbits.RelocObj, idx, pkgbits.SyncObject1) + r.dict = dict + + declare := func(obj types.Object) { + objPkg.Scope().Insert(obj) + } + + switch tag { + default: + panic("weird") + + case pkgbits.ObjAlias: + pos := r.pos() + typ := r.typ() + declare(types.NewTypeName(pos, objPkg, objName, typ)) + + case pkgbits.ObjConst: + pos := r.pos() + typ := r.typ() + val := r.Value() + declare(types.NewConst(pos, objPkg, objName, typ, val)) + + case pkgbits.ObjFunc: + pos := r.pos() + tparams := r.typeParamNames() + sig := r.signature(nil, nil, tparams) + declare(types.NewFunc(pos, objPkg, objName, sig)) + + case pkgbits.ObjType: + pos := r.pos() + + obj := types.NewTypeName(pos, objPkg, objName, nil) + named := types.NewNamed(obj, nil, nil) + declare(obj) + + named.SetTypeParams(r.typeParamNames()) + + // TODO(mdempsky): Rewrite receiver types to underlying is an + // Interface? The go/types importer does this (I think because + // unit tests expected that), but cmd/compile doesn't care + // about it, so maybe we can avoid worrying about that here. 
+ rhs := r.typ() + r.p.later(func() { + underlying := rhs.Underlying() + named.SetUnderlying(underlying) + }) + + for i, n := 0, r.Len(); i < n; i++ { + named.AddMethod(r.method()) + } + + case pkgbits.ObjVar: + pos := r.pos() + typ := r.typ() + declare(types.NewVar(pos, objPkg, objName, typ)) + } + } + + return objPkg, objName +} + +func (pr *pkgReader) objDictIdx(idx pkgbits.Index) *readerDict { + r := pr.newReader(pkgbits.RelocObjDict, idx, pkgbits.SyncObject1) + + var dict readerDict + + if implicits := r.Len(); implicits != 0 { + errorf("unexpected object with %v implicit type parameter(s)", implicits) + } + + dict.bounds = make([]typeInfo, r.Len()) + for i := range dict.bounds { + dict.bounds[i] = r.typInfo() + } + + dict.derived = make([]derivedInfo, r.Len()) + dict.derivedTypes = make([]types.Type, len(dict.derived)) + for i := range dict.derived { + dict.derived[i] = derivedInfo{r.Reloc(pkgbits.RelocType), r.Bool()} + } + + // function references follow, but reader doesn't need those + + return &dict +} + +func (r *reader) typeParamNames() []*types.TypeParam { + r.Sync(pkgbits.SyncTypeParamNames) + + // Note: This code assumes it only processes objects without + // implement type parameters. This is currently fine, because + // reader is only used to read in exported declarations, which are + // always package scoped. + + if len(r.dict.bounds) == 0 { + return nil + } + + // Careful: Type parameter lists may have cycles. To allow for this, + // we construct the type parameter list in two passes: first we + // create all the TypeNames and TypeParams, then we construct and + // set the bound type. + + r.dict.tparams = make([]*types.TypeParam, len(r.dict.bounds)) + for i := range r.dict.bounds { + pos := r.pos() + pkg, name := r.localIdent() + + tname := types.NewTypeName(pos, pkg, name, nil) + r.dict.tparams[i] = types.NewTypeParam(tname, nil) + } + + typs := make([]types.Type, len(r.dict.bounds)) + for i, bound := range r.dict.bounds { + typs[i] = r.p.typIdx(bound, r.dict) + } + + // TODO(mdempsky): This is subtle, elaborate further. + // + // We have to save tparams outside of the closure, because + // typeParamNames() can be called multiple times with the same + // dictionary instance. + // + // Also, this needs to happen later to make sure SetUnderlying has + // been called. + // + // TODO(mdempsky): Is it safe to have a single "later" slice or do + // we need to have multiple passes? See comments on CL 386002 and + // go.dev/issue/52104. + tparams := r.dict.tparams + r.p.later(func() { + for i, typ := range typs { + tparams[i].SetConstraint(typ) + } + }) + + return r.dict.tparams +} + +func (r *reader) method() *types.Func { + r.Sync(pkgbits.SyncMethod) + pos := r.pos() + pkg, name := r.selector() + + rparams := r.typeParamNames() + sig := r.signature(r.param(), rparams, nil) + + _ = r.pos() // TODO(mdempsky): Remove; this is a hacker for linker.go. + return types.NewFunc(pos, pkg, name, sig) +} + +func (r *reader) qualifiedIdent() (*types.Package, string) { return r.ident(pkgbits.SyncSym) } +func (r *reader) localIdent() (*types.Package, string) { return r.ident(pkgbits.SyncLocalIdent) } +func (r *reader) selector() (*types.Package, string) { return r.ident(pkgbits.SyncSelector) } + +func (r *reader) ident(marker pkgbits.SyncMarker) (*types.Package, string) { + r.Sync(marker) + return r.pkg(), r.String() +} + +// pkgScope returns pkg.Scope(). +// If pkg is nil, it returns types.Universe instead. +// +// TODO(mdempsky): Remove after x/tools can depend on Go 1.19. 
+func pkgScope(pkg *types.Package) *types.Scope { + if pkg != nil { + return pkg.Scope() + } + return types.Universe +} diff --git a/go/internal/pkgbits/codes.go b/go/internal/pkgbits/codes.go new file mode 100644 index 00000000000..f0cabde96eb --- /dev/null +++ b/go/internal/pkgbits/codes.go @@ -0,0 +1,77 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +// A Code is an enum value that can be encoded into bitstreams. +// +// Code types are preferable for enum types, because they allow +// Decoder to detect desyncs. +type Code interface { + // Marker returns the SyncMarker for the Code's dynamic type. + Marker() SyncMarker + + // Value returns the Code's ordinal value. + Value() int +} + +// A CodeVal distinguishes among go/constant.Value encodings. +type CodeVal int + +func (c CodeVal) Marker() SyncMarker { return SyncVal } +func (c CodeVal) Value() int { return int(c) } + +// Note: These values are public and cannot be changed without +// updating the go/types importers. + +const ( + ValBool CodeVal = iota + ValString + ValInt64 + ValBigInt + ValBigRat + ValBigFloat +) + +// A CodeType distinguishes among go/types.Type encodings. +type CodeType int + +func (c CodeType) Marker() SyncMarker { return SyncType } +func (c CodeType) Value() int { return int(c) } + +// Note: These values are public and cannot be changed without +// updating the go/types importers. + +const ( + TypeBasic CodeType = iota + TypeNamed + TypePointer + TypeSlice + TypeArray + TypeChan + TypeMap + TypeSignature + TypeStruct + TypeInterface + TypeUnion + TypeTypeParam +) + +// A CodeObj distinguishes among go/types.Object encodings. +type CodeObj int + +func (c CodeObj) Marker() SyncMarker { return SyncCodeObj } +func (c CodeObj) Value() int { return int(c) } + +// Note: These values are public and cannot be changed without +// updating the go/types importers. + +const ( + ObjAlias CodeObj = iota + ObjConst + ObjType + ObjFunc + ObjVar + ObjStub +) diff --git a/go/internal/pkgbits/decoder.go b/go/internal/pkgbits/decoder.go new file mode 100644 index 00000000000..2bc793668ec --- /dev/null +++ b/go/internal/pkgbits/decoder.go @@ -0,0 +1,433 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +import ( + "encoding/binary" + "fmt" + "go/constant" + "go/token" + "math/big" + "os" + "runtime" + "strings" +) + +// A PkgDecoder provides methods for decoding a package's Unified IR +// export data. +type PkgDecoder struct { + // version is the file format version. + version uint32 + + // sync indicates whether the file uses sync markers. + sync bool + + // pkgPath is the package path for the package to be decoded. + // + // TODO(mdempsky): Remove; unneeded since CL 391014. + pkgPath string + + // elemData is the full data payload of the encoded package. + // Elements are densely and contiguously packed together. + // + // The last 8 bytes of elemData are the package fingerprint. + elemData string + + // elemEnds stores the byte-offset end positions of element + // bitstreams within elemData. + // + // For example, element I's bitstream data starts at elemEnds[I-1] + // (or 0, if I==0) and ends at elemEnds[I]. + // + // Note: elemEnds is indexed by absolute indices, not + // section-relative indices. 
+ elemEnds []uint32 + + // elemEndsEnds stores the index-offset end positions of relocation + // sections within elemEnds. + // + // For example, section K's end positions start at elemEndsEnds[K-1] + // (or 0, if K==0) and end at elemEndsEnds[K]. + elemEndsEnds [numRelocs]uint32 +} + +// PkgPath returns the package path for the package +// +// TODO(mdempsky): Remove; unneeded since CL 391014. +func (pr *PkgDecoder) PkgPath() string { return pr.pkgPath } + +// SyncMarkers reports whether pr uses sync markers. +func (pr *PkgDecoder) SyncMarkers() bool { return pr.sync } + +// NewPkgDecoder returns a PkgDecoder initialized to read the Unified +// IR export data from input. pkgPath is the package path for the +// compilation unit that produced the export data. +// +// TODO(mdempsky): Remove pkgPath parameter; unneeded since CL 391014. +func NewPkgDecoder(pkgPath, input string) PkgDecoder { + pr := PkgDecoder{ + pkgPath: pkgPath, + } + + // TODO(mdempsky): Implement direct indexing of input string to + // avoid copying the position information. + + r := strings.NewReader(input) + + assert(binary.Read(r, binary.LittleEndian, &pr.version) == nil) + + switch pr.version { + default: + panic(fmt.Errorf("unsupported version: %v", pr.version)) + case 0: + // no flags + case 1: + var flags uint32 + assert(binary.Read(r, binary.LittleEndian, &flags) == nil) + pr.sync = flags&flagSyncMarkers != 0 + } + + assert(binary.Read(r, binary.LittleEndian, pr.elemEndsEnds[:]) == nil) + + pr.elemEnds = make([]uint32, pr.elemEndsEnds[len(pr.elemEndsEnds)-1]) + assert(binary.Read(r, binary.LittleEndian, pr.elemEnds[:]) == nil) + + pos, err := r.Seek(0, os.SEEK_CUR) + assert(err == nil) + + pr.elemData = input[pos:] + assert(len(pr.elemData)-8 == int(pr.elemEnds[len(pr.elemEnds)-1])) + + return pr +} + +// NumElems returns the number of elements in section k. +func (pr *PkgDecoder) NumElems(k RelocKind) int { + count := int(pr.elemEndsEnds[k]) + if k > 0 { + count -= int(pr.elemEndsEnds[k-1]) + } + return count +} + +// TotalElems returns the total number of elements across all sections. +func (pr *PkgDecoder) TotalElems() int { + return len(pr.elemEnds) +} + +// Fingerprint returns the package fingerprint. +func (pr *PkgDecoder) Fingerprint() [8]byte { + var fp [8]byte + copy(fp[:], pr.elemData[len(pr.elemData)-8:]) + return fp +} + +// AbsIdx returns the absolute index for the given (section, index) +// pair. +func (pr *PkgDecoder) AbsIdx(k RelocKind, idx Index) int { + absIdx := int(idx) + if k > 0 { + absIdx += int(pr.elemEndsEnds[k-1]) + } + if absIdx >= int(pr.elemEndsEnds[k]) { + errorf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds) + } + return absIdx +} + +// DataIdx returns the raw element bitstream for the given (section, +// index) pair. +func (pr *PkgDecoder) DataIdx(k RelocKind, idx Index) string { + absIdx := pr.AbsIdx(k, idx) + + var start uint32 + if absIdx > 0 { + start = pr.elemEnds[absIdx-1] + } + end := pr.elemEnds[absIdx] + + return pr.elemData[start:end] +} + +// StringIdx returns the string value for the given string index. +func (pr *PkgDecoder) StringIdx(idx Index) string { + return pr.DataIdx(RelocString, idx) +} + +// NewDecoder returns a Decoder for the given (section, index) pair, +// and decodes the given SyncMarker from the element bitstream. +func (pr *PkgDecoder) NewDecoder(k RelocKind, idx Index, marker SyncMarker) Decoder { + r := pr.NewDecoderRaw(k, idx) + r.Sync(marker) + return r +} + +// NewDecoderRaw returns a Decoder for the given (section, index) pair. 
+// +// Most callers should use NewDecoder instead. +func (pr *PkgDecoder) NewDecoderRaw(k RelocKind, idx Index) Decoder { + r := Decoder{ + common: pr, + k: k, + Idx: idx, + } + + // TODO(mdempsky) r.data.Reset(...) after #44505 is resolved. + r.Data = *strings.NewReader(pr.DataIdx(k, idx)) + + r.Sync(SyncRelocs) + r.Relocs = make([]RelocEnt, r.Len()) + for i := range r.Relocs { + r.Sync(SyncReloc) + r.Relocs[i] = RelocEnt{RelocKind(r.Len()), Index(r.Len())} + } + + return r +} + +// A Decoder provides methods for decoding an individual element's +// bitstream data. +type Decoder struct { + common *PkgDecoder + + Relocs []RelocEnt + Data strings.Reader + + k RelocKind + Idx Index +} + +func (r *Decoder) checkErr(err error) { + if err != nil { + errorf("unexpected decoding error: %w", err) + } +} + +func (r *Decoder) rawUvarint() uint64 { + x, err := binary.ReadUvarint(&r.Data) + r.checkErr(err) + return x +} + +func (r *Decoder) rawVarint() int64 { + ux := r.rawUvarint() + + // Zig-zag decode. + x := int64(ux >> 1) + if ux&1 != 0 { + x = ^x + } + return x +} + +func (r *Decoder) rawReloc(k RelocKind, idx int) Index { + e := r.Relocs[idx] + assert(e.Kind == k) + return e.Idx +} + +// Sync decodes a sync marker from the element bitstream and asserts +// that it matches the expected marker. +// +// If r.common.sync is false, then Sync is a no-op. +func (r *Decoder) Sync(mWant SyncMarker) { + if !r.common.sync { + return + } + + pos, _ := r.Data.Seek(0, os.SEEK_CUR) // TODO(mdempsky): io.SeekCurrent after #44505 is resolved + mHave := SyncMarker(r.rawUvarint()) + writerPCs := make([]int, r.rawUvarint()) + for i := range writerPCs { + writerPCs[i] = int(r.rawUvarint()) + } + + if mHave == mWant { + return + } + + // There's some tension here between printing: + // + // (1) full file paths that tools can recognize (e.g., so emacs + // hyperlinks the "file:line" text for easy navigation), or + // + // (2) short file paths that are easier for humans to read (e.g., by + // omitting redundant or irrelevant details, so it's easier to + // focus on the useful bits that remain). + // + // The current formatting favors the former, as it seems more + // helpful in practice. But perhaps the formatting could be improved + // to better address both concerns. For example, use relative file + // paths if they would be shorter, or rewrite file paths to contain + // "$GOROOT" (like objabi.AbsFile does) if tools can be taught how + // to reliably expand that again. + + fmt.Printf("export data desync: package %q, section %v, index %v, offset %v\n", r.common.pkgPath, r.k, r.Idx, pos) + + fmt.Printf("\nfound %v, written at:\n", mHave) + if len(writerPCs) == 0 { + fmt.Printf("\t[stack trace unavailable; recompile package %q with -d=syncframes]\n", r.common.pkgPath) + } + for _, pc := range writerPCs { + fmt.Printf("\t%s\n", r.common.StringIdx(r.rawReloc(RelocString, pc))) + } + + fmt.Printf("\nexpected %v, reading at:\n", mWant) + var readerPCs [32]uintptr // TODO(mdempsky): Dynamically size? + n := runtime.Callers(2, readerPCs[:]) + for _, pc := range fmtFrames(readerPCs[:n]...) { + fmt.Printf("\t%s\n", pc) + } + + // We already printed a stack trace for the reader, so now we can + // simply exit. Printing a second one with panic or base.Fatalf + // would just be noise. + os.Exit(1) +} + +// Bool decodes and returns a bool value from the element bitstream. 
+func (r *Decoder) Bool() bool { + r.Sync(SyncBool) + x, err := r.Data.ReadByte() + r.checkErr(err) + assert(x < 2) + return x != 0 +} + +// Int64 decodes and returns an int64 value from the element bitstream. +func (r *Decoder) Int64() int64 { + r.Sync(SyncInt64) + return r.rawVarint() +} + +// Int64 decodes and returns a uint64 value from the element bitstream. +func (r *Decoder) Uint64() uint64 { + r.Sync(SyncUint64) + return r.rawUvarint() +} + +// Len decodes and returns a non-negative int value from the element bitstream. +func (r *Decoder) Len() int { x := r.Uint64(); v := int(x); assert(uint64(v) == x); return v } + +// Int decodes and returns an int value from the element bitstream. +func (r *Decoder) Int() int { x := r.Int64(); v := int(x); assert(int64(v) == x); return v } + +// Uint decodes and returns a uint value from the element bitstream. +func (r *Decoder) Uint() uint { x := r.Uint64(); v := uint(x); assert(uint64(v) == x); return v } + +// Code decodes a Code value from the element bitstream and returns +// its ordinal value. It's the caller's responsibility to convert the +// result to an appropriate Code type. +// +// TODO(mdempsky): Ideally this method would have signature "Code[T +// Code] T" instead, but we don't allow generic methods and the +// compiler can't depend on generics yet anyway. +func (r *Decoder) Code(mark SyncMarker) int { + r.Sync(mark) + return r.Len() +} + +// Reloc decodes a relocation of expected section k from the element +// bitstream and returns an index to the referenced element. +func (r *Decoder) Reloc(k RelocKind) Index { + r.Sync(SyncUseReloc) + return r.rawReloc(k, r.Len()) +} + +// String decodes and returns a string value from the element +// bitstream. +func (r *Decoder) String() string { + r.Sync(SyncString) + return r.common.StringIdx(r.Reloc(RelocString)) +} + +// Strings decodes and returns a variable-length slice of strings from +// the element bitstream. +func (r *Decoder) Strings() []string { + res := make([]string, r.Len()) + for i := range res { + res[i] = r.String() + } + return res +} + +// Value decodes and returns a constant.Value from the element +// bitstream. +func (r *Decoder) Value() constant.Value { + r.Sync(SyncValue) + isComplex := r.Bool() + val := r.scalar() + if isComplex { + val = constant.BinaryOp(val, token.ADD, constant.MakeImag(r.scalar())) + } + return val +} + +func (r *Decoder) scalar() constant.Value { + switch tag := CodeVal(r.Code(SyncVal)); tag { + default: + panic(fmt.Errorf("unexpected scalar tag: %v", tag)) + + case ValBool: + return constant.MakeBool(r.Bool()) + case ValString: + return constant.MakeString(r.String()) + case ValInt64: + return constant.MakeInt64(r.Int64()) + case ValBigInt: + return constant.Make(r.bigInt()) + case ValBigRat: + num := r.bigInt() + denom := r.bigInt() + return constant.Make(new(big.Rat).SetFrac(num, denom)) + case ValBigFloat: + return constant.Make(r.bigFloat()) + } +} + +func (r *Decoder) bigInt() *big.Int { + v := new(big.Int).SetBytes([]byte(r.String())) + if r.Bool() { + v.Neg(v) + } + return v +} + +func (r *Decoder) bigFloat() *big.Float { + v := new(big.Float).SetPrec(512) + assert(v.UnmarshalText([]byte(r.String())) == nil) + return v +} + +// @@@ Helpers + +// TODO(mdempsky): These should probably be removed. I think they're a +// smell that the export data format is not yet quite right. + +// PeekPkgPath returns the package path for the specified package +// index. 
+func (pr *PkgDecoder) PeekPkgPath(idx Index) string { + r := pr.NewDecoder(RelocPkg, idx, SyncPkgDef) + path := r.String() + if path == "" { + path = pr.pkgPath + } + return path +} + +// PeekObj returns the package path, object name, and CodeObj for the +// specified object index. +func (pr *PkgDecoder) PeekObj(idx Index) (string, string, CodeObj) { + r := pr.NewDecoder(RelocName, idx, SyncObject1) + r.Sync(SyncSym) + r.Sync(SyncPkg) + path := pr.PeekPkgPath(r.Reloc(RelocPkg)) + name := r.String() + assert(name != "") + + tag := CodeObj(r.Code(SyncCodeObj)) + + return path, name, tag +} diff --git a/go/internal/pkgbits/doc.go b/go/internal/pkgbits/doc.go new file mode 100644 index 00000000000..c8a2796b5e4 --- /dev/null +++ b/go/internal/pkgbits/doc.go @@ -0,0 +1,32 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package pkgbits implements low-level coding abstractions for +// Unified IR's export data format. +// +// At a low-level, a package is a collection of bitstream elements. +// Each element has a "kind" and a dense, non-negative index. +// Elements can be randomly accessed given their kind and index. +// +// Individual elements are sequences of variable-length values (e.g., +// integers, booleans, strings, go/constant values, cross-references +// to other elements). Package pkgbits provides APIs for encoding and +// decoding these low-level values, but the details of mapping +// higher-level Go constructs into elements is left to higher-level +// abstractions. +// +// Elements may cross-reference each other with "relocations." For +// example, an element representing a pointer type has a relocation +// referring to the element type. +// +// Go constructs may be composed as a constellation of multiple +// elements. For example, a declared function may have one element to +// describe the object (e.g., its name, type, position), and a +// separate element to describe its function body. This allows readers +// some flexibility in efficiently seeking or re-reading data (e.g., +// inlining requires re-reading the function body for each inlined +// call, without needing to re-read the object-level details). +// +// This is a copy of internal/pkgbits in the Go implementation. +package pkgbits diff --git a/go/internal/pkgbits/encoder.go b/go/internal/pkgbits/encoder.go new file mode 100644 index 00000000000..c50c838caae --- /dev/null +++ b/go/internal/pkgbits/encoder.go @@ -0,0 +1,379 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +import ( + "bytes" + "crypto/md5" + "encoding/binary" + "go/constant" + "io" + "math/big" + "runtime" +) + +// currentVersion is the current version number. +// +// - v0: initial prototype +// +// - v1: adds the flags uint32 word +const currentVersion uint32 = 1 + +// A PkgEncoder provides methods for encoding a package's Unified IR +// export data. +type PkgEncoder struct { + // elems holds the bitstream for previously encoded elements. + elems [numRelocs][]string + + // stringsIdx maps previously encoded strings to their index within + // the RelocString section, to allow deduplication. That is, + // elems[RelocString][stringsIdx[s]] == s (if present). + stringsIdx map[string]Index + + // syncFrames is the number of frames to write at each sync + // marker. 
A negative value means sync markers are omitted. + syncFrames int +} + +// SyncMarkers reports whether pw uses sync markers. +func (pw *PkgEncoder) SyncMarkers() bool { return pw.syncFrames >= 0 } + +// NewPkgEncoder returns an initialized PkgEncoder. +// +// syncFrames is the number of caller frames that should be serialized +// at Sync points. Serializing additional frames results in larger +// export data files, but can help diagnosing desync errors in +// higher-level Unified IR reader/writer code. If syncFrames is +// negative, then sync markers are omitted entirely. +func NewPkgEncoder(syncFrames int) PkgEncoder { + return PkgEncoder{ + stringsIdx: make(map[string]Index), + syncFrames: syncFrames, + } +} + +// DumpTo writes the package's encoded data to out0 and returns the +// package fingerprint. +func (pw *PkgEncoder) DumpTo(out0 io.Writer) (fingerprint [8]byte) { + h := md5.New() + out := io.MultiWriter(out0, h) + + writeUint32 := func(x uint32) { + assert(binary.Write(out, binary.LittleEndian, x) == nil) + } + + writeUint32(currentVersion) + + var flags uint32 + if pw.SyncMarkers() { + flags |= flagSyncMarkers + } + writeUint32(flags) + + // Write elemEndsEnds. + var sum uint32 + for _, elems := range &pw.elems { + sum += uint32(len(elems)) + writeUint32(sum) + } + + // Write elemEnds. + sum = 0 + for _, elems := range &pw.elems { + for _, elem := range elems { + sum += uint32(len(elem)) + writeUint32(sum) + } + } + + // Write elemData. + for _, elems := range &pw.elems { + for _, elem := range elems { + _, err := io.WriteString(out, elem) + assert(err == nil) + } + } + + // Write fingerprint. + copy(fingerprint[:], h.Sum(nil)) + _, err := out0.Write(fingerprint[:]) + assert(err == nil) + + return +} + +// StringIdx adds a string value to the strings section, if not +// already present, and returns its index. +func (pw *PkgEncoder) StringIdx(s string) Index { + if idx, ok := pw.stringsIdx[s]; ok { + assert(pw.elems[RelocString][idx] == s) + return idx + } + + idx := Index(len(pw.elems[RelocString])) + pw.elems[RelocString] = append(pw.elems[RelocString], s) + pw.stringsIdx[s] = idx + return idx +} + +// NewEncoder returns an Encoder for a new element within the given +// section, and encodes the given SyncMarker as the start of the +// element bitstream. +func (pw *PkgEncoder) NewEncoder(k RelocKind, marker SyncMarker) Encoder { + e := pw.NewEncoderRaw(k) + e.Sync(marker) + return e +} + +// NewEncoderRaw returns an Encoder for a new element within the given +// section. +// +// Most callers should use NewEncoder instead. +func (pw *PkgEncoder) NewEncoderRaw(k RelocKind) Encoder { + idx := Index(len(pw.elems[k])) + pw.elems[k] = append(pw.elems[k], "") // placeholder + + return Encoder{ + p: pw, + k: k, + Idx: idx, + } +} + +// An Encoder provides methods for encoding an individual element's +// bitstream data. +type Encoder struct { + p *PkgEncoder + + Relocs []RelocEnt + Data bytes.Buffer // accumulated element bitstream data + + encodingRelocHeader bool + + k RelocKind + Idx Index // index within relocation section +} + +// Flush finalizes the element's bitstream and returns its Index. +func (w *Encoder) Flush() Index { + var sb bytes.Buffer // TODO(mdempsky): strings.Builder after #44505 is resolved + + // Backup the data so we write the relocations at the front. 
+ var tmp bytes.Buffer + io.Copy(&tmp, &w.Data) + + // TODO(mdempsky): Consider writing these out separately so they're + // easier to strip, along with function bodies, so that we can prune + // down to just the data that's relevant to go/types. + if w.encodingRelocHeader { + panic("encodingRelocHeader already true; recursive flush?") + } + w.encodingRelocHeader = true + w.Sync(SyncRelocs) + w.Len(len(w.Relocs)) + for _, rEnt := range w.Relocs { + w.Sync(SyncReloc) + w.Len(int(rEnt.Kind)) + w.Len(int(rEnt.Idx)) + } + + io.Copy(&sb, &w.Data) + io.Copy(&sb, &tmp) + w.p.elems[w.k][w.Idx] = sb.String() + + return w.Idx +} + +func (w *Encoder) checkErr(err error) { + if err != nil { + errorf("unexpected encoding error: %v", err) + } +} + +func (w *Encoder) rawUvarint(x uint64) { + var buf [binary.MaxVarintLen64]byte + n := binary.PutUvarint(buf[:], x) + _, err := w.Data.Write(buf[:n]) + w.checkErr(err) +} + +func (w *Encoder) rawVarint(x int64) { + // Zig-zag encode. + ux := uint64(x) << 1 + if x < 0 { + ux = ^ux + } + + w.rawUvarint(ux) +} + +func (w *Encoder) rawReloc(r RelocKind, idx Index) int { + // TODO(mdempsky): Use map for lookup; this takes quadratic time. + for i, rEnt := range w.Relocs { + if rEnt.Kind == r && rEnt.Idx == idx { + return i + } + } + + i := len(w.Relocs) + w.Relocs = append(w.Relocs, RelocEnt{r, idx}) + return i +} + +func (w *Encoder) Sync(m SyncMarker) { + if !w.p.SyncMarkers() { + return + } + + // Writing out stack frame string references requires working + // relocations, but writing out the relocations themselves involves + // sync markers. To prevent infinite recursion, we simply trim the + // stack frame for sync markers within the relocation header. + var frames []string + if !w.encodingRelocHeader && w.p.syncFrames > 0 { + pcs := make([]uintptr, w.p.syncFrames) + n := runtime.Callers(2, pcs) + frames = fmtFrames(pcs[:n]...) + } + + // TODO(mdempsky): Save space by writing out stack frames as a + // linked list so we can share common stack frames. + w.rawUvarint(uint64(m)) + w.rawUvarint(uint64(len(frames))) + for _, frame := range frames { + w.rawUvarint(uint64(w.rawReloc(RelocString, w.p.StringIdx(frame)))) + } +} + +// Bool encodes and writes a bool value into the element bitstream, +// and then returns the bool value. +// +// For simple, 2-alternative encodings, the idiomatic way to call Bool +// is something like: +// +// if w.Bool(x != 0) { +// // alternative #1 +// } else { +// // alternative #2 +// } +// +// For multi-alternative encodings, use Code instead. +func (w *Encoder) Bool(b bool) bool { + w.Sync(SyncBool) + var x byte + if b { + x = 1 + } + err := w.Data.WriteByte(x) + w.checkErr(err) + return b +} + +// Int64 encodes and writes an int64 value into the element bitstream. +func (w *Encoder) Int64(x int64) { + w.Sync(SyncInt64) + w.rawVarint(x) +} + +// Uint64 encodes and writes a uint64 value into the element bitstream. +func (w *Encoder) Uint64(x uint64) { + w.Sync(SyncUint64) + w.rawUvarint(x) +} + +// Len encodes and writes a non-negative int value into the element bitstream. +func (w *Encoder) Len(x int) { assert(x >= 0); w.Uint64(uint64(x)) } + +// Int encodes and writes an int value into the element bitstream. +func (w *Encoder) Int(x int) { w.Int64(int64(x)) } + +// Len encodes and writes a uint value into the element bitstream. +func (w *Encoder) Uint(x uint) { w.Uint64(uint64(x)) } + +// Reloc encodes and writes a relocation for the given (section, +// index) pair into the element bitstream. 
+// +// Note: Only the index is formally written into the element +// bitstream, so bitstream decoders must know from context which +// section an encoded relocation refers to. +func (w *Encoder) Reloc(r RelocKind, idx Index) { + w.Sync(SyncUseReloc) + w.Len(w.rawReloc(r, idx)) +} + +// Code encodes and writes a Code value into the element bitstream. +func (w *Encoder) Code(c Code) { + w.Sync(c.Marker()) + w.Len(c.Value()) +} + +// String encodes and writes a string value into the element +// bitstream. +// +// Internally, strings are deduplicated by adding them to the strings +// section (if not already present), and then writing a relocation +// into the element bitstream. +func (w *Encoder) String(s string) { + w.Sync(SyncString) + w.Reloc(RelocString, w.p.StringIdx(s)) +} + +// Strings encodes and writes a variable-length slice of strings into +// the element bitstream. +func (w *Encoder) Strings(ss []string) { + w.Len(len(ss)) + for _, s := range ss { + w.String(s) + } +} + +// Value encodes and writes a constant.Value into the element +// bitstream. +func (w *Encoder) Value(val constant.Value) { + w.Sync(SyncValue) + if w.Bool(val.Kind() == constant.Complex) { + w.scalar(constant.Real(val)) + w.scalar(constant.Imag(val)) + } else { + w.scalar(val) + } +} + +func (w *Encoder) scalar(val constant.Value) { + switch v := constant.Val(val).(type) { + default: + errorf("unhandled %v (%v)", val, val.Kind()) + case bool: + w.Code(ValBool) + w.Bool(v) + case string: + w.Code(ValString) + w.String(v) + case int64: + w.Code(ValInt64) + w.Int64(v) + case *big.Int: + w.Code(ValBigInt) + w.bigInt(v) + case *big.Rat: + w.Code(ValBigRat) + w.bigInt(v.Num()) + w.bigInt(v.Denom()) + case *big.Float: + w.Code(ValBigFloat) + w.bigFloat(v) + } +} + +func (w *Encoder) bigInt(v *big.Int) { + b := v.Bytes() + w.String(string(b)) // TODO: More efficient encoding. + w.Bool(v.Sign() < 0) +} + +func (w *Encoder) bigFloat(v *big.Float) { + b := v.Append(nil, 'p', -1) + w.String(string(b)) // TODO: More efficient encoding. +} diff --git a/go/internal/pkgbits/flags.go b/go/internal/pkgbits/flags.go new file mode 100644 index 00000000000..654222745fa --- /dev/null +++ b/go/internal/pkgbits/flags.go @@ -0,0 +1,9 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +const ( + flagSyncMarkers = 1 << iota // file format contains sync markers +) diff --git a/go/internal/pkgbits/frames_go1.go b/go/internal/pkgbits/frames_go1.go new file mode 100644 index 00000000000..5294f6a63ed --- /dev/null +++ b/go/internal/pkgbits/frames_go1.go @@ -0,0 +1,21 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.7 +// +build !go1.7 + +// TODO(mdempsky): Remove after #44505 is resolved + +package pkgbits + +import "runtime" + +func walkFrames(pcs []uintptr, visit frameVisitor) { + for _, pc := range pcs { + fn := runtime.FuncForPC(pc) + file, line := fn.FileLine(pc) + + visit(file, line, fn.Name(), pc-fn.Entry()) + } +} diff --git a/go/internal/pkgbits/frames_go17.go b/go/internal/pkgbits/frames_go17.go new file mode 100644 index 00000000000..2324ae7adfe --- /dev/null +++ b/go/internal/pkgbits/frames_go17.go @@ -0,0 +1,28 @@ +// Copyright 2021 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.7 +// +build go1.7 + +package pkgbits + +import "runtime" + +// walkFrames calls visit for each call frame represented by pcs. +// +// pcs should be a slice of PCs, as returned by runtime.Callers. +func walkFrames(pcs []uintptr, visit frameVisitor) { + if len(pcs) == 0 { + return + } + + frames := runtime.CallersFrames(pcs) + for { + frame, more := frames.Next() + visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry) + if !more { + return + } + } +} diff --git a/go/internal/pkgbits/reloc.go b/go/internal/pkgbits/reloc.go new file mode 100644 index 00000000000..7a8f04ab3fc --- /dev/null +++ b/go/internal/pkgbits/reloc.go @@ -0,0 +1,42 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +// A RelocKind indicates a particular section within a unified IR export. +type RelocKind int + +// An Index represents a bitstream element index within a particular +// section. +type Index int + +// A relocEnt (relocation entry) is an entry in an element's local +// reference table. +// +// TODO(mdempsky): Rename this too. +type RelocEnt struct { + Kind RelocKind + Idx Index +} + +// Reserved indices within the meta relocation section. +const ( + PublicRootIdx Index = 0 + PrivateRootIdx Index = 1 +) + +const ( + RelocString RelocKind = iota + RelocMeta + RelocPosBase + RelocPkg + RelocName + RelocType + RelocObj + RelocObjExt + RelocObjDict + RelocBody + + numRelocs = iota +) diff --git a/go/internal/pkgbits/support.go b/go/internal/pkgbits/support.go new file mode 100644 index 00000000000..ad26d3b28ca --- /dev/null +++ b/go/internal/pkgbits/support.go @@ -0,0 +1,17 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +import "fmt" + +func assert(b bool) { + if !b { + panic("assertion failed") + } +} + +func errorf(format string, args ...interface{}) { + panic(fmt.Errorf(format, args...)) +} diff --git a/go/internal/pkgbits/sync.go b/go/internal/pkgbits/sync.go new file mode 100644 index 00000000000..5bd51ef7170 --- /dev/null +++ b/go/internal/pkgbits/sync.go @@ -0,0 +1,113 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +import ( + "fmt" + "strings" +) + +// fmtFrames formats a backtrace for reporting reader/writer desyncs. +func fmtFrames(pcs ...uintptr) []string { + res := make([]string, 0, len(pcs)) + walkFrames(pcs, func(file string, line int, name string, offset uintptr) { + // Trim package from function name. It's just redundant noise. + name = strings.TrimPrefix(name, "cmd/compile/internal/noder.") + + res = append(res, fmt.Sprintf("%s:%v: %s +0x%v", file, line, name, offset)) + }) + return res +} + +type frameVisitor func(file string, line int, name string, offset uintptr) + +// SyncMarker is an enum type that represents markers that may be +// written to export data to ensure the reader and writer stay +// synchronized. +type SyncMarker int + +//go:generate stringer -type=SyncMarker -trimprefix=Sync + +const ( + _ SyncMarker = iota + + // Public markers (known to go/types importers). + + // Low-level coding markers. 
+ SyncEOF + SyncBool + SyncInt64 + SyncUint64 + SyncString + SyncValue + SyncVal + SyncRelocs + SyncReloc + SyncUseReloc + + // Higher-level object and type markers. + SyncPublic + SyncPos + SyncPosBase + SyncObject + SyncObject1 + SyncPkg + SyncPkgDef + SyncMethod + SyncType + SyncTypeIdx + SyncTypeParamNames + SyncSignature + SyncParams + SyncParam + SyncCodeObj + SyncSym + SyncLocalIdent + SyncSelector + + // Private markers (only known to cmd/compile). + SyncPrivate + + SyncFuncExt + SyncVarExt + SyncTypeExt + SyncPragma + + SyncExprList + SyncExprs + SyncExpr + SyncExprType + SyncAssign + SyncOp + SyncFuncLit + SyncCompLit + + SyncDecl + SyncFuncBody + SyncOpenScope + SyncCloseScope + SyncCloseAnotherScope + SyncDeclNames + SyncDeclName + + SyncStmts + SyncBlockStmt + SyncIfStmt + SyncForStmt + SyncSwitchStmt + SyncRangeStmt + SyncCaseClause + SyncCommClause + SyncSelectStmt + SyncDecls + SyncLabeledStmt + SyncUseObjLocal + SyncAddLocal + SyncLinkname + SyncStmt1 + SyncStmtsEnd + SyncLabel + SyncOptLabel +) diff --git a/go/internal/pkgbits/syncmarker_string.go b/go/internal/pkgbits/syncmarker_string.go new file mode 100644 index 00000000000..4a5b0ca5f2f --- /dev/null +++ b/go/internal/pkgbits/syncmarker_string.go @@ -0,0 +1,89 @@ +// Code generated by "stringer -type=SyncMarker -trimprefix=Sync"; DO NOT EDIT. + +package pkgbits + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[SyncEOF-1] + _ = x[SyncBool-2] + _ = x[SyncInt64-3] + _ = x[SyncUint64-4] + _ = x[SyncString-5] + _ = x[SyncValue-6] + _ = x[SyncVal-7] + _ = x[SyncRelocs-8] + _ = x[SyncReloc-9] + _ = x[SyncUseReloc-10] + _ = x[SyncPublic-11] + _ = x[SyncPos-12] + _ = x[SyncPosBase-13] + _ = x[SyncObject-14] + _ = x[SyncObject1-15] + _ = x[SyncPkg-16] + _ = x[SyncPkgDef-17] + _ = x[SyncMethod-18] + _ = x[SyncType-19] + _ = x[SyncTypeIdx-20] + _ = x[SyncTypeParamNames-21] + _ = x[SyncSignature-22] + _ = x[SyncParams-23] + _ = x[SyncParam-24] + _ = x[SyncCodeObj-25] + _ = x[SyncSym-26] + _ = x[SyncLocalIdent-27] + _ = x[SyncSelector-28] + _ = x[SyncPrivate-29] + _ = x[SyncFuncExt-30] + _ = x[SyncVarExt-31] + _ = x[SyncTypeExt-32] + _ = x[SyncPragma-33] + _ = x[SyncExprList-34] + _ = x[SyncExprs-35] + _ = x[SyncExpr-36] + _ = x[SyncExprType-37] + _ = x[SyncAssign-38] + _ = x[SyncOp-39] + _ = x[SyncFuncLit-40] + _ = x[SyncCompLit-41] + _ = x[SyncDecl-42] + _ = x[SyncFuncBody-43] + _ = x[SyncOpenScope-44] + _ = x[SyncCloseScope-45] + _ = x[SyncCloseAnotherScope-46] + _ = x[SyncDeclNames-47] + _ = x[SyncDeclName-48] + _ = x[SyncStmts-49] + _ = x[SyncBlockStmt-50] + _ = x[SyncIfStmt-51] + _ = x[SyncForStmt-52] + _ = x[SyncSwitchStmt-53] + _ = x[SyncRangeStmt-54] + _ = x[SyncCaseClause-55] + _ = x[SyncCommClause-56] + _ = x[SyncSelectStmt-57] + _ = x[SyncDecls-58] + _ = x[SyncLabeledStmt-59] + _ = x[SyncUseObjLocal-60] + _ = x[SyncAddLocal-61] + _ = x[SyncLinkname-62] + _ = x[SyncStmt1-63] + _ = x[SyncStmtsEnd-64] + _ = x[SyncLabel-65] + _ = x[SyncOptLabel-66] +} + +const _SyncMarker_name = 
"EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabel" + +var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 432, 437, 445, 450, 458} + +func (i SyncMarker) String() string { + i -= 1 + if i < 0 || i >= SyncMarker(len(_SyncMarker_index)-1) { + return "SyncMarker(" + strconv.FormatInt(int64(i+1), 10) + ")" + } + return _SyncMarker_name[_SyncMarker_index[i]:_SyncMarker_index[i+1]] +} From 32129bf2c952acf093fdc20f81734e68d68ed8af Mon Sep 17 00:00:00 2001 From: Robert Findley Date: Thu, 14 Jul 2022 19:30:14 -0400 Subject: [PATCH 109/136] go/internal/gcimporter: adjust importer to match compiler importer This is a port of CL 288632 to x/tools/go/internal/gcimporter. This logic was ostensibly unported to avoid breaking build compatibility with go1.12. Now that gopls no longer support 1.12, It is safe to make this change. Fixes golang/go#53803 Change-Id: Ic9b4d7a60511076a83d8fa72cf7c4a3bdcab3fce Reviewed-on: https://go-review.googlesource.com/c/tools/+/417580 Reviewed-by: Jamal Carvalho gopls-CI: kokoro TryBot-Result: Gopher Robot Run-TryBot: Robert Findley --- go/internal/gcimporter/iimport.go | 60 +++++++++++-------------------- 1 file changed, 20 insertions(+), 40 deletions(-) diff --git a/go/internal/gcimporter/iimport.go b/go/internal/gcimporter/iimport.go index 28b91b86567..4caa0f55d9d 100644 --- a/go/internal/gcimporter/iimport.go +++ b/go/internal/gcimporter/iimport.go @@ -17,6 +17,7 @@ import ( "go/token" "go/types" "io" + "math/big" "sort" "strings" @@ -512,7 +513,9 @@ func (r *importReader) value() (typ types.Type, val constant.Value) { val = constant.MakeString(r.string()) case types.IsInteger: - val = r.mpint(b) + var x big.Int + r.mpint(&x, b) + val = constant.Make(&x) case types.IsFloat: val = r.mpfloat(b) @@ -561,8 +564,8 @@ func intSize(b *types.Basic) (signed bool, maxBytes uint) { return } -func (r *importReader) mpint(b *types.Basic) constant.Value { - signed, maxBytes := intSize(b) +func (r *importReader) mpint(x *big.Int, typ *types.Basic) { + signed, maxBytes := intSize(typ) maxSmall := 256 - maxBytes if signed { @@ -581,7 +584,8 @@ func (r *importReader) mpint(b *types.Basic) constant.Value { v = ^v } } - return constant.MakeInt64(v) + x.SetInt64(v) + return } v := -n @@ -591,47 +595,23 @@ func (r *importReader) mpint(b *types.Basic) constant.Value { if v < 1 || uint(v) > maxBytes { errorf("weird decoding: %v, %v => %v", n, signed, v) } - - buf := make([]byte, v) - io.ReadFull(&r.declReader, buf) - - // convert to little endian - // TODO(gri) go/constant should have a more direct conversion function - // (e.g., once it supports a big.Float based implementation) - for i, j := 0, len(buf)-1; i < j; i, j = i+1, j-1 { - buf[i], buf[j] = buf[j], buf[i] - } - - x := constant.MakeFromBytes(buf) + b := make([]byte, v) + io.ReadFull(&r.declReader, b) + x.SetBytes(b) if signed && n&1 != 0 { - x = 
constant.UnaryOp(token.SUB, x, 0) + x.Neg(x) } - return x } -func (r *importReader) mpfloat(b *types.Basic) constant.Value { - x := r.mpint(b) - if constant.Sign(x) == 0 { - return x - } - - exp := r.int64() - switch { - case exp > 0: - x = constant.Shift(x, token.SHL, uint(exp)) - // Ensure that the imported Kind is Float, else this constant may run into - // bitsize limits on overlarge integers. Eventually we can instead adopt - // the approach of CL 288632, but that CL relies on go/constant APIs that - // were introduced in go1.13. - // - // TODO(rFindley): sync the logic here with tip Go once we no longer - // support go1.12. - x = constant.ToFloat(x) - case exp < 0: - d := constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp)) - x = constant.BinaryOp(x, token.QUO, d) +func (r *importReader) mpfloat(typ *types.Basic) constant.Value { + var mant big.Int + r.mpint(&mant, typ) + var f big.Float + f.SetInt(&mant) + if f.Sign() != 0 { + f.SetMantExp(&f, int(r.int64())) } - return x + return constant.Make(&f) } func (r *importReader) ident() string { From ce6ce766265714553035d328a9be4f313c63e5fd Mon Sep 17 00:00:00 2001 From: Robert Findley Date: Fri, 15 Jul 2022 11:11:52 -0400 Subject: [PATCH 110/136] internal/lsp/regtest: increase the time allowed for shutdown Now that we await ongoing work during shutdown, we are seeing regtest flakes simply due to outstanding go command invocations. Allow more time for cleanup. If this is insufficient, we can be more aggressive about terminating go command processes when context is cancelled. For golang/go#53820 Change-Id: I3df3c5510dae34cb14a6efeb02c2963a71e64f3a Reviewed-on: https://go-review.googlesource.com/c/tools/+/417583 gopls-CI: kokoro TryBot-Result: Gopher Robot Reviewed-by: Dylan Le Run-TryBot: Robert Findley --- internal/lsp/regtest/runner.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/internal/lsp/regtest/runner.go b/internal/lsp/regtest/runner.go index 0640e452fb1..b2992e99392 100644 --- a/internal/lsp/regtest/runner.go +++ b/internal/lsp/regtest/runner.go @@ -341,9 +341,13 @@ func (r *Runner) Run(t *testing.T, files string, test TestFunc, opts ...RunOptio } // For tests that failed due to a timeout, don't fail to shutdown // because ctx is done. - closeCtx, cancel := context.WithTimeout(xcontext.Detach(ctx), 5*time.Second) + // + // golang/go#53820: now that we await the completion of ongoing work in + // shutdown, we must allow a significant amount of time for ongoing go + // command invocations to exit. + ctx, cancel := context.WithTimeout(xcontext.Detach(ctx), 30*time.Second) defer cancel() - if err := env.Editor.Close(closeCtx); err != nil { + if err := env.Editor.Close(ctx); err != nil { pprof.Lookup("goroutine").WriteTo(os.Stderr, 1) t.Errorf("closing editor: %v", err) } From dc45e742f0ab02c7fbaff52e51724f628ae84b84 Mon Sep 17 00:00:00 2001 From: Dylan Le Date: Mon, 27 Jun 2022 00:10:22 -0400 Subject: [PATCH 111/136] internal/lsp: Update FilterDisallow to support matching directories at arbitrary depth. In FilterDisallow, change filter to regex form to match with file paths. Add a unit regtest for FilterDisallow. 
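To make the new filter semantics concrete, here is a small, self-contained sketch of the idea (it is not the code added by this change; the actual implementation, convertFilterToRegexp, is in the diff below). A filter body is compiled to a regular expression in which "**" stands for zero or more complete path segments, so a filter such as -**/node_modules excludes that directory at any depth. The helper name filterToRegexp and the exact pattern are illustrative assumptions, chosen here to be anchored and segment-exact:

	package main

	import (
		"fmt"
		"regexp"
		"strings"
	)

	// filterToRegexp compiles a glob-like filter body (the part after the
	// leading + or -) into a regexp. "**" matches zero or more complete
	// path segments; every other segment is matched literally. Paths are
	// matched with a trailing slash so the final segment must match exactly.
	func filterToRegexp(body string) *regexp.Regexp {
		var pattern strings.Builder
		pattern.WriteString("^")
		for _, seg := range strings.Split(body, "/") {
			if seg == "**" {
				pattern.WriteString("(?:[^/]+/)*") // any number of whole segments
				continue
			}
			pattern.WriteString(regexp.QuoteMeta(seg) + "/")
		}
		return regexp.MustCompile(pattern.String())
	}

	func main() {
		re := filterToRegexp("**/node_modules")
		fmt.Println(re.MatchString("a/b/node_modules/")) // true: excluded at any depth
		fmt.Println(re.MatchString("a/b/src/"))          // false
	}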
For golang/go#46438 Change-Id: I7de1986c1cb1b65844828fa618b72b1e6b76b5b9 Reviewed-on: https://go-review.googlesource.com/c/tools/+/414317 Run-TryBot: Dylan Le Reviewed-by: Robert Findley gopls-CI: kokoro TryBot-Result: Gopher Robot --- internal/lsp/cache/load.go | 6 +- internal/lsp/cache/view.go | 36 +++++---- internal/lsp/cache/view_test.go | 7 +- internal/lsp/source/options.go | 30 ++++++- internal/lsp/source/workspace_symbol.go | 73 +++++++++++++---- internal/lsp/source/workspace_symbol_test.go | 85 ++++++++++++++++++++ 6 files changed, 200 insertions(+), 37 deletions(-) diff --git a/internal/lsp/cache/load.go b/internal/lsp/cache/load.go index 8937f934031..d0942b51bfe 100644 --- a/internal/lsp/cache/load.go +++ b/internal/lsp/cache/load.go @@ -156,6 +156,7 @@ func (s *snapshot) load(ctx context.Context, allowNetwork bool, scopes ...interf } moduleErrs := make(map[string][]packages.Error) // module path -> errors + filterer := buildFilterer(s.view.rootURI.Filename(), s.view.gomodcache, s.view.options) newMetadata := make(map[PackageID]*KnownMetadata) for _, pkg := range pkgs { // The Go command returns synthetic list results for module queries that @@ -201,7 +202,7 @@ func (s *snapshot) load(ctx context.Context, allowNetwork bool, scopes ...interf // // TODO(rfindley): why exclude metadata arbitrarily here? It should be safe // to capture all metadata. - if s.view.allFilesExcluded(pkg) { + if s.view.allFilesExcluded(pkg, filterer) { continue } if err := buildMetadata(ctx, PackagePath(pkg.PkgPath), pkg, cfg, query, newMetadata, nil); err != nil { @@ -581,10 +582,11 @@ func containsPackageLocked(s *snapshot, m *Metadata) bool { uris[uri] = struct{}{} } + filterFunc := s.view.filterFunc() for uri := range uris { // Don't use view.contains here. go.work files may include modules // outside of the workspace folder. - if !strings.Contains(string(uri), "/vendor/") && !s.view.filters(uri) { + if !strings.Contains(string(uri), "/vendor/") && !filterFunc(uri) { return true } } diff --git a/internal/lsp/cache/view.go b/internal/lsp/cache/view.go index 15a2f90da57..e33e3400451 100644 --- a/internal/lsp/cache/view.go +++ b/internal/lsp/cache/view.go @@ -385,13 +385,14 @@ func (s *snapshot) locateTemplateFiles(ctx context.Context) { relativeTo := s.view.folder.Filename() searched := 0 + filterer := buildFilterer(dir, s.view.gomodcache, s.view.options) // Change to WalkDir when we move up to 1.16 err := filepath.Walk(dir, func(path string, fi os.FileInfo, err error) error { if err != nil { return err } relpath := strings.TrimPrefix(path, relativeTo) - excluded := pathExcludedByFilter(relpath, dir, s.view.gomodcache, s.view.options) + excluded := pathExcludedByFilter(relpath, filterer) if fileHasExtension(path, suffixes) && !excluded && !fi.IsDir() { k := span.URIFromPath(path) _, err := s.GetVersionedFile(ctx, k) @@ -421,17 +422,20 @@ func (v *View) contains(uri span.URI) bool { return false } - return !v.filters(uri) + return !v.filterFunc()(uri) } -// filters reports whether uri is filtered by the currently configured +// filterFunc returns a func that reports whether uri is filtered by the currently configured // directoryFilters. -func (v *View) filters(uri span.URI) bool { - // Only filter relative to the configured root directory. 
- if source.InDirLex(v.folder.Filename(), uri.Filename()) { - return pathExcludedByFilter(strings.TrimPrefix(uri.Filename(), v.folder.Filename()), v.rootURI.Filename(), v.gomodcache, v.Options()) +func (v *View) filterFunc() func(span.URI) bool { + filterer := buildFilterer(v.rootURI.Filename(), v.gomodcache, v.Options()) + return func(uri span.URI) bool { + // Only filter relative to the configured root directory. + if source.InDirLex(v.folder.Filename(), uri.Filename()) { + return pathExcludedByFilter(strings.TrimPrefix(uri.Filename(), v.folder.Filename()), filterer) + } + return false } - return false } func (v *View) mapFile(uri span.URI, f *fileBase) { @@ -1070,15 +1074,14 @@ func (s *snapshot) vendorEnabled(ctx context.Context, modURI span.URI, modConten return vendorEnabled, nil } -func (v *View) allFilesExcluded(pkg *packages.Package) bool { - opts := v.Options() +func (v *View) allFilesExcluded(pkg *packages.Package, filterer *source.Filterer) bool { folder := filepath.ToSlash(v.folder.Filename()) for _, f := range pkg.GoFiles { f = filepath.ToSlash(f) if !strings.HasPrefix(f, folder) { return false } - if !pathExcludedByFilter(strings.TrimPrefix(f, folder), v.rootURI.Filename(), v.gomodcache, opts) { + if !pathExcludedByFilter(strings.TrimPrefix(f, folder), filterer) { return false } } @@ -1086,8 +1089,9 @@ func (v *View) allFilesExcluded(pkg *packages.Package) bool { } func pathExcludedByFilterFunc(root, gomodcache string, opts *source.Options) func(string) bool { + filterer := buildFilterer(root, gomodcache, opts) return func(path string) bool { - return pathExcludedByFilter(path, root, gomodcache, opts) + return pathExcludedByFilter(path, filterer) } } @@ -1097,12 +1101,16 @@ func pathExcludedByFilterFunc(root, gomodcache string, opts *source.Options) fun // TODO(rfindley): passing root and gomodcache here makes it confusing whether // path should be absolute or relative, and has already caused at least one // bug. 
-func pathExcludedByFilter(path, root, gomodcache string, opts *source.Options) bool { +func pathExcludedByFilter(path string, filterer *source.Filterer) bool { path = strings.TrimPrefix(filepath.ToSlash(path), "/") + return filterer.Disallow(path) +} + +func buildFilterer(root, gomodcache string, opts *source.Options) *source.Filterer { gomodcache = strings.TrimPrefix(filepath.ToSlash(strings.TrimPrefix(gomodcache, root)), "/") filters := opts.DirectoryFilters if gomodcache != "" { filters = append(filters, "-"+gomodcache) } - return source.FiltersDisallow(path, filters) + return source.NewFilterer(filters) } diff --git a/internal/lsp/cache/view_test.go b/internal/lsp/cache/view_test.go index d76dcda8ed4..59684ea3614 100644 --- a/internal/lsp/cache/view_test.go +++ b/internal/lsp/cache/view_test.go @@ -161,15 +161,14 @@ func TestFilters(t *testing.T) { } for _, tt := range tests { - opts := &source.Options{} - opts.DirectoryFilters = tt.filters + filterer := source.NewFilterer(tt.filters) for _, inc := range tt.included { - if pathExcludedByFilter(inc, "root", "root/gopath/pkg/mod", opts) { + if pathExcludedByFilter(inc, filterer) { t.Errorf("filters %q excluded %v, wanted included", tt.filters, inc) } } for _, exc := range tt.excluded { - if !pathExcludedByFilter(exc, "root", "root/gopath/pkg/mod", opts) { + if !pathExcludedByFilter(exc, filterer) { t.Errorf("filters %q included %v, wanted excluded", tt.filters, exc) } } diff --git a/internal/lsp/source/options.go b/internal/lsp/source/options.go index 09401bc6ef8..c386eee45e7 100644 --- a/internal/lsp/source/options.go +++ b/internal/lsp/source/options.go @@ -806,6 +806,30 @@ func (o *Options) enableAllExperimentMaps() { } } +// validateDirectoryFilter validates if the filter string +// - is not empty +// - start with either + or - +// - doesn't contain currently unsupported glob operators: *, ? +func validateDirectoryFilter(ifilter string) (string, error) { + filter := fmt.Sprint(ifilter) + if filter == "" || (filter[0] != '+' && filter[0] != '-') { + return "", fmt.Errorf("invalid filter %v, must start with + or -", filter) + } + segs := strings.Split(filter, "/") + unsupportedOps := [...]string{"?", "*"} + for _, seg := range segs { + if seg != "**" { + for _, op := range unsupportedOps { + if strings.Contains(seg, op) { + return "", fmt.Errorf("invalid filter %v, operator %v not supported. If you want to have this operator supported, consider filing an issue.", filter, op) + } + } + } + } + + return strings.TrimRight(filepath.FromSlash(filter), "/"), nil +} + func (o *Options) set(name string, value interface{}, seen map[string]struct{}) OptionResult { // Flatten the name in case we get options with a hierarchy. 
split := strings.Split(name, ".") @@ -850,9 +874,9 @@ func (o *Options) set(name string, value interface{}, seen map[string]struct{}) } var filters []string for _, ifilter := range ifilters { - filter := fmt.Sprint(ifilter) - if filter == "" || (filter[0] != '+' && filter[0] != '-') { - result.errorf("invalid filter %q, must start with + or -", filter) + filter, err := validateDirectoryFilter(fmt.Sprintf("%v", ifilter)) + if err != nil { + result.errorf(err.Error()) return result } filters = append(filters, strings.TrimRight(filepath.FromSlash(filter), "/")) diff --git a/internal/lsp/source/workspace_symbol.go b/internal/lsp/source/workspace_symbol.go index 6167c586a9a..6c33effc1ae 100644 --- a/internal/lsp/source/workspace_symbol.go +++ b/internal/lsp/source/workspace_symbol.go @@ -8,7 +8,9 @@ import ( "context" "fmt" "go/types" + "path" "path/filepath" + "regexp" "runtime" "sort" "strings" @@ -305,11 +307,12 @@ func collectSymbols(ctx context.Context, views []View, matcherType SymbolMatcher roots = append(roots, strings.TrimRight(string(v.Folder()), "/")) filters := v.Options().DirectoryFilters + filterer := NewFilterer(filters) folder := filepath.ToSlash(v.Folder().Filename()) for uri, syms := range snapshot.Symbols(ctx) { norm := filepath.ToSlash(uri.Filename()) nm := strings.TrimPrefix(norm, folder) - if FiltersDisallow(nm, filters) { + if filterer.Disallow(nm) { continue } // Only scan each file once. @@ -358,28 +361,70 @@ func collectSymbols(ctx context.Context, views []View, matcherType SymbolMatcher return unified.results(), nil } -// FilterDisallow is code from the body of cache.pathExcludedByFilter in cache/view.go -// Exporting and using that function would cause an import cycle. -// Moving it here and exporting it would leave behind view_test.go. -// (This code is exported and used in the body of cache.pathExcludedByFilter) -func FiltersDisallow(path string, filters []string) bool { +type Filterer struct { + // Whether a filter is excluded depends on the operator (first char of the raw filter). + // Slices filters and excluded then should have the same length. + filters []*regexp.Regexp + excluded []bool +} + +// NewFilterer computes regular expression form of all raw filters +func NewFilterer(rawFilters []string) *Filterer { + var f Filterer + for _, filter := range rawFilters { + filter = path.Clean(filepath.ToSlash(filter)) + op, prefix := filter[0], filter[1:] + // convertFilterToRegexp adds "/" at the end of prefix to handle cases where a filter is a prefix of another filter. + // For example, it prevents [+foobar, -foo] from excluding "foobar". + f.filters = append(f.filters, convertFilterToRegexp(filepath.ToSlash(prefix))) + f.excluded = append(f.excluded, op == '-') + } + + return &f +} + +// Disallow return true if the path is excluded from the filterer's filters. +func (f *Filterer) Disallow(path string) bool { path = strings.TrimPrefix(path, "/") var excluded bool - for _, filter := range filters { - op, prefix := filter[0], filter[1:] - // Non-empty prefixes have to be precise directory matches. - if prefix != "" { - prefix = prefix + "/" - path = path + "/" + + for i, filter := range f.filters { + path := path + if !strings.HasSuffix(path, "/") { + path += "/" } - if !strings.HasPrefix(path, prefix) { + if !filter.MatchString(path) { continue } - excluded = op == '-' + excluded = f.excluded[i] } + return excluded } +// convertFilterToRegexp replaces glob-like operator substrings in a string file path to their equivalent regex forms. 
+// Supporting glob-like operators: +// - **: match zero or more complete path segments +func convertFilterToRegexp(filter string) *regexp.Regexp { + var ret strings.Builder + segs := strings.Split(filter, "/") + for i, seg := range segs { + if seg == "**" { + switch i { + case 0: + ret.WriteString("^.*") + default: + ret.WriteString(".*") + } + } else { + ret.WriteString(regexp.QuoteMeta(seg)) + } + ret.WriteString("/") + } + + return regexp.MustCompile(ret.String()) +} + // symbolFile holds symbol information for a single file. type symbolFile struct { uri span.URI diff --git a/internal/lsp/source/workspace_symbol_test.go b/internal/lsp/source/workspace_symbol_test.go index 314ef785df3..633550ed945 100644 --- a/internal/lsp/source/workspace_symbol_test.go +++ b/internal/lsp/source/workspace_symbol_test.go @@ -44,3 +44,88 @@ func TestParseQuery(t *testing.T) { } } } + +func TestFiltererDisallow(t *testing.T) { + tests := []struct { + filters []string + included []string + excluded []string + }{ + { + []string{"+**/c.go"}, + []string{"a/c.go", "a/b/c.go"}, + []string{}, + }, + { + []string{"+a/**/c.go"}, + []string{"a/b/c.go", "a/b/d/c.go", "a/c.go"}, + []string{}, + }, + { + []string{"-a/c.go", "+a/**"}, + []string{"a/c.go"}, + []string{}, + }, + { + []string{"+a/**/c.go", "-**/c.go"}, + []string{}, + []string{"a/b/c.go"}, + }, + { + []string{"+a/**/c.go", "-a/**"}, + []string{}, + []string{"a/b/c.go"}, + }, + { + []string{"+**/c.go", "-a/**/c.go"}, + []string{}, + []string{"a/b/c.go"}, + }, + { + []string{"+foobar", "-foo"}, + []string{"foobar", "foobar/a"}, + []string{"foo", "foo/a"}, + }, + { + []string{"+", "-"}, + []string{}, + []string{"foobar", "foobar/a", "foo", "foo/a"}, + }, + { + []string{"-", "+"}, + []string{"foobar", "foobar/a", "foo", "foo/a"}, + []string{}, + }, + { + []string{"-a/**/b/**/c.go"}, + []string{}, + []string{"a/x/y/z/b/f/g/h/c.go"}, + }, + // tests for unsupported glob operators + { + []string{"+**/c.go", "-a/*/c.go"}, + []string{"a/b/c.go"}, + []string{}, + }, + { + []string{"+**/c.go", "-a/?/c.go"}, + []string{"a/b/c.go"}, + []string{}, + }, + } + + for _, test := range tests { + filterer := NewFilterer(test.filters) + for _, inc := range test.included { + if filterer.Disallow(inc) { + t.Errorf("Filters %v excluded %v, wanted included", test.filters, inc) + } + } + + for _, exc := range test.excluded { + if !filterer.Disallow(exc) { + t.Errorf("Filters %v included %v, wanted excluded", test.filters, exc) + } + } + } +} From 2eaea86599c644bbe537e9548d6b000d331803a1 Mon Sep 17 00:00:00 2001 From: Zvonimir Pavlinovic Date: Wed, 13 Jul 2022 14:59:25 -0700 Subject: [PATCH 112/136] go/callgraph/vta: do not include interface types during propagation Some interface types could be propagated around type graph. This does not affect precision as the results are ultimately intersected with the initial call graph. However, this types of propagation are not necessary. On very large projects, this can save few seconds. 
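To illustrate the check this relies on (a standalone sketch, not the VTA code in the diff below): the propagation step only needs to seed initial types for nodes whose static type is concrete, and the standard go/types.IsInterface predicate (which a later change in this series adopts) tells the two apart. The hand-built types below are assumptions used purely for illustration:

	package main

	import (
		"fmt"
		"go/types"
	)

	func main() {
		// An empty interface type and a concrete basic type, built by hand.
		iface := types.NewInterfaceType(nil, nil)
		iface.Complete()
		concrete := types.Universe.Lookup("int").Type()

		for _, t := range []types.Type{iface, concrete} {
			if types.IsInterface(t) {
				// Interface-typed nodes receive their types from concrete
				// flows during propagation, so nothing is seeded for them.
				fmt.Printf("%s: skip seeding\n", t)
				continue
			}
			fmt.Printf("%s: seed initial type\n", t)
		}
	}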
Change-Id: I73d41c082a52734f50669af19fee940725aed662 Reviewed-on: https://go-review.googlesource.com/c/tools/+/417514 Reviewed-by: Tim King gopls-CI: kokoro Run-TryBot: Zvonimir Pavlinovic TryBot-Result: Gopher Robot --- go/callgraph/vta/graph.go | 4 +++- go/callgraph/vta/propagation.go | 12 ++++++++++++ go/callgraph/vta/utils.go | 12 ------------ 3 files changed, 15 insertions(+), 13 deletions(-) diff --git a/go/callgraph/vta/graph.go b/go/callgraph/vta/graph.go index 48547a52527..8a338e97d23 100644 --- a/go/callgraph/vta/graph.go +++ b/go/callgraph/vta/graph.go @@ -687,7 +687,9 @@ func (b *builder) nodeFromVal(val ssa.Value) node { // semantically equivalent types can have different implementations, // this method guarantees the same implementation is always used. func (b *builder) representative(n node) node { - if !hasInitialTypes(n) { + if n.Type() == nil { + // panicArg and recoverReturn do not have + // types and are unique by definition. return n } t := canonicalize(n.Type(), &b.canon) diff --git a/go/callgraph/vta/propagation.go b/go/callgraph/vta/propagation.go index 5934ebc2167..b3953601341 100644 --- a/go/callgraph/vta/propagation.go +++ b/go/callgraph/vta/propagation.go @@ -175,6 +175,18 @@ func nodeTypes(nodes []node, builder *trie.Builder, propTypeId func(p propType) return &typeSet } +// hasInitialTypes check if a node can have initial types. +// Returns true iff `n` is not a panic, recover, nestedPtr* +// node, nor a node whose type is an interface. +func hasInitialTypes(n node) bool { + switch n.(type) { + case panicArg, recoverReturn, nestedPtrFunction, nestedPtrInterface: + return false + default: + return !isInterface(n.Type()) + } +} + // getPropType creates a propType for `node` based on its type. // propType.typ is always node.Type(). If node is function, then // propType.val is the underlying function; nil otherwise. diff --git a/go/callgraph/vta/utils.go b/go/callgraph/vta/utils.go index 0531a227f6c..c8f0a47adf6 100644 --- a/go/callgraph/vta/utils.go +++ b/go/callgraph/vta/utils.go @@ -59,18 +59,6 @@ func hasInFlow(n node) bool { return isInterface(t) || isFunction(t) } -// hasInitialTypes check if a node can have initial types. -// Returns true iff `n` is not a panic or recover node as -// those are artificial. -func hasInitialTypes(n node) bool { - switch n.(type) { - case panicArg, recoverReturn: - return false - default: - return true - } -} - func isInterface(t types.Type) bool { _, ok := t.Underlying().(*types.Interface) return ok From 2957e9da5d156b5d2bfb51fded9c29a9fa3358f5 Mon Sep 17 00:00:00 2001 From: Zvonimir Pavlinovic Date: Thu, 14 Jul 2022 15:58:13 -0700 Subject: [PATCH 113/136] go/callgraph/vta: use types.IsInterface instead of our own isInterface Change-Id: I9e5a81e4f59f32e3bfc6baf2348ee3e4db411aae Reviewed-on: https://go-review.googlesource.com/c/tools/+/417674 gopls-CI: kokoro Reviewed-by: Tim King Run-TryBot: Zvonimir Pavlinovic TryBot-Result: Gopher Robot --- go/callgraph/vta/graph.go | 2 +- go/callgraph/vta/propagation.go | 2 +- go/callgraph/vta/propagation_test.go | 2 +- go/callgraph/vta/utils.go | 9 ++------- 4 files changed, 5 insertions(+), 10 deletions(-) diff --git a/go/callgraph/vta/graph.go b/go/callgraph/vta/graph.go index 8a338e97d23..4d0387f12bd 100644 --- a/go/callgraph/vta/graph.go +++ b/go/callgraph/vta/graph.go @@ -654,7 +654,7 @@ func (b *builder) addInFlowEdge(s, d node) { // Creates const, pointer, global, func, and local nodes based on register instructions. 
func (b *builder) nodeFromVal(val ssa.Value) node { - if p, ok := val.Type().(*types.Pointer); ok && !isInterface(p.Elem()) && !isFunction(p.Elem()) { + if p, ok := val.Type().(*types.Pointer); ok && !types.IsInterface(p.Elem()) && !isFunction(p.Elem()) { // Nested pointer to interfaces are modeled as a special // nestedPtrInterface node. if i := interfaceUnderPtr(p.Elem()); i != nil { diff --git a/go/callgraph/vta/propagation.go b/go/callgraph/vta/propagation.go index b3953601341..6127780ac4e 100644 --- a/go/callgraph/vta/propagation.go +++ b/go/callgraph/vta/propagation.go @@ -183,7 +183,7 @@ func hasInitialTypes(n node) bool { case panicArg, recoverReturn, nestedPtrFunction, nestedPtrInterface: return false default: - return !isInterface(n.Type()) + return !types.IsInterface(n.Type()) } } diff --git a/go/callgraph/vta/propagation_test.go b/go/callgraph/vta/propagation_test.go index 00b21277f22..f4a754f9663 100644 --- a/go/callgraph/vta/propagation_test.go +++ b/go/callgraph/vta/propagation_test.go @@ -58,7 +58,7 @@ func newLocal(name string, t types.Type) local { // newNamedType creates a bogus type named `name`. func newNamedType(name string) *types.Named { - return types.NewNamed(types.NewTypeName(token.NoPos, nil, name, nil), nil, nil) + return types.NewNamed(types.NewTypeName(token.NoPos, nil, name, nil), types.Universe.Lookup("int").Type(), nil) } // sccString is a utility for stringifying `nodeToScc`. Every diff --git a/go/callgraph/vta/utils.go b/go/callgraph/vta/utils.go index c8f0a47adf6..c0b5775907f 100644 --- a/go/callgraph/vta/utils.go +++ b/go/callgraph/vta/utils.go @@ -56,12 +56,7 @@ func hasInFlow(n node) bool { return true } - return isInterface(t) || isFunction(t) -} - -func isInterface(t types.Type) bool { - _, ok := t.Underlying().(*types.Interface) - return ok + return types.IsInterface(t) || isFunction(t) } func isFunction(t types.Type) bool { @@ -86,7 +81,7 @@ func interfaceUnderPtr(t types.Type) types.Type { return nil } - if isInterface(p.Elem()) { + if types.IsInterface(p.Elem()) { return p.Elem() } From 79f3242e4b2ee6f1bd987fdd0538e16451f7523e Mon Sep 17 00:00:00 2001 From: aarzilli Date: Thu, 9 Jun 2022 15:19:50 +0200 Subject: [PATCH 114/136] godoc: support go1.19 doc comment syntax Call go/doc.(*Package).HTML on go1.19 instead of the deprecated go/doc.ToHTML. Change-Id: Ie604d9ace7adc179f7c2e345f17a2e0c0365d1a2 Reviewed-on: https://go-review.googlesource.com/c/tools/+/411381 Run-TryBot: Alessandro Arzilli gopls-CI: kokoro TryBot-Result: Gopher Robot Reviewed-by: Jonathan Amsterdam Reviewed-by: Jenny Rakoczy --- godoc/godoc.go | 9 +++++++-- godoc/static/package.html | 22 +++++++++++----------- godoc/static/searchdoc.html | 2 +- godoc/static/static.go | 4 ++-- godoc/tohtml_go119.go | 17 +++++++++++++++++ godoc/tohtml_other.go | 17 +++++++++++++++++ 6 files changed, 55 insertions(+), 16 deletions(-) create mode 100644 godoc/tohtml_go119.go create mode 100644 godoc/tohtml_other.go diff --git a/godoc/godoc.go b/godoc/godoc.go index 7ff2eab6239..6edb8f93ce9 100644 --- a/godoc/godoc.go +++ b/godoc/godoc.go @@ -345,11 +345,16 @@ func isDigit(ch rune) bool { return '0' <= ch && ch <= '9' || ch >= utf8.RuneSelf && unicode.IsDigit(ch) } -func comment_htmlFunc(comment string) string { +func comment_htmlFunc(info *PageInfo, comment string) string { var buf bytes.Buffer // TODO(gri) Provide list of words (e.g. function parameters) // to be emphasized by ToHTML. 
- doc.ToHTML(&buf, comment, nil) // does html-escaping + + // godocToHTML is: + // - buf.Write(info.PDoc.HTML(comment)) on go1.19 + // - go/doc.ToHTML(&buf, comment, nil) on other versions + godocToHTML(&buf, info.PDoc, comment) + return buf.String() } diff --git a/godoc/static/package.html b/godoc/static/package.html index 86445df4c08..a04b08b63f5 100644 --- a/godoc/static/package.html +++ b/godoc/static/package.html @@ -17,7 +17,7 @@ {{if $.IsMain}} {{/* command documentation */}} - {{comment_html .Doc}} + {{comment_html $ .Doc}} {{else}} {{/* package documentation */}}
 [remaining package.html hunks: the surrounding HTML markup was lost in
 extraction and is not reproduced. Every hunk makes the same mechanical
 change, passing the page context "$" to the template function in the
 overview, constants, variables, functions, types (including their consts,
 vars, funcs, and methods), and notes sections:

-	{{comment_html .Doc}}
+	{{comment_html $ .Doc}}

 and, for notes:

-	{{comment_html .Body}}
+	{{comment_html $ .Body}}
 ]
diff --git a/godoc/static/searchdoc.html b/godoc/static/searchdoc.html
index 679c02cf3a8..84dcb345270 100644
--- a/godoc/static/searchdoc.html
+++ b/godoc/static/searchdoc.html
@@ -15,7 +15,7 @@
 [hunk markup lost in extraction; it applies the same change, replacing
 {{comment_html .Doc}} with {{comment_html $ .Doc}}]
diff --git a/godoc/static/static.go b/godoc/static/static.go
index ada60fab6c2..d6e5f2d2e0e 100644
--- a/godoc/static/static.go
+++ b/godoc/static/static.go
@@ -83,7 +83,7 @@ var Files = map[string]string{
 [this hunk regenerates the embedded "package.html" entry of var Files with
 the comment_html change above; the long escaped template string, and the
 unchanged "methodset.html" and "packageroot.html" context entries, are
 garbled here and are not reproduced]
@@ -95,7 +95,7 @@ var Files = map[string]string{
 [this hunk regenerates the embedded "searchdoc.html" entry in the same way;
 the unchanged "searchcode.html" and "searchtxt.html" context entries are
 likewise not reproduced]
    \x0a{{end}}\x0a", diff --git a/godoc/tohtml_go119.go b/godoc/tohtml_go119.go new file mode 100644 index 00000000000..6dbf7212b9a --- /dev/null +++ b/godoc/tohtml_go119.go @@ -0,0 +1,17 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.19 +// +build go1.19 + +package godoc + +import ( + "bytes" + "go/doc" +) + +func godocToHTML(buf *bytes.Buffer, pkg *doc.Package, comment string) { + buf.Write(pkg.HTML(comment)) +} diff --git a/godoc/tohtml_other.go b/godoc/tohtml_other.go new file mode 100644 index 00000000000..a1dcf2e195b --- /dev/null +++ b/godoc/tohtml_other.go @@ -0,0 +1,17 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.19 +// +build !go1.19 + +package godoc + +import ( + "bytes" + "go/doc" +) + +func godocToHTML(buf *bytes.Buffer, pkg *doc.Package, comment string) { + doc.ToHTML(buf, comment, nil) +} From ec1f92440bcb00d4cf10fee76edcbcee7c88b20c Mon Sep 17 00:00:00 2001 From: Suzy Mueller Date: Tue, 19 Jul 2022 17:38:06 -0400 Subject: [PATCH 115/136] internal/lsp: add check for nil results to fillreturns Avoid panicking when allocating an array for a nil results list by returning early. Change-Id: I26953b5cef7832bad3006bd316d59978a5d94cbd Reviewed-on: https://go-review.googlesource.com/c/tools/+/418416 Run-TryBot: Suzy Mueller Reviewed-by: Robert Findley TryBot-Result: Gopher Robot gopls-CI: kokoro --- internal/lsp/analysis/fillreturns/fillreturns.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/lsp/analysis/fillreturns/fillreturns.go b/internal/lsp/analysis/fillreturns/fillreturns.go index 4a30934c63c..705ae124d57 100644 --- a/internal/lsp/analysis/fillreturns/fillreturns.go +++ b/internal/lsp/analysis/fillreturns/fillreturns.go @@ -113,7 +113,7 @@ outer: break } } - if enclosingFunc == nil { + if enclosingFunc == nil || enclosingFunc.Results == nil { continue } From 980cbfeacb418cfa8899193c93fd74d8d4211165 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Fri, 22 Jul 2022 11:11:11 -0400 Subject: [PATCH 116/136] A+C: delete AUTHORS and CONTRIBUTORS In 2009, Google's open-source lawyers asked us to create the AUTHORS file to define "The Go Authors", and the CONTRIBUTORS file was in keeping with open source best practices of the time. Re-reviewing our repos now in 2022, the open-source lawyers are comfortable with source control history taking the place of the AUTHORS file, and most open source projects no longer maintain CONTRIBUTORS files. To ease maintenance, remove AUTHORS and CONTRIBUTORS from all repos. For golang/go#53961. Change-Id: Icaaaf04cc7884b479c7fd1231c53c8bf1f349623 Reviewed-on: https://go-review.googlesource.com/c/tools/+/419105 gopls-CI: kokoro Run-TryBot: Russ Cox Reviewed-by: David Chase TryBot-Result: Gopher Robot --- AUTHORS | 3 --- CONTRIBUTORS | 3 --- 2 files changed, 6 deletions(-) delete mode 100644 AUTHORS delete mode 100644 CONTRIBUTORS diff --git a/AUTHORS b/AUTHORS deleted file mode 100644 index 15167cd746c..00000000000 --- a/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. 
diff --git a/CONTRIBUTORS b/CONTRIBUTORS deleted file mode 100644 index 1c4577e9680..00000000000 --- a/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. From a732e45cc7309582c43c3cfc2db4c73eed18aa76 Mon Sep 17 00:00:00 2001 From: Jamal Carvalho Date: Wed, 20 Jul 2022 18:39:28 +0000 Subject: [PATCH 117/136] gopls: update golang.org/x/vuln For golang/go#53869. Change-Id: If173f0e7e3a06c11cd358ff9917c73b50d2dcb28 Reviewed-on: https://go-review.googlesource.com/c/tools/+/418536 Run-TryBot: Jamal Carvalho gopls-CI: kokoro TryBot-Result: Gopher Robot Reviewed-by: Hyang-Ah Hana Kim --- gopls/go.mod | 2 +- gopls/go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/gopls/go.mod b/gopls/go.mod index bd118e226bb..020386fca28 100644 --- a/gopls/go.mod +++ b/gopls/go.mod @@ -10,7 +10,7 @@ require ( golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 golang.org/x/sys v0.0.0-20220209214540-3681064d5158 golang.org/x/tools v0.1.11-0.20220523181440-ccb10502d1a5 - golang.org/x/vuln v0.0.0-20220613164644-4eb5ba49563c + golang.org/x/vuln v0.0.0-20220718121659-b9a3ad919698 honnef.co/go/tools v0.3.2 mvdan.cc/gofumpt v0.3.0 mvdan.cc/xurls/v2 v2.4.0 diff --git a/gopls/go.sum b/gopls/go.sum index 73a55fbad82..f263f9e0d00 100644 --- a/gopls/go.sum +++ b/gopls/go.sum @@ -75,6 +75,8 @@ golang.org/x/vuln v0.0.0-20220503210553-a5481fb0c8be h1:jokAF1mfylAi1iTQx7C44B7v golang.org/x/vuln v0.0.0-20220503210553-a5481fb0c8be/go.mod h1:twca1SxmF6/i2wHY/mj1vLIkkHdp+nil/yA32ZOP4kg= golang.org/x/vuln v0.0.0-20220613164644-4eb5ba49563c h1:r5bbIROBQtRRgoutV8Q3sFY58VzHW6jMBYl48ANSyS4= golang.org/x/vuln v0.0.0-20220613164644-4eb5ba49563c/go.mod h1:UZshlUPxXeGUM9I14UOawXQg6yosDE9cr1vKY/DzgWo= +golang.org/x/vuln v0.0.0-20220718121659-b9a3ad919698 h1:9lgpkUgjzoIcZYp7/UPFO/0jIlYcokcEjqWm0hj9pzE= +golang.org/x/vuln v0.0.0-20220718121659-b9a3ad919698/go.mod h1:UZshlUPxXeGUM9I14UOawXQg6yosDE9cr1vKY/DzgWo= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= From 126ef8f8644b6482a6c6d65a4e373fa96bd36bfb Mon Sep 17 00:00:00 2001 From: Jamal Carvalho Date: Wed, 20 Jul 2022 18:40:41 +0000 Subject: [PATCH 118/136] gopls/internal/govulncheck: sync x/vuln@b9a3ad9 For golang/go#53869 Change-Id: I8cf795b792380596be306b2437e26faf990cff8b Reviewed-on: https://go-review.googlesource.com/c/tools/+/418537 Reviewed-by: Hyang-Ah Hana Kim Run-TryBot: Jamal Carvalho TryBot-Result: Gopher Robot gopls-CI: kokoro --- gopls/internal/govulncheck/README.md | 2 ++ gopls/internal/govulncheck/source.go | 22 ---------------------- 2 files changed, 2 insertions(+), 22 deletions(-) diff --git a/gopls/internal/govulncheck/README.md b/gopls/internal/govulncheck/README.md index d8339c506f6..bc10d8a2ec1 100644 --- a/gopls/internal/govulncheck/README.md +++ b/gopls/internal/govulncheck/README.md @@ -15,3 +15,5 @@ The `copy.sh` does the copying, after removing all .go files here. To use it: 2. cd to this directory. 3. Run `copy.sh`. + +4. 
Re-add build tags for go1.18 \ No newline at end of file diff --git a/gopls/internal/govulncheck/source.go b/gopls/internal/govulncheck/source.go index 23028b9eb42..d51fe8c0c2d 100644 --- a/gopls/internal/govulncheck/source.go +++ b/gopls/internal/govulncheck/source.go @@ -8,13 +8,11 @@ package govulncheck import ( - "context" "fmt" "sort" "strings" "golang.org/x/tools/go/packages" - "golang.org/x/vuln/client" "golang.org/x/vuln/vulncheck" ) @@ -57,26 +55,6 @@ func LoadPackages(cfg *packages.Config, patterns ...string) ([]*vulncheck.Packag return vpkgs, err } -// Source calls vulncheck.Source on the Go source in pkgs. It returns the result -// with Vulns trimmed to those that are actually called. -// -// This function is being used by the Go IDE team. -func Source(ctx context.Context, pkgs []*vulncheck.Package, c client.Client) (*vulncheck.Result, error) { - r, err := vulncheck.Source(ctx, pkgs, &vulncheck.Config{Client: c}) - if err != nil { - return nil, err - } - // Keep only the vulns that are called. - var vulns []*vulncheck.Vuln - for _, v := range r.Vulns { - if v.CallSink != 0 { - vulns = append(vulns, v) - } - } - r.Vulns = vulns - return r, nil -} - // CallInfo is information about calls to vulnerable functions. type CallInfo struct { // CallStacks contains all call stacks to vulnerable functions. From 7b605f471d9c0aa0acd165f72e32f0897fc6eece Mon Sep 17 00:00:00 2001 From: Jamal Carvalho Date: Wed, 20 Jul 2022 18:49:20 +0000 Subject: [PATCH 119/136] gopls/internal/vulncheck: pass go version to vulncheck config For golang/go#53869 Change-Id: I8c34adaf81415dafb724ca67fa731912832c3ee5 Reviewed-on: https://go-review.googlesource.com/c/tools/+/418538 Run-TryBot: Jamal Carvalho TryBot-Result: Gopher Robot gopls-CI: kokoro Reviewed-by: Hyang-Ah Hana Kim --- gopls/internal/vulncheck/command.go | 2 +- gopls/internal/vulncheck/util.go | 16 ++++++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/gopls/internal/vulncheck/command.go b/gopls/internal/vulncheck/command.go index 53bf0f03860..a29bc008c7a 100644 --- a/gopls/internal/vulncheck/command.go +++ b/gopls/internal/vulncheck/command.go @@ -84,7 +84,7 @@ func (c *cmd) Run(ctx context.Context, cfg *packages.Config, patterns ...string) log.Printf("analyzing %d packages...\n", len(loadedPkgs)) - r, err := vulncheck.Source(ctx, loadedPkgs, &vulncheck.Config{Client: c.Client}) + r, err := vulncheck.Source(ctx, loadedPkgs, &vulncheck.Config{Client: c.Client, SourceGoVersion: goVersion()}) if err != nil { return nil, err } diff --git a/gopls/internal/vulncheck/util.go b/gopls/internal/vulncheck/util.go index c329461894e..05332d375c3 100644 --- a/gopls/internal/vulncheck/util.go +++ b/gopls/internal/vulncheck/util.go @@ -8,8 +8,11 @@ package vulncheck import ( + "bytes" "fmt" "go/token" + "os" + "os/exec" gvc "golang.org/x/tools/gopls/internal/govulncheck" "golang.org/x/tools/internal/lsp/protocol" @@ -80,3 +83,16 @@ func posToPosition(pos *token.Position) (p protocol.Position) { } return p } + +func goVersion() string { + if v := os.Getenv("GOVERSION"); v != "" { + // Unlikely to happen in practice, mostly used for testing. 
+ return v + } + out, err := exec.Command("go", "env", "GOVERSION").Output() + if err != nil { + fmt.Fprintf(os.Stderr, "failed to determine go version; skipping stdlib scanning: %v\n", err) + return "" + } + return string(bytes.TrimSpace(out)) +} From a2a24778ba9562d210892fad5bf136c0bdbd233c Mon Sep 17 00:00:00 2001 From: Robert Findley Date: Thu, 14 Jul 2022 10:44:30 -0400 Subject: [PATCH 120/136] gopls/internal/regtest: externalize shouldLoad tracking The fundamental bug causing TestChangePackageName to fail has been fixed, yet unskipping it revealed a new bug: tracking whether or not a package should be loaded requires that we actually store that package in s.meta. In cases where we drop metadata, we also lose the information that a package path needs to be reloaded. Fix this by significantly reworking the tracking of pending loads, to simplify the code and separate the reloading logic from the logic of tracking metadata. As a nice side-effect, this eliminates the needless work necessary to mark/unmark packages as needing loading, since this is no longer tracked by the immutable metadata graph. Additionally, eliminate the "shouldLoad" guard inside of snapshot.load. We should never ask for loads that we do not want, and the shouldLoad guard either masks bugs or leads to bugs. For example, we would repeatedly call load from reloadOrphanedFiles for files that are part of a package that needs loading, because we skip loading the file scope. Lift the responsibility for determining if we should load to the callers of load. Along the way, make a few additional minor improvements: - simplify the code where possible - leave TODOs for likely bugs or things that should be simplified in the future - reduce the overly granular locking in getOrLoadIDsForURI, which could lead to strange races - remove a stale comment for a test that is no longer flaky. Updates golang/go#53878 Change-Id: I6d9084806f1fdebc43002c7cc75dc1b94f8514b9 Reviewed-on: https://go-review.googlesource.com/c/tools/+/417576 Run-TryBot: Robert Findley gopls-CI: kokoro Reviewed-by: Suzy Mueller TryBot-Result: Gopher Robot --- .../regtest/diagnostics/diagnostics_test.go | 15 +- gopls/internal/regtest/misc/vendor_test.go | 10 - internal/lsp/cache/load.go | 15 -- internal/lsp/cache/metadata.go | 12 +- internal/lsp/cache/snapshot.go | 225 +++++++++--------- internal/lsp/source/util.go | 4 + 6 files changed, 129 insertions(+), 152 deletions(-) diff --git a/gopls/internal/regtest/diagnostics/diagnostics_test.go b/gopls/internal/regtest/diagnostics/diagnostics_test.go index b9dc2d434b2..ae8b4a56cdd 100644 --- a/gopls/internal/regtest/diagnostics/diagnostics_test.go +++ b/gopls/internal/regtest/diagnostics/diagnostics_test.go @@ -703,7 +703,8 @@ func main() { // Test for golang/go#38207. func TestNewModule_Issue38207(t *testing.T) { - testenv.NeedsGo1Point(t, 14) + // Fails at Go 1.14 following CL 417576. Not investigated. + testenv.NeedsGo1Point(t, 15) const emptyFile = ` -- go.mod -- module mod.com @@ -874,7 +875,7 @@ func TestX(t *testing.T) { } func TestChangePackageName(t *testing.T) { - t.Skip("This issue hasn't been fixed yet. 
See golang.org/issue/41061.") + testenv.NeedsGo1Point(t, 16) // needs native overlay support const mod = ` -- go.mod -- @@ -889,15 +890,11 @@ package foo_ Run(t, mod, func(t *testing.T, env *Env) { env.OpenFile("foo/bar_test.go") env.RegexpReplace("foo/bar_test.go", "package foo_", "package foo_test") - env.SaveBuffer("foo/bar_test.go") env.Await( OnceMet( - env.DoneWithSave(), - NoDiagnostics("foo/bar_test.go"), - ), - OnceMet( - env.DoneWithSave(), - NoDiagnostics("foo/foo.go"), + env.DoneWithChange(), + EmptyOrNoDiagnostics("foo/bar_test.go"), + EmptyOrNoDiagnostics("foo/foo.go"), ), ) }) diff --git a/gopls/internal/regtest/misc/vendor_test.go b/gopls/internal/regtest/misc/vendor_test.go index 324a8006fa7..4e02799b47a 100644 --- a/gopls/internal/regtest/misc/vendor_test.go +++ b/gopls/internal/regtest/misc/vendor_test.go @@ -27,16 +27,6 @@ var Goodbye error func TestInconsistentVendoring(t *testing.T) { testenv.NeedsGo1Point(t, 14) - // TODO(golang/go#49646): delete this comment once this test is stable. - // - // In golang/go#49646, this test is reported as flaky on Windows. We believe - // this is due to file contention from go mod vendor that should be resolved. - // If this test proves to still be flaky, skip it. - // - // if runtime.GOOS == "windows" { - // t.Skipf("skipping test due to flakiness on Windows: https://golang.org/issue/49646") - // } - const pkgThatUsesVendoring = ` -- go.mod -- module mod.com diff --git a/internal/lsp/cache/load.go b/internal/lsp/cache/load.go index d0942b51bfe..79789fe60d0 100644 --- a/internal/lsp/cache/load.go +++ b/internal/lsp/cache/load.go @@ -7,7 +7,6 @@ package cache import ( "bytes" "context" - "errors" "fmt" "io/ioutil" "os" @@ -41,24 +40,10 @@ func (s *snapshot) load(ctx context.Context, allowNetwork bool, scopes ...interf var query []string var containsDir bool // for logging - // Unless the context was canceled, set "shouldLoad" to false for all - // of the metadata we attempted to load. - defer func() { - if errors.Is(err, context.Canceled) { - return - } - // TODO(rfindley): merge these metadata updates with the updates below, to - // avoid updating the graph twice. - s.clearShouldLoad(scopes...) - }() - // Keep track of module query -> module path so that we can later correlate query // errors with errors. moduleQueries := make(map[string]string) for _, scope := range scopes { - if !s.shouldLoad(scope) { - continue - } switch scope := scope.(type) { case PackagePath: if source.IsCommandLineArguments(string(scope)) { diff --git a/internal/lsp/cache/metadata.go b/internal/lsp/cache/metadata.go index 486035f9390..7d9192f43a2 100644 --- a/internal/lsp/cache/metadata.go +++ b/internal/lsp/cache/metadata.go @@ -29,7 +29,7 @@ type Metadata struct { Name PackageName GoFiles []span.URI CompiledGoFiles []span.URI - ForTest PackagePath + ForTest PackagePath // package path under test, or "" TypesSizes types.Sizes Errors []packages.Error Deps []PackageID // direct dependencies, in string order @@ -94,12 +94,8 @@ type KnownMetadata struct { // PkgFilesChanged reports whether the file set of this metadata has // potentially changed. - PkgFilesChanged bool - - // ShouldLoad is true if the given metadata should be reloaded. // - // Note that ShouldLoad is different from !Valid: when we try to load a - // package, we mark ShouldLoad = false regardless of whether the load - // succeeded, to prevent endless loads. 
- ShouldLoad bool + // TODO(rfindley): this is used for WorkspacePackages, and looks fishy: we + // should probably only consider valid packages to be workspace packages. + PkgFilesChanged bool } diff --git a/internal/lsp/cache/snapshot.go b/internal/lsp/cache/snapshot.go index a516860aafc..64e7f17c994 100644 --- a/internal/lsp/cache/snapshot.go +++ b/internal/lsp/cache/snapshot.go @@ -117,6 +117,13 @@ type snapshot struct { // when the view is created. workspacePackages map[PackageID]PackagePath + // shouldLoad tracks packages that need to be reloaded, mapping a PackageID + // to the package paths that should be used to reload it + // + // When we try to load a package, we clear it from the shouldLoad map + // regardless of whether the load succeeded, to prevent endless loads. + shouldLoad map[PackageID][]PackagePath + // unloadableFiles keeps track of files that we've failed to load. unloadableFiles map[span.URI]struct{} @@ -643,6 +650,10 @@ func (s *snapshot) PackageForFile(ctx context.Context, uri span.URI, mode source } func (s *snapshot) packageHandlesForFile(ctx context.Context, uri span.URI, mode source.TypecheckMode, includeTestVariants bool) ([]*packageHandle, error) { + // TODO(rfindley): why can't/shouldn't we awaitLoaded here? It seems that if + // we ask for package handles for a file, we should wait for pending loads. + // Else we will reload orphaned files before the initial load completes. + // Check if we should reload metadata for the file. We don't invalidate IDs // (though we should), so the IDs will be a better source of truth than the // metadata. If there are no IDs for the file, then we should also reload. @@ -691,13 +702,13 @@ func (s *snapshot) packageHandlesForFile(ctx context.Context, uri span.URI, mode } func (s *snapshot) getOrLoadIDsForURI(ctx context.Context, uri span.URI) ([]PackageID, error) { - knownIDs := s.getIDsForURI(uri) - reload := len(knownIDs) == 0 - for _, id := range knownIDs { - // Reload package metadata if any of the metadata has missing - // dependencies, in case something has changed since the last time we - // reloaded it. - if s.noValidMetadataForID(id) { + s.mu.Lock() + ids := s.meta.ids[uri] + reload := len(ids) == 0 + for _, id := range ids { + // If the file is part of a package that needs reloading, reload it now to + // improve our responsiveness. + if len(s.shouldLoad[id]) > 0 { reload = true break } @@ -705,20 +716,38 @@ func (s *snapshot) getOrLoadIDsForURI(ctx context.Context, uri span.URI) ([]Pack // missing dependencies. This is expensive and results in too many // calls to packages.Load. Determine what we should do instead. } + s.mu.Unlock() + if reload { - err := s.load(ctx, false, fileURI(uri)) + scope := fileURI(uri) + err := s.load(ctx, false, scope) + + // As in reloadWorkspace, we must clear scopes after loading. + // + // TODO(rfindley): simply call reloadWorkspace here, first, to avoid this + // duplication. + if !errors.Is(err, context.Canceled) { + s.clearShouldLoad(scope) + } + // TODO(rfindley): this doesn't look right. If we don't reload, we use + // invalid metadata anyway, but if we DO reload and it fails, we don't? if !s.useInvalidMetadata() && err != nil { return nil, err } + + s.mu.Lock() + ids = s.meta.ids[uri] + s.mu.Unlock() + // We've tried to reload and there are still no known IDs for the URI. // Return the load error, if there was one. 
- knownIDs = s.getIDsForURI(uri) - if len(knownIDs) == 0 { + if len(ids) == 0 { return nil, err } } - return knownIDs, nil + + return ids, nil } // Only use invalid metadata for Go versions >= 1.13. Go 1.12 and below has @@ -1153,13 +1182,6 @@ func moduleForURI(modFiles map[span.URI]struct{}, uri span.URI) span.URI { return match } -func (s *snapshot) getIDsForURI(uri span.URI) []PackageID { - s.mu.Lock() - defer s.mu.Unlock() - - return s.meta.ids[uri] -} - func (s *snapshot) getMetadata(id PackageID) *KnownMetadata { s.mu.Lock() defer s.mu.Unlock() @@ -1167,78 +1189,34 @@ func (s *snapshot) getMetadata(id PackageID) *KnownMetadata { return s.meta.metadata[id] } -func (s *snapshot) shouldLoad(scope interface{}) bool { - s.mu.Lock() - defer s.mu.Unlock() - - g := s.meta - - switch scope := scope.(type) { - case PackagePath: - var meta *KnownMetadata - for _, m := range g.metadata { - if m.PkgPath != scope { - continue - } - meta = m - } - if meta == nil || meta.ShouldLoad { - return true - } - return false - case fileURI: - uri := span.URI(scope) - ids := g.ids[uri] - if len(ids) == 0 { - return true - } - for _, id := range ids { - m, ok := g.metadata[id] - if !ok || m.ShouldLoad { - return true - } - } - return false - default: - return true - } -} - +// clearShouldLoad clears package IDs that no longer need to be reloaded after +// scopes has been loaded. func (s *snapshot) clearShouldLoad(scopes ...interface{}) { s.mu.Lock() defer s.mu.Unlock() - g := s.meta - - var updates map[PackageID]*KnownMetadata - markLoaded := func(m *KnownMetadata) { - if updates == nil { - updates = make(map[PackageID]*KnownMetadata) - } - next := *m - next.ShouldLoad = false - updates[next.ID] = &next - } for _, scope := range scopes { switch scope := scope.(type) { case PackagePath: - for _, m := range g.metadata { - if m.PkgPath == scope { - markLoaded(m) + var toDelete []PackageID + for id, pkgPaths := range s.shouldLoad { + for _, pkgPath := range pkgPaths { + if pkgPath == scope { + toDelete = append(toDelete, id) + } } } + for _, id := range toDelete { + delete(s.shouldLoad, id) + } case fileURI: uri := span.URI(scope) - ids := g.ids[uri] + ids := s.meta.ids[uri] for _, id := range ids { - if m, ok := g.metadata[id]; ok { - markLoaded(m) - } + delete(s.shouldLoad, id) } } } - s.meta = g.Clone(updates) - s.resetIsActivePackageLocked() } // noValidMetadataForURILocked reports whether there is any valid metadata for @@ -1256,16 +1234,6 @@ func (s *snapshot) noValidMetadataForURILocked(uri span.URI) bool { return true } -// noValidMetadataForID reports whether there is no valid metadata for the -// given ID. -func (s *snapshot) noValidMetadataForID(id PackageID) bool { - s.mu.Lock() - defer s.mu.Unlock() - - m := s.meta.metadata[id] - return m == nil || !m.Valid -} - func (s *snapshot) isWorkspacePackage(id PackageID) bool { s.mu.Lock() defer s.mu.Unlock() @@ -1479,39 +1447,42 @@ func (s *snapshot) AwaitInitialized(ctx context.Context) { // reloadWorkspace reloads the metadata for all invalidated workspace packages. func (s *snapshot) reloadWorkspace(ctx context.Context) error { - // See which of the workspace packages are missing metadata. 
+ var scopes []interface{} + var seen map[PackagePath]bool s.mu.Lock() - missingMetadata := len(s.workspacePackages) == 0 || len(s.meta.metadata) == 0 - pkgPathSet := map[PackagePath]struct{}{} - for id, pkgPath := range s.workspacePackages { - if m, ok := s.meta.metadata[id]; ok && m.Valid { - continue - } - missingMetadata = true - - // Don't try to reload "command-line-arguments" directly. - if source.IsCommandLineArguments(string(pkgPath)) { - continue + for _, pkgPaths := range s.shouldLoad { + for _, pkgPath := range pkgPaths { + if seen == nil { + seen = make(map[PackagePath]bool) + } + if seen[pkgPath] { + continue + } + seen[pkgPath] = true + scopes = append(scopes, pkgPath) } - pkgPathSet[pkgPath] = struct{}{} } s.mu.Unlock() + if len(scopes) == 0 { + return nil + } + // If the view's build configuration is invalid, we cannot reload by // package path. Just reload the directory instead. - if missingMetadata && !s.ValidBuildConfiguration() { - return s.load(ctx, false, viewLoadScope("LOAD_INVALID_VIEW")) + if !s.ValidBuildConfiguration() { + scopes = []interface{}{viewLoadScope("LOAD_INVALID_VIEW")} } - if len(pkgPathSet) == 0 { - return nil - } + err := s.load(ctx, false, scopes...) - var pkgPaths []interface{} - for pkgPath := range pkgPathSet { - pkgPaths = append(pkgPaths, pkgPath) + // Unless the context was canceled, set "shouldLoad" to false for all + // of the metadata we attempted to load. + if !errors.Is(err, context.Canceled) { + s.clearShouldLoad(scopes...) } - return s.load(ctx, false, pkgPaths...) + + return err } func (s *snapshot) reloadOrphanedFiles(ctx context.Context) error { @@ -1676,6 +1647,10 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC release := result.Acquire() // Copy the set of unloadable files. + // + // TODO(rfindley): this looks wrong. Shouldn't we clear unloadableFiles on + // changes to environment or workspace layout, or more generally on any + // metadata change? for k, v := range s.unloadableFiles { result.unloadableFiles[k] = v } @@ -1873,6 +1848,15 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC } } + // Any packages that need loading in s still need loading in the new + // snapshot. + for k, v := range s.shouldLoad { + if result.shouldLoad == nil { + result.shouldLoad = make(map[PackageID][]PackagePath) + } + result.shouldLoad[k] = v + } + // Compute which metadata updates are required. We only need to invalidate // packages directly containing the affected file, and only if it changed in // a relevant way. @@ -1880,20 +1864,41 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC deleteInvalidMetadata := forceReloadMetadata || workspaceModeChanged for k, v := range s.meta.metadata { invalidateMetadata := idsToInvalidate[k] + + // For metadata that has been newly invalidated, capture package paths + // requiring reloading in the shouldLoad map. + if invalidateMetadata && !source.IsCommandLineArguments(string(v.ID)) { + if result.shouldLoad == nil { + result.shouldLoad = make(map[PackageID][]PackagePath) + } + needsReload := []PackagePath{v.PkgPath} + if v.ForTest != "" && v.ForTest != v.PkgPath { + // When reloading test variants, always reload their ForTest package as + // well. Otherwise, we may miss test variants in the resulting load. + // + // TODO(rfindley): is this actually sufficient? Is it possible that + // other test variants may be invalidated? Either way, we should + // determine exactly what needs to be reloaded here. 
+ needsReload = append(needsReload, v.ForTest) + } + result.shouldLoad[k] = needsReload + } + + // Check whether the metadata should be deleted. if skipID[k] || (invalidateMetadata && deleteInvalidMetadata) { metadataUpdates[k] = nil continue } + + // Check if the metadata has changed. valid := v.Valid && !invalidateMetadata pkgFilesChanged := v.PkgFilesChanged || changedPkgFiles[k] - shouldLoad := v.ShouldLoad || invalidateMetadata - if valid != v.Valid || pkgFilesChanged != v.PkgFilesChanged || shouldLoad != v.ShouldLoad { + if valid != v.Valid || pkgFilesChanged != v.PkgFilesChanged { // Mark invalidated metadata rather than deleting it outright. metadataUpdates[k] = &KnownMetadata{ Metadata: v.Metadata, Valid: valid, PkgFilesChanged: pkgFilesChanged, - ShouldLoad: shouldLoad, } } } diff --git a/internal/lsp/source/util.go b/internal/lsp/source/util.go index 8d205ee6cee..262447cf36d 100644 --- a/internal/lsp/source/util.go +++ b/internal/lsp/source/util.go @@ -550,6 +550,8 @@ func IsValidImport(pkgPath, importPkgPath string) bool { if i == -1 { return true } + // TODO(rfindley): this looks wrong: IsCommandLineArguments is meant to + // operate on package IDs, not package paths. if IsCommandLineArguments(string(pkgPath)) { return true } @@ -560,6 +562,8 @@ func IsValidImport(pkgPath, importPkgPath string) bool { // "command-line-arguments" package, which is a package with an unknown ID // created by the go command. It can have a test variant, which is why callers // should not check that a value equals "command-line-arguments" directly. +// +// TODO(rfindley): this should accept a PackageID. func IsCommandLineArguments(s string) bool { return strings.Contains(s, "command-line-arguments") } From 3d474c89054e6c6094b99e57fe652ad6d45c2976 Mon Sep 17 00:00:00 2001 From: Peter Weinberger Date: Wed, 30 Mar 2022 14:48:29 -0400 Subject: [PATCH 121/136] internal/lsp/diff: new diff implementation to replace go-diff The new implementation is based on Myers' paper, and is in the package diff/lcs. There is a new option newDiff, that can be set to 'old', 'new', or 'both'. The default is 'both', although that may not be the right choice for a release. See gopls/hooks/diff.go. 'both' runs both the old and new diff algorithm and saves some statistics in a file in os.Tempdir(), When (or if) the new code becomes the default, this logging (and some internal checking) will be removed. The new implementation has internal checking, which currently panics. The code in gopls/hooks/diff.go tries to save an encrypted (for privacy) version of the failing input. The package diff/myers has not been replaced, but it could be. 
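To make that safety net concrete: "both" only trusts the new algorithm when its edits reproduce the target text exactly; otherwise it falls back to the old edits and records the failure. Below is a minimal sketch of that round-trip invariant, calling the NComputeEdits and ApplyEdits entry points the same way the new tests do (the URI and the two strings are invented for illustration, and the imports are internal to x/tools, so this sketch only builds inside the repository):

    package main

    import (
        "fmt"
        "log"

        "golang.org/x/tools/internal/lsp/diff"
        "golang.org/x/tools/internal/span"
    )

    func main() {
        before, after := "hello\nworld\n", "hello\nthere\nworld\n"

        // New Myers-based implementation (internal/lsp/diff/lcs).
        edits, err := diff.NComputeEdits(span.URI("file:///tmp/example.txt"), before, after)
        if err != nil {
            log.Fatal(err)
        }

        // The invariant BothDiffs enforces: applying the edits to "before"
        // must yield "after"; on a mismatch it returns the old edits instead
        // and logs the failing (encrypted) inputs.
        if got := diff.ApplyEdits(before, edits); got != after {
            log.Fatalf("round-trip failed: got %q, want %q", got, after)
        }
        fmt.Printf("ok: %d edits\n", len(edits))
    }

TestNRandom and TestNLinesRandom later in this change rely on the same ApplyEdits round trip to validate randomly generated inputs.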
Fixes golang/go#52966 Change-Id: Id38d76ed383c4330d9373580561765b5a2412587 Reviewed-on: https://go-review.googlesource.com/c/tools/+/396855 Reviewed-by: Robert Findley TryBot-Result: Gopher Robot Reviewed-by: Alan Donovan Run-TryBot: Peter Weinberger --- go.mod | 4 +- go.sum | 22 +- gopls/doc/settings.md | 10 + gopls/go.mod | 4 +- gopls/go.sum | 20 +- gopls/internal/hooks/diff.go | 164 +++++++++ gopls/internal/hooks/diff_test.go | 47 ++- gopls/internal/hooks/hooks.go | 10 +- internal/lsp/diff/diff_test.go | 125 +++++++ internal/lsp/diff/lcs/common.go | 184 ++++++++++ internal/lsp/diff/lcs/common_test.go | 140 +++++++ internal/lsp/diff/lcs/doc.go | 156 ++++++++ internal/lsp/diff/lcs/git.sh | 34 ++ internal/lsp/diff/lcs/labels.go | 55 +++ internal/lsp/diff/lcs/old.go | 530 +++++++++++++++++++++++++++ internal/lsp/diff/lcs/old_test.go | 203 ++++++++++ internal/lsp/diff/ndiff.go | 130 +++++++ internal/lsp/source/options.go | 12 +- internal/lsp/tests/tests.go | 1 + 19 files changed, 1804 insertions(+), 47 deletions(-) create mode 100644 internal/lsp/diff/lcs/common.go create mode 100644 internal/lsp/diff/lcs/common_test.go create mode 100644 internal/lsp/diff/lcs/doc.go create mode 100644 internal/lsp/diff/lcs/git.sh create mode 100644 internal/lsp/diff/lcs/labels.go create mode 100644 internal/lsp/diff/lcs/old.go create mode 100644 internal/lsp/diff/lcs/old_test.go create mode 100644 internal/lsp/diff/ndiff.go diff --git a/go.mod b/go.mod index 985b9cc120c..f05aba2b64a 100644 --- a/go.mod +++ b/go.mod @@ -1,12 +1,12 @@ module golang.org/x/tools -go 1.17 +go 1.18 require ( github.com/yuin/goldmark v1.4.1 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - golang.org/x/sys v0.0.0-20211019181941-9d821ace8654 + golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a golang.org/x/text v0.3.7 ) diff --git a/go.sum b/go.sum index 85cf00cab79..efeb68a0ec2 100644 --- a/go.sum +++ b/go.sum @@ -1,30 +1,12 @@ github.com/yuin/goldmark v1.4.1 h1:/vn0k+RBvwlxEmP5E7SZMqNxPhfMVFEJiykr15/0XKM= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f h1:OfiFi4JbukWwe3lzw+xunroH1mnC1e2Gy5cxNJApiSY= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211019181941-9d821ace8654 h1:id054HUawV2/6IGm2IV8KZQjqtwAOo2CYlOToYqa0d0= -golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/gopls/doc/settings.md b/gopls/doc/settings.md index 0ed0e19bb02..6d923b71e36 100644 --- a/gopls/doc/settings.md +++ b/gopls/doc/settings.md @@ -452,6 +452,16 @@ Default: `false`. +#### **newDiff** *string* + +newDiff enables the new diff implementation. If this is "both", +for now both diffs will be run and statistics will be generateted in +a file in $TMPDIR. This is a risky setting; help in trying it +is appreciated. If it is "old" the old implementation is used, +and if it is "new", just the new implementation is used. + +Default: 'old'. + ## Code Lenses These are the code lenses that `gopls` currently supports. 
They can be enabled diff --git a/gopls/go.mod b/gopls/go.mod index 020386fca28..3e89620b7f6 100644 --- a/gopls/go.mod +++ b/gopls/go.mod @@ -8,7 +8,7 @@ require ( github.com/jba/templatecheck v0.6.0 github.com/sergi/go-diff v1.1.0 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 - golang.org/x/sys v0.0.0-20220209214540-3681064d5158 + golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a golang.org/x/tools v0.1.11-0.20220523181440-ccb10502d1a5 golang.org/x/vuln v0.0.0-20220718121659-b9a3ad919698 honnef.co/go/tools v0.3.2 @@ -20,7 +20,7 @@ require ( github.com/BurntSushi/toml v1.0.0 // indirect github.com/google/safehtml v0.0.2 // indirect golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e // indirect - golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect + golang.org/x/sync v0.0.0-20210220032951-036812b2e83c golang.org/x/text v0.3.7 // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect ) diff --git a/gopls/go.sum b/gopls/go.sum index f263f9e0d00..4ee977a8c89 100644 --- a/gopls/go.sum +++ b/gopls/go.sum @@ -1,21 +1,14 @@ -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/toml v1.0.0 h1:dtDWrepsVPfW9H/4y7dDgFc2MBUSeJhlaDtK13CxFlU= github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/frankban/quicktest v1.14.2 h1:SPb1KFFmM+ybpEjPUhCCkZOM5xlovT5UbrMvWnXyBns= github.com/frankban/quicktest v1.14.2/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= -github.com/google/go-cmdtest v0.4.0/go.mod h1:apVn/GCasLZUVpAJ6oWAuyP7Ne7CEsQbTnc0plM3m+o= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/safehtml v0.0.2 h1:ZOt2VXg4x24bW0m2jtzAOkhoXV0iM8vNKc0paByCZqM= github.com/google/safehtml v0.0.2/go.mod h1:L4KWwDsUJdECRAEpZoBn3O64bQaywRscowZjJAzjHnU= github.com/jba/printsrc v0.2.2 h1:9OHK51UT+/iMAEBlQIIXW04qvKyF3/vvLuwW/hL8tDU= @@ -47,8 +40,6 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e h1:qyrTQ++p1afMkO4DPEeLGq/3oTsdlvdH4vqZUBWzUKM= golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= -golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= 
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -61,18 +52,15 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211213223007-03aa0b5f6827/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220209214540-3681064d5158 h1:rm+CHSpPEEW2IsXUib1ThaHIjuBVZjxNgSKmBLFfD4c= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/vuln v0.0.0-20220503210553-a5481fb0c8be h1:jokAF1mfylAi1iTQx7C44B7vyXUcSEMw8eDv0PzNu8s= -golang.org/x/vuln v0.0.0-20220503210553-a5481fb0c8be/go.mod h1:twca1SxmF6/i2wHY/mj1vLIkkHdp+nil/yA32ZOP4kg= golang.org/x/vuln v0.0.0-20220613164644-4eb5ba49563c h1:r5bbIROBQtRRgoutV8Q3sFY58VzHW6jMBYl48ANSyS4= golang.org/x/vuln v0.0.0-20220613164644-4eb5ba49563c/go.mod h1:UZshlUPxXeGUM9I14UOawXQg6yosDE9cr1vKY/DzgWo= golang.org/x/vuln v0.0.0-20220718121659-b9a3ad919698 h1:9lgpkUgjzoIcZYp7/UPFO/0jIlYcokcEjqWm0hj9pzE= @@ -88,14 +76,10 @@ gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -honnef.co/go/tools v0.2.2/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= -honnef.co/go/tools v0.3.0 h1:2LdYUZ7CIxnYgskbUZfY7FPggmqnh6shBqfWa8Tn3XU= -honnef.co/go/tools v0.3.0/go.mod h1:vlRD9XErLMGT+mDuofSr0mMMquscM/1nQqtRSsh6m70= honnef.co/go/tools v0.3.2 h1:ytYb4rOqyp1TSa2EPvNVwtPQJctSELKaMyLfqNP4+34= honnef.co/go/tools v0.3.2/go.mod h1:jzwdWgg7Jdq75wlfblQxO4neNaFFSvgc1tD5Wv8U0Yw= mvdan.cc/gofumpt v0.3.0 h1:kTojdZo9AcEYbQYhGuLf/zszYthRdhDNDUi2JKTxas4= mvdan.cc/gofumpt v0.3.0/go.mod h1:0+VyGZWleeIj5oostkOex+nDBA0eyavuDnDusAJ8ylo= mvdan.cc/unparam v0.0.0-20211214103731-d0ef000c54e5 h1:Jh3LAeMt1eGpxomyu3jVkmVZWW2MxZ1qIIV2TZ/nRio= -mvdan.cc/unparam v0.0.0-20211214103731-d0ef000c54e5/go.mod h1:b8RRCBm0eeiWR8cfN88xeq2G5SG3VKGO+5UPWi5FSOY= mvdan.cc/xurls/v2 v2.4.0 
h1:tzxjVAj+wSBmDcF6zBB7/myTy3gX9xvi8Tyr28AuQgc= mvdan.cc/xurls/v2 v2.4.0/go.mod h1:+GEjq9uNjqs8LQfM9nVnM8rff0OQ5Iash5rzX+N1CSg= diff --git a/gopls/internal/hooks/diff.go b/gopls/internal/hooks/diff.go index a307ba77fd6..e0461a152bb 100644 --- a/gopls/internal/hooks/diff.go +++ b/gopls/internal/hooks/diff.go @@ -5,13 +5,177 @@ package hooks import ( + "crypto/rand" + "encoding/json" "fmt" + "io" + "log" + "math/big" + "os" + "path/filepath" + "runtime" + "strings" + "sync" + "time" + "unicode" "github.com/sergi/go-diff/diffmatchpatch" "golang.org/x/tools/internal/lsp/diff" "golang.org/x/tools/internal/span" ) +// structure for saving information about diffs +// while the new code is being rolled out +type diffstat struct { + Before, After int + Oldedits, Newedits int + Oldtime, Newtime time.Duration + Stack string + Msg string `json:",omitempty"` // for errors + Ignored int `json:",omitempty"` // numbr of skipped records with 0 edits +} + +var ( + mu sync.Mutex // serializes writes and protects ignored + difffd io.Writer + ignored int // lots of the diff calls have 0 diffs +) + +var fileonce sync.Once + +func (s *diffstat) save() { + // save log records in a file in os.TempDir(). + // diff is frequently called with identical strings, so + // these are somewhat compressed out + fileonce.Do(func() { + fname := filepath.Join(os.TempDir(), fmt.Sprintf("gopls-diff-%x", os.Getpid())) + fd, err := os.Create(fname) + if err != nil { + // now what? + } + difffd = fd + }) + + mu.Lock() + defer mu.Unlock() + if s.Oldedits == 0 && s.Newedits == 0 { + if ignored < 15 { + // keep track of repeated instances of no diffs + // but only print every 15th + ignored++ + return + } + s.Ignored = ignored + 1 + } else { + s.Ignored = ignored + } + ignored = 0 + // it would be really nice to see why diff was called + _, f, l, ok := runtime.Caller(2) + if ok { + var fname string + fname = filepath.Base(f) // diff is only called from a few places + s.Stack = fmt.Sprintf("%s:%d", fname, l) + } + x, err := json.Marshal(s) + if err != nil { + log.Print(err) // failure to print statistics should not stop gopls + } + fmt.Fprintf(difffd, "%s\n", x) +} + +// save encrypted versions of the broken input and return the file name +// (the saved strings will have the same diff behavior as the user's strings) +func disaster(before, after string) string { + // encrypt before and after for privacy. (randomized monoalphabetic cipher) + // got will contain the substitution cipher + // for the runes in before and after + got := map[rune]rune{} + for _, r := range before { + got[r] = ' ' // value doesn't matter + } + for _, r := range after { + got[r] = ' ' + } + repl := initrepl(len(got)) + i := 0 + for k := range got { // randomized + got[k] = repl[i] + i++ + } + // use got to encrypt before and after + subst := func(r rune) rune { return got[r] } + first := strings.Map(subst, before) + second := strings.Map(subst, after) + + // one failure per session is enough, and more private. + // this saves the last one. + fname := fmt.Sprintf("%s/gopls-failed-%x", os.TempDir(), os.Getpid()) + fd, err := os.Create(fname) + defer fd.Close() + _, err = fd.Write([]byte(fmt.Sprintf("%s\n%s\n", string(first), string(second)))) + if err != nil { + // what do we tell the user? 
+ return "" + } + // ask the user to send us the file, somehow + return fname +} + +func initrepl(n int) []rune { + repl := make([]rune, 0, n) + for r := rune(0); len(repl) < n; r++ { + if unicode.IsLetter(r) || unicode.IsNumber(r) { + repl = append(repl, r) + } + } + // randomize repl + rdr := rand.Reader + lim := big.NewInt(int64(len(repl))) + for i := 1; i < n; i++ { + v, _ := rand.Int(rdr, lim) + k := v.Int64() + repl[i], repl[k] = repl[k], repl[i] + } + return repl +} + +// BothDiffs edits calls both the new and old diffs, checks that the new diffs +// change before into after, and attempts to preserve some statistics. +func BothDiffs(uri span.URI, before, after string) (edits []diff.TextEdit, err error) { + // The new diff code contains a lot of internal checks that panic when they + // fail. This code catches the panics, or other failures, tries to save + // the failing example (and ut wiykd ask the user to send it back to us, and + // changes options.newDiff to 'old', if only we could figure out how.) + stat := diffstat{Before: len(before), After: len(after)} + now := time.Now() + Oldedits, oerr := ComputeEdits(uri, before, after) + if oerr != nil { + stat.Msg += fmt.Sprintf("old:%v", oerr) + } + stat.Oldedits = len(Oldedits) + stat.Oldtime = time.Since(now) + defer func() { + if r := recover(); r != nil { + disaster(before, after) + edits, err = Oldedits, oerr + } + }() + now = time.Now() + Newedits, rerr := diff.NComputeEdits(uri, before, after) + stat.Newedits = len(Newedits) + stat.Newtime = time.Now().Sub(now) + got := diff.ApplyEdits(before, Newedits) + if got != after { + stat.Msg += "FAIL" + disaster(before, after) + stat.save() + return Oldedits, oerr + } + stat.save() + return Newedits, rerr +} + func ComputeEdits(uri span.URI, before, after string) (edits []diff.TextEdit, err error) { // The go-diff library has an unresolved panic (see golang/go#278774). // TODO(rstambler): Remove the recover once the issue has been fixed diff --git a/gopls/internal/hooks/diff_test.go b/gopls/internal/hooks/diff_test.go index d979be78dbe..a9e536728b0 100644 --- a/gopls/internal/hooks/diff_test.go +++ b/gopls/internal/hooks/diff_test.go @@ -2,15 +2,56 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-package hooks_test +package hooks import ( + "fmt" + "io/ioutil" + "os" "testing" + "unicode/utf8" - "golang.org/x/tools/gopls/internal/hooks" "golang.org/x/tools/internal/lsp/diff/difftest" ) func TestDiff(t *testing.T) { - difftest.DiffTest(t, hooks.ComputeEdits) + difftest.DiffTest(t, ComputeEdits) +} + +func TestRepl(t *testing.T) { + t.Skip("just for checking repl by looking at it") + repl := initrepl(800) + t.Errorf("%q", string(repl)) + t.Errorf("%d", len(repl)) +} + +func TestDisaster(t *testing.T) { + a := "This is a string,(\u0995) just for basic functionality" + b := "Ths is another string, (\u0996) to see if disaster will store stuff correctly" + fname := disaster(a, b) + buf, err := ioutil.ReadFile(fname) + if err != nil { + t.Errorf("error %v reading %s", err, fname) + } + var x, y string + n, err := fmt.Sscanf(string(buf), "%s\n%s\n", &x, &y) + if n != 2 { + t.Errorf("got %d, expected 2", n) + t.Logf("read %q", string(buf)) + } + if a == x || b == y { + t.Error("failed to encrypt") + } + err = os.Remove(fname) + if err != nil { + t.Errorf("%v removing %s", err, fname) + } + alen, blen := utf8.RuneCount([]byte(a)), utf8.RuneCount([]byte(b)) + xlen, ylen := utf8.RuneCount([]byte(x)), utf8.RuneCount([]byte(y)) + if alen != xlen { + t.Errorf("a; got %d, expected %d", xlen, alen) + } + if blen != ylen { + t.Errorf("b: got %d expected %d", ylen, blen) + } } diff --git a/gopls/internal/hooks/hooks.go b/gopls/internal/hooks/hooks.go index 023aefeab98..b55917e0737 100644 --- a/gopls/internal/hooks/hooks.go +++ b/gopls/internal/hooks/hooks.go @@ -11,6 +11,7 @@ import ( "context" "golang.org/x/tools/gopls/internal/vulncheck" + "golang.org/x/tools/internal/lsp/diff" "golang.org/x/tools/internal/lsp/source" "mvdan.cc/gofumpt/format" "mvdan.cc/xurls/v2" @@ -19,7 +20,14 @@ import ( func Options(options *source.Options) { options.LicensesText = licensesText if options.GoDiff { - options.ComputeEdits = ComputeEdits + switch options.NewDiff { + case "old": + options.ComputeEdits = ComputeEdits + case "new": + options.ComputeEdits = diff.NComputeEdits + default: + options.ComputeEdits = BothDiffs + } } options.URLRegexp = xurls.Relaxed() options.GofumptFormat = func(ctx context.Context, langVersion, modulePath string, src []byte) ([]byte, error) { diff --git a/internal/lsp/diff/diff_test.go b/internal/lsp/diff/diff_test.go index dd9414e5d7a..a3699498914 100644 --- a/internal/lsp/diff/diff_test.go +++ b/internal/lsp/diff/diff_test.go @@ -6,6 +6,8 @@ package diff_test import ( "fmt" + "math/rand" + "strings" "testing" "golang.org/x/tools/internal/lsp/diff" @@ -29,6 +31,71 @@ func TestApplyEdits(t *testing.T) { } } +func TestNEdits(t *testing.T) { + for i, tc := range difftest.TestCases { + sp := fmt.Sprintf("file://%s.%d", tc.Name, i) + edits, err := diff.NComputeEdits(span.URI(sp), tc.In, tc.Out) + if err != nil { + t.Fatal(err) + } + got := diff.ApplyEdits(tc.In, edits) + if got != tc.Out { + t.Fatalf("%s: got %q wanted %q", tc.Name, got, tc.Out) + } + if len(edits) < len(tc.Edits) { // should find subline edits + t.Errorf("got %v, expected %v for %#v", edits, tc.Edits, tc) + } + } +} + +func TestNRandom(t *testing.T) { + rand.Seed(1) + for i := 0; i < 1000; i++ { + fname := fmt.Sprintf("file://%x", i) + a := randstr("abω", 16) + b := randstr("abωc", 16) + edits, err := diff.NComputeEdits(span.URI(fname), a, b) + if err != nil { + t.Fatalf("%q,%q %v", a, b, err) + } + got := diff.ApplyEdits(a, edits) + if got != b { + t.Fatalf("%d: got %q, wanted %q, starting with %q", i, got, b, a) + } + } 
+} + +func TestNLinesRandom(t *testing.T) { + rand.Seed(2) + for i := 0; i < 1000; i++ { + fname := fmt.Sprintf("file://%x", i) + x := randlines("abω", 4) // avg line length is 6, want a change every 3rd line or so + v := []rune(x) + for i := 0; i < len(v); i++ { + if rand.Float64() < .05 { + v[i] = 'N' + } + } + y := string(v) + // occasionally remove the trailing \n + if rand.Float64() < .1 { + x = x[:len(x)-1] + } + if rand.Float64() < .1 { + y = y[:len(y)-1] + } + a, b := strings.SplitAfter(x, "\n"), strings.SplitAfter(y, "\n") + edits, err := diff.NComputeLineEdits(span.URI(fname), a, b) + if err != nil { + t.Fatalf("%q,%q %v", a, b, err) + } + got := diff.ApplyEdits(x, edits) + if got != y { + t.Fatalf("%d: got\n%q, wanted\n%q, starting with %q", i, got, y, a) + } + } +} + func TestLineEdits(t *testing.T) { for _, tc := range difftest.TestCases { t.Run(tc.Name, func(t *testing.T) { @@ -63,6 +130,41 @@ func TestUnified(t *testing.T) { } } +func TestRegressionOld001(t *testing.T) { + a := "// Copyright 2019 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage diff_test\n\nimport (\n\t\"fmt\"\n\t\"math/rand\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"golang.org/x/tools/internal/lsp/diff\"\n\t\"golang.org/x/tools/internal/lsp/diff/difftest\"\n\t\"golang.org/x/tools/internal/span\"\n)\n" + + b := "// Copyright 2019 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage diff_test\n\nimport (\n\t\"fmt\"\n\t\"math/rand\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/google/safehtml/template\"\n\t\"golang.org/x/tools/internal/lsp/diff\"\n\t\"golang.org/x/tools/internal/lsp/diff/difftest\"\n\t\"golang.org/x/tools/internal/span\"\n)\n" + diffs, err := diff.NComputeEdits(span.URI("file://one"), a, b) + if err != nil { + t.Error(err) + } + got := diff.ApplyEdits(a, diffs) + if got != b { + i := 0 + for ; i < len(a) && i < len(b) && got[i] == b[i]; i++ { + } + t.Errorf("oops %vd\n%q\n%q", diffs, got, b) + t.Errorf("\n%q\n%q", got[i:], b[i:]) + } +} + +func TestRegressionOld002(t *testing.T) { + a := "n\"\n)\n" + b := "n\"\n\t\"golang.org/x//nnal/stack\"\n)\n" + diffs, err := diff.NComputeEdits(span.URI("file://two"), a, b) + if err != nil { + t.Error(err) + } + got := diff.ApplyEdits(a, diffs) + if got != b { + i := 0 + for ; i < len(a) && i < len(b) && got[i] == b[i]; i++ { + } + t.Errorf("oops %vd\n%q\n%q", diffs, got, b) + t.Errorf("\n%q\n%q", got[i:], b[i:]) + } +} + func diffEdits(got, want []diff.TextEdit) bool { if len(got) != len(want) { return true @@ -78,3 +180,26 @@ func diffEdits(got, want []diff.TextEdit) bool { } return false } + +// return a random string of length n made of characters from s +func randstr(s string, n int) string { + src := []rune(s) + x := make([]rune, n) + for i := 0; i < n; i++ { + x[i] = src[rand.Intn(len(src))] + } + return string(x) +} + +// return some random lines, all ending with \n +func randlines(s string, n int) string { + src := []rune(s) + var b strings.Builder + for i := 0; i < n; i++ { + for j := 0; j < 4+rand.Intn(4); j++ { + b.WriteRune(src[rand.Intn(len(src))]) + } + b.WriteByte('\n') + } + return b.String() +} diff --git a/internal/lsp/diff/lcs/common.go b/internal/lsp/diff/lcs/common.go new file mode 100644 index 00000000000..e5d08014760 --- /dev/null +++ b/internal/lsp/diff/lcs/common.go @@ -0,0 +1,184 @@ +// Copyright 2022 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lcs + +import ( + "log" + "sort" +) + +// lcs is a longest common sequence +type lcs []diag + +// A diag is a piece of the edit graph where A[X+i] == B[Y+i], for 0<=i l[j].Len + }) + return l +} + +// validate that the elements of the lcs do not overlap +// (can only happen when the two-sided algorithm ends early) +// expects the lcs to be sorted +func (l lcs) valid() bool { + for i := 1; i < len(l); i++ { + if l[i-1].X+l[i-1].Len > l[i].X { + return false + } + if l[i-1].Y+l[i-1].Len > l[i].Y { + return false + } + } + return true +} + +// repair overlapping lcs +// only called if two-sided stops early +func (l lcs) fix() lcs { + // from the set of diagonals in l, find a maximal non-conflicting set + // this problem may be NP-complete, but we use a greedy heuristic, + // which is quadratic, but with a better data structure, could be D log D. + // indepedent is not enough: {0,3,1} and {3,0,2} can't both occur in an lcs + // which has to have monotone x and y + if len(l) == 0 { + return nil + } + sort.Slice(l, func(i, j int) bool { return l[i].Len > l[j].Len }) + tmp := make(lcs, 0, len(l)) + tmp = append(tmp, l[0]) + for i := 1; i < len(l); i++ { + var dir direction + nxt := l[i] + for _, in := range tmp { + if dir, nxt = overlap(in, nxt); dir == empty || dir == bad { + break + } + } + if nxt.Len > 0 && dir != bad { + tmp = append(tmp, nxt) + } + } + tmp.sort() + if false && !tmp.valid() { // debug checking + log.Fatalf("here %d", len(tmp)) + } + return tmp +} + +type direction int + +const ( + empty direction = iota // diag is empty (so not in lcs) + leftdown // proposed acceptably to the left and below + rightup // proposed diag is acceptably to the right and above + bad // proposed diag is inconsistent with the lcs so far +) + +// overlap trims the proposed diag prop so it doesn't overlap with +// the existing diag that has already been added to the lcs. +func overlap(exist, prop diag) (direction, diag) { + if prop.X <= exist.X && exist.X < prop.X+prop.Len { + // remove the end of prop where it overlaps with the X end of exist + delta := prop.X + prop.Len - exist.X + prop.Len -= delta + if prop.Len <= 0 { + return empty, prop + } + } + if exist.X <= prop.X && prop.X < exist.X+exist.Len { + // remove the beginning of prop where overlaps with exist + delta := exist.X + exist.Len - prop.X + prop.Len -= delta + if prop.Len <= 0 { + return empty, prop + } + prop.X += delta + prop.Y += delta + } + if prop.Y <= exist.Y && exist.Y < prop.Y+prop.Len { + // remove the end of prop that overlaps (in Y) with exist + delta := prop.Y + prop.Len - exist.Y + prop.Len -= delta + if prop.Len <= 0 { + return empty, prop + } + } + if exist.Y <= prop.Y && prop.Y < exist.Y+exist.Len { + // remove the beginning of peop that overlaps with exist + delta := exist.Y + exist.Len - prop.Y + prop.Len -= delta + if prop.Len <= 0 { + return empty, prop + } + prop.X += delta // no test reaches this code + prop.Y += delta + } + if prop.X+prop.Len <= exist.X && prop.Y+prop.Len <= exist.Y { + return leftdown, prop + } + if exist.X+exist.Len <= prop.X && exist.Y+exist.Len <= prop.Y { + return rightup, prop + } + // prop can't be in an lcs that contains exist + return bad, prop +} + +// manipulating Diag and lcs + +// prependlcs a diagonal (x,y)-(x+1,y+1) segment either to an empty lcs +// or to its first Diag. prependlcs is only called extending diagonals +// the backward direction. 
+func prependlcs(lcs lcs, x, y int) lcs { + if len(lcs) > 0 { + d := &lcs[0] + if int(d.X) == x+1 && int(d.Y) == y+1 { + // extend the diagonal down and to the left + d.X, d.Y = int(x), int(y) + d.Len++ + return lcs + } + } + + r := diag{X: int(x), Y: int(y), Len: 1} + lcs = append([]diag{r}, lcs...) + return lcs +} + +// appendlcs appends a diagonal, or extends the existing one. +// by adding the edge (x,y)-(x+1.y+1). appendlcs is only called +// while extending diagonals in the forward direction. +func appendlcs(lcs lcs, x, y int) lcs { + if len(lcs) > 0 { + last := &lcs[len(lcs)-1] + // Expand last element if adjoining. + if last.X+last.Len == x && last.Y+last.Len == y { + last.Len++ + return lcs + } + } + + return append(lcs, diag{X: x, Y: y, Len: 1}) +} + +// enforce constraint on d, k +func ok(d, k int) bool { + return d >= 0 && -d <= k && k <= d +} + +type Diff struct { + Start, End int // offsets in A + Text string // replacement text +} diff --git a/internal/lsp/diff/lcs/common_test.go b/internal/lsp/diff/lcs/common_test.go new file mode 100644 index 00000000000..4aa36abc2e8 --- /dev/null +++ b/internal/lsp/diff/lcs/common_test.go @@ -0,0 +1,140 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lcs + +import ( + "log" + "math/rand" + "strings" + "testing" +) + +type Btest struct { + a, b string + lcs []string +} + +var Btests = []Btest{ + {"aaabab", "abaab", []string{"abab", "aaab"}}, + {"aabbba", "baaba", []string{"aaba"}}, + {"cabbx", "cbabx", []string{"cabx", "cbbx"}}, + {"c", "cb", []string{"c"}}, + {"aaba", "bbb", []string{"b"}}, + {"bbaabb", "b", []string{"b"}}, + {"baaabb", "bbaba", []string{"bbb", "baa", "bab"}}, + {"baaabb", "abbab", []string{"abb", "bab", "aab"}}, + {"baaba", "aaabba", []string{"aaba"}}, + {"ca", "cba", []string{"ca"}}, + {"ccbcbc", "abba", []string{"bb"}}, + {"ccbcbc", "aabba", []string{"bb"}}, + {"ccb", "cba", []string{"cb"}}, + {"caef", "axe", []string{"ae"}}, + {"bbaabb", "baabb", []string{"baabb"}}, + // Example from Myers: + {"abcabba", "cbabac", []string{"caba", "baba", "cbba"}}, + {"3456aaa", "aaa", []string{"aaa"}}, + {"aaa", "aaa123", []string{"aaa"}}, + {"aabaa", "aacaa", []string{"aaaa"}}, + {"1a", "a", []string{"a"}}, + {"abab", "bb", []string{"bb"}}, + {"123", "ab", []string{""}}, + {"a", "b", []string{""}}, + {"abc", "123", []string{""}}, + {"aa", "aa", []string{"aa"}}, + {"abcde", "12345", []string{""}}, + {"aaa3456", "aaa", []string{"aaa"}}, + {"abcde", "12345a", []string{"a"}}, + {"ab", "123", []string{""}}, + {"1a2", "a", []string{"a"}}, + // for two-sided + {"babaab", "cccaba", []string{"aba"}}, + {"aabbab", "cbcabc", []string{"bab"}}, + {"abaabb", "bcacab", []string{"baab"}}, + {"abaabb", "abaaaa", []string{"abaa"}}, + {"bababb", "baaabb", []string{"baabb"}}, + {"abbbaa", "cabacc", []string{"aba"}}, + {"aabbaa", "aacaba", []string{"aaaa", "aaba"}}, +} + +func init() { + log.SetFlags(log.Lshortfile) +} + +func check(t *testing.T, str string, lcs lcs, want []string) { + t.Helper() + if !lcs.valid() { + t.Errorf("bad lcs %v", lcs) + } + var got strings.Builder + for _, dd := range lcs { + got.WriteString(str[dd.X : dd.X+dd.Len]) + } + ans := got.String() + for _, w := range want { + if ans == w { + return + } + } + t.Fatalf("str=%q lcs=%v want=%q got=%q", str, lcs, want, ans) +} + +func checkDiffs(t *testing.T, before string, diffs []Diff, after string) { + t.Helper() + var ans strings.Builder + sofar := 0 // 
index of position in before + for _, d := range diffs { + if sofar < d.Start { + ans.WriteString(before[sofar:d.Start]) + } + ans.WriteString(d.Text) + sofar = d.End + } + ans.WriteString(before[sofar:]) + if ans.String() != after { + t.Fatalf("diff %v took %q to %q, not to %q", diffs, before, ans.String(), after) + } +} + +func lcslen(l lcs) int { + ans := 0 + for _, d := range l { + ans += int(d.Len) + } + return ans +} + +// return a random string of length n made of characters from s +func randstr(s string, n int) string { + src := []rune(s) + x := make([]rune, n) + for i := 0; i < n; i++ { + x[i] = src[rand.Intn(len(src))] + } + return string(x) +} + +func TestLcsFix(t *testing.T) { + tests := []struct{ before, after lcs }{ + {lcs{diag{0, 0, 3}, diag{2, 2, 5}, diag{3, 4, 5}, diag{8, 9, 4}}, lcs{diag{0, 0, 2}, diag{2, 2, 1}, diag{3, 4, 5}, diag{8, 9, 4}}}, + {lcs{diag{1, 1, 6}, diag{6, 12, 3}}, lcs{diag{1, 1, 5}, diag{6, 12, 3}}}, + {lcs{diag{0, 0, 4}, diag{3, 5, 4}}, lcs{diag{0, 0, 3}, diag{3, 5, 4}}}, + {lcs{diag{0, 20, 1}, diag{0, 0, 3}, diag{1, 20, 4}}, lcs{diag{0, 0, 3}, diag{3, 22, 2}}}, + {lcs{diag{0, 0, 4}, diag{1, 1, 2}}, lcs{diag{0, 0, 4}}}, + {lcs{diag{0, 0, 4}}, lcs{diag{0, 0, 4}}}, + {lcs{}, lcs{}}, + {lcs{diag{0, 0, 4}, diag{1, 1, 6}, diag{3, 3, 2}}, lcs{diag{0, 0, 1}, diag{1, 1, 6}}}, + } + for n, x := range tests { + got := x.before.fix() + if len(got) != len(x.after) { + t.Errorf("got %v, expected %v, for %v", got, x.after, x.before) + } + olen := lcslen(x.after) + glen := lcslen(got) + if olen != glen { + t.Errorf("%d: lens(%d,%d) differ, %v, %v, %v", n, glen, olen, got, x.after, x.before) + } + } +} diff --git a/internal/lsp/diff/lcs/doc.go b/internal/lsp/diff/lcs/doc.go new file mode 100644 index 00000000000..dc779f38a01 --- /dev/null +++ b/internal/lsp/diff/lcs/doc.go @@ -0,0 +1,156 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// package lcs contains code to find longest-common-subsequences +// (and diffs) +package lcs + +/* +Compute longest-common-subsequences of two slices A, B using +algorithms from Myers' paper. A longest-common-subsequence +(LCS from now on) of A and B is a maximal set of lexically increasing +pairs of subscripts (x,y) with A[x]==B[y]. There may be many LCS, but +they all have the same length. An LCS determines a sequence of edits +that changes A into B. + +The key concept is the edit graph of A and B. +If A has length N and B has length M, then the edit graph has +vertices v[i][j] for 0 <= i <= N, 0 <= j <= M. There is a +horizontal edge from v[i][j] to v[i+1][j] whenever both are in +the graph, and a vertical edge from v[i][j] to f[i][j+1] similarly. +When A[i] == B[j] there is a diagonal edge from v[i][j] to v[i+1][j+1]. + +A path between in the graph between (0,0) and (N,M) determines a sequence +of edits converting A into B: each horizontal edge corresponds to removing +an element of A, and each vertical edge corresponds to inserting an +element of B. + +A vertex (x,y) is on (forward) diagonal k if x-y=k. A path in the graph +is of length D if it has D non-diagonal edges. The algorithms generate +forward paths (in which at least one of x,y increases at each edge), +or backward paths (in which at least one of x,y decreases at each edge), +or a combination. (Note that the orientation is the traditional mathematical one, +with the origin in the lower-left corner.) + +Here is the edit graph for A:"aabbaa", B:"aacaba". 
(I know the diagonals look weird.) + ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ + a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ | + ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ + b | | | ___/‾‾‾ | ___/‾‾‾ | | | + ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ + a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ | + ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ + c | | | | | | | + ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ + a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ | + ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ + a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ | + ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ + a a b b a a + + +The algorithm labels a vertex (x,y) with D,k if it is on diagonal k and at +the end of a maximal path of length D. (Because x-y=k it suffices to remember +only the x coordinate of the vertex.) + +The forward algorithm: Find the longest diagonal starting at (0,0) and +label its end with D=0,k=0. From that vertex take a vertical step and +then follow the longest diagonal (up and to the right), and label that vertex +with D=1,k=-1. From the D=0,k=0 point take a horizontal step and the follow +the longest diagonal (up and to the right) and label that vertex +D=1,k=1. In the same way, having labelled all the D vertices, +from a vertex labelled D,k find two vertices +tentatively labelled D+1,k-1 and D+1,k+1. There may be two on the same +diagonal, in which case take the one with the larger x. + +Eventually the path gets to (N,M), and the diagonals on it are the LCS. + +Here is the edit graph with the ends of D-paths labelled. (So, for instance, +0/2,2 indicates that x=2,y=2 is labelled with 0, as it should be, since the first +step is to go up the longest diagonal from (0,0).) +A:"aabbaa", B:"aacaba" + ⊙ ------- ⊙ ------- ⊙ -------(3/3,6)------- ⊙ -------(3/5,6)-------(4/6,6) + a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ | + ⊙ ------- ⊙ ------- ⊙ -------(2/3,5)------- ⊙ ------- ⊙ ------- ⊙ + b | | | ___/‾‾‾ | ___/‾‾‾ | | | + ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ -------(3/5,4)------- ⊙ + a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ | + ⊙ ------- ⊙ -------(1/2,3)-------(2/3,3)------- ⊙ ------- ⊙ ------- ⊙ + c | | | | | | | + ⊙ ------- ⊙ -------(0/2,2)-------(1/3,2)-------(2/4,2)-------(3/5,2)-------(4/6,2) + a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ | + ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ + a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ | + ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ + a a b b a a + +The 4-path is reconstructed starting at (4/6,6), horizontal to (3/5,6), diagonal to (3,4), vertical +to (2/3,3), horizontal to (1/2,3), vertical to (0/2,2), and diagonal to (0,0). As expected, +there are 4 non-diagonal steps, and the diagonals form an LCS. 
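+
+As a compact reference, the forward pass in Myers' array formulation can be
+written as the following minimal sketch. It computes only the distance D, not
+the LCS, and the names editDistance and v are illustrative; they are not used
+in this package:
+
+	func editDistance(a, b string) int {
+		n, m := len(a), len(b)
+		v := map[int]int{1: 0} // diagonal k -> furthest x reached so far
+		for d := 0; d <= n+m; d++ {
+			for k := -d; k <= d; k += 2 {
+				var x int
+				if k == -d || (k != d && v[k-1] < v[k+1]) {
+					x = v[k+1] // take the vertical (insertion) step
+				} else {
+					x = v[k-1] + 1 // take the horizontal (deletion) step
+				}
+				y := x - k
+				for x < n && y < m && a[x] == b[y] { // follow the diagonal
+					x++
+					y++
+				}
+				v[k] = x
+				if x >= n && y >= m {
+					return d
+				}
+			}
+		}
+		return n + m
+	}
+
+The code in this package stores these per-diagonal x values for every D
+(see labels.go), which is what allows the path, and hence the LCS, to be
+reconstructed afterwards.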
+ +There is a symmetric backward algorithm, which gives (backwards labels are prefixed with a colon): +A:"aabbaa", B:"aacaba" + ⊙ -------- ⊙ -------- ⊙ -------- ⊙ -------- ⊙ -------- ⊙ -------- ⊙ + a | ____/‾‾‾ | ____/‾‾‾ | | | ____/‾‾‾ | ____/‾‾‾ | + ⊙ -------- ⊙ -------- ⊙ -------- ⊙ -------- ⊙ --------(:0/5,5)-------- ⊙ + b | | | ____/‾‾‾ | ____/‾‾‾ | | | + ⊙ -------- ⊙ -------- ⊙ --------(:1/3,4)-------- ⊙ -------- ⊙ -------- ⊙ + a | ____/‾‾‾ | ____/‾‾‾ | | | ____/‾‾‾ | ____/‾‾‾ | + (:3/0,3)--------(:2/1,3)-------- ⊙ --------(:2/3,3)--------(:1/4,3)-------- ⊙ -------- ⊙ + c | | | | | | | + ⊙ -------- ⊙ -------- ⊙ --------(:3/3,2)--------(:2/4,2)-------- ⊙ -------- ⊙ + a | ____/‾‾‾ | ____/‾‾‾ | | | ____/‾‾‾ | ____/‾‾‾ | + (:3/0,1)-------- ⊙ -------- ⊙ -------- ⊙ --------(:3/4,1)-------- ⊙ -------- ⊙ + a | ____/‾‾‾ | ____/‾‾‾ | | | ____/‾‾‾ | ____/‾‾‾ | + (:4/0,0)-------- ⊙ -------- ⊙ -------- ⊙ --------(:4/4,0)-------- ⊙ -------- ⊙ + a a b b a a + +Neither of these is ideal for use in an editor, where it is undesirable to send very long diffs to the +front end. It's tricky to decide exactly what 'very long diffs' means, as "replace A by B" is very short. +We want to control how big D can be, by stopping when it gets too large. The forward algorithm then +privileges common prefixes, and the backward algorithm privileges common suffixes. Either is an undesirable +asymmetry. + +Fortunately there is a two-sided algorithm, implied by results in Myers' paper. Here's what the labels in +the edit graph look like. +A:"aabbaa", B:"aacaba" + ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ + a | ____/‾‾‾‾ | ____/‾‾‾‾ | | | ____/‾‾‾‾ | ____/‾‾‾‾ | + ⊙ --------- ⊙ --------- ⊙ --------- (2/3,5) --------- ⊙ --------- (:0/5,5)--------- ⊙ + b | | | ____/‾‾‾‾ | ____/‾‾‾‾ | | | + ⊙ --------- ⊙ --------- ⊙ --------- (:1/3,4)--------- ⊙ --------- ⊙ --------- ⊙ + a | ____/‾‾‾‾ | ____/‾‾‾‾ | | | ____/‾‾‾‾ | ____/‾‾‾‾ | + ⊙ --------- (:2/1,3)--------- (1/2,3) ---------(2:2/3,3)--------- (:1/4,3)--------- ⊙ --------- ⊙ + c | | | | | | | + ⊙ --------- ⊙ --------- (0/2,2) --------- (1/3,2) ---------(2:2/4,2)--------- ⊙ --------- ⊙ + a | ____/‾‾‾‾ | ____/‾‾‾‾ | | | ____/‾‾‾‾ | ____/‾‾‾‾ | + ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ + a | ____/‾‾‾‾ | ____/‾‾‾‾ | | | ____/‾‾‾‾ | ____/‾‾‾‾ | + ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ + a a b b a a + +The algorithm stopped when it saw the backwards 2-path ending at (1,3) and the forwards 2-path ending at (3,5). The criterion +is a backwards path ending at (u,v) and a forward path ending at (x,y), where u <= x and the two points are on the same +diagonal. (Here the edgegraph has a diagonal, but the criterion is x-y=u-v.) Myers proves there is a forward +2-path from (0,0) to (1,3), and that together with the backwards 2-path ending at (1,3) gives the expected 4-path. +Unfortunately the forward path has to be constructed by another run of the forward algorithm; it can't be found from the +computed labels. That is the worst case. Had the code noticed (x,y)=(u,v)=(3,3) the whole path could be reconstructed +from the edgegraph. The implementation looks for a number of special cases to try to avoid computing an extra forward path. + +If the two-sided algorithm has stop early (because D has become too large) it will have found a forward LCS and a +backwards LCS. 
Ideally these go with disjoint prefixes and suffixes of A and B, but disjointness may fail and the two +computed LCS may conflict. (An easy example is where A is a suffix of B, and shares a short prefix. The backwards LCS +is all of A, and the forward LCS is a prefix of A.) The algorithm combines the two +to form a best-effort LCS. In the worst case the forward partial LCS may have to +be recomputed. +*/ + +/* Eugene Myers paper is titled +"An O(ND) Difference Algorithm and Its Variations" +and can be found at +http://www.xmailserver.org/diff2.pdf + +(There is a generic implementation of the algorithm the the repository with git hash +b9ad7e4ade3a686d608e44475390ad428e60e7fc) +*/ diff --git a/internal/lsp/diff/lcs/git.sh b/internal/lsp/diff/lcs/git.sh new file mode 100644 index 00000000000..caa4c424198 --- /dev/null +++ b/internal/lsp/diff/lcs/git.sh @@ -0,0 +1,34 @@ + +#!/bin/bash +# +# Copyright 2022 The Go Authors. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. +# +# Creates a zip file containing all numbered versions +# of the commit history of a large source file, for use +# as input data for the tests of the diff algorithm. +# +# Run script from root of the x/tools repo. + +set -eu + +# WARNING: This script will install the latest version of $file +# The largest real source file in the x/tools repo. +# file=internal/lsp/source/completion/completion.go +# file=internal/lsp/source/diagnostics.go +file=internal/lsp/protocol/tsprotocol.go + +tmp=$(mktemp -d) +git log $file | + awk '/^commit / {print $2}' | + nl -ba -nrz | + while read n hash; do + git checkout --quiet $hash $file + cp -f $file $tmp/$n + done +(cd $tmp && zip -q - *) > testdata.zip +rm -fr $tmp +git restore --staged $file +git restore $file +echo "Created testdata.zip" diff --git a/internal/lsp/diff/lcs/labels.go b/internal/lsp/diff/lcs/labels.go new file mode 100644 index 00000000000..0689f1ed700 --- /dev/null +++ b/internal/lsp/diff/lcs/labels.go @@ -0,0 +1,55 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lcs + +import ( + "fmt" +) + +// For each D, vec[D] has length D+1, +// and the label for (D, k) is stored in vec[D][(D+k)/2]. +type label struct { + vec [][]int +} + +// Temporary checking DO NOT COMMIT true TO PRODUCTION CODE +const debug = false + +// debugging. check that the (d,k) pair is valid +// (that is, -d<=k<=d and d+k even) +func checkDK(D, k int) { + if k >= -D && k <= D && (D+k)%2 == 0 { + return + } + panic(fmt.Sprintf("out of range, d=%d,k=%d", D, k)) +} + +func (t *label) set(D, k, x int) { + if debug { + checkDK(D, k) + } + for len(t.vec) <= D { + t.vec = append(t.vec, nil) + } + if t.vec[D] == nil { + t.vec[D] = make([]int, D+1) + } + t.vec[D][(D+k)/2] = x // known that D+k is even +} + +func (t *label) get(d, k int) int { + if debug { + checkDK(d, k) + } + return int(t.vec[d][(d+k)/2]) +} + +func newtriang(limit int) label { + if limit < 100 { + // Preallocate if limit is not large. + return label{vec: make([][]int, limit)} + } + return label{} +} diff --git a/internal/lsp/diff/lcs/old.go b/internal/lsp/diff/lcs/old.go new file mode 100644 index 00000000000..a091edd5501 --- /dev/null +++ b/internal/lsp/diff/lcs/old.go @@ -0,0 +1,530 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package lcs + +import ( + "fmt" + "strings" +) + +// non generic code. The names have Old at the end to indicate they are the +// the implementation that doesn't use generics. + +// Compute the Diffs and the lcs. +func Compute(a, b interface{}, limit int) ([]Diff, lcs) { + var ans lcs + g := newegraph(a, b, limit) + ans = g.twosided() + diffs := g.fromlcs(ans) + return diffs, ans +} + +// editGraph carries the information for computing the lcs for []byte, []rune, or []string. +type editGraph struct { + eq eq // how to compare elements of A, B, and convert slices to strings + vf, vb label // forward and backward labels + + limit int // maximal value of D + // the bounding rectangle of the current edit graph + lx, ly, ux, uy int + delta int // common subexpression: (ux-lx)-(uy-ly) +} + +// abstraction in place of generic +type eq interface { + eq(i, j int) bool + substr(i, j int) string // string from b[i:j] + lena() int + lenb() int +} + +type byteeq struct { + a, b []byte // the input was ascii. perhaps these could be strings +} + +func (x *byteeq) eq(i, j int) bool { return x.a[i] == x.b[j] } +func (x *byteeq) substr(i, j int) string { return string(x.b[i:j]) } +func (x *byteeq) lena() int { return int(len(x.a)) } +func (x *byteeq) lenb() int { return int(len(x.b)) } + +type runeeq struct { + a, b []rune +} + +func (x *runeeq) eq(i, j int) bool { return x.a[i] == x.b[j] } +func (x *runeeq) substr(i, j int) string { return string(x.b[i:j]) } +func (x *runeeq) lena() int { return int(len(x.a)) } +func (x *runeeq) lenb() int { return int(len(x.b)) } + +type lineeq struct { + a, b []string +} + +func (x *lineeq) eq(i, j int) bool { return x.a[i] == x.b[j] } +func (x *lineeq) substr(i, j int) string { return strings.Join(x.b[i:j], "") } +func (x *lineeq) lena() int { return int(len(x.a)) } +func (x *lineeq) lenb() int { return int(len(x.b)) } + +func neweq(a, b interface{}) eq { + switch x := a.(type) { + case []byte: + return &byteeq{a: x, b: b.([]byte)} + case []rune: + return &runeeq{a: x, b: b.([]rune)} + case []string: + return &lineeq{a: x, b: b.([]string)} + default: + panic(fmt.Sprintf("unexpected type %T in neweq", x)) + } +} + +func (g *editGraph) fromlcs(lcs lcs) []Diff { + var ans []Diff + var pa, pb int // offsets in a, b + for _, l := range lcs { + if pa < l.X && pb < l.Y { + ans = append(ans, Diff{pa, l.X, g.eq.substr(pb, l.Y)}) + } else if pa < l.X { + ans = append(ans, Diff{pa, l.X, ""}) + } else if pb < l.Y { + ans = append(ans, Diff{pa, l.X, g.eq.substr(pb, l.Y)}) + } + pa = l.X + l.Len + pb = l.Y + l.Len + } + if pa < g.eq.lena() && pb < g.eq.lenb() { + ans = append(ans, Diff{pa, g.eq.lena(), g.eq.substr(pb, g.eq.lenb())}) + } else if pa < g.eq.lena() { + ans = append(ans, Diff{pa, g.eq.lena(), ""}) + } else if pb < g.eq.lenb() { + ans = append(ans, Diff{pa, g.eq.lena(), g.eq.substr(pb, g.eq.lenb())}) + } + return ans +} + +func newegraph(a, b interface{}, limit int) *editGraph { + if limit <= 0 { + limit = 1 << 25 // effectively infinity + } + var alen, blen int + switch a := a.(type) { + case []byte: + alen, blen = len(a), len(b.([]byte)) + case []rune: + alen, blen = len(a), len(b.([]rune)) + case []string: + alen, blen = len(a), len(b.([]string)) + default: + panic(fmt.Sprintf("unexpected type %T in newegraph", a)) + } + ans := &editGraph{eq: neweq(a, b), vf: newtriang(limit), vb: newtriang(limit), limit: int(limit), + ux: alen, uy: blen, delta: alen - blen} + return ans +} + +// --- FORWARD --- +// fdone decides if the forwward path has reached the upper right +// 
corner of the rectangele. If so, it also returns the computed lcs. +func (e *editGraph) fdone(D, k int) (bool, lcs) { + // x, y, k are relative to the rectangle + x := e.vf.get(D, k) + y := x - k + if x == e.ux && y == e.uy { + return true, e.forwardlcs(D, k) + } + return false, nil +} + +// run the forward algorithm, until success or up to the limit on D. +func (e *editGraph) forward() lcs { + e.setForward(0, 0, e.lx) + if ok, ans := e.fdone(0, 0); ok { + return ans + } + // from D to D+1 + for D := 0; D < e.limit; D++ { + e.setForward(D+1, -(D + 1), e.getForward(D, -D)) + if ok, ans := e.fdone(D+1, -(D + 1)); ok { + return ans + } + e.setForward(D+1, D+1, e.getForward(D, D)+1) + if ok, ans := e.fdone(D+1, D+1); ok { + return ans + } + for k := -D + 1; k <= D-1; k += 2 { + // these are tricky and easy to get backwards + lookv := e.lookForward(k, e.getForward(D, k-1)+1) + lookh := e.lookForward(k, e.getForward(D, k+1)) + if lookv > lookh { + e.setForward(D+1, k, lookv) + } else { + e.setForward(D+1, k, lookh) + } + if ok, ans := e.fdone(D+1, k); ok { + return ans + } + } + } + // D is too large + // find the D path with maximal x+y inside the rectangle and + // use that to compute the found part of the lcs + kmax := -e.limit - 1 + diagmax := -1 + for k := -e.limit; k <= e.limit; k += 2 { + x := e.getForward(e.limit, k) + y := x - k + if x+y > diagmax && x <= e.ux && y <= e.uy { + diagmax, kmax = x+y, k + } + } + return e.forwardlcs(e.limit, kmax) +} + +// recover the lcs by backtracking from the farthest point reached +func (e *editGraph) forwardlcs(D, k int) lcs { + var ans lcs + for x := e.getForward(D, k); x != 0 || x-k != 0; { + if ok(D-1, k-1) && x-1 == e.getForward(D-1, k-1) { + // if (x-1,y) is labelled D-1, x--,D--,k--,continue + D, k, x = D-1, k-1, x-1 + continue + } else if ok(D-1, k+1) && x == e.getForward(D-1, k+1) { + // if (x,y-1) is labelled D-1, x, D--,k++, continue + D, k = D-1, k+1 + continue + } + // if (x-1,y-1)--(x,y) is a diagonal, prepend,x--,y--, continue + y := x - k + realx, realy := x+e.lx, y+e.ly + if e.eq.eq(realx-1, realy-1) { + ans = prependlcs(ans, realx-1, realy-1) + x-- + } else { + panic("broken path") + } + } + return ans +} + +// start at (x,y), go up the diagonal as far as possible, +// and label the result with d +func (e *editGraph) lookForward(k, relx int) int { + rely := relx - k + x, y := relx+e.lx, rely+e.ly + for x < e.ux && y < e.uy && e.eq.eq(x, y) { + x++ + y++ + } + return x +} + +func (e *editGraph) setForward(d, k, relx int) { + x := e.lookForward(k, relx) + e.vf.set(d, k, x-e.lx) +} + +func (e *editGraph) getForward(d, k int) int { + x := e.vf.get(d, k) + return x +} + +// --- BACKWARD --- +// bdone decides if the backward path has reached the lower left corner +func (e *editGraph) bdone(D, k int) (bool, lcs) { + // x, y, k are relative to the rectangle + x := e.vb.get(D, k) + y := x - (k + e.delta) + if x == 0 && y == 0 { + return true, e.backwardlcs(D, k) + } + return false, nil +} + +// run the backward algorithm, until success or up to the limit on D. 
+func (e *editGraph) backward() lcs { + e.setBackward(0, 0, e.ux) + if ok, ans := e.bdone(0, 0); ok { + return ans + } + // from D to D+1 + for D := 0; D < e.limit; D++ { + e.setBackward(D+1, -(D + 1), e.getBackward(D, -D)-1) + if ok, ans := e.bdone(D+1, -(D + 1)); ok { + return ans + } + e.setBackward(D+1, D+1, e.getBackward(D, D)) + if ok, ans := e.bdone(D+1, D+1); ok { + return ans + } + for k := -D + 1; k <= D-1; k += 2 { + // these are tricky and easy to get wrong + lookv := e.lookBackward(k, e.getBackward(D, k-1)) + lookh := e.lookBackward(k, e.getBackward(D, k+1)-1) + if lookv < lookh { + e.setBackward(D+1, k, lookv) + } else { + e.setBackward(D+1, k, lookh) + } + if ok, ans := e.bdone(D+1, k); ok { + return ans + } + } + } + + // D is too large + // find the D path with minimal x+y inside the rectangle and + // use that to compute the part of the lcs found + kmax := -e.limit - 1 + diagmin := 1 << 25 + for k := -e.limit; k <= e.limit; k += 2 { + x := e.getBackward(e.limit, k) + y := x - (k + e.delta) + if x+y < diagmin && x >= 0 && y >= 0 { + diagmin, kmax = x+y, k + } + } + if kmax < -e.limit { + panic(fmt.Sprintf("no paths when limit=%d?", e.limit)) + } + return e.backwardlcs(e.limit, kmax) +} + +// recover the lcs by backtracking +func (e *editGraph) backwardlcs(D, k int) lcs { + var ans lcs + for x := e.getBackward(D, k); x != e.ux || x-(k+e.delta) != e.uy; { + if ok(D-1, k-1) && x == e.getBackward(D-1, k-1) { + // D--, k--, x unchanged + D, k = D-1, k-1 + continue + } else if ok(D-1, k+1) && x+1 == e.getBackward(D-1, k+1) { + // D--, k++, x++ + D, k, x = D-1, k+1, x+1 + continue + } + y := x - (k + e.delta) + realx, realy := x+e.lx, y+e.ly + if e.eq.eq(realx, realy) { + ans = appendlcs(ans, realx, realy) + x++ + } else { + panic("broken path") + } + } + return ans +} + +// start at (x,y), go down the diagonal as far as possible, +func (e *editGraph) lookBackward(k, relx int) int { + rely := relx - (k + e.delta) // forward k = k + e.delta + x, y := relx+e.lx, rely+e.ly + for x > 0 && y > 0 && e.eq.eq(x-1, y-1) { + x-- + y-- + } + return x +} + +// convert to rectangle, and label the result with d +func (e *editGraph) setBackward(d, k, relx int) { + x := e.lookBackward(k, relx) + e.vb.set(d, k, x-e.lx) +} + +func (e *editGraph) getBackward(d, k int) int { + x := e.vb.get(d, k) + return x +} + +// -- TWOSIDED --- + +func (e *editGraph) twosided() lcs { + // The termination condition could be improved, as either the forward + // or backward pass could succeed before Myers' Lemma applies. + // Aside from questions of efficiency (is the extra testing cost-effective) + // this is more likely to matter when e.limit is reached. 
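+	// Outline: seed both directions, then alternate a forward sweep and a
+	// backward sweep, after each one asking twoDone whether a forward label
+	// and a backward label have met on a common diagonal (Myers' Lemma).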
+ e.setForward(0, 0, e.lx) + e.setBackward(0, 0, e.ux) + + // from D to D+1 + for D := 0; D < e.limit; D++ { + // just finished a backwards pass, so check + if got, ok := e.twoDone(D, D); ok { + return e.twolcs(D, D, got) + } + // do a forwards pass (D to D+1) + e.setForward(D+1, -(D + 1), e.getForward(D, -D)) + e.setForward(D+1, D+1, e.getForward(D, D)+1) + for k := -D + 1; k <= D-1; k += 2 { + // these are tricky and easy to get backwards + lookv := e.lookForward(k, e.getForward(D, k-1)+1) + lookh := e.lookForward(k, e.getForward(D, k+1)) + if lookv > lookh { + e.setForward(D+1, k, lookv) + } else { + e.setForward(D+1, k, lookh) + } + } + // just did a forward pass, so check + if got, ok := e.twoDone(D+1, D); ok { + return e.twolcs(D+1, D, got) + } + // do a backward pass, D to D+1 + e.setBackward(D+1, -(D + 1), e.getBackward(D, -D)-1) + e.setBackward(D+1, D+1, e.getBackward(D, D)) + for k := -D + 1; k <= D-1; k += 2 { + // these are tricky and easy to get wrong + lookv := e.lookBackward(k, e.getBackward(D, k-1)) + lookh := e.lookBackward(k, e.getBackward(D, k+1)-1) + if lookv < lookh { + e.setBackward(D+1, k, lookv) + } else { + e.setBackward(D+1, k, lookh) + } + } + } + + // D too large. combine a forward and backward partial lcs + // first, a forward one + kmax := -e.limit - 1 + diagmax := -1 + for k := -e.limit; k <= e.limit; k += 2 { + x := e.getForward(e.limit, k) + y := x - k + if x+y > diagmax && x <= e.ux && y <= e.uy { + diagmax, kmax = x+y, k + } + } + if kmax < -e.limit { + panic(fmt.Sprintf("no forward paths when limit=%d?", e.limit)) + } + lcs := e.forwardlcs(e.limit, kmax) + // now a backward one + // find the D path with minimal x+y inside the rectangle and + // use that to compute the lcs + diagmin := 1 << 25 // infinity + for k := -e.limit; k <= e.limit; k += 2 { + x := e.getBackward(e.limit, k) + y := x - (k + e.delta) + if x+y < diagmin && x >= 0 && y >= 0 { + diagmin, kmax = x+y, k + } + } + if kmax < -e.limit { + panic(fmt.Sprintf("no backward paths when limit=%d?", e.limit)) + } + lcs = append(lcs, e.backwardlcs(e.limit, kmax)...) + // These may overlap (e.forwardlcs and e.backwardlcs return sorted lcs) + ans := lcs.fix() + return ans +} + +// Does Myers' Lemma apply? +func (e *editGraph) twoDone(df, db int) (int, bool) { + if (df+db+e.delta)%2 != 0 { + return 0, false // diagonals cannot overlap + } + kmin := -db + e.delta + if -df > kmin { + kmin = -df + } + kmax := db + e.delta + if df < kmax { + kmax = df + } + for k := kmin; k <= kmax; k += 2 { + x := e.vf.get(df, k) + u := e.vb.get(db, k-e.delta) + if u <= x { + // is it worth looking at all the other k? + for l := k; l <= kmax; l += 2 { + x := e.vf.get(df, l) + y := x - l + u := e.vb.get(db, l-e.delta) + v := u - l + if x == u || u == 0 || v == 0 || y == e.uy || x == e.ux { + return l, true + } + } + return k, true + } + } + return 0, false +} + +func (e *editGraph) twolcs(df, db, kf int) lcs { + // db==df || db+1==df + x := e.vf.get(df, kf) + y := x - kf + kb := kf - e.delta + u := e.vb.get(db, kb) + v := u - kf + + // Myers proved there is a df-path from (0,0) to (u,v) + // and a db-path from (x,y) to (N,M). + // In the first case the overall path is the forward path + // to (u,v) followed by the backward path to (N,M). + // In the second case the path is the backward path to (x,y) + // followed by the forward path to (x,y) from (0,0). + + // Look for some special cases to avoid computing either of these paths. 
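+	// In order, the cases below are:
+	//  1. x == u: the forward and backward paths already meet at (u,v).
+	//  2. (u-1,v) or (u,v-1) carries a df-1 forward label, so the df-path to
+	//     (u,v) is that path plus one horizontal or vertical edge.
+	//  3. u == 0, v == 0, x == e.ux or y == e.uy: one partial path is all
+	//     horizontal or vertical edges and adds nothing to the lcs.
+	//  4. (x+1,y) or (x,y+1) carries a db-1 backward label (symmetric to 2).
+	// Failing all of these, a forward path on the sub-rectangle ending at
+	// (u,v) is recomputed.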
+ if x == u { + // "babaab" "cccaba" + // already patched together + lcs := e.forwardlcs(df, kf) + lcs = append(lcs, e.backwardlcs(db, kb)...) + return lcs.sort() + } + + // is (u-1,v) or (u,v-1) labelled df-1? + // if so, that forward df-1-path plus a horizontal or vertical edge + // is the df-path to (u,v), then plus the db-path to (N,M) + if u > 0 && ok(df-1, u-1-v) && e.vf.get(df-1, u-1-v) == u-1 { + // "aabbab" "cbcabc" + lcs := e.forwardlcs(df-1, u-1-v) + lcs = append(lcs, e.backwardlcs(db, kb)...) + return lcs.sort() + } + if v > 0 && ok(df-1, (u-(v-1))) && e.vf.get(df-1, u-(v-1)) == u { + // "abaabb" "bcacab" + lcs := e.forwardlcs(df-1, u-(v-1)) + lcs = append(lcs, e.backwardlcs(db, kb)...) + return lcs.sort() + } + + // The path can't possibly contribute to the lcs because it + // is all horizontal or vertical edges + if u == 0 || v == 0 || x == e.ux || y == e.uy { + // "abaabb" "abaaaa" + if u == 0 || v == 0 { + return e.backwardlcs(db, kb) + } + return e.forwardlcs(df, kf) + } + + // is (x+1,y) or (x,y+1) labelled db-1? + if x+1 <= e.ux && ok(db-1, x+1-y-e.delta) && e.vb.get(db-1, x+1-y-e.delta) == x+1 { + // "bababb" "baaabb" + lcs := e.backwardlcs(db-1, kb+1) + lcs = append(lcs, e.forwardlcs(df, kf)...) + return lcs.sort() + } + if y+1 <= e.uy && ok(db-1, x-(y+1)-e.delta) && e.vb.get(db-1, x-(y+1)-e.delta) == x { + // "abbbaa" "cabacc" + lcs := e.backwardlcs(db-1, kb-1) + lcs = append(lcs, e.forwardlcs(df, kf)...) + return lcs.sort() + } + + // need to compute another path + // "aabbaa" "aacaba" + lcs := e.backwardlcs(db, kb) + oldx, oldy := e.ux, e.uy + e.ux = u + e.uy = v + lcs = append(lcs, e.forward()...) + e.ux, e.uy = oldx, oldy + return lcs.sort() +} diff --git a/internal/lsp/diff/lcs/old_test.go b/internal/lsp/diff/lcs/old_test.go new file mode 100644 index 00000000000..ba22fe6f461 --- /dev/null +++ b/internal/lsp/diff/lcs/old_test.go @@ -0,0 +1,203 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package lcs + +import ( + "math/rand" + "testing" +) + +func TestForwardOld(t *testing.T) { + for _, tx := range Btests { + lim := len(tx.a) + len(tx.b) + left, right := []byte(tx.a), []byte(tx.b) + g := newegraph(left, right, lim) + lcs := g.forward() + diffs := g.fromlcs(lcs) + check(t, tx.a, lcs, tx.lcs) + checkDiffs(t, tx.a, diffs, tx.b) + + g = newegraph(right, left, lim) + lcs = g.forward() + diffs = g.fromlcs(lcs) + check(t, tx.b, lcs, tx.lcs) + checkDiffs(t, tx.b, diffs, tx.a) + } +} + +func TestBackwardOld(t *testing.T) { + for _, tx := range Btests { + lim := len(tx.a) + len(tx.b) + left, right := []byte(tx.a), []byte(tx.b) + g := newegraph(left, right, lim) + lcs := g.backward() + check(t, tx.a, lcs, tx.lcs) + diffs := g.fromlcs(lcs) + checkDiffs(t, tx.a, diffs, tx.b) + + g = newegraph(right, left, lim) + lcs = g.backward() + diffs = g.fromlcs(lcs) + check(t, tx.b, lcs, tx.lcs) + checkDiffs(t, tx.b, diffs, tx.a) + } +} + +func TestTwosidedOld(t *testing.T) { + // test both (a,b) and (b,a) + for _, tx := range Btests { + left, right := []byte(tx.a), []byte(tx.b) + lim := len(tx.a) + len(tx.b) + diffs, lcs := Compute(left, right, lim) + check(t, tx.a, lcs, tx.lcs) + checkDiffs(t, tx.a, diffs, tx.b) + diffs, lcs = Compute(right, left, lim) + check(t, tx.b, lcs, tx.lcs) + checkDiffs(t, tx.b, diffs, tx.a) + } +} + +func TestIntOld(t *testing.T) { + // need to avoid any characters in btests + lfill, rfill := "AAAAAAAAAAAA", "BBBBBBBBBBBB" + for _, tx := range Btests { + if len(tx.a) < 2 || len(tx.b) < 2 { + continue + } + left := []byte(tx.a + lfill) + right := []byte(tx.b + rfill) + lim := len(tx.a) + len(tx.b) + diffs, lcs := Compute(left, right, lim) + check(t, string(left), lcs, tx.lcs) + checkDiffs(t, string(left), diffs, string(right)) + diffs, lcs = Compute(right, left, lim) + check(t, string(right), lcs, tx.lcs) + checkDiffs(t, string(right), diffs, string(left)) + + left = []byte(lfill + tx.a) + right = []byte(rfill + tx.b) + diffs, lcs = Compute(left, right, lim) + check(t, string(left), lcs, tx.lcs) + checkDiffs(t, string(left), diffs, string(right)) + diffs, lcs = Compute(right, left, lim) + check(t, string(right), lcs, tx.lcs) + checkDiffs(t, string(right), diffs, string(left)) + } +} + +func TestSpecialOld(t *testing.T) { // needs lcs.fix + a := []byte("golang.org/x/tools/intern") + b := []byte("github.com/google/safehtml/template\"\n\t\"golang.org/x/tools/intern") + diffs, lcs := Compute(a, b, 4) + if !lcs.valid() { + t.Errorf("%d,%v", len(diffs), lcs) + } +} + +func TestRegressionOld001(t *testing.T) { + a := "// Copyright 2019 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage diff_test\n\nimport (\n\t\"fmt\"\n\t\"math/rand\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"golang.org/x/tools/internal/lsp/diff\"\n\t\"golang.org/x/tools/internal/lsp/diff/difftest\"\n\t\"golang.org/x/tools/internal/span\"\n)\n" + + b := "// Copyright 2019 The Go Authors. 
All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage diff_test\n\nimport (\n\t\"fmt\"\n\t\"math/rand\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/google/safehtml/template\"\n\t\"golang.org/x/tools/internal/lsp/diff\"\n\t\"golang.org/x/tools/internal/lsp/diff/difftest\"\n\t\"golang.org/x/tools/internal/span\"\n)\n" + for i := 1; i < len(b); i++ { + diffs, lcs := Compute([]byte(a), []byte(b), int(i)) // 14 from gopls + if !lcs.valid() { + t.Errorf("%d,%v", len(diffs), lcs) + } + checkDiffs(t, a, diffs, b) + } +} + +func TestRegressionOld002(t *testing.T) { + a := "n\"\n)\n" + b := "n\"\n\t\"golang.org/x//nnal/stack\"\n)\n" + for i := 1; i <= len(b); i++ { + diffs, lcs := Compute([]byte(a), []byte(b), int(i)) + if !lcs.valid() { + t.Errorf("%d,%v", len(diffs), lcs) + } + checkDiffs(t, a, diffs, b) + } +} + +func TestRegressionOld003(t *testing.T) { + a := "golang.org/x/hello v1.0.0\nrequire golang.org/x/unused v1" + b := "golang.org/x/hello v1" + for i := 1; i <= len(a); i++ { + diffs, lcs := Compute([]byte(a), []byte(b), int(i)) + if !lcs.valid() { + t.Errorf("%d,%v", len(diffs), lcs) + } + checkDiffs(t, a, diffs, b) + } +} + +func TestRandOld(t *testing.T) { + rand.Seed(1) + for i := 0; i < 1000; i++ { + a := []rune(randstr("abω", 16)) + b := []rune(randstr("abωc", 16)) + g := newegraph(a, b, 24) // large enough to get true lcs + two := g.twosided() + forw := g.forward() + back := g.backward() + if lcslen(two) != lcslen(forw) || lcslen(forw) != lcslen(back) { + t.Logf("\n%v\n%v\n%v", forw, back, two) + t.Fatalf("%d forw:%d back:%d two:%d", i, lcslen(forw), lcslen(back), lcslen(two)) + } + if !two.valid() || !forw.valid() || !back.valid() { + t.Errorf("check failure") + } + } +} + +func BenchmarkTwoOld(b *testing.B) { + tests := genBench("abc", 96) + for i := 0; i < b.N; i++ { + for _, tt := range tests { + _, two := Compute([]byte(tt.before), []byte(tt.after), 100) + if !two.valid() { + b.Error("check failed") + } + } + } +} + +func BenchmarkForwOld(b *testing.B) { + tests := genBench("abc", 96) + for i := 0; i < b.N; i++ { + for _, tt := range tests { + _, two := Compute([]byte(tt.before), []byte(tt.after), 100) + if !two.valid() { + b.Error("check failed") + } + } + } +} + +func genBench(set string, n int) []struct{ before, after string } { + // before and after for benchmarks. 24 strings of length n with + // before and after differing at least once, and about 5% + rand.Seed(3) + var ans []struct{ before, after string } + for i := 0; i < 24; i++ { + // maybe b should have an approximately known number of diffs + a := randstr(set, n) + cnt := 0 + bb := make([]rune, 0, n) + for _, r := range a { + if rand.Float64() < .05 { + cnt++ + r = 'N' + } + bb = append(bb, r) + } + if cnt == 0 { + // avoid == shortcut + bb[n/2] = 'N' + } + ans = append(ans, struct{ before, after string }{a, string(bb)}) + } + return ans +} diff --git a/internal/lsp/diff/ndiff.go b/internal/lsp/diff/ndiff.go new file mode 100644 index 00000000000..8f7732dd019 --- /dev/null +++ b/internal/lsp/diff/ndiff.go @@ -0,0 +1,130 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package diff + +import ( + "strings" + "unicode/utf8" + + "golang.org/x/tools/internal/lsp/diff/lcs" + "golang.org/x/tools/internal/span" +) + +// maxDiffs is a limit on how deeply the lcs algorithm should search +// the value is just a guess +const maxDiffs = 30 + +// NComputeEdits computes TextEdits for strings +// (both it and the diff in the myers package have type ComputeEdits, which +// is why the arguments are strings, not []bytes.) +func NComputeEdits(uri span.URI, before, after string) ([]TextEdit, error) { + if before == after { + // very frequently true + return nil, nil + } + // the diffs returned by the lcs package use indexes into whatever slice + // was passed in. TextEdits need a span.Span which is computed with + // byte offsets, so rune or line offsets need to be converted. + if needrunes(before) || needrunes(after) { + diffs, _ := lcs.Compute([]rune(before), []rune(after), maxDiffs/2) + diffs = runeOffsets(diffs, []rune(before)) + ans, err := convertDiffs(uri, diffs, []byte(before)) + return ans, err + } else { + diffs, _ := lcs.Compute([]byte(before), []byte(after), maxDiffs/2) + ans, err := convertDiffs(uri, diffs, []byte(before)) + return ans, err + } +} + +// NComputeLineEdits computes TextEdits for []strings +func NComputeLineEdits(uri span.URI, before, after []string) ([]TextEdit, error) { + diffs, _ := lcs.Compute(before, after, maxDiffs/2) + diffs = lineOffsets(diffs, before) + ans, err := convertDiffs(uri, diffs, []byte(strJoin(before))) + // the code is not coping with possible missing \ns at the ends + return ans, err +} + +// convert diffs with byte offsets into diffs with line and column +func convertDiffs(uri span.URI, diffs []lcs.Diff, src []byte) ([]TextEdit, error) { + ans := make([]TextEdit, len(diffs)) + tf := span.NewTokenFile(uri.Filename(), src) + for i, d := range diffs { + s := newSpan(uri, d.Start, d.End) + s, err := s.WithPosition(tf) + if err != nil { + return nil, err + } + ans[i] = TextEdit{s, d.Text} + } + return ans, nil +} + +// convert diffs with rune offsets into diffs with byte offsets +func runeOffsets(diffs []lcs.Diff, src []rune) []lcs.Diff { + var idx int + var tmp strings.Builder // string because []byte([]rune) is illegal + for i, d := range diffs { + tmp.WriteString(string(src[idx:d.Start])) + v := tmp.Len() + tmp.WriteString(string(src[d.Start:d.End])) + d.Start = v + idx = d.End + d.End = tmp.Len() + diffs[i] = d + } + return diffs +} + +// convert diffs with line offsets into diffs with byte offsets +func lineOffsets(diffs []lcs.Diff, src []string) []lcs.Diff { + var idx int + var tmp strings.Builder // bytes/ + for i, d := range diffs { + tmp.WriteString(strJoin(src[idx:d.Start])) + v := tmp.Len() + tmp.WriteString(strJoin(src[d.Start:d.End])) + d.Start = v + idx = d.End + d.End = tmp.Len() + diffs[i] = d + } + return diffs +} + +// join lines. 
(strings.Join doesn't add a trailing separator) +func strJoin(elems []string) string { + if len(elems) == 0 { + return "" + } + n := 0 + for i := 0; i < len(elems); i++ { + n += len(elems[i]) + } + + var b strings.Builder + b.Grow(n) + for _, s := range elems { + b.WriteString(s) + //b.WriteByte('\n') + } + return b.String() +} + +func newSpan(uri span.URI, left, right int) span.Span { + return span.New(uri, span.NewPoint(0, 0, left), span.NewPoint(0, 0, right)) +} + +// need runes is true if the string needs to be converted to []rune +// for random access +func needrunes(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] >= utf8.RuneSelf { + return true + } + } + return false +} diff --git a/internal/lsp/source/options.go b/internal/lsp/source/options.go index c386eee45e7..8fa78ae2610 100644 --- a/internal/lsp/source/options.go +++ b/internal/lsp/source/options.go @@ -477,7 +477,7 @@ type Hooks struct { // LicensesText holds third party licenses for software used by gopls. LicensesText string - // TODO(rfindley): is this even necessary? + // GoDiff is used in gopls/hooks to get Myers' diff GoDiff bool // Whether staticcheck is supported. @@ -562,6 +562,13 @@ type InternalOptions struct { // on the server. // This option applies only during initialization. ShowBugReports bool + + // NewDiff controls the choice of the new diff implementation. + // It can be 'new', 'checked', or 'old' which is the default. + // 'checked' computes diffs with both algorithms, checks + // that the new algorithm has worked, and write some summary + // statistics to a file in os.TmpDir() + NewDiff string } type ImportShortcut string @@ -1059,6 +1066,9 @@ func (o *Options) set(name string, value interface{}, seen map[string]struct{}) // This setting should be handled before all of the other options are // processed, so do nothing here. + case "newDiff": + result.setString(&o.NewDiff) + // Replaced settings. case "experimentalDisabledAnalyses": result.deprecated("analyses") diff --git a/internal/lsp/tests/tests.go b/internal/lsp/tests/tests.go index ec804e5e79e..b60fbf03866 100644 --- a/internal/lsp/tests/tests.go +++ b/internal/lsp/tests/tests.go @@ -265,6 +265,7 @@ func DefaultOptions(o *source.Options) { o.HierarchicalDocumentSymbolSupport = true o.ExperimentalWorkspaceModule = true o.SemanticTokens = true + o.InternalOptions.NewDiff = "both" } func RunTests(t *testing.T, dataDir string, includeMultiModule bool, f func(*testing.T, *Data)) { From 1cfe623ebfa0ec9b232510b6b504dce8a0942a9a Mon Sep 17 00:00:00 2001 From: Robert Findley Date: Fri, 22 Jul 2022 11:40:02 -0400 Subject: [PATCH 122/136] gopls/internal/regtest: unskip TestQuickFixEmptyFiles This test ran slowly sometimes. Now that we have improved performance, elimininated arbitrary timeouts, and improved cacheability of computed results when running with -short, I suspect this test should no longer flake. If it does, we can reduce its cost in other ways rather than turning it off entirely. 
Updates golang/go#48773 Updates golang/go#53878 Change-Id: I878e78117df5a1a25f4ac5f72e02f28fc078ec73 Reviewed-on: https://go-review.googlesource.com/c/tools/+/419106 Run-TryBot: Robert Findley Reviewed-by: Bryan Mills gopls-CI: kokoro TryBot-Result: Gopher Robot --- gopls/internal/regtest/diagnostics/diagnostics_test.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/gopls/internal/regtest/diagnostics/diagnostics_test.go b/gopls/internal/regtest/diagnostics/diagnostics_test.go index ae8b4a56cdd..d404b65f4de 100644 --- a/gopls/internal/regtest/diagnostics/diagnostics_test.go +++ b/gopls/internal/regtest/diagnostics/diagnostics_test.go @@ -964,8 +964,6 @@ const C = a.A // This is a copy of the scenario_default/quickfix_empty_files.txt test from // govim. Reproduces golang/go#39646. func TestQuickFixEmptyFiles(t *testing.T) { - t.Skip("too flaky: golang/go#48773") - testenv.NeedsGo1Point(t, 15) const mod = ` From 178fdf98da63010b55ff90c04614e0ca9a69a2cd Mon Sep 17 00:00:00 2001 From: Robert Findley Date: Thu, 21 Jul 2022 22:47:23 -0400 Subject: [PATCH 123/136] gopls/internal/regtest: unskip Test_Issue38211 This test was originally skipped due to deadline exceeded errors. In the time since, we've made performance improvements, fixed races, and altered the regtests to remove arbitrary deadlines. Unskip it to see if it still flakes. For golang/go#44098 For golang/go#53878 Change-Id: I06530f2bc9c6883f415dc9147cfcbf260abb2a00 Reviewed-on: https://go-review.googlesource.com/c/tools/+/418898 Run-TryBot: Robert Findley gopls-CI: kokoro TryBot-Result: Gopher Robot Reviewed-by: Bryan Mills --- gopls/internal/regtest/diagnostics/diagnostics_test.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/gopls/internal/regtest/diagnostics/diagnostics_test.go b/gopls/internal/regtest/diagnostics/diagnostics_test.go index d404b65f4de..9c9ad368cf3 100644 --- a/gopls/internal/regtest/diagnostics/diagnostics_test.go +++ b/gopls/internal/regtest/diagnostics/diagnostics_test.go @@ -636,8 +636,6 @@ var ErrHelpWanted error // Test for golang/go#38211. func Test_Issue38211(t *testing.T) { - t.Skipf("Skipping flaky test: https://golang.org/issue/44098") - testenv.NeedsGo1Point(t, 14) const ardanLabs = ` -- go.mod -- From 6ec939a616607e6443b9037c89557109eedbddfd Mon Sep 17 00:00:00 2001 From: Robert Findley Date: Fri, 22 Jul 2022 13:51:59 -0400 Subject: [PATCH 124/136] internal/span: fix incorrect bounds check in ToOffset token.File.LineStart panics if line < 1, but we were checking line < 0. Surprising that this was not hit more often: it looks like we pre-validate input except for parsed positions coming from the Go command. It is possible that an older version of the Go command returned invalid positions. Also report a bug for one error condition in ToOffset: the position returned by LineStart should always be valid. 
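For illustration, a standalone snippet (file name and contents are arbitrary)
showing the 1-based contract that the corrected check guards against:

	package main

	import (
		"fmt"
		"go/token"
	)

	func main() {
		src := []byte("a\nb\n")
		fset := token.NewFileSet()
		tf := fset.AddFile("x.go", -1, len(src))
		tf.SetLinesForContent(src)

		fmt.Println(tf.LineStart(1)) // fine: Pos of the start of line 1
		// tf.LineStart(0) would panic: token.File line numbers start at 1,
		// which is why ToOffset must reject line < 1 before calling it.
	}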
Fixes golang/go#54006 Change-Id: I5965af9c62976b3e00b023512df334a8de943a3d Reviewed-on: https://go-review.googlesource.com/c/tools/+/419109 TryBot-Result: Gopher Robot Run-TryBot: Robert Findley Reviewed-by: Hyang-Ah Hana Kim gopls-CI: kokoro --- internal/span/token.go | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/internal/span/token.go b/internal/span/token.go index cae696db757..c35a512c158 100644 --- a/internal/span/token.go +++ b/internal/span/token.go @@ -176,15 +176,16 @@ func ToPosition(tf *token.File, offset int) (int, int, error) { return line, col, err } -// ToOffset converts a 1-base line and utf-8 column index into a byte offset in -// the file corresponding to tf. +// ToOffset converts a 1-based line and utf-8 column index into a byte offset +// in the file corresponding to tf. func ToOffset(tf *token.File, line, col int) (int, error) { - if line < 0 { - return -1, fmt.Errorf("line is not valid") + if line < 1 { // token.File.LineStart panics if line < 1 + return -1, fmt.Errorf("invalid line: %d", line) } + lineMax := tf.LineCount() + 1 if line > lineMax { - return -1, fmt.Errorf("line is beyond end of file %v", lineMax) + return -1, fmt.Errorf("line %d is beyond end of file %v", line, lineMax) } else if line == lineMax { if col > 1 { return -1, fmt.Errorf("column is beyond end of file") @@ -194,7 +195,9 @@ func ToOffset(tf *token.File, line, col int) (int, error) { } pos := tf.LineStart(line) if !pos.IsValid() { - return -1, fmt.Errorf("line is not in file") + // bug.Errorf here because LineStart panics on out-of-bound input, and so + // should never return invalid positions. + return -1, bug.Errorf("line is not in file") } // we assume that column is in bytes here, and that the first byte of a // line is at column 1 From 04bd0878179c06f3950f7440743a458185f2f961 Mon Sep 17 00:00:00 2001 From: Suzy Mueller Date: Thu, 30 Jun 2022 16:45:30 -0400 Subject: [PATCH 125/136] internal/lsp: enable fillstruct for generics This enables some fill struct code actions for instances of structs with type parameters. This additionally adds a filtering mechanism to the suggested fixes in order to account for multiple suggested fixes in the same location. 
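For example, given the type-parameterized struct from the new test data, the
code action now fills an instantiated literal with zero values for the
instantiated field types (output shown approximately, not verbatim from the
golden file):

	type twoArgStruct[F, B any] struct {
		foo F
		bar B
	}

	var _ = twoArgStruct[string, int]{}

	// after applying the fill suggested fix:
	var _ = twoArgStruct[string, int]{
		foo: "",
		bar: 0,
	}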
Change-Id: I98866b462b026f4c5a4897bc278f704381623f25 Reviewed-on: https://go-review.googlesource.com/c/tools/+/418415 Reviewed-by: Robert Findley gopls-CI: kokoro TryBot-Result: Gopher Robot Run-TryBot: Suzy Mueller --- .../lsp/analysis/fillstruct/fillstruct.go | 8 - .../testdata/src/typeparams/typeparams.go | 12 +- internal/lsp/cmd/test/suggested_fix.go | 8 +- internal/lsp/lsp_test.go | 16 +- internal/lsp/source/source_test.go | 14 +- .../extract_variable/extract_basic_lit.go | 4 +- .../extract_basic_lit.go.golden | 8 +- .../extract_variable/extract_func_call.go | 4 +- .../extract_func_call.go.golden | 12 +- .../extract/extract_variable/extract_scope.go | 4 +- .../extract_variable/extract_scope.go.golden | 8 +- internal/lsp/testdata/fillstruct/a.go | 8 +- internal/lsp/testdata/fillstruct/a.go.golden | 32 +- internal/lsp/testdata/fillstruct/a2.go | 8 +- internal/lsp/testdata/fillstruct/a2.go.golden | 32 +- internal/lsp/testdata/fillstruct/a3.go | 10 +- internal/lsp/testdata/fillstruct/a3.go.golden | 50 +-- internal/lsp/testdata/fillstruct/a4.go | 8 +- internal/lsp/testdata/fillstruct/a4.go.golden | 32 +- .../lsp/testdata/fillstruct/fill_struct.go | 8 +- .../testdata/fillstruct/fill_struct.go.golden | 32 +- .../testdata/fillstruct/fill_struct_anon.go | 2 +- .../fillstruct/fill_struct_anon.go.golden | 2 +- .../testdata/fillstruct/fill_struct_nested.go | 2 +- .../fillstruct/fill_struct_nested.go.golden | 2 +- .../fillstruct/fill_struct_package.go | 4 +- .../fillstruct/fill_struct_package.go.golden | 8 +- .../fillstruct/fill_struct_partial.go | 4 +- .../fillstruct/fill_struct_partial.go.golden | 8 +- .../testdata/fillstruct/fill_struct_spaces.go | 2 +- .../fillstruct/fill_struct_spaces.go.golden | 2 +- .../testdata/fillstruct/fill_struct_unsafe.go | 2 +- .../fillstruct/fill_struct_unsafe.go.golden | 2 +- .../lsp/testdata/fillstruct/typeparams.go | 38 ++ .../testdata/fillstruct/typeparams.go.golden | 328 ++++++++++++++++++ .../lsp/testdata/missingfunction/channels.go | 2 +- .../missingfunction/channels.go.golden | 2 +- .../missingfunction/consecutive_params.go | 2 +- .../consecutive_params.go.golden | 2 +- .../testdata/missingfunction/error_param.go | 2 +- .../missingfunction/error_param.go.golden | 2 +- .../lsp/testdata/missingfunction/literals.go | 2 +- .../missingfunction/literals.go.golden | 4 +- .../lsp/testdata/missingfunction/operation.go | 2 +- .../missingfunction/operation.go.golden | 4 +- .../lsp/testdata/missingfunction/selector.go | 2 +- .../missingfunction/selector.go.golden | 2 +- .../lsp/testdata/missingfunction/slice.go | 2 +- .../testdata/missingfunction/slice.go.golden | 2 +- .../lsp/testdata/missingfunction/tuple.go | 2 +- .../testdata/missingfunction/tuple.go.golden | 2 +- .../testdata/missingfunction/unique_params.go | 2 +- .../missingfunction/unique_params.go.golden | 4 +- .../lsp/testdata/stub/stub_add_selector.go | 2 +- .../testdata/stub/stub_add_selector.go.golden | 2 +- internal/lsp/testdata/stub/stub_assign.go | 2 +- .../lsp/testdata/stub/stub_assign.go.golden | 2 +- .../testdata/stub/stub_assign_multivars.go | 2 +- .../stub/stub_assign_multivars.go.golden | 2 +- internal/lsp/testdata/stub/stub_call_expr.go | 2 +- .../testdata/stub/stub_call_expr.go.golden | 2 +- internal/lsp/testdata/stub/stub_embedded.go | 2 +- .../lsp/testdata/stub/stub_embedded.go.golden | 2 +- internal/lsp/testdata/stub/stub_err.go | 2 +- internal/lsp/testdata/stub/stub_err.go.golden | 2 +- .../lsp/testdata/stub/stub_function_return.go | 2 +- .../stub/stub_function_return.go.golden | 2 +- 
.../testdata/stub/stub_generic_receiver.go | 2 +- .../stub/stub_generic_receiver.go.golden | 2 +- .../lsp/testdata/stub/stub_ignored_imports.go | 2 +- .../stub/stub_ignored_imports.go.golden | 2 +- internal/lsp/testdata/stub/stub_multi_var.go | 2 +- .../testdata/stub/stub_multi_var.go.golden | 2 +- internal/lsp/testdata/stub/stub_pointer.go | 2 +- .../lsp/testdata/stub/stub_pointer.go.golden | 2 +- .../lsp/testdata/stub/stub_renamed_import.go | 2 +- .../stub/stub_renamed_import.go.golden | 2 +- .../stub/stub_renamed_import_iface.go | 2 +- .../stub/stub_renamed_import_iface.go.golden | 2 +- internal/lsp/testdata/stub/stub_stdlib.go | 2 +- .../lsp/testdata/stub/stub_stdlib.go.golden | 2 +- .../suggestedfix/has_suggested_fix.go | 2 +- .../suggestedfix/has_suggested_fix.go.golden | 2 +- .../lsp/testdata/summary_go1.18.txt.golden | 2 +- .../lsp/testdata/typeerrors/noresultvalues.go | 4 +- .../typeerrors/noresultvalues.go.golden | 8 +- internal/lsp/testdata/undeclared/var.go | 6 +- .../lsp/testdata/undeclared/var.go.golden | 18 +- internal/lsp/tests/README.md | 2 +- internal/lsp/tests/tests.go | 15 +- 90 files changed, 624 insertions(+), 257 deletions(-) create mode 100644 internal/lsp/testdata/fillstruct/typeparams.go create mode 100644 internal/lsp/testdata/fillstruct/typeparams.go.golden diff --git a/internal/lsp/analysis/fillstruct/fillstruct.go b/internal/lsp/analysis/fillstruct/fillstruct.go index 2103a55879e..2c0084ff6f5 100644 --- a/internal/lsp/analysis/fillstruct/fillstruct.go +++ b/internal/lsp/analysis/fillstruct/fillstruct.go @@ -68,14 +68,6 @@ func run(pass *analysis.Pass) (interface{}, error) { return } - // Ignore types that have type parameters for now. - // TODO: support type params. - if typ, ok := typ.(*types.Named); ok { - if tparams := typeparams.ForNamed(typ); tparams != nil && tparams.Len() > 0 { - return - } - } - // Find reference to the type declaration of the struct being initialized. 
for { p, ok := typ.Underlying().(*types.Pointer) diff --git a/internal/lsp/analysis/fillstruct/testdata/src/typeparams/typeparams.go b/internal/lsp/analysis/fillstruct/testdata/src/typeparams/typeparams.go index 90290613d87..7972bd3e12b 100644 --- a/internal/lsp/analysis/fillstruct/testdata/src/typeparams/typeparams.go +++ b/internal/lsp/analysis/fillstruct/testdata/src/typeparams/typeparams.go @@ -12,18 +12,16 @@ type basicStruct[T any] struct { foo T } -var _ = basicStruct[int]{} - -type fooType[T any] T +var _ = basicStruct[int]{} // want "" type twoArgStruct[F, B any] struct { - foo fooType[F] - bar fooType[B] + foo F + bar B } -var _ = twoArgStruct[string, int]{} +var _ = twoArgStruct[string, int]{} // want "" -var _ = twoArgStruct[int, string]{ +var _ = twoArgStruct[int, string]{ // want "" bar: "bar", } diff --git a/internal/lsp/cmd/test/suggested_fix.go b/internal/lsp/cmd/test/suggested_fix.go index c819e051735..db401350fb1 100644 --- a/internal/lsp/cmd/test/suggested_fix.go +++ b/internal/lsp/cmd/test/suggested_fix.go @@ -12,14 +12,16 @@ import ( "golang.org/x/tools/internal/span" ) -func (r *runner) SuggestedFix(t *testing.T, spn span.Span, actionKinds []string, expectedActions int) { +func (r *runner) SuggestedFix(t *testing.T, spn span.Span, suggestedFixes []tests.SuggestedFix, expectedActions int) { uri := spn.URI() filename := uri.Filename() args := []string{"fix", "-a", fmt.Sprintf("%s", spn)} - for _, kind := range actionKinds { - if kind == "refactor.rewrite" { + var actionKinds []string + for _, sf := range suggestedFixes { + if sf.ActionKind == "refactor.rewrite" { t.Skip("refactor.rewrite is not yet supported on the command line") } + actionKinds = append(actionKinds, sf.ActionKind) } args = append(args, actionKinds...) got, stderr := r.NormalizeGoplsCmd(t, args...) diff --git a/internal/lsp/lsp_test.go b/internal/lsp/lsp_test.go index e8febec93f3..8e0e628dfea 100644 --- a/internal/lsp/lsp_test.go +++ b/internal/lsp/lsp_test.go @@ -487,7 +487,7 @@ func (r *runner) Import(t *testing.T, spn span.Span) { } } -func (r *runner) SuggestedFix(t *testing.T, spn span.Span, actionKinds []string, expectedActions int) { +func (r *runner) SuggestedFix(t *testing.T, spn span.Span, actionKinds []tests.SuggestedFix, expectedActions int) { uri := spn.URI() view, err := r.server.session.ViewOf(uri) if err != nil { @@ -516,9 +516,9 @@ func (r *runner) SuggestedFix(t *testing.T, spn span.Span, actionKinds []string, } codeActionKinds := []protocol.CodeActionKind{} for _, k := range actionKinds { - codeActionKinds = append(codeActionKinds, protocol.CodeActionKind(k)) + codeActionKinds = append(codeActionKinds, protocol.CodeActionKind(k.ActionKind)) } - actions, err := r.server.CodeAction(r.ctx, &protocol.CodeActionParams{ + allActions, err := r.server.CodeAction(r.ctx, &protocol.CodeActionParams{ TextDocument: protocol.TextDocumentIdentifier{ URI: protocol.URIFromSpanURI(uri), }, @@ -531,6 +531,16 @@ func (r *runner) SuggestedFix(t *testing.T, spn span.Span, actionKinds []string, if err != nil { t.Fatalf("CodeAction %s failed: %v", spn, err) } + var actions []protocol.CodeAction + for _, action := range allActions { + for _, fix := range actionKinds { + if strings.Contains(action.Title, fix.Title) { + actions = append(actions, action) + break + } + } + + } if len(actions) != expectedActions { // Hack: We assume that we only get one code action per range. 
var cmds []string diff --git a/internal/lsp/source/source_test.go b/internal/lsp/source/source_test.go index 8beb8a5dde0..c670bdeb0b2 100644 --- a/internal/lsp/source/source_test.go +++ b/internal/lsp/source/source_test.go @@ -968,14 +968,12 @@ func (r *runner) SignatureHelp(t *testing.T, spn span.Span, want *protocol.Signa } // These are pure LSP features, no source level functionality to be tested. -func (r *runner) Link(t *testing.T, uri span.URI, wantLinks []tests.Link) {} - -func (r *runner) SuggestedFix(t *testing.T, spn span.Span, actionKinds []string, expectedActions int) { -} -func (r *runner) FunctionExtraction(t *testing.T, start span.Span, end span.Span) {} -func (r *runner) MethodExtraction(t *testing.T, start span.Span, end span.Span) {} -func (r *runner) CodeLens(t *testing.T, uri span.URI, want []protocol.CodeLens) {} -func (r *runner) AddImport(t *testing.T, uri span.URI, expectedImport string) {} +func (r *runner) Link(t *testing.T, uri span.URI, wantLinks []tests.Link) {} +func (r *runner) SuggestedFix(t *testing.T, spn span.Span, actions []tests.SuggestedFix, want int) {} +func (r *runner) FunctionExtraction(t *testing.T, start span.Span, end span.Span) {} +func (r *runner) MethodExtraction(t *testing.T, start span.Span, end span.Span) {} +func (r *runner) CodeLens(t *testing.T, uri span.URI, want []protocol.CodeLens) {} +func (r *runner) AddImport(t *testing.T, uri span.URI, expectedImport string) {} func spanToRange(data *tests.Data, spn span.Span) (*protocol.ColumnMapper, protocol.Range, error) { m, err := data.Mapper(spn.URI()) diff --git a/internal/lsp/testdata/extract/extract_variable/extract_basic_lit.go b/internal/lsp/testdata/extract/extract_variable/extract_basic_lit.go index c49e5d6a017..cbb70a04cd1 100644 --- a/internal/lsp/testdata/extract/extract_variable/extract_basic_lit.go +++ b/internal/lsp/testdata/extract/extract_variable/extract_basic_lit.go @@ -1,6 +1,6 @@ package extract func _() { - var _ = 1 + 2 //@suggestedfix("1", "refactor.extract") - var _ = 3 + 4 //@suggestedfix("3 + 4", "refactor.extract") + var _ = 1 + 2 //@suggestedfix("1", "refactor.extract", "") + var _ = 3 + 4 //@suggestedfix("3 + 4", "refactor.extract", "") } diff --git a/internal/lsp/testdata/extract/extract_variable/extract_basic_lit.go.golden b/internal/lsp/testdata/extract/extract_variable/extract_basic_lit.go.golden index 00ee7b4f94d..3fd9b328711 100644 --- a/internal/lsp/testdata/extract/extract_variable/extract_basic_lit.go.golden +++ b/internal/lsp/testdata/extract/extract_variable/extract_basic_lit.go.golden @@ -3,16 +3,16 @@ package extract func _() { x := 1 - var _ = x + 2 //@suggestedfix("1", "refactor.extract") - var _ = 3 + 4 //@suggestedfix("3 + 4", "refactor.extract") + var _ = x + 2 //@suggestedfix("1", "refactor.extract", "") + var _ = 3 + 4 //@suggestedfix("3 + 4", "refactor.extract", "") } -- suggestedfix_extract_basic_lit_5_10 -- package extract func _() { - var _ = 1 + 2 //@suggestedfix("1", "refactor.extract") + var _ = 1 + 2 //@suggestedfix("1", "refactor.extract", "") x := 3 + 4 - var _ = x //@suggestedfix("3 + 4", "refactor.extract") + var _ = x //@suggestedfix("3 + 4", "refactor.extract", "") } diff --git a/internal/lsp/testdata/extract/extract_variable/extract_func_call.go b/internal/lsp/testdata/extract/extract_variable/extract_func_call.go index badc010dce4..a20b45f5869 100644 --- a/internal/lsp/testdata/extract/extract_variable/extract_func_call.go +++ b/internal/lsp/testdata/extract/extract_variable/extract_func_call.go @@ -3,7 +3,7 @@ package extract 
import "strconv" func _() { - x0 := append([]int{}, 1) //@suggestedfix("append([]int{}, 1)", "refactor.extract") + x0 := append([]int{}, 1) //@suggestedfix("append([]int{}, 1)", "refactor.extract", "") str := "1" - b, err := strconv.Atoi(str) //@suggestedfix("strconv.Atoi(str)", "refactor.extract") + b, err := strconv.Atoi(str) //@suggestedfix("strconv.Atoi(str)", "refactor.extract", "") } diff --git a/internal/lsp/testdata/extract/extract_variable/extract_func_call.go.golden b/internal/lsp/testdata/extract/extract_variable/extract_func_call.go.golden index 74df67ee65f..4423fc92770 100644 --- a/internal/lsp/testdata/extract/extract_variable/extract_func_call.go.golden +++ b/internal/lsp/testdata/extract/extract_variable/extract_func_call.go.golden @@ -5,9 +5,9 @@ import "strconv" func _() { x0 := append([]int{}, 1) - a := x0 //@suggestedfix("append([]int{}, 1)", "refactor.extract") + a := x0 //@suggestedfix("append([]int{}, 1)", "refactor.extract", "") str := "1" - b, err := strconv.Atoi(str) //@suggestedfix("strconv.Atoi(str)", "refactor.extract") + b, err := strconv.Atoi(str) //@suggestedfix("strconv.Atoi(str)", "refactor.extract", "") } -- suggestedfix_extract_func_call_6_8 -- @@ -17,9 +17,9 @@ import "strconv" func _() { x := append([]int{}, 1) - x0 := x //@suggestedfix("append([]int{}, 1)", "refactor.extract") + x0 := x //@suggestedfix("append([]int{}, 1)", "refactor.extract", "") str := "1" - b, err := strconv.Atoi(str) //@suggestedfix("strconv.Atoi(str)", "refactor.extract") + b, err := strconv.Atoi(str) //@suggestedfix("strconv.Atoi(str)", "refactor.extract", "") } -- suggestedfix_extract_func_call_8_12 -- @@ -28,9 +28,9 @@ package extract import "strconv" func _() { - x0 := append([]int{}, 1) //@suggestedfix("append([]int{}, 1)", "refactor.extract") + x0 := append([]int{}, 1) //@suggestedfix("append([]int{}, 1)", "refactor.extract", "") str := "1" x, x1 := strconv.Atoi(str) - b, err := x, x1 //@suggestedfix("strconv.Atoi(str)", "refactor.extract") + b, err := x, x1 //@suggestedfix("strconv.Atoi(str)", "refactor.extract", "") } diff --git a/internal/lsp/testdata/extract/extract_variable/extract_scope.go b/internal/lsp/testdata/extract/extract_variable/extract_scope.go index 5dfcc36203b..c14ad709212 100644 --- a/internal/lsp/testdata/extract/extract_variable/extract_scope.go +++ b/internal/lsp/testdata/extract/extract_variable/extract_scope.go @@ -5,9 +5,9 @@ import "go/ast" func _() { x0 := 0 if true { - y := ast.CompositeLit{} //@suggestedfix("ast.CompositeLit{}", "refactor.extract") + y := ast.CompositeLit{} //@suggestedfix("ast.CompositeLit{}", "refactor.extract", "") } if true { - x1 := !false //@suggestedfix("!false", "refactor.extract") + x1 := !false //@suggestedfix("!false", "refactor.extract", "") } } diff --git a/internal/lsp/testdata/extract/extract_variable/extract_scope.go.golden b/internal/lsp/testdata/extract/extract_variable/extract_scope.go.golden index e0e6464b59a..1c2f64b7df7 100644 --- a/internal/lsp/testdata/extract/extract_variable/extract_scope.go.golden +++ b/internal/lsp/testdata/extract/extract_variable/extract_scope.go.golden @@ -6,11 +6,11 @@ import "go/ast" func _() { x0 := 0 if true { - y := ast.CompositeLit{} //@suggestedfix("ast.CompositeLit{}", "refactor.extract") + y := ast.CompositeLit{} //@suggestedfix("ast.CompositeLit{}", "refactor.extract", "") } if true { x := !false - x1 := x //@suggestedfix("!false", "refactor.extract") + x1 := x //@suggestedfix("!false", "refactor.extract", "") } } @@ -23,10 +23,10 @@ func _() { x0 := 0 if true { x := 
ast.CompositeLit{} - y := x //@suggestedfix("ast.CompositeLit{}", "refactor.extract") + y := x //@suggestedfix("ast.CompositeLit{}", "refactor.extract", "") } if true { - x1 := !false //@suggestedfix("!false", "refactor.extract") + x1 := !false //@suggestedfix("!false", "refactor.extract", "") } } diff --git a/internal/lsp/testdata/fillstruct/a.go b/internal/lsp/testdata/fillstruct/a.go index 5c6df6c4a7c..4fb855d06b5 100644 --- a/internal/lsp/testdata/fillstruct/a.go +++ b/internal/lsp/testdata/fillstruct/a.go @@ -8,20 +8,20 @@ type basicStruct struct { foo int } -var _ = basicStruct{} //@suggestedfix("}", "refactor.rewrite") +var _ = basicStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") type twoArgStruct struct { foo int bar string } -var _ = twoArgStruct{} //@suggestedfix("}", "refactor.rewrite") +var _ = twoArgStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") type nestedStruct struct { bar string basic basicStruct } -var _ = nestedStruct{} //@suggestedfix("}", "refactor.rewrite") +var _ = nestedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") -var _ = data.B{} //@suggestedfix("}", "refactor.rewrite") +var _ = data.B{} //@suggestedfix("}", "refactor.rewrite", "Fill") diff --git a/internal/lsp/testdata/fillstruct/a.go.golden b/internal/lsp/testdata/fillstruct/a.go.golden index 5d6dbceb279..76789f0fc26 100644 --- a/internal/lsp/testdata/fillstruct/a.go.golden +++ b/internal/lsp/testdata/fillstruct/a.go.golden @@ -11,23 +11,23 @@ type basicStruct struct { var _ = basicStruct{ foo: 0, -} //@suggestedfix("}", "refactor.rewrite") +} //@suggestedfix("}", "refactor.rewrite", "Fill") type twoArgStruct struct { foo int bar string } -var _ = twoArgStruct{} //@suggestedfix("}", "refactor.rewrite") +var _ = twoArgStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") type nestedStruct struct { bar string basic basicStruct } -var _ = nestedStruct{} //@suggestedfix("}", "refactor.rewrite") +var _ = nestedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") -var _ = data.B{} //@suggestedfix("}", "refactor.rewrite") +var _ = data.B{} //@suggestedfix("}", "refactor.rewrite", "Fill") -- suggestedfix_a_18_22 -- package fillstruct @@ -40,7 +40,7 @@ type basicStruct struct { foo int } -var _ = basicStruct{} //@suggestedfix("}", "refactor.rewrite") +var _ = basicStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") type twoArgStruct struct { foo int @@ -50,16 +50,16 @@ type twoArgStruct struct { var _ = twoArgStruct{ foo: 0, bar: "", -} //@suggestedfix("}", "refactor.rewrite") +} //@suggestedfix("}", "refactor.rewrite", "Fill") type nestedStruct struct { bar string basic basicStruct } -var _ = nestedStruct{} //@suggestedfix("}", "refactor.rewrite") +var _ = nestedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") -var _ = data.B{} //@suggestedfix("}", "refactor.rewrite") +var _ = data.B{} //@suggestedfix("}", "refactor.rewrite", "Fill") -- suggestedfix_a_25_22 -- package fillstruct @@ -72,14 +72,14 @@ type basicStruct struct { foo int } -var _ = basicStruct{} //@suggestedfix("}", "refactor.rewrite") +var _ = basicStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") type twoArgStruct struct { foo int bar string } -var _ = twoArgStruct{} //@suggestedfix("}", "refactor.rewrite") +var _ = twoArgStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") type nestedStruct struct { bar string @@ -89,9 +89,9 @@ type nestedStruct struct { var _ = nestedStruct{ bar: "", basic: basicStruct{}, -} //@suggestedfix("}", "refactor.rewrite") +} //@suggestedfix("}", 
"refactor.rewrite", "Fill") -var _ = data.B{} //@suggestedfix("}", "refactor.rewrite") +var _ = data.B{} //@suggestedfix("}", "refactor.rewrite", "Fill") -- suggestedfix_a_27_16 -- package fillstruct @@ -104,23 +104,23 @@ type basicStruct struct { foo int } -var _ = basicStruct{} //@suggestedfix("}", "refactor.rewrite") +var _ = basicStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") type twoArgStruct struct { foo int bar string } -var _ = twoArgStruct{} //@suggestedfix("}", "refactor.rewrite") +var _ = twoArgStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") type nestedStruct struct { bar string basic basicStruct } -var _ = nestedStruct{} //@suggestedfix("}", "refactor.rewrite") +var _ = nestedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") var _ = data.B{ ExportedInt: 0, -} //@suggestedfix("}", "refactor.rewrite") +} //@suggestedfix("}", "refactor.rewrite", "Fill") diff --git a/internal/lsp/testdata/fillstruct/a2.go b/internal/lsp/testdata/fillstruct/a2.go index 8e12a6b54ba..b5e30a84f1e 100644 --- a/internal/lsp/testdata/fillstruct/a2.go +++ b/internal/lsp/testdata/fillstruct/a2.go @@ -8,22 +8,22 @@ type typedStruct struct { a [2]string } -var _ = typedStruct{} //@suggestedfix("}", "refactor.rewrite") +var _ = typedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") type funStruct struct { fn func(i int) int } -var _ = funStruct{} //@suggestedfix("}", "refactor.rewrite") +var _ = funStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") type funStructCompex struct { fn func(i int, s string) (string, int) } -var _ = funStructCompex{} //@suggestedfix("}", "refactor.rewrite") +var _ = funStructCompex{} //@suggestedfix("}", "refactor.rewrite", "Fill") type funStructEmpty struct { fn func() } -var _ = funStructEmpty{} //@suggestedfix("}", "refactor.rewrite") +var _ = funStructEmpty{} //@suggestedfix("}", "refactor.rewrite", "Fill") diff --git a/internal/lsp/testdata/fillstruct/a2.go.golden b/internal/lsp/testdata/fillstruct/a2.go.golden index 78a6ee2b691..2eca3e349a1 100644 --- a/internal/lsp/testdata/fillstruct/a2.go.golden +++ b/internal/lsp/testdata/fillstruct/a2.go.golden @@ -15,25 +15,25 @@ var _ = typedStruct{ c: make(chan int), c1: make(<-chan int), a: [2]string{}, -} //@suggestedfix("}", "refactor.rewrite") +} //@suggestedfix("}", "refactor.rewrite", "Fill") type funStruct struct { fn func(i int) int } -var _ = funStruct{} //@suggestedfix("}", "refactor.rewrite") +var _ = funStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") type funStructCompex struct { fn func(i int, s string) (string, int) } -var _ = funStructCompex{} //@suggestedfix("}", "refactor.rewrite") +var _ = funStructCompex{} //@suggestedfix("}", "refactor.rewrite", "Fill") type funStructEmpty struct { fn func() } -var _ = funStructEmpty{} //@suggestedfix("}", "refactor.rewrite") +var _ = funStructEmpty{} //@suggestedfix("}", "refactor.rewrite", "Fill") -- suggestedfix_a2_17_19 -- package fillstruct @@ -46,7 +46,7 @@ type typedStruct struct { a [2]string } -var _ = typedStruct{} //@suggestedfix("}", "refactor.rewrite") +var _ = typedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") type funStruct struct { fn func(i int) int @@ -55,19 +55,19 @@ type funStruct struct { var _ = funStruct{ fn: func(i int) int { }, -} //@suggestedfix("}", "refactor.rewrite") +} //@suggestedfix("}", "refactor.rewrite", "Fill") type funStructCompex struct { fn func(i int, s string) (string, int) } -var _ = funStructCompex{} //@suggestedfix("}", "refactor.rewrite") +var _ = funStructCompex{} 
//@suggestedfix("}", "refactor.rewrite", "Fill") type funStructEmpty struct { fn func() } -var _ = funStructEmpty{} //@suggestedfix("}", "refactor.rewrite") +var _ = funStructEmpty{} //@suggestedfix("}", "refactor.rewrite", "Fill") -- suggestedfix_a2_23_25 -- package fillstruct @@ -80,13 +80,13 @@ type typedStruct struct { a [2]string } -var _ = typedStruct{} //@suggestedfix("}", "refactor.rewrite") +var _ = typedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") type funStruct struct { fn func(i int) int } -var _ = funStruct{} //@suggestedfix("}", "refactor.rewrite") +var _ = funStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") type funStructCompex struct { fn func(i int, s string) (string, int) @@ -95,13 +95,13 @@ type funStructCompex struct { var _ = funStructCompex{ fn: func(i int, s string) (string, int) { }, -} //@suggestedfix("}", "refactor.rewrite") +} //@suggestedfix("}", "refactor.rewrite", "Fill") type funStructEmpty struct { fn func() } -var _ = funStructEmpty{} //@suggestedfix("}", "refactor.rewrite") +var _ = funStructEmpty{} //@suggestedfix("}", "refactor.rewrite", "Fill") -- suggestedfix_a2_29_24 -- package fillstruct @@ -114,19 +114,19 @@ type typedStruct struct { a [2]string } -var _ = typedStruct{} //@suggestedfix("}", "refactor.rewrite") +var _ = typedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") type funStruct struct { fn func(i int) int } -var _ = funStruct{} //@suggestedfix("}", "refactor.rewrite") +var _ = funStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") type funStructCompex struct { fn func(i int, s string) (string, int) } -var _ = funStructCompex{} //@suggestedfix("}", "refactor.rewrite") +var _ = funStructCompex{} //@suggestedfix("}", "refactor.rewrite", "Fill") type funStructEmpty struct { fn func() @@ -135,5 +135,5 @@ type funStructEmpty struct { var _ = funStructEmpty{ fn: func() { }, -} //@suggestedfix("}", "refactor.rewrite") +} //@suggestedfix("}", "refactor.rewrite", "Fill") diff --git a/internal/lsp/testdata/fillstruct/a3.go b/internal/lsp/testdata/fillstruct/a3.go index 730db305423..59cd9fa28b5 100644 --- a/internal/lsp/testdata/fillstruct/a3.go +++ b/internal/lsp/testdata/fillstruct/a3.go @@ -14,7 +14,7 @@ type Bar struct { Y *Foo } -var _ = Bar{} //@suggestedfix("}", "refactor.rewrite") +var _ = Bar{} //@suggestedfix("}", "refactor.rewrite", "Fill") type importedStruct struct { m map[*ast.CompositeLit]ast.Field @@ -25,7 +25,7 @@ type importedStruct struct { st ast.CompositeLit } -var _ = importedStruct{} //@suggestedfix("}", "refactor.rewrite") +var _ = importedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") type pointerBuiltinStruct struct { b *bool @@ -33,10 +33,10 @@ type pointerBuiltinStruct struct { i *int } -var _ = pointerBuiltinStruct{} //@suggestedfix("}", "refactor.rewrite") +var _ = pointerBuiltinStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") var _ = []ast.BasicLit{ - {}, //@suggestedfix("}", "refactor.rewrite") + {}, //@suggestedfix("}", "refactor.rewrite", "Fill") } -var _ = []ast.BasicLit{{}} //@suggestedfix("}", "refactor.rewrite") +var _ = []ast.BasicLit{{}} //@suggestedfix("}", "refactor.rewrite", "Fill") diff --git a/internal/lsp/testdata/fillstruct/a3.go.golden b/internal/lsp/testdata/fillstruct/a3.go.golden index 1d8672927d9..a7c7baa8d27 100644 --- a/internal/lsp/testdata/fillstruct/a3.go.golden +++ b/internal/lsp/testdata/fillstruct/a3.go.golden @@ -18,7 +18,7 @@ type Bar struct { var _ = Bar{ X: &Foo{}, Y: &Foo{}, -} //@suggestedfix("}", "refactor.rewrite") +} 
//@suggestedfix("}", "refactor.rewrite", "Fill") type importedStruct struct { m map[*ast.CompositeLit]ast.Field @@ -29,7 +29,7 @@ type importedStruct struct { st ast.CompositeLit } -var _ = importedStruct{} //@suggestedfix("}", "refactor.rewrite") +var _ = importedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") type pointerBuiltinStruct struct { b *bool @@ -37,13 +37,13 @@ type pointerBuiltinStruct struct { i *int } -var _ = pointerBuiltinStruct{} //@suggestedfix("}", "refactor.rewrite") +var _ = pointerBuiltinStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") var _ = []ast.BasicLit{ - {}, //@suggestedfix("}", "refactor.rewrite") + {}, //@suggestedfix("}", "refactor.rewrite", "Fill") } -var _ = []ast.BasicLit{{}} //@suggestedfix("}", "refactor.rewrite") +var _ = []ast.BasicLit{{}} //@suggestedfix("}", "refactor.rewrite", "Fill") -- suggestedfix_a3_28_24 -- package fillstruct @@ -62,7 +62,7 @@ type Bar struct { Y *Foo } -var _ = Bar{} //@suggestedfix("}", "refactor.rewrite") +var _ = Bar{} //@suggestedfix("}", "refactor.rewrite", "Fill") type importedStruct struct { m map[*ast.CompositeLit]ast.Field @@ -81,7 +81,7 @@ var _ = importedStruct{ fn: func(ast_decl ast.DeclStmt) ast.Ellipsis { }, st: ast.CompositeLit{}, -} //@suggestedfix("}", "refactor.rewrite") +} //@suggestedfix("}", "refactor.rewrite", "Fill") type pointerBuiltinStruct struct { b *bool @@ -89,13 +89,13 @@ type pointerBuiltinStruct struct { i *int } -var _ = pointerBuiltinStruct{} //@suggestedfix("}", "refactor.rewrite") +var _ = pointerBuiltinStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") var _ = []ast.BasicLit{ - {}, //@suggestedfix("}", "refactor.rewrite") + {}, //@suggestedfix("}", "refactor.rewrite", "Fill") } -var _ = []ast.BasicLit{{}} //@suggestedfix("}", "refactor.rewrite") +var _ = []ast.BasicLit{{}} //@suggestedfix("}", "refactor.rewrite", "Fill") -- suggestedfix_a3_36_30 -- package fillstruct @@ -114,7 +114,7 @@ type Bar struct { Y *Foo } -var _ = Bar{} //@suggestedfix("}", "refactor.rewrite") +var _ = Bar{} //@suggestedfix("}", "refactor.rewrite", "Fill") type importedStruct struct { m map[*ast.CompositeLit]ast.Field @@ -125,7 +125,7 @@ type importedStruct struct { st ast.CompositeLit } -var _ = importedStruct{} //@suggestedfix("}", "refactor.rewrite") +var _ = importedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") type pointerBuiltinStruct struct { b *bool @@ -137,13 +137,13 @@ var _ = pointerBuiltinStruct{ b: new(bool), s: new(string), i: new(int), -} //@suggestedfix("}", "refactor.rewrite") +} //@suggestedfix("}", "refactor.rewrite", "Fill") var _ = []ast.BasicLit{ - {}, //@suggestedfix("}", "refactor.rewrite") + {}, //@suggestedfix("}", "refactor.rewrite", "Fill") } -var _ = []ast.BasicLit{{}} //@suggestedfix("}", "refactor.rewrite") +var _ = []ast.BasicLit{{}} //@suggestedfix("}", "refactor.rewrite", "Fill") -- suggestedfix_a3_39_3 -- package fillstruct @@ -162,7 +162,7 @@ type Bar struct { Y *Foo } -var _ = Bar{} //@suggestedfix("}", "refactor.rewrite") +var _ = Bar{} //@suggestedfix("}", "refactor.rewrite", "Fill") type importedStruct struct { m map[*ast.CompositeLit]ast.Field @@ -173,7 +173,7 @@ type importedStruct struct { st ast.CompositeLit } -var _ = importedStruct{} //@suggestedfix("}", "refactor.rewrite") +var _ = importedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") type pointerBuiltinStruct struct { b *bool @@ -181,17 +181,17 @@ type pointerBuiltinStruct struct { i *int } -var _ = pointerBuiltinStruct{} //@suggestedfix("}", "refactor.rewrite") +var 
_ = pointerBuiltinStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") var _ = []ast.BasicLit{ { ValuePos: 0, Kind: 0, Value: "", - }, //@suggestedfix("}", "refactor.rewrite") + }, //@suggestedfix("}", "refactor.rewrite", "Fill") } -var _ = []ast.BasicLit{{}} //@suggestedfix("}", "refactor.rewrite") +var _ = []ast.BasicLit{{}} //@suggestedfix("}", "refactor.rewrite", "Fill") -- suggestedfix_a3_42_25 -- package fillstruct @@ -210,7 +210,7 @@ type Bar struct { Y *Foo } -var _ = Bar{} //@suggestedfix("}", "refactor.rewrite") +var _ = Bar{} //@suggestedfix("}", "refactor.rewrite", "Fill") type importedStruct struct { m map[*ast.CompositeLit]ast.Field @@ -221,7 +221,7 @@ type importedStruct struct { st ast.CompositeLit } -var _ = importedStruct{} //@suggestedfix("}", "refactor.rewrite") +var _ = importedStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") type pointerBuiltinStruct struct { b *bool @@ -229,15 +229,15 @@ type pointerBuiltinStruct struct { i *int } -var _ = pointerBuiltinStruct{} //@suggestedfix("}", "refactor.rewrite") +var _ = pointerBuiltinStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") var _ = []ast.BasicLit{ - {}, //@suggestedfix("}", "refactor.rewrite") + {}, //@suggestedfix("}", "refactor.rewrite", "Fill") } var _ = []ast.BasicLit{{ ValuePos: 0, Kind: 0, Value: "", -}} //@suggestedfix("}", "refactor.rewrite") +}} //@suggestedfix("}", "refactor.rewrite", "Fill") diff --git a/internal/lsp/testdata/fillstruct/a4.go b/internal/lsp/testdata/fillstruct/a4.go index 7833d338c64..5f52a55fa72 100644 --- a/internal/lsp/testdata/fillstruct/a4.go +++ b/internal/lsp/testdata/fillstruct/a4.go @@ -22,18 +22,18 @@ type assignStruct struct { func fill() { var x int - var _ = iStruct{} //@suggestedfix("}", "refactor.rewrite") + var _ = iStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") var s string - var _ = sStruct{} //@suggestedfix("}", "refactor.rewrite") + var _ = sStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") var n int _ = []int{} if true { arr := []int{1, 2} } - var _ = multiFill{} //@suggestedfix("}", "refactor.rewrite") + var _ = multiFill{} //@suggestedfix("}", "refactor.rewrite", "Fill") var node *ast.CompositeLit - var _ = assignStruct{} //@suggestedfix("}", "refactor.rewrite") + var _ = assignStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") } diff --git a/internal/lsp/testdata/fillstruct/a4.go.golden b/internal/lsp/testdata/fillstruct/a4.go.golden index 109c6b5ea47..b1e376f05f1 100644 --- a/internal/lsp/testdata/fillstruct/a4.go.golden +++ b/internal/lsp/testdata/fillstruct/a4.go.golden @@ -25,20 +25,20 @@ func fill() { var x int var _ = iStruct{ X: x, - } //@suggestedfix("}", "refactor.rewrite") + } //@suggestedfix("}", "refactor.rewrite", "Fill") var s string - var _ = sStruct{} //@suggestedfix("}", "refactor.rewrite") + var _ = sStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") var n int _ = []int{} if true { arr := []int{1, 2} } - var _ = multiFill{} //@suggestedfix("}", "refactor.rewrite") + var _ = multiFill{} //@suggestedfix("}", "refactor.rewrite", "Fill") var node *ast.CompositeLit - var _ = assignStruct{} //@suggestedfix("}", "refactor.rewrite") + var _ = assignStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") } -- suggestedfix_a4_28_18 -- @@ -66,22 +66,22 @@ type assignStruct struct { func fill() { var x int - var _ = iStruct{} //@suggestedfix("}", "refactor.rewrite") + var _ = iStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") var s string var _ = sStruct{ str: s, - } //@suggestedfix("}", 
"refactor.rewrite") + } //@suggestedfix("}", "refactor.rewrite", "Fill") var n int _ = []int{} if true { arr := []int{1, 2} } - var _ = multiFill{} //@suggestedfix("}", "refactor.rewrite") + var _ = multiFill{} //@suggestedfix("}", "refactor.rewrite", "Fill") var node *ast.CompositeLit - var _ = assignStruct{} //@suggestedfix("}", "refactor.rewrite") + var _ = assignStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") } -- suggestedfix_a4_35_20 -- @@ -109,10 +109,10 @@ type assignStruct struct { func fill() { var x int - var _ = iStruct{} //@suggestedfix("}", "refactor.rewrite") + var _ = iStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") var s string - var _ = sStruct{} //@suggestedfix("}", "refactor.rewrite") + var _ = sStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") var n int _ = []int{} @@ -123,10 +123,10 @@ func fill() { num: n, strin: s, arr: []int{}, - } //@suggestedfix("}", "refactor.rewrite") + } //@suggestedfix("}", "refactor.rewrite", "Fill") var node *ast.CompositeLit - var _ = assignStruct{} //@suggestedfix("}", "refactor.rewrite") + var _ = assignStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") } -- suggestedfix_a4_38_23 -- @@ -154,21 +154,21 @@ type assignStruct struct { func fill() { var x int - var _ = iStruct{} //@suggestedfix("}", "refactor.rewrite") + var _ = iStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") var s string - var _ = sStruct{} //@suggestedfix("}", "refactor.rewrite") + var _ = sStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") var n int _ = []int{} if true { arr := []int{1, 2} } - var _ = multiFill{} //@suggestedfix("}", "refactor.rewrite") + var _ = multiFill{} //@suggestedfix("}", "refactor.rewrite", "Fill") var node *ast.CompositeLit var _ = assignStruct{ n: node, - } //@suggestedfix("}", "refactor.rewrite") + } //@suggestedfix("}", "refactor.rewrite", "Fill") } diff --git a/internal/lsp/testdata/fillstruct/fill_struct.go b/internal/lsp/testdata/fillstruct/fill_struct.go index fccec135321..3da904741d0 100644 --- a/internal/lsp/testdata/fillstruct/fill_struct.go +++ b/internal/lsp/testdata/fillstruct/fill_struct.go @@ -17,10 +17,10 @@ type StructA3 struct { } func fill() { - a := StructA{} //@suggestedfix("}", "refactor.rewrite") - b := StructA2{} //@suggestedfix("}", "refactor.rewrite") - c := StructA3{} //@suggestedfix("}", "refactor.rewrite") + a := StructA{} //@suggestedfix("}", "refactor.rewrite", "Fill") + b := StructA2{} //@suggestedfix("}", "refactor.rewrite", "Fill") + c := StructA3{} //@suggestedfix("}", "refactor.rewrite", "Fill") if true { - _ = StructA3{} //@suggestedfix("}", "refactor.rewrite") + _ = StructA3{} //@suggestedfix("}", "refactor.rewrite", "Fill") } } diff --git a/internal/lsp/testdata/fillstruct/fill_struct.go.golden b/internal/lsp/testdata/fillstruct/fill_struct.go.golden index 8d997031516..de01a40f052 100644 --- a/internal/lsp/testdata/fillstruct/fill_struct.go.golden +++ b/internal/lsp/testdata/fillstruct/fill_struct.go.golden @@ -24,11 +24,11 @@ func fill() { MapA: map[int]string{}, Array: []int{}, StructB: StructB{}, - } //@suggestedfix("}", "refactor.rewrite") - b := StructA2{} //@suggestedfix("}", "refactor.rewrite") - c := StructA3{} //@suggestedfix("}", "refactor.rewrite") + } //@suggestedfix("}", "refactor.rewrite", "Fill") + b := StructA2{} //@suggestedfix("}", "refactor.rewrite", "Fill") + c := StructA3{} //@suggestedfix("}", "refactor.rewrite", "Fill") if true { - _ = StructA3{} //@suggestedfix("}", "refactor.rewrite") + _ = StructA3{} //@suggestedfix("}", 
"refactor.rewrite", "Fill") } } @@ -52,13 +52,13 @@ type StructA3 struct { } func fill() { - a := StructA{} //@suggestedfix("}", "refactor.rewrite") + a := StructA{} //@suggestedfix("}", "refactor.rewrite", "Fill") b := StructA2{ B: &StructB{}, - } //@suggestedfix("}", "refactor.rewrite") - c := StructA3{} //@suggestedfix("}", "refactor.rewrite") + } //@suggestedfix("}", "refactor.rewrite", "Fill") + c := StructA3{} //@suggestedfix("}", "refactor.rewrite", "Fill") if true { - _ = StructA3{} //@suggestedfix("}", "refactor.rewrite") + _ = StructA3{} //@suggestedfix("}", "refactor.rewrite", "Fill") } } @@ -82,13 +82,13 @@ type StructA3 struct { } func fill() { - a := StructA{} //@suggestedfix("}", "refactor.rewrite") - b := StructA2{} //@suggestedfix("}", "refactor.rewrite") + a := StructA{} //@suggestedfix("}", "refactor.rewrite", "Fill") + b := StructA2{} //@suggestedfix("}", "refactor.rewrite", "Fill") c := StructA3{ B: StructB{}, - } //@suggestedfix("}", "refactor.rewrite") + } //@suggestedfix("}", "refactor.rewrite", "Fill") if true { - _ = StructA3{} //@suggestedfix("}", "refactor.rewrite") + _ = StructA3{} //@suggestedfix("}", "refactor.rewrite", "Fill") } } @@ -112,13 +112,13 @@ type StructA3 struct { } func fill() { - a := StructA{} //@suggestedfix("}", "refactor.rewrite") - b := StructA2{} //@suggestedfix("}", "refactor.rewrite") - c := StructA3{} //@suggestedfix("}", "refactor.rewrite") + a := StructA{} //@suggestedfix("}", "refactor.rewrite", "Fill") + b := StructA2{} //@suggestedfix("}", "refactor.rewrite", "Fill") + c := StructA3{} //@suggestedfix("}", "refactor.rewrite", "Fill") if true { _ = StructA3{ B: StructB{}, - } //@suggestedfix("}", "refactor.rewrite") + } //@suggestedfix("}", "refactor.rewrite", "Fill") } } diff --git a/internal/lsp/testdata/fillstruct/fill_struct_anon.go b/internal/lsp/testdata/fillstruct/fill_struct_anon.go index b5d2337fd9d..2c099a80ea7 100644 --- a/internal/lsp/testdata/fillstruct/fill_struct_anon.go +++ b/internal/lsp/testdata/fillstruct/fill_struct_anon.go @@ -10,5 +10,5 @@ type StructAnon struct { } func fill() { - _ := StructAnon{} //@suggestedfix("}", "refactor.rewrite") + _ := StructAnon{} //@suggestedfix("}", "refactor.rewrite", "Fill") } diff --git a/internal/lsp/testdata/fillstruct/fill_struct_anon.go.golden b/internal/lsp/testdata/fillstruct/fill_struct_anon.go.golden index eb6ffd66136..7cc9ac23d02 100644 --- a/internal/lsp/testdata/fillstruct/fill_struct_anon.go.golden +++ b/internal/lsp/testdata/fillstruct/fill_struct_anon.go.golden @@ -15,6 +15,6 @@ func fill() { a: struct{}{}, b: map[string]interface{}{}, c: map[string]struct{d int; e bool}{}, - } //@suggestedfix("}", "refactor.rewrite") + } //@suggestedfix("}", "refactor.rewrite", "Fill") } diff --git a/internal/lsp/testdata/fillstruct/fill_struct_nested.go b/internal/lsp/testdata/fillstruct/fill_struct_nested.go index 79eb84b7478..ab7be5a7b58 100644 --- a/internal/lsp/testdata/fillstruct/fill_struct_nested.go +++ b/internal/lsp/testdata/fillstruct/fill_struct_nested.go @@ -10,6 +10,6 @@ type StructC struct { func nested() { c := StructB{ - StructC: StructC{}, //@suggestedfix("}", "refactor.rewrite") + StructC: StructC{}, //@suggestedfix("}", "refactor.rewrite", "Fill") } } diff --git a/internal/lsp/testdata/fillstruct/fill_struct_nested.go.golden b/internal/lsp/testdata/fillstruct/fill_struct_nested.go.golden index 30061a5d72a..c902ee7f12b 100644 --- a/internal/lsp/testdata/fillstruct/fill_struct_nested.go.golden +++ b/internal/lsp/testdata/fillstruct/fill_struct_nested.go.golden @@ 
-13,7 +13,7 @@ func nested() { c := StructB{ StructC: StructC{ unexportedInt: 0, - }, //@suggestedfix("}", "refactor.rewrite") + }, //@suggestedfix("}", "refactor.rewrite", "Fill") } } diff --git a/internal/lsp/testdata/fillstruct/fill_struct_package.go b/internal/lsp/testdata/fillstruct/fill_struct_package.go index 71f124858b3..edb88c48675 100644 --- a/internal/lsp/testdata/fillstruct/fill_struct_package.go +++ b/internal/lsp/testdata/fillstruct/fill_struct_package.go @@ -7,6 +7,6 @@ import ( ) func unexported() { - a := data.B{} //@suggestedfix("}", "refactor.rewrite") - _ = h2.Client{} //@suggestedfix("}", "refactor.rewrite") + a := data.B{} //@suggestedfix("}", "refactor.rewrite", "Fill") + _ = h2.Client{} //@suggestedfix("}", "refactor.rewrite", "Fill") } diff --git a/internal/lsp/testdata/fillstruct/fill_struct_package.go.golden b/internal/lsp/testdata/fillstruct/fill_struct_package.go.golden index 13c85702527..57b261329dc 100644 --- a/internal/lsp/testdata/fillstruct/fill_struct_package.go.golden +++ b/internal/lsp/testdata/fillstruct/fill_struct_package.go.golden @@ -10,8 +10,8 @@ import ( func unexported() { a := data.B{ ExportedInt: 0, - } //@suggestedfix("}", "refactor.rewrite") - _ = h2.Client{} //@suggestedfix("}", "refactor.rewrite") + } //@suggestedfix("}", "refactor.rewrite", "Fill") + _ = h2.Client{} //@suggestedfix("}", "refactor.rewrite", "Fill") } -- suggestedfix_fill_struct_package_11_16 -- @@ -24,13 +24,13 @@ import ( ) func unexported() { - a := data.B{} //@suggestedfix("}", "refactor.rewrite") + a := data.B{} //@suggestedfix("}", "refactor.rewrite", "Fill") _ = h2.Client{ Transport: nil, CheckRedirect: func(req *h2.Request, via []*h2.Request) error { }, Jar: nil, Timeout: 0, - } //@suggestedfix("}", "refactor.rewrite") + } //@suggestedfix("}", "refactor.rewrite", "Fill") } diff --git a/internal/lsp/testdata/fillstruct/fill_struct_partial.go b/internal/lsp/testdata/fillstruct/fill_struct_partial.go index 97b517dcdc3..5de1722c783 100644 --- a/internal/lsp/testdata/fillstruct/fill_struct_partial.go +++ b/internal/lsp/testdata/fillstruct/fill_struct_partial.go @@ -14,11 +14,11 @@ type StructPartialB struct { func fill() { a := StructPartialA{ PrefilledInt: 5, - } //@suggestedfix("}", "refactor.rewrite") + } //@suggestedfix("}", "refactor.rewrite", "Fill") b := StructPartialB{ /* this comment should disappear */ PrefilledInt: 7, // This comment should be blown away. /* As should this one */ - } //@suggestedfix("}", "refactor.rewrite") + } //@suggestedfix("}", "refactor.rewrite", "Fill") } diff --git a/internal/lsp/testdata/fillstruct/fill_struct_partial.go.golden b/internal/lsp/testdata/fillstruct/fill_struct_partial.go.golden index 2d063c14d39..3aa437a0334 100644 --- a/internal/lsp/testdata/fillstruct/fill_struct_partial.go.golden +++ b/internal/lsp/testdata/fillstruct/fill_struct_partial.go.golden @@ -17,13 +17,13 @@ func fill() { PrefilledInt: 5, UnfilledInt: 0, StructPartialB: StructPartialB{}, - } //@suggestedfix("}", "refactor.rewrite") + } //@suggestedfix("}", "refactor.rewrite", "Fill") b := StructPartialB{ /* this comment should disappear */ PrefilledInt: 7, // This comment should be blown away. 
/* As should this one */ - } //@suggestedfix("}", "refactor.rewrite") + } //@suggestedfix("}", "refactor.rewrite", "Fill") } -- suggestedfix_fill_struct_partial_23_2 -- @@ -43,10 +43,10 @@ type StructPartialB struct { func fill() { a := StructPartialA{ PrefilledInt: 5, - } //@suggestedfix("}", "refactor.rewrite") + } //@suggestedfix("}", "refactor.rewrite", "Fill") b := StructPartialB{ PrefilledInt: 7, UnfilledInt: 0, - } //@suggestedfix("}", "refactor.rewrite") + } //@suggestedfix("}", "refactor.rewrite", "Fill") } diff --git a/internal/lsp/testdata/fillstruct/fill_struct_spaces.go b/internal/lsp/testdata/fillstruct/fill_struct_spaces.go index d5d1bbba5c3..6a468cd544c 100644 --- a/internal/lsp/testdata/fillstruct/fill_struct_spaces.go +++ b/internal/lsp/testdata/fillstruct/fill_struct_spaces.go @@ -5,5 +5,5 @@ type StructD struct { } func spaces() { - d := StructD{} //@suggestedfix("}", "refactor.rewrite") + d := StructD{} //@suggestedfix("}", "refactor.rewrite", "Fill") } diff --git a/internal/lsp/testdata/fillstruct/fill_struct_spaces.go.golden b/internal/lsp/testdata/fillstruct/fill_struct_spaces.go.golden index 0d755334c99..590c91611d0 100644 --- a/internal/lsp/testdata/fillstruct/fill_struct_spaces.go.golden +++ b/internal/lsp/testdata/fillstruct/fill_struct_spaces.go.golden @@ -8,6 +8,6 @@ type StructD struct { func spaces() { d := StructD{ ExportedIntField: 0, - } //@suggestedfix("}", "refactor.rewrite") + } //@suggestedfix("}", "refactor.rewrite", "Fill") } diff --git a/internal/lsp/testdata/fillstruct/fill_struct_unsafe.go b/internal/lsp/testdata/fillstruct/fill_struct_unsafe.go index 50877e9005c..f5e42a4f2fe 100644 --- a/internal/lsp/testdata/fillstruct/fill_struct_unsafe.go +++ b/internal/lsp/testdata/fillstruct/fill_struct_unsafe.go @@ -8,5 +8,5 @@ type unsafeStruct struct { } func fill() { - _ := unsafeStruct{} //@suggestedfix("}", "refactor.rewrite") + _ := unsafeStruct{} //@suggestedfix("}", "refactor.rewrite", "Fill") } diff --git a/internal/lsp/testdata/fillstruct/fill_struct_unsafe.go.golden b/internal/lsp/testdata/fillstruct/fill_struct_unsafe.go.golden index 99369544373..7e8e1952f86 100644 --- a/internal/lsp/testdata/fillstruct/fill_struct_unsafe.go.golden +++ b/internal/lsp/testdata/fillstruct/fill_struct_unsafe.go.golden @@ -12,6 +12,6 @@ func fill() { _ := unsafeStruct{ x: 0, p: nil, - } //@suggestedfix("}", "refactor.rewrite") + } //@suggestedfix("}", "refactor.rewrite", "Fill") } diff --git a/internal/lsp/testdata/fillstruct/typeparams.go b/internal/lsp/testdata/fillstruct/typeparams.go new file mode 100644 index 00000000000..c60cd68ada5 --- /dev/null +++ b/internal/lsp/testdata/fillstruct/typeparams.go @@ -0,0 +1,38 @@ +//go:build go1.18 +// +build go1.18 + +package fillstruct + +type emptyStructWithTypeParams[A any] struct{} + +var _ = emptyStructWithTypeParams[int]{} + +type basicStructWithTypeParams[T any] struct { + foo T +} + +var _ = basicStructWithTypeParams[int]{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type twoArgStructWithTypeParams[F, B any] struct { + foo F + bar B +} + +var _ = twoArgStructWithTypeParams[string, int]{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +var _ = twoArgStructWithTypeParams[int, string]{ + bar: "bar", +} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type nestedStructWithTypeParams struct { + bar string + basic basicStructWithTypeParams[int] +} + +var _ = nestedStructWithTypeParams{} + +func _[T any]() { + type S struct{ t T } + x := S{} + _ = x +} diff --git 
a/internal/lsp/testdata/fillstruct/typeparams.go.golden b/internal/lsp/testdata/fillstruct/typeparams.go.golden new file mode 100644 index 00000000000..9b2b90c12ee --- /dev/null +++ b/internal/lsp/testdata/fillstruct/typeparams.go.golden @@ -0,0 +1,328 @@ +-- suggestedfix_typeparams_11_40 -- +package fillstruct + +type emptyStructWithTypeParams[A any] struct{} + +var _ = emptyStructWithTypeParams[int]{} + +type basicStructWithTypeParams[T any] struct { + foo T +} + +var _ = basicStructWithTypeParams[int]{ + foo: 0, +} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type twoArgStructWithTypeParams[F, B any] struct { + foo F + bar B +} + +var _ = twoArgStructWithTypeParams[string, int]{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +var _ = twoArgStructWithTypeParams[int, string]{ + bar: "bar", +} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type nestedStructWithTypeParams struct { + bar string + basic basicStructWithTypeParams[int] +} + +var _ = nestedStructWithTypeParams{} + +func _[T any]() { + type S struct{ t T } + x := S{} + var _ = basicStructWithTypeParams[T]{} //@suggestedfix("}", "refactor.rewrite", "Fill") + _ = x +} + +-- suggestedfix_typeparams_14_40 -- +//go:build go1.18 +// +build go1.18 + +package fillstruct + +type emptyStructWithTypeParams[A any] struct{} + +var _ = emptyStructWithTypeParams[int]{} + +type basicStructWithTypeParams[T any] struct { + foo T +} + +var _ = basicStructWithTypeParams[int]{ + foo: 0, +} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type twoArgStructWithTypeParams[F, B any] struct { + foo F + bar B +} + +var _ = twoArgStructWithTypeParams[string, int]{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +var _ = twoArgStructWithTypeParams[int, string]{ + bar: "bar", +} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type nestedStructWithTypeParams struct { + bar string + basic basicStructWithTypeParams[int] +} + +var _ = nestedStructWithTypeParams{} + +func _[T any]() { + type S struct{ t T } + x := S{} + _ = x +} + +-- suggestedfix_typeparams_18_49 -- +package fillstruct + +type emptyStructWithTypeParams[A any] struct{} + +var _ = emptyStructWithTypeParams[int]{} + +type basicStructWithTypeParams[T any] struct { + foo T +} + +var _ = basicStructWithTypeParams[int]{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type twoArgStructWithTypeParams[F, B any] struct { + foo F + bar B +} + +var _ = twoArgStructWithTypeParams[string, int]{ + foo: "", + bar: 0, +} //@suggestedfix("}", "refactor.rewrite", "Fill") + +var _ = twoArgStructWithTypeParams[int, string]{ + bar: "bar", +} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type nestedStructWithTypeParams struct { + bar string + basic basicStructWithTypeParams[int] +} + +var _ = nestedStructWithTypeParams{} + +func _[T any]() { + type S struct{ t T } + x := S{} + var _ = basicStructWithTypeParams[T]{} //@suggestedfix("}", "refactor.rewrite", "Fill") + _ = x +} + +-- suggestedfix_typeparams_20_49 -- +package fillstruct + +type emptyStructWithTypeParams[A any] struct{} + +var _ = emptyStructWithTypeParams[int]{} + +type basicStructWithTypeParams[T any] struct { + foo T +} + +var _ = basicStructWithTypeParams[int]{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +var _ = basicStructWithTypeParams{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type twoArgStructWithTypeParams[F, B any] struct { + foo F + bar B +} + +var _ = twoArgStructWithTypeParams[string, int]{ + foo: "", + bar: 0, +} //@suggestedfix("}", "refactor.rewrite", "Fill") + +var _ = 
twoArgStructWithTypeParams[int, string]{ + bar: "bar", +} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type nestedStructWithTypeParams struct { + bar string + basic basicStructWithTypeParams[int] +} + +var _ = nestedStructWithTypeParams{} + +func _[T any]() { + type S struct{ t T } + x := S{} + _ = x +} + +-- suggestedfix_typeparams_21_49 -- +//go:build go1.18 +// +build go1.18 + +package fillstruct + +type emptyStructWithTypeParams[A any] struct{} + +var _ = emptyStructWithTypeParams[int]{} + +type basicStructWithTypeParams[T any] struct { + foo T +} + +var _ = basicStructWithTypeParams[int]{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type twoArgStructWithTypeParams[F, B any] struct { + foo F + bar B +} + +var _ = twoArgStructWithTypeParams[string, int]{ + foo: "", + bar: 0, +} //@suggestedfix("}", "refactor.rewrite", "Fill") + +var _ = twoArgStructWithTypeParams[int, string]{ + bar: "bar", +} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type nestedStructWithTypeParams struct { + bar string + basic basicStructWithTypeParams[int] +} + +var _ = nestedStructWithTypeParams{} + +func _[T any]() { + type S struct{ t T } + x := S{} + _ = x +} + +-- suggestedfix_typeparams_22_1 -- +package fillstruct + +type emptyStructWithTypeParams[A any] struct{} + +var _ = emptyStructWithTypeParams[int]{} + +type basicStructWithTypeParams[T any] struct { + foo T +} + +var _ = basicStructWithTypeParams[int]{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type twoArgStructWithTypeParams[F, B any] struct { + foo F + bar B +} + +var _ = twoArgStructWithTypeParams[string, int]{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +var _ = twoArgStructWithTypeParams[int, string]{ + foo: 0, + bar: "bar", +} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type nestedStructWithTypeParams struct { + bar string + basic basicStructWithTypeParams[int] +} + +var _ = nestedStructWithTypeParams{} + +func _[T any]() { + type S struct{ t T } + x := S{} + var _ = basicStructWithTypeParams[T]{} //@suggestedfix("}", "refactor.rewrite", "Fill") + _ = x +} + +-- suggestedfix_typeparams_24_1 -- +package fillstruct + +type emptyStructWithTypeParams[A any] struct{} + +var _ = emptyStructWithTypeParams[int]{} + +type basicStructWithTypeParams[T any] struct { + foo T +} + +var _ = basicStructWithTypeParams[int]{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +var _ = basicStructWithTypeParams{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type twoArgStructWithTypeParams[F, B any] struct { + foo F + bar B +} + +var _ = twoArgStructWithTypeParams[string, int]{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +var _ = twoArgStructWithTypeParams[int, string]{ + foo: 0, + bar: "bar", +} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type nestedStructWithTypeParams struct { + bar string + basic basicStructWithTypeParams[int] +} + +var _ = nestedStructWithTypeParams{} + +func _[T any]() { + type S struct{ t T } + x := S{} + _ = x +} + +-- suggestedfix_typeparams_25_1 -- +//go:build go1.18 +// +build go1.18 + +package fillstruct + +type emptyStructWithTypeParams[A any] struct{} + +var _ = emptyStructWithTypeParams[int]{} + +type basicStructWithTypeParams[T any] struct { + foo T +} + +var _ = basicStructWithTypeParams[int]{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type twoArgStructWithTypeParams[F, B any] struct { + foo F + bar B +} + +var _ = twoArgStructWithTypeParams[string, int]{} //@suggestedfix("}", "refactor.rewrite", "Fill") + +var _ = twoArgStructWithTypeParams[int, 
string]{ + foo: 0, + bar: "bar", +} //@suggestedfix("}", "refactor.rewrite", "Fill") + +type nestedStructWithTypeParams struct { + bar string + basic basicStructWithTypeParams[int] +} + +var _ = nestedStructWithTypeParams{} + +func _[T any]() { + type S struct{ t T } + x := S{} + _ = x +} + diff --git a/internal/lsp/testdata/missingfunction/channels.go b/internal/lsp/testdata/missingfunction/channels.go index 436491c1949..303770cd7aa 100644 --- a/internal/lsp/testdata/missingfunction/channels.go +++ b/internal/lsp/testdata/missingfunction/channels.go @@ -1,7 +1,7 @@ package missingfunction func channels(s string) { - undefinedChannels(c()) //@suggestedfix("undefinedChannels", "quickfix") + undefinedChannels(c()) //@suggestedfix("undefinedChannels", "quickfix", "") } func c() (<-chan string, chan string) { diff --git a/internal/lsp/testdata/missingfunction/channels.go.golden b/internal/lsp/testdata/missingfunction/channels.go.golden index f5078fed17a..998ce589e1d 100644 --- a/internal/lsp/testdata/missingfunction/channels.go.golden +++ b/internal/lsp/testdata/missingfunction/channels.go.golden @@ -2,7 +2,7 @@ package missingfunction func channels(s string) { - undefinedChannels(c()) //@suggestedfix("undefinedChannels", "quickfix") + undefinedChannels(c()) //@suggestedfix("undefinedChannels", "quickfix", "") } func undefinedChannels(ch1 <-chan string, ch2 chan string) { diff --git a/internal/lsp/testdata/missingfunction/consecutive_params.go b/internal/lsp/testdata/missingfunction/consecutive_params.go index d2ec3be3232..f2fb3c04132 100644 --- a/internal/lsp/testdata/missingfunction/consecutive_params.go +++ b/internal/lsp/testdata/missingfunction/consecutive_params.go @@ -2,5 +2,5 @@ package missingfunction func consecutiveParams() { var s string - undefinedConsecutiveParams(s, s) //@suggestedfix("undefinedConsecutiveParams", "quickfix") + undefinedConsecutiveParams(s, s) //@suggestedfix("undefinedConsecutiveParams", "quickfix", "") } diff --git a/internal/lsp/testdata/missingfunction/consecutive_params.go.golden b/internal/lsp/testdata/missingfunction/consecutive_params.go.golden index 14a766496fb..4b852ce141b 100644 --- a/internal/lsp/testdata/missingfunction/consecutive_params.go.golden +++ b/internal/lsp/testdata/missingfunction/consecutive_params.go.golden @@ -3,7 +3,7 @@ package missingfunction func consecutiveParams() { var s string - undefinedConsecutiveParams(s, s) //@suggestedfix("undefinedConsecutiveParams", "quickfix") + undefinedConsecutiveParams(s, s) //@suggestedfix("undefinedConsecutiveParams", "quickfix", "") } func undefinedConsecutiveParams(s1, s2 string) { diff --git a/internal/lsp/testdata/missingfunction/error_param.go b/internal/lsp/testdata/missingfunction/error_param.go index 9fd943ffb6d..d0484f0ff56 100644 --- a/internal/lsp/testdata/missingfunction/error_param.go +++ b/internal/lsp/testdata/missingfunction/error_param.go @@ -2,5 +2,5 @@ package missingfunction func errorParam() { var err error - undefinedErrorParam(err) //@suggestedfix("undefinedErrorParam", "quickfix") + undefinedErrorParam(err) //@suggestedfix("undefinedErrorParam", "quickfix", "") } diff --git a/internal/lsp/testdata/missingfunction/error_param.go.golden b/internal/lsp/testdata/missingfunction/error_param.go.golden index 2e12711817d..de78646a5f1 100644 --- a/internal/lsp/testdata/missingfunction/error_param.go.golden +++ b/internal/lsp/testdata/missingfunction/error_param.go.golden @@ -3,7 +3,7 @@ package missingfunction func errorParam() { var err error - undefinedErrorParam(err) 
//@suggestedfix("undefinedErrorParam", "quickfix") + undefinedErrorParam(err) //@suggestedfix("undefinedErrorParam", "quickfix", "") } func undefinedErrorParam(err error) { diff --git a/internal/lsp/testdata/missingfunction/literals.go b/internal/lsp/testdata/missingfunction/literals.go index e276eae79ec..0099b1a08ad 100644 --- a/internal/lsp/testdata/missingfunction/literals.go +++ b/internal/lsp/testdata/missingfunction/literals.go @@ -3,5 +3,5 @@ package missingfunction type T struct{} func literals() { - undefinedLiterals("hey compiler", T{}, &T{}) //@suggestedfix("undefinedLiterals", "quickfix") + undefinedLiterals("hey compiler", T{}, &T{}) //@suggestedfix("undefinedLiterals", "quickfix", "") } diff --git a/internal/lsp/testdata/missingfunction/literals.go.golden b/internal/lsp/testdata/missingfunction/literals.go.golden index 04782b9bf50..599f020a75b 100644 --- a/internal/lsp/testdata/missingfunction/literals.go.golden +++ b/internal/lsp/testdata/missingfunction/literals.go.golden @@ -8,7 +8,7 @@ package missingfunction type T struct{} func literals() { - undefinedLiterals("hey compiler", T{}, &T{}) //@suggestedfix("undefinedLiterals", "quickfix") + undefinedLiterals("hey compiler", T{}, &T{}) //@suggestedfix("undefinedLiterals", "quickfix", "") } func undefinedLiterals(s string, t1 T, t2 *T) { @@ -20,7 +20,7 @@ package missingfunction type T struct{} func literals() { - undefinedLiterals("hey compiler", T{}, &T{}) //@suggestedfix("undefinedLiterals", "quickfix") + undefinedLiterals("hey compiler", T{}, &T{}) //@suggestedfix("undefinedLiterals", "quickfix", "") } func undefinedLiterals(s string, t1 T, t2 *T) { diff --git a/internal/lsp/testdata/missingfunction/operation.go b/internal/lsp/testdata/missingfunction/operation.go index 0408219fe37..a4913ec10b2 100644 --- a/internal/lsp/testdata/missingfunction/operation.go +++ b/internal/lsp/testdata/missingfunction/operation.go @@ -3,5 +3,5 @@ package missingfunction import "time" func operation() { - undefinedOperation(10 * time.Second) //@suggestedfix("undefinedOperation", "quickfix") + undefinedOperation(10 * time.Second) //@suggestedfix("undefinedOperation", "quickfix", "") } diff --git a/internal/lsp/testdata/missingfunction/operation.go.golden b/internal/lsp/testdata/missingfunction/operation.go.golden index 5e35f300534..fce69b11d85 100644 --- a/internal/lsp/testdata/missingfunction/operation.go.golden +++ b/internal/lsp/testdata/missingfunction/operation.go.golden @@ -8,7 +8,7 @@ package missingfunction import "time" func operation() { - undefinedOperation(10 * time.Second) //@suggestedfix("undefinedOperation", "quickfix") + undefinedOperation(10 * time.Second) //@suggestedfix("undefinedOperation", "quickfix", "") } func undefinedOperation(duration time.Duration) { @@ -20,7 +20,7 @@ package missingfunction import "time" func operation() { - undefinedOperation(10 * time.Second) //@suggestedfix("undefinedOperation", "quickfix") + undefinedOperation(10 * time.Second) //@suggestedfix("undefinedOperation", "quickfix", "") } func undefinedOperation(duration time.Duration) { diff --git a/internal/lsp/testdata/missingfunction/selector.go b/internal/lsp/testdata/missingfunction/selector.go index afd1ab61f3a..93a04027138 100644 --- a/internal/lsp/testdata/missingfunction/selector.go +++ b/internal/lsp/testdata/missingfunction/selector.go @@ -2,5 +2,5 @@ package missingfunction func selector() { m := map[int]bool{} - undefinedSelector(m[1]) //@suggestedfix("undefinedSelector", "quickfix") + undefinedSelector(m[1]) 
//@suggestedfix("undefinedSelector", "quickfix", "") } diff --git a/internal/lsp/testdata/missingfunction/selector.go.golden b/internal/lsp/testdata/missingfunction/selector.go.golden index c48691c4ed5..44e2dde3aa7 100644 --- a/internal/lsp/testdata/missingfunction/selector.go.golden +++ b/internal/lsp/testdata/missingfunction/selector.go.golden @@ -3,7 +3,7 @@ package missingfunction func selector() { m := map[int]bool{} - undefinedSelector(m[1]) //@suggestedfix("undefinedSelector", "quickfix") + undefinedSelector(m[1]) //@suggestedfix("undefinedSelector", "quickfix", "") } func undefinedSelector(b bool) { diff --git a/internal/lsp/testdata/missingfunction/slice.go b/internal/lsp/testdata/missingfunction/slice.go index 4a562a2e762..48b1a52b3f3 100644 --- a/internal/lsp/testdata/missingfunction/slice.go +++ b/internal/lsp/testdata/missingfunction/slice.go @@ -1,5 +1,5 @@ package missingfunction func slice() { - undefinedSlice([]int{1, 2}) //@suggestedfix("undefinedSlice", "quickfix") + undefinedSlice([]int{1, 2}) //@suggestedfix("undefinedSlice", "quickfix", "") } diff --git a/internal/lsp/testdata/missingfunction/slice.go.golden b/internal/lsp/testdata/missingfunction/slice.go.golden index 0ccb8611b6c..2a05d9a0f54 100644 --- a/internal/lsp/testdata/missingfunction/slice.go.golden +++ b/internal/lsp/testdata/missingfunction/slice.go.golden @@ -2,7 +2,7 @@ package missingfunction func slice() { - undefinedSlice([]int{1, 2}) //@suggestedfix("undefinedSlice", "quickfix") + undefinedSlice([]int{1, 2}) //@suggestedfix("undefinedSlice", "quickfix", "") } func undefinedSlice(i []int) { diff --git a/internal/lsp/testdata/missingfunction/tuple.go b/internal/lsp/testdata/missingfunction/tuple.go index 1c4782c15dd..4059ced983a 100644 --- a/internal/lsp/testdata/missingfunction/tuple.go +++ b/internal/lsp/testdata/missingfunction/tuple.go @@ -1,7 +1,7 @@ package missingfunction func tuple() { - undefinedTuple(b()) //@suggestedfix("undefinedTuple", "quickfix") + undefinedTuple(b()) //@suggestedfix("undefinedTuple", "quickfix", "") } func b() (string, error) { diff --git a/internal/lsp/testdata/missingfunction/tuple.go.golden b/internal/lsp/testdata/missingfunction/tuple.go.golden index 1e12bb70860..e1118a3f348 100644 --- a/internal/lsp/testdata/missingfunction/tuple.go.golden +++ b/internal/lsp/testdata/missingfunction/tuple.go.golden @@ -2,7 +2,7 @@ package missingfunction func tuple() { - undefinedTuple(b()) //@suggestedfix("undefinedTuple", "quickfix") + undefinedTuple(b()) //@suggestedfix("undefinedTuple", "quickfix", "") } func undefinedTuple(s string, err error) { diff --git a/internal/lsp/testdata/missingfunction/unique_params.go b/internal/lsp/testdata/missingfunction/unique_params.go index ffaba3f9cb9..00479bf7554 100644 --- a/internal/lsp/testdata/missingfunction/unique_params.go +++ b/internal/lsp/testdata/missingfunction/unique_params.go @@ -3,5 +3,5 @@ package missingfunction func uniqueArguments() { var s string var i int - undefinedUniqueArguments(s, i, s) //@suggestedfix("undefinedUniqueArguments", "quickfix") + undefinedUniqueArguments(s, i, s) //@suggestedfix("undefinedUniqueArguments", "quickfix", "") } diff --git a/internal/lsp/testdata/missingfunction/unique_params.go.golden b/internal/lsp/testdata/missingfunction/unique_params.go.golden index 74fb91a8eb2..4797b3ba784 100644 --- a/internal/lsp/testdata/missingfunction/unique_params.go.golden +++ b/internal/lsp/testdata/missingfunction/unique_params.go.golden @@ -8,7 +8,7 @@ package missingfunction func uniqueArguments() { var s 
string var i int - undefinedUniqueArguments(s, i, s) //@suggestedfix("undefinedUniqueArguments", "quickfix") + undefinedUniqueArguments(s, i, s) //@suggestedfix("undefinedUniqueArguments", "quickfix", "") } func undefinedUniqueArguments(s1 string, i int, s2 string) { @@ -21,7 +21,7 @@ package missingfunction func uniqueArguments() { var s string var i int - undefinedUniqueArguments(s, i, s) //@suggestedfix("undefinedUniqueArguments", "quickfix") + undefinedUniqueArguments(s, i, s) //@suggestedfix("undefinedUniqueArguments", "quickfix", "") } func undefinedUniqueArguments(s1 string, i int, s2 string) { diff --git a/internal/lsp/testdata/stub/stub_add_selector.go b/internal/lsp/testdata/stub/stub_add_selector.go index a15afd7c244..4037b7ad3a0 100644 --- a/internal/lsp/testdata/stub/stub_add_selector.go +++ b/internal/lsp/testdata/stub/stub_add_selector.go @@ -7,6 +7,6 @@ import "io" // then our implementation must add the import/package selector // in the concrete method if the concrete type is outside of the interface // package -var _ io.ReaderFrom = &readerFrom{} //@suggestedfix("&readerFrom", "refactor.rewrite") +var _ io.ReaderFrom = &readerFrom{} //@suggestedfix("&readerFrom", "refactor.rewrite", "") type readerFrom struct{} diff --git a/internal/lsp/testdata/stub/stub_add_selector.go.golden b/internal/lsp/testdata/stub/stub_add_selector.go.golden index e885483eaaf..8f08ca1efe2 100644 --- a/internal/lsp/testdata/stub/stub_add_selector.go.golden +++ b/internal/lsp/testdata/stub/stub_add_selector.go.golden @@ -8,7 +8,7 @@ import "io" // then our implementation must add the import/package selector // in the concrete method if the concrete type is outside of the interface // package -var _ io.ReaderFrom = &readerFrom{} //@suggestedfix("&readerFrom", "refactor.rewrite") +var _ io.ReaderFrom = &readerFrom{} //@suggestedfix("&readerFrom", "refactor.rewrite", "") type readerFrom struct{} diff --git a/internal/lsp/testdata/stub/stub_assign.go b/internal/lsp/testdata/stub/stub_assign.go index 9336361d009..d3f09313f25 100644 --- a/internal/lsp/testdata/stub/stub_assign.go +++ b/internal/lsp/testdata/stub/stub_assign.go @@ -4,7 +4,7 @@ import "io" func main() { var br io.ByteWriter - br = &byteWriter{} //@suggestedfix("&", "refactor.rewrite") + br = &byteWriter{} //@suggestedfix("&", "refactor.rewrite", "") } type byteWriter struct{} diff --git a/internal/lsp/testdata/stub/stub_assign.go.golden b/internal/lsp/testdata/stub/stub_assign.go.golden index a52a8236798..f1535424114 100644 --- a/internal/lsp/testdata/stub/stub_assign.go.golden +++ b/internal/lsp/testdata/stub/stub_assign.go.golden @@ -5,7 +5,7 @@ import "io" func main() { var br io.ByteWriter - br = &byteWriter{} //@suggestedfix("&", "refactor.rewrite") + br = &byteWriter{} //@suggestedfix("&", "refactor.rewrite", "") } type byteWriter struct{} diff --git a/internal/lsp/testdata/stub/stub_assign_multivars.go b/internal/lsp/testdata/stub/stub_assign_multivars.go index 01b330fda54..bd36d6833d1 100644 --- a/internal/lsp/testdata/stub/stub_assign_multivars.go +++ b/internal/lsp/testdata/stub/stub_assign_multivars.go @@ -5,7 +5,7 @@ import "io" func main() { var br io.ByteWriter var i int - i, br = 1, &multiByteWriter{} //@suggestedfix("&", "refactor.rewrite") + i, br = 1, &multiByteWriter{} //@suggestedfix("&", "refactor.rewrite", "") } type multiByteWriter struct{} diff --git a/internal/lsp/testdata/stub/stub_assign_multivars.go.golden b/internal/lsp/testdata/stub/stub_assign_multivars.go.golden index e1e71adbd50..425d11746a5 100644 --- 
a/internal/lsp/testdata/stub/stub_assign_multivars.go.golden +++ b/internal/lsp/testdata/stub/stub_assign_multivars.go.golden @@ -6,7 +6,7 @@ import "io" func main() { var br io.ByteWriter var i int - i, br = 1, &multiByteWriter{} //@suggestedfix("&", "refactor.rewrite") + i, br = 1, &multiByteWriter{} //@suggestedfix("&", "refactor.rewrite", "") } type multiByteWriter struct{} diff --git a/internal/lsp/testdata/stub/stub_call_expr.go b/internal/lsp/testdata/stub/stub_call_expr.go index 775b0e5545e..0c309466524 100644 --- a/internal/lsp/testdata/stub/stub_call_expr.go +++ b/internal/lsp/testdata/stub/stub_call_expr.go @@ -1,7 +1,7 @@ package stub func main() { - check(&callExpr{}) //@suggestedfix("&", "refactor.rewrite") + check(&callExpr{}) //@suggestedfix("&", "refactor.rewrite", "") } func check(err error) { diff --git a/internal/lsp/testdata/stub/stub_call_expr.go.golden b/internal/lsp/testdata/stub/stub_call_expr.go.golden index 2d12f8651f3..c82d22440f1 100644 --- a/internal/lsp/testdata/stub/stub_call_expr.go.golden +++ b/internal/lsp/testdata/stub/stub_call_expr.go.golden @@ -2,7 +2,7 @@ package stub func main() { - check(&callExpr{}) //@suggestedfix("&", "refactor.rewrite") + check(&callExpr{}) //@suggestedfix("&", "refactor.rewrite", "") } func check(err error) { diff --git a/internal/lsp/testdata/stub/stub_embedded.go b/internal/lsp/testdata/stub/stub_embedded.go index 6d6a986bf24..f66989e9f0f 100644 --- a/internal/lsp/testdata/stub/stub_embedded.go +++ b/internal/lsp/testdata/stub/stub_embedded.go @@ -5,7 +5,7 @@ import ( "sort" ) -var _ embeddedInterface = (*embeddedConcrete)(nil) //@suggestedfix("(", "refactor.rewrite") +var _ embeddedInterface = (*embeddedConcrete)(nil) //@suggestedfix("(", "refactor.rewrite", "") type embeddedConcrete struct{} diff --git a/internal/lsp/testdata/stub/stub_embedded.go.golden b/internal/lsp/testdata/stub/stub_embedded.go.golden index c258ebaf46c..3c5347e8c01 100644 --- a/internal/lsp/testdata/stub/stub_embedded.go.golden +++ b/internal/lsp/testdata/stub/stub_embedded.go.golden @@ -6,7 +6,7 @@ import ( "sort" ) -var _ embeddedInterface = (*embeddedConcrete)(nil) //@suggestedfix("(", "refactor.rewrite") +var _ embeddedInterface = (*embeddedConcrete)(nil) //@suggestedfix("(", "refactor.rewrite", "") type embeddedConcrete struct{} diff --git a/internal/lsp/testdata/stub/stub_err.go b/internal/lsp/testdata/stub/stub_err.go index 908c7d3152f..121f0e794d7 100644 --- a/internal/lsp/testdata/stub/stub_err.go +++ b/internal/lsp/testdata/stub/stub_err.go @@ -1,7 +1,7 @@ package stub func main() { - var br error = &customErr{} //@suggestedfix("&", "refactor.rewrite") + var br error = &customErr{} //@suggestedfix("&", "refactor.rewrite", "") } type customErr struct{} diff --git a/internal/lsp/testdata/stub/stub_err.go.golden b/internal/lsp/testdata/stub/stub_err.go.golden index 717aed86293..0b441bdaab1 100644 --- a/internal/lsp/testdata/stub/stub_err.go.golden +++ b/internal/lsp/testdata/stub/stub_err.go.golden @@ -2,7 +2,7 @@ package stub func main() { - var br error = &customErr{} //@suggestedfix("&", "refactor.rewrite") + var br error = &customErr{} //@suggestedfix("&", "refactor.rewrite", "") } type customErr struct{} diff --git a/internal/lsp/testdata/stub/stub_function_return.go b/internal/lsp/testdata/stub/stub_function_return.go index bbf05885af2..41f17645e9c 100644 --- a/internal/lsp/testdata/stub/stub_function_return.go +++ b/internal/lsp/testdata/stub/stub_function_return.go @@ -5,7 +5,7 @@ import ( ) func newCloser() io.Closer { - return 
closer{} //@suggestedfix("c", "refactor.rewrite") + return closer{} //@suggestedfix("c", "refactor.rewrite", "") } type closer struct{} diff --git a/internal/lsp/testdata/stub/stub_function_return.go.golden b/internal/lsp/testdata/stub/stub_function_return.go.golden index f80874d2b94..e90712e6973 100644 --- a/internal/lsp/testdata/stub/stub_function_return.go.golden +++ b/internal/lsp/testdata/stub/stub_function_return.go.golden @@ -6,7 +6,7 @@ import ( ) func newCloser() io.Closer { - return closer{} //@suggestedfix("c", "refactor.rewrite") + return closer{} //@suggestedfix("c", "refactor.rewrite", "") } type closer struct{} diff --git a/internal/lsp/testdata/stub/stub_generic_receiver.go b/internal/lsp/testdata/stub/stub_generic_receiver.go index 64e90fcf6a7..1c00569ea1c 100644 --- a/internal/lsp/testdata/stub/stub_generic_receiver.go +++ b/internal/lsp/testdata/stub/stub_generic_receiver.go @@ -7,7 +7,7 @@ import "io" // This file tests that that the stub method generator accounts for concrete // types that have type parameters defined. -var _ io.ReaderFrom = &genReader[string, int]{} //@suggestedfix("&genReader", "refactor.rewrite") +var _ io.ReaderFrom = &genReader[string, int]{} //@suggestedfix("&genReader", "refactor.rewrite", "Implement io.ReaderFrom") type genReader[T, Y any] struct { T T diff --git a/internal/lsp/testdata/stub/stub_generic_receiver.go.golden b/internal/lsp/testdata/stub/stub_generic_receiver.go.golden index 1fc7157b463..97935d47eb3 100644 --- a/internal/lsp/testdata/stub/stub_generic_receiver.go.golden +++ b/internal/lsp/testdata/stub/stub_generic_receiver.go.golden @@ -8,7 +8,7 @@ import "io" // This file tests that that the stub method generator accounts for concrete // types that have type parameters defined. -var _ io.ReaderFrom = &genReader[string, int]{} //@suggestedfix("&genReader", "refactor.rewrite") +var _ io.ReaderFrom = &genReader[string, int]{} //@suggestedfix("&genReader", "refactor.rewrite", "Implement io.ReaderFrom") type genReader[T, Y any] struct { T T diff --git a/internal/lsp/testdata/stub/stub_ignored_imports.go b/internal/lsp/testdata/stub/stub_ignored_imports.go index 8f6ec73de1b..ca95d2a7120 100644 --- a/internal/lsp/testdata/stub/stub_ignored_imports.go +++ b/internal/lsp/testdata/stub/stub_ignored_imports.go @@ -12,7 +12,7 @@ import ( var ( _ Reader - _ zlib.Resetter = (*ignoredResetter)(nil) //@suggestedfix("(", "refactor.rewrite") + _ zlib.Resetter = (*ignoredResetter)(nil) //@suggestedfix("(", "refactor.rewrite", "") ) type ignoredResetter struct{} diff --git a/internal/lsp/testdata/stub/stub_ignored_imports.go.golden b/internal/lsp/testdata/stub/stub_ignored_imports.go.golden index a0ddc179353..33aba532662 100644 --- a/internal/lsp/testdata/stub/stub_ignored_imports.go.golden +++ b/internal/lsp/testdata/stub/stub_ignored_imports.go.golden @@ -14,7 +14,7 @@ import ( var ( _ Reader - _ zlib.Resetter = (*ignoredResetter)(nil) //@suggestedfix("(", "refactor.rewrite") + _ zlib.Resetter = (*ignoredResetter)(nil) //@suggestedfix("(", "refactor.rewrite", "") ) type ignoredResetter struct{} diff --git a/internal/lsp/testdata/stub/stub_multi_var.go b/internal/lsp/testdata/stub/stub_multi_var.go index 4276b799429..06702b22204 100644 --- a/internal/lsp/testdata/stub/stub_multi_var.go +++ b/internal/lsp/testdata/stub/stub_multi_var.go @@ -6,6 +6,6 @@ import "io" // has multiple values on the same line can still be // analyzed correctly to target the interface implementation // diagnostic. 
-var one, two, three io.Reader = nil, &multiVar{}, nil //@suggestedfix("&", "refactor.rewrite") +var one, two, three io.Reader = nil, &multiVar{}, nil //@suggestedfix("&", "refactor.rewrite", "") type multiVar struct{} diff --git a/internal/lsp/testdata/stub/stub_multi_var.go.golden b/internal/lsp/testdata/stub/stub_multi_var.go.golden index b9ac4236766..804c7eec65c 100644 --- a/internal/lsp/testdata/stub/stub_multi_var.go.golden +++ b/internal/lsp/testdata/stub/stub_multi_var.go.golden @@ -7,7 +7,7 @@ import "io" // has multiple values on the same line can still be // analyzed correctly to target the interface implementation // diagnostic. -var one, two, three io.Reader = nil, &multiVar{}, nil //@suggestedfix("&", "refactor.rewrite") +var one, two, three io.Reader = nil, &multiVar{}, nil //@suggestedfix("&", "refactor.rewrite", "") type multiVar struct{} diff --git a/internal/lsp/testdata/stub/stub_pointer.go b/internal/lsp/testdata/stub/stub_pointer.go index 2b3681b8357..e9d8bc688fc 100644 --- a/internal/lsp/testdata/stub/stub_pointer.go +++ b/internal/lsp/testdata/stub/stub_pointer.go @@ -3,7 +3,7 @@ package stub import "io" func getReaderFrom() io.ReaderFrom { - return &pointerImpl{} //@suggestedfix("&", "refactor.rewrite") + return &pointerImpl{} //@suggestedfix("&", "refactor.rewrite", "") } type pointerImpl struct{} diff --git a/internal/lsp/testdata/stub/stub_pointer.go.golden b/internal/lsp/testdata/stub/stub_pointer.go.golden index c4133d7a44d..a4d765dd457 100644 --- a/internal/lsp/testdata/stub/stub_pointer.go.golden +++ b/internal/lsp/testdata/stub/stub_pointer.go.golden @@ -4,7 +4,7 @@ package stub import "io" func getReaderFrom() io.ReaderFrom { - return &pointerImpl{} //@suggestedfix("&", "refactor.rewrite") + return &pointerImpl{} //@suggestedfix("&", "refactor.rewrite", "") } type pointerImpl struct{} diff --git a/internal/lsp/testdata/stub/stub_renamed_import.go b/internal/lsp/testdata/stub/stub_renamed_import.go index eaebe251018..54dd598013d 100644 --- a/internal/lsp/testdata/stub/stub_renamed_import.go +++ b/internal/lsp/testdata/stub/stub_renamed_import.go @@ -5,7 +5,7 @@ import ( myio "io" ) -var _ zlib.Resetter = &myIO{} //@suggestedfix("&", "refactor.rewrite") +var _ zlib.Resetter = &myIO{} //@suggestedfix("&", "refactor.rewrite", "") var _ myio.Reader type myIO struct{} diff --git a/internal/lsp/testdata/stub/stub_renamed_import.go.golden b/internal/lsp/testdata/stub/stub_renamed_import.go.golden index 48ff4f1537f..8182d2b3675 100644 --- a/internal/lsp/testdata/stub/stub_renamed_import.go.golden +++ b/internal/lsp/testdata/stub/stub_renamed_import.go.golden @@ -6,7 +6,7 @@ import ( myio "io" ) -var _ zlib.Resetter = &myIO{} //@suggestedfix("&", "refactor.rewrite") +var _ zlib.Resetter = &myIO{} //@suggestedfix("&", "refactor.rewrite", "") var _ myio.Reader type myIO struct{} diff --git a/internal/lsp/testdata/stub/stub_renamed_import_iface.go b/internal/lsp/testdata/stub/stub_renamed_import_iface.go index 96caf540d60..26142d0a893 100644 --- a/internal/lsp/testdata/stub/stub_renamed_import_iface.go +++ b/internal/lsp/testdata/stub/stub_renamed_import_iface.go @@ -8,6 +8,6 @@ import ( // method references an import from its own package // that the concrete type does not yet import, and that import happens // to be renamed, then we prefer the renaming of the interface. 
-var _ other.Interface = &otherInterfaceImpl{} //@suggestedfix("&otherInterfaceImpl", "refactor.rewrite") +var _ other.Interface = &otherInterfaceImpl{} //@suggestedfix("&otherInterfaceImpl", "refactor.rewrite", "") type otherInterfaceImpl struct{} diff --git a/internal/lsp/testdata/stub/stub_renamed_import_iface.go.golden b/internal/lsp/testdata/stub/stub_renamed_import_iface.go.golden index 9ba2cb440e8..134c24bf88a 100644 --- a/internal/lsp/testdata/stub/stub_renamed_import_iface.go.golden +++ b/internal/lsp/testdata/stub/stub_renamed_import_iface.go.golden @@ -11,7 +11,7 @@ import ( // method references an import from its own package // that the concrete type does not yet import, and that import happens // to be renamed, then we prefer the renaming of the interface. -var _ other.Interface = &otherInterfaceImpl{} //@suggestedfix("&otherInterfaceImpl", "refactor.rewrite") +var _ other.Interface = &otherInterfaceImpl{} //@suggestedfix("&otherInterfaceImpl", "refactor.rewrite", "") type otherInterfaceImpl struct{} diff --git a/internal/lsp/testdata/stub/stub_stdlib.go b/internal/lsp/testdata/stub/stub_stdlib.go index 0d54a6daadf..463cf78a344 100644 --- a/internal/lsp/testdata/stub/stub_stdlib.go +++ b/internal/lsp/testdata/stub/stub_stdlib.go @@ -4,6 +4,6 @@ import ( "io" ) -var _ io.Writer = writer{} //@suggestedfix("w", "refactor.rewrite") +var _ io.Writer = writer{} //@suggestedfix("w", "refactor.rewrite", "") type writer struct{} diff --git a/internal/lsp/testdata/stub/stub_stdlib.go.golden b/internal/lsp/testdata/stub/stub_stdlib.go.golden index 8636cead414..55592501a07 100644 --- a/internal/lsp/testdata/stub/stub_stdlib.go.golden +++ b/internal/lsp/testdata/stub/stub_stdlib.go.golden @@ -5,7 +5,7 @@ import ( "io" ) -var _ io.Writer = writer{} //@suggestedfix("w", "refactor.rewrite") +var _ io.Writer = writer{} //@suggestedfix("w", "refactor.rewrite", "") type writer struct{} diff --git a/internal/lsp/testdata/suggestedfix/has_suggested_fix.go b/internal/lsp/testdata/suggestedfix/has_suggested_fix.go index e06dce0a846..7ff524479b4 100644 --- a/internal/lsp/testdata/suggestedfix/has_suggested_fix.go +++ b/internal/lsp/testdata/suggestedfix/has_suggested_fix.go @@ -6,6 +6,6 @@ import ( func goodbye() { s := "hiiiiiii" - s = s //@suggestedfix("s = s", "quickfix") + s = s //@suggestedfix("s = s", "quickfix", "") log.Print(s) } diff --git a/internal/lsp/testdata/suggestedfix/has_suggested_fix.go.golden b/internal/lsp/testdata/suggestedfix/has_suggested_fix.go.golden index 9ccaa199468..e7e84fc227d 100644 --- a/internal/lsp/testdata/suggestedfix/has_suggested_fix.go.golden +++ b/internal/lsp/testdata/suggestedfix/has_suggested_fix.go.golden @@ -7,7 +7,7 @@ import ( func goodbye() { s := "hiiiiiii" - //@suggestedfix("s = s", "quickfix") + //@suggestedfix("s = s", "quickfix", "") log.Print(s) } diff --git a/internal/lsp/testdata/summary_go1.18.txt.golden b/internal/lsp/testdata/summary_go1.18.txt.golden index 9fadf634090..668d5fb82e5 100644 --- a/internal/lsp/testdata/summary_go1.18.txt.golden +++ b/internal/lsp/testdata/summary_go1.18.txt.golden @@ -13,7 +13,7 @@ FoldingRangesCount = 2 FormatCount = 6 ImportCount = 8 SemanticTokenCount = 3 -SuggestedFixCount = 64 +SuggestedFixCount = 67 FunctionExtractionCount = 25 MethodExtractionCount = 6 DefinitionsCount = 108 diff --git a/internal/lsp/testdata/typeerrors/noresultvalues.go b/internal/lsp/testdata/typeerrors/noresultvalues.go index 84234c4b93a..729e7bbccd4 100644 --- a/internal/lsp/testdata/typeerrors/noresultvalues.go +++ 
b/internal/lsp/testdata/typeerrors/noresultvalues.go @@ -1,5 +1,5 @@ package typeerrors -func x() { return nil } //@suggestedfix("nil", "quickfix") +func x() { return nil } //@suggestedfix("nil", "quickfix", "") -func y() { return nil, "hello" } //@suggestedfix("nil", "quickfix") +func y() { return nil, "hello" } //@suggestedfix("nil", "quickfix", "") diff --git a/internal/lsp/testdata/typeerrors/noresultvalues.go.golden b/internal/lsp/testdata/typeerrors/noresultvalues.go.golden index 07c54d44553..48409a0b7dd 100644 --- a/internal/lsp/testdata/typeerrors/noresultvalues.go.golden +++ b/internal/lsp/testdata/typeerrors/noresultvalues.go.golden @@ -1,14 +1,14 @@ -- suggestedfix_noresultvalues_3_19 -- package typeerrors -func x() { return } //@suggestedfix("nil", "quickfix") +func x() { return } //@suggestedfix("nil", "quickfix", "") -func y() { return nil, "hello" } //@suggestedfix("nil", "quickfix") +func y() { return nil, "hello" } //@suggestedfix("nil", "quickfix", "") -- suggestedfix_noresultvalues_5_19 -- package typeerrors -func x() { return nil } //@suggestedfix("nil", "quickfix") +func x() { return nil } //@suggestedfix("nil", "quickfix", "") -func y() { return } //@suggestedfix("nil", "quickfix") +func y() { return } //@suggestedfix("nil", "quickfix", "") diff --git a/internal/lsp/testdata/undeclared/var.go b/internal/lsp/testdata/undeclared/var.go index b5f9287d48d..e27a733d942 100644 --- a/internal/lsp/testdata/undeclared/var.go +++ b/internal/lsp/testdata/undeclared/var.go @@ -1,13 +1,13 @@ package undeclared func m() int { - z, _ := 1+y, 11 //@diag("y", "compiler", "undeclared name: y", "error"),suggestedfix("y", "quickfix") + z, _ := 1+y, 11 //@diag("y", "compiler", "undeclared name: y", "error"),suggestedfix("y", "quickfix", "") if 100 < 90 { z = 1 - } else if 100 > n+2 { //@diag("n", "compiler", "undeclared name: n", "error"),suggestedfix("n", "quickfix") + } else if 100 > n+2 { //@diag("n", "compiler", "undeclared name: n", "error"),suggestedfix("n", "quickfix", "") z = 4 } - for i < 200 { //@diag("i", "compiler", "undeclared name: i", "error"),suggestedfix("i", "quickfix") + for i < 200 { //@diag("i", "compiler", "undeclared name: i", "error"),suggestedfix("i", "quickfix", "") } r() //@diag("r", "compiler", "undeclared name: r", "error") return z diff --git a/internal/lsp/testdata/undeclared/var.go.golden b/internal/lsp/testdata/undeclared/var.go.golden index 74adbe8ffde..a266df7c0c7 100644 --- a/internal/lsp/testdata/undeclared/var.go.golden +++ b/internal/lsp/testdata/undeclared/var.go.golden @@ -2,14 +2,14 @@ package undeclared func m() int { - z, _ := 1+y, 11 //@diag("y", "compiler", "undeclared name: y", "error"),suggestedfix("y", "quickfix") + z, _ := 1+y, 11 //@diag("y", "compiler", "undeclared name: y", "error"),suggestedfix("y", "quickfix", "") if 100 < 90 { z = 1 - } else if 100 > n+2 { //@diag("n", "compiler", "undeclared name: n", "error"),suggestedfix("n", "quickfix") + } else if 100 > n+2 { //@diag("n", "compiler", "undeclared name: n", "error"),suggestedfix("n", "quickfix", "") z = 4 } i := - for i < 200 { //@diag("i", "compiler", "undeclared name: i", "error"),suggestedfix("i", "quickfix") + for i < 200 { //@diag("i", "compiler", "undeclared name: i", "error"),suggestedfix("i", "quickfix", "") } r() //@diag("r", "compiler", "undeclared name: r", "error") return z @@ -20,13 +20,13 @@ package undeclared func m() int { y := - z, _ := 1+y, 11 //@diag("y", "compiler", "undeclared name: y", "error"),suggestedfix("y", "quickfix") + z, _ := 1+y, 11 //@diag("y", 
"compiler", "undeclared name: y", "error"),suggestedfix("y", "quickfix", "") if 100 < 90 { z = 1 - } else if 100 > n+2 { //@diag("n", "compiler", "undeclared name: n", "error"),suggestedfix("n", "quickfix") + } else if 100 > n+2 { //@diag("n", "compiler", "undeclared name: n", "error"),suggestedfix("n", "quickfix", "") z = 4 } - for i < 200 { //@diag("i", "compiler", "undeclared name: i", "error"),suggestedfix("i", "quickfix") + for i < 200 { //@diag("i", "compiler", "undeclared name: i", "error"),suggestedfix("i", "quickfix", "") } r() //@diag("r", "compiler", "undeclared name: r", "error") return z @@ -36,14 +36,14 @@ func m() int { package undeclared func m() int { - z, _ := 1+y, 11 //@diag("y", "compiler", "undeclared name: y", "error"),suggestedfix("y", "quickfix") + z, _ := 1+y, 11 //@diag("y", "compiler", "undeclared name: y", "error"),suggestedfix("y", "quickfix", "") n := if 100 < 90 { z = 1 - } else if 100 > n+2 { //@diag("n", "compiler", "undeclared name: n", "error"),suggestedfix("n", "quickfix") + } else if 100 > n+2 { //@diag("n", "compiler", "undeclared name: n", "error"),suggestedfix("n", "quickfix", "") z = 4 } - for i < 200 { //@diag("i", "compiler", "undeclared name: i", "error"),suggestedfix("i", "quickfix") + for i < 200 { //@diag("i", "compiler", "undeclared name: i", "error"),suggestedfix("i", "quickfix", "") } r() //@diag("r", "compiler", "undeclared name: r", "error") return z diff --git a/internal/lsp/tests/README.md b/internal/lsp/tests/README.md index 2c18675f7e5..64ced79702e 100644 --- a/internal/lsp/tests/README.md +++ b/internal/lsp/tests/README.md @@ -11,7 +11,7 @@ file, like `internal/lsp/testdata/foo/bar.go.golden`. The former is the "input" and the latter is the expected output. Each input file contains annotations like -`//@suggestedfix("}", "refactor.rewrite")`. These annotations are interpreted by +`//@suggestedfix("}", "refactor.rewrite", "Fill anonymous struct")`. These annotations are interpreted by test runners to perform certain actions. The expected output after those actions is encoded in the golden file. 
diff --git a/internal/lsp/tests/tests.go b/internal/lsp/tests/tests.go index b60fbf03866..4c3201ab8bc 100644 --- a/internal/lsp/tests/tests.go +++ b/internal/lsp/tests/tests.go @@ -69,7 +69,7 @@ type FoldingRanges []span.Span type Formats []span.Span type Imports []span.Span type SemanticTokens []span.Span -type SuggestedFixes map[span.Span][]string +type SuggestedFixes map[span.Span][]SuggestedFix type FunctionExtractions map[span.Span]span.Span type MethodExtractions map[span.Span]span.Span type Definitions map[span.Span]Definition @@ -152,7 +152,7 @@ type Tests interface { Format(*testing.T, span.Span) Import(*testing.T, span.Span) SemanticTokens(*testing.T, span.Span) - SuggestedFix(*testing.T, span.Span, []string, int) + SuggestedFix(*testing.T, span.Span, []SuggestedFix, int) FunctionExtraction(*testing.T, span.Span, span.Span) MethodExtraction(*testing.T, span.Span, span.Span) Definition(*testing.T, span.Span, Definition) @@ -232,6 +232,10 @@ type Link struct { NotePosition token.Position } +type SuggestedFix struct { + ActionKind, Title string +} + type Golden struct { Filename string Archive *txtar.Archive @@ -1198,11 +1202,8 @@ func (data *Data) collectSemanticTokens(spn span.Span) { data.SemanticTokens = append(data.SemanticTokens, spn) } -func (data *Data) collectSuggestedFixes(spn span.Span, actionKind string) { - if _, ok := data.SuggestedFixes[spn]; !ok { - data.SuggestedFixes[spn] = []string{} - } - data.SuggestedFixes[spn] = append(data.SuggestedFixes[spn], actionKind) +func (data *Data) collectSuggestedFixes(spn span.Span, actionKind, fix string) { + data.SuggestedFixes[spn] = append(data.SuggestedFixes[spn], SuggestedFix{actionKind, fix}) } func (data *Data) collectFunctionExtractions(start span.Span, end span.Span) { From 005c07ac5af6b5cabf2a1cb1ab49993c5b3bcb4d Mon Sep 17 00:00:00 2001 From: "Hana (Hyang-Ah) Kim" Date: Thu, 30 Jun 2022 15:41:43 -0400 Subject: [PATCH 126/136] gopls/internal/vulncheck: adjust logging Report # of findings Change-Id: Ib10d18a23280a8644b9c08a8d51d6e288ae761db Reviewed-on: https://go-review.googlesource.com/c/tools/+/415496 Reviewed-by: Suzy Mueller --- gopls/internal/vulncheck/command.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/gopls/internal/vulncheck/command.go b/gopls/internal/vulncheck/command.go index a29bc008c7a..8c88cf3d51b 100644 --- a/gopls/internal/vulncheck/command.go +++ b/gopls/internal/vulncheck/command.go @@ -80,7 +80,6 @@ func (c *cmd) Run(ctx context.Context, cfg *packages.Config, patterns ...string) log.Printf("package load failed: %v", err) return nil, err } - log.Printf("loaded %d packages\n", len(loadedPkgs)) log.Printf("analyzing %d packages...\n", len(loadedPkgs)) @@ -88,9 +87,12 @@ func (c *cmd) Run(ctx context.Context, cfg *packages.Config, patterns ...string) if err != nil { return nil, err } + + log.Printf("selecting affecting vulnerabilities from %d findings...\n", len(r.Vulns)) unaffectedMods := filterUnaffected(r.Vulns) r.Vulns = filterCalled(r) + log.Printf("found %d vulnerabilities.\n", len(r.Vulns)) callInfo := gvc.GetCallInfo(r, loadedPkgs) return toVulns(callInfo, unaffectedMods) // TODO: add import graphs. 
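Note on the logging change in the vulncheck patch above: the two added log lines bracket the filtering step, reporting the raw finding count before unaffected modules and uncalled symbols are dropped, and the remaining count afterwards. The following is a minimal, self-contained sketch of that before/after reporting pattern only; the `finding` type, the sample GO-2022-xxxx IDs, and the simplified `filterCalled` helper are illustrative stand-ins, not the real gopls/internal/vulncheck API.

```go
package main

import "log"

// finding is a simplified stand-in for one vulnerability finding.
type finding struct {
	id     string
	called bool // whether a vulnerable symbol is reachable from the analyzed code
}

// filterCalled keeps only findings whose vulnerable symbol is actually called,
// mirroring the role the patch gives to its filtering step.
func filterCalled(findings []finding) []finding {
	var affecting []finding
	for _, f := range findings {
		if f.called {
			affecting = append(affecting, f)
		}
	}
	return affecting
}

func main() {
	findings := []finding{
		{id: "GO-2022-0001", called: true},
		{id: "GO-2022-0002", called: false},
		{id: "GO-2022-0003", called: true},
	}

	// Log the raw count, filter, then log how many vulnerabilities remain.
	log.Printf("selecting affecting vulnerabilities from %d findings...\n", len(findings))
	vulns := filterCalled(findings)
	log.Printf("found %d vulnerabilities.\n", len(vulns))
}
```

Running the sketch prints a count of 3 findings followed by 2 vulnerabilities, matching the shape of the output the patched command produces.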
From 4375b29f44fb3bfb45abe6d2e1de68c34fcd9ec5 Mon Sep 17 00:00:00 2001 From: Abirdcfly Date: Thu, 21 Jul 2022 16:54:42 +0000 Subject: [PATCH 127/136] cmd/auth/cookieauth: delete unreachable os.Exit Change-Id: I5c60eeb8667423544b2bc8b9cf5f51279b0a941d GitHub-Last-Rev: cb0eca5ff34755a0185729e5c81bf08eb92fab39 GitHub-Pull-Request: golang/tools#388 Reviewed-on: https://go-review.googlesource.com/c/tools/+/418854 TryBot-Result: Gopher Robot Run-TryBot: Bryan Mills Reviewed-by: Bryan Mills Auto-Submit: Bryan Mills gopls-CI: kokoro Reviewed-by: Cherry Mui --- cmd/auth/cookieauth/cookieauth.go | 1 - 1 file changed, 1 deletion(-) diff --git a/cmd/auth/cookieauth/cookieauth.go b/cmd/auth/cookieauth/cookieauth.go index feefaff0b6e..8b0ff17664b 100644 --- a/cmd/auth/cookieauth/cookieauth.go +++ b/cmd/auth/cookieauth/cookieauth.go @@ -40,7 +40,6 @@ func main() { f, err := os.Open(os.Args[1]) if err != nil { log.Fatalf("failed to read cookie file: %v\n", os.Args[1]) - os.Exit(1) } defer f.Close() From 2a6393fe54b36af368a3d319f92cd3b1efee7741 Mon Sep 17 00:00:00 2001 From: Dylan Le Date: Thu, 21 Jul 2022 14:30:30 -0400 Subject: [PATCH 128/136] internal/lsp: Refactor to share logic with rename If a package is being operated on, getPackage returns that package information; otherwise nil. Change-Id: I881056510b8d6862c274a7532fdfbc840c938468 Reviewed-on: https://go-review.googlesource.com/c/tools/+/418791 TryBot-Result: Gopher Robot gopls-CI: kokoro Run-TryBot: Dylan Le Reviewed-by: Robert Findley --- internal/lsp/source/references.go | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/internal/lsp/source/references.go b/internal/lsp/source/references.go index a1643fbec6c..2bbdc0741ca 100644 --- a/internal/lsp/source/references.go +++ b/internal/lsp/source/references.go @@ -18,7 +18,6 @@ import ( "golang.org/x/tools/internal/event" "golang.org/x/tools/internal/lsp/bug" "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/safetoken" "golang.org/x/tools/internal/span" ) @@ -32,6 +31,18 @@ type ReferenceInfo struct { isDeclaration bool } +// isInPackageName reports whether the file's package name surrounds the +// given position pp (e.g. "foo" surrounds the cursor in "package foo"). +func isInPackageName(ctx context.Context, s Snapshot, f FileHandle, pgf *ParsedGoFile, pp protocol.Position) (bool, error) { + // Find position of the package name declaration + cursorPos, err := pgf.Mapper.Pos(pp) + if err != nil { + return false, err + } + + return pgf.File.Name.Pos() <= cursorPos && cursorPos <= pgf.File.Name.End(), nil +} + // References returns a list of references for a given identifier within the packages // containing i.File. Declarations appear first in the result. 
func References(ctx context.Context, s Snapshot, f FileHandle, pp protocol.Position, includeDeclaration bool) ([]*ReferenceInfo, error) { @@ -44,23 +55,13 @@ func References(ctx context.Context, s Snapshot, f FileHandle, pp protocol.Posit return nil, err } - cursorOffset, err := pgf.Mapper.Offset(pp) - if err != nil { - return nil, err - } - packageName := pgf.File.Name.Name // from package decl - packageNameStart, err := safetoken.Offset(pgf.Tok, pgf.File.Name.Pos()) - if err != nil { - return nil, err - } - - packageNameEnd, err := safetoken.Offset(pgf.Tok, pgf.File.Name.End()) + inPackageName, err := isInPackageName(ctx, s, f, pgf, pp) if err != nil { return nil, err } - if packageNameStart <= cursorOffset && cursorOffset < packageNameEnd { + if inPackageName { renamingPkg, err := s.PackageForFile(ctx, f.URI(), TypecheckAll, NarrowestPackage) if err != nil { return nil, err From 76004542dc1955c2c789d856b2e2ded6002412cf Mon Sep 17 00:00:00 2001 From: Robert Findley Date: Mon, 25 Jul 2022 16:09:28 -0400 Subject: [PATCH 129/136] gopls: update dependencies Update all dependencies, except sergi/go-diff. Also tidy x/tools with -compat=1.16, as it had recently been broken at 1.16. Change-Id: I2e6c9bf48c6bedb2dff0fa418bf588dd07918866 Reviewed-on: https://go-review.googlesource.com/c/tools/+/419494 gopls-CI: kokoro Run-TryBot: Robert Findley TryBot-Result: Gopher Robot Reviewed-by: Hyang-Ah Hana Kim --- go.sum | 16 ++++++++++++++++ gopls/go.mod | 17 ++++++++--------- gopls/go.sum | 48 +++++++++++++++++++++++++++--------------------- 3 files changed, 51 insertions(+), 30 deletions(-) diff --git a/go.sum b/go.sum index efeb68a0ec2..7498b1467ed 100644 --- a/go.sum +++ b/go.sum @@ -1,12 +1,28 @@ github.com/yuin/goldmark v1.4.1 h1:/vn0k+RBvwlxEmP5E7SZMqNxPhfMVFEJiykr15/0XKM= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f h1:OfiFi4JbukWwe3lzw+xunroH1mnC1e2Gy5cxNJApiSY= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a 
h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/gopls/go.mod b/gopls/go.mod index 3e89620b7f6..006bfe23224 100644 --- a/gopls/go.mod +++ b/gopls/go.mod @@ -3,26 +3,25 @@ module golang.org/x/tools/gopls go 1.18 require ( - github.com/google/go-cmp v0.5.7 + github.com/google/go-cmp v0.5.8 github.com/jba/printsrc v0.2.2 github.com/jba/templatecheck v0.6.0 github.com/sergi/go-diff v1.1.0 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 - golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a - golang.org/x/tools v0.1.11-0.20220523181440-ccb10502d1a5 - golang.org/x/vuln v0.0.0-20220718121659-b9a3ad919698 + golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f + golang.org/x/tools v0.1.11 + golang.org/x/vuln v0.0.0-20220725105440-4151a5aca1df honnef.co/go/tools v0.3.2 - mvdan.cc/gofumpt v0.3.0 + mvdan.cc/gofumpt v0.3.1 mvdan.cc/xurls/v2 v2.4.0 ) require ( - github.com/BurntSushi/toml v1.0.0 // indirect + github.com/BurntSushi/toml v1.2.0 // indirect github.com/google/safehtml v0.0.2 // indirect - golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e // indirect - golang.org/x/sync v0.0.0-20210220032951-036812b2e83c + golang.org/x/exp/typeparams v0.0.0-20220722155223-a9213eeb770e // indirect + golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 golang.org/x/text v0.3.7 // indirect - golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect ) replace golang.org/x/tools => ../ diff --git a/gopls/go.sum b/gopls/go.sum index 4ee977a8c89..cced7df8f56 100644 --- a/gopls/go.sum +++ b/gopls/go.sum @@ -1,14 +1,22 @@ -github.com/BurntSushi/toml v1.0.0 h1:dtDWrepsVPfW9H/4y7dDgFc2MBUSeJhlaDtK13CxFlU= -github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/toml v1.2.0 h1:Rt8g24XnyGTyglgET/PRUNlrUeu9F5L+7FilkXfZgs0= +github.com/BurntSushi/toml v1.2.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 
github.com/frankban/quicktest v1.14.2 h1:SPb1KFFmM+ybpEjPUhCCkZOM5xlovT5UbrMvWnXyBns= github.com/frankban/quicktest v1.14.2/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= -github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= +github.com/google/go-cmdtest v0.4.0/go.mod h1:apVn/GCasLZUVpAJ6oWAuyP7Ne7CEsQbTnc0plM3m+o= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/safehtml v0.0.2 h1:ZOt2VXg4x24bW0m2jtzAOkhoXV0iM8vNKc0paByCZqM= github.com/google/safehtml v0.0.2/go.mod h1:L4KWwDsUJdECRAEpZoBn3O64bQaywRscowZjJAzjHnU= github.com/jba/printsrc v0.2.2 h1:9OHK51UT+/iMAEBlQIIXW04qvKyF3/vvLuwW/hL8tDU= @@ -34,40 +42,36 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e h1:qyrTQ++p1afMkO4DPEeLGq/3oTsdlvdH4vqZUBWzUKM= golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= -golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/exp/typeparams v0.0.0-20220722155223-a9213eeb770e h1:7Xs2YCOpMlNqSQSmrrnhlzBXIE/bpMecZplbLePTJvE= +golang.org/x/exp/typeparams v0.0.0-20220722155223-a9213eeb770e/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw= 
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211213223007-03aa0b5f6827/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/vuln v0.0.0-20220613164644-4eb5ba49563c h1:r5bbIROBQtRRgoutV8Q3sFY58VzHW6jMBYl48ANSyS4= -golang.org/x/vuln v0.0.0-20220613164644-4eb5ba49563c/go.mod h1:UZshlUPxXeGUM9I14UOawXQg6yosDE9cr1vKY/DzgWo= -golang.org/x/vuln v0.0.0-20220718121659-b9a3ad919698 h1:9lgpkUgjzoIcZYp7/UPFO/0jIlYcokcEjqWm0hj9pzE= -golang.org/x/vuln v0.0.0-20220718121659-b9a3ad919698/go.mod h1:UZshlUPxXeGUM9I14UOawXQg6yosDE9cr1vKY/DzgWo= +golang.org/x/vuln v0.0.0-20220725105440-4151a5aca1df h1:BkeW9/QJhcigekDUPS9N9bIb0v7gPKKmLYeczVAqr2s= +golang.org/x/vuln v0.0.0-20220725105440-4151a5aca1df/go.mod h1:UZshlUPxXeGUM9I14UOawXQg6yosDE9cr1vKY/DzgWo= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -76,10 +80,12 @@ gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.2.2/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= honnef.co/go/tools v0.3.2 h1:ytYb4rOqyp1TSa2EPvNVwtPQJctSELKaMyLfqNP4+34= 
honnef.co/go/tools v0.3.2/go.mod h1:jzwdWgg7Jdq75wlfblQxO4neNaFFSvgc1tD5Wv8U0Yw= -mvdan.cc/gofumpt v0.3.0 h1:kTojdZo9AcEYbQYhGuLf/zszYthRdhDNDUi2JKTxas4= -mvdan.cc/gofumpt v0.3.0/go.mod h1:0+VyGZWleeIj5oostkOex+nDBA0eyavuDnDusAJ8ylo= +mvdan.cc/gofumpt v0.3.1 h1:avhhrOmv0IuvQVK7fvwV91oFSGAk5/6Po8GXTzICeu8= +mvdan.cc/gofumpt v0.3.1/go.mod h1:w3ymliuxvzVx8DAutBnVyDqYb1Niy/yCJt/lk821YCE= mvdan.cc/unparam v0.0.0-20211214103731-d0ef000c54e5 h1:Jh3LAeMt1eGpxomyu3jVkmVZWW2MxZ1qIIV2TZ/nRio= +mvdan.cc/unparam v0.0.0-20211214103731-d0ef000c54e5/go.mod h1:b8RRCBm0eeiWR8cfN88xeq2G5SG3VKGO+5UPWi5FSOY= mvdan.cc/xurls/v2 v2.4.0 h1:tzxjVAj+wSBmDcF6zBB7/myTy3gX9xvi8Tyr28AuQgc= mvdan.cc/xurls/v2 v2.4.0/go.mod h1:+GEjq9uNjqs8LQfM9nVnM8rff0OQ5Iash5rzX+N1CSg= From 8b47d4e187e3f98d5c096495bcea329f33e99055 Mon Sep 17 00:00:00 2001 From: Robert Findley Date: Mon, 25 Jul 2022 16:11:43 -0400 Subject: [PATCH 130/136] all: update dependencies Update all x/tools dependencies. Change-Id: I0a81f9821a9267bef9057f294efc3ac1c13b59ad Reviewed-on: https://go-review.googlesource.com/c/tools/+/419495 Reviewed-by: Hyang-Ah Hana Kim gopls-CI: kokoro Run-TryBot: Robert Findley TryBot-Result: Gopher Robot --- go.mod | 8 ++++---- go.sum | 18 +++++++++--------- gopls/go.sum | 7 +++---- 3 files changed, 16 insertions(+), 17 deletions(-) diff --git a/go.mod b/go.mod index f05aba2b64a..272a6d2eaa7 100644 --- a/go.mod +++ b/go.mod @@ -3,10 +3,10 @@ module golang.org/x/tools go 1.18 require ( - github.com/yuin/goldmark v1.4.1 + github.com/yuin/goldmark v1.4.13 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 - golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f - golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a + golang.org/x/net v0.0.0-20220722155237-a158d28d115b + golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 + golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f golang.org/x/text v0.3.7 ) diff --git a/go.sum b/go.sum index 7498b1467ed..f603000deb2 100644 --- a/go.sum +++ b/go.sum @@ -1,26 +1,26 @@ -github.com/yuin/goldmark v1.4.1 h1:/vn0k+RBvwlxEmP5E7SZMqNxPhfMVFEJiykr15/0XKM= -github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f h1:OfiFi4JbukWwe3lzw+xunroH1mnC1e2Gy5cxNJApiSY= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b h1:PxfKdU9lEEDYjdIzOtC4qFWgkU2rGHdKlKowJSMN9h0= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/gopls/go.sum b/gopls/go.sum index cced7df8f56..ecd3f4dd363 100644 --- a/gopls/go.sum +++ b/gopls/go.sum @@ -41,7 +41,7 @@ github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNX github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/exp/typeparams v0.0.0-20220722155223-a9213eeb770e h1:7Xs2YCOpMlNqSQSmrrnhlzBXIE/bpMecZplbLePTJvE= @@ -50,12 +50,11 @@ golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod 
h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211213223007-03aa0b5f6827/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -64,8 +63,8 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/vuln v0.0.0-20220725105440-4151a5aca1df h1:BkeW9/QJhcigekDUPS9N9bIb0v7gPKKmLYeczVAqr2s= From c83f42da700064452a622c90d97093e13e8d236b Mon Sep 17 00:00:00 2001 From: Suzy Mueller Date: Tue, 26 Jul 2022 12:16:16 -0400 Subject: [PATCH 131/136] internal/lsp: update inlay hints documentation to include go snippets Update the markdown documentation for inlay hints and fix a couple of typos in the examples. Change-Id: I114502a81999bc5e4f25384ab619888f3e31a731 Reviewed-on: https://go-review.googlesource.com/c/tools/+/419496 Reviewed-by: Hyang-Ah Hana Kim gopls-CI: kokoro Run-TryBot: Suzy Mueller TryBot-Result: Gopher Robot --- gopls/doc/inlayHints.md | 27 ++++++++++------ internal/lsp/source/api_json.go | 28 ++++++++-------- internal/lsp/source/inlay_hint.go | 53 ++++++++----------------------- 3 files changed, 45 insertions(+), 63 deletions(-) diff --git a/gopls/doc/inlayHints.md b/gopls/doc/inlayHints.md index 15957b52ede..2ae9a2828af 100644 --- a/gopls/doc/inlayHints.md +++ b/gopls/doc/inlayHints.md @@ -6,67 +6,74 @@ This document describes the inlay hints that `gopls` uses inside the editor. ## **assignVariableTypes** Enable/disable inlay hints for variable types in assign statements: - - i/* int/*, j/* int/* := 0, len(r)-1 +```go + i/* int*/, j/* int*/ := 0, len(r)-1 +``` **Disabled by default. Enable it by setting `"hints": {"assignVariableTypes": true}`.** ## **compositeLiteralFields** Enable/disable inlay hints for composite literal field names: - - {in: "Hello, world", want: "dlrow ,olleH"} +```go + {/*in: */"Hello, world", /*want: */"dlrow ,olleH"} +``` **Disabled by default. 
Enable it by setting `"hints": {"compositeLiteralFields": true}`.** ## **compositeLiteralTypes** Enable/disable inlay hints for composite literal types: - +```go for _, c := range []struct { in, want string }{ /*struct{ in string; want string }*/{"Hello, world", "dlrow ,olleH"}, } +``` **Disabled by default. Enable it by setting `"hints": {"compositeLiteralTypes": true}`.** ## **constantValues** Enable/disable inlay hints for constant values: - +```go const ( KindNone Kind = iota/* = 0*/ KindPrint/* = 1*/ KindPrintf/* = 2*/ KindErrorf/* = 3*/ ) +``` **Disabled by default. Enable it by setting `"hints": {"constantValues": true}`.** ## **functionTypeParameters** Enable/disable inlay hints for implicit type parameters on generic functions: - +```go myFoo/*[int, string]*/(1, "hello") +``` **Disabled by default. Enable it by setting `"hints": {"functionTypeParameters": true}`.** ## **parameterNames** Enable/disable inlay hints for parameter names: - +```go parseInt(/* str: */ "123", /* radix: */ 8) +``` **Disabled by default. Enable it by setting `"hints": {"parameterNames": true}`.** ## **rangeVariableTypes** Enable/disable inlay hints for variable types in range statements: - - for k/* int*/, v/* string/* := range []string{} { +```go + for k/* int*/, v/* string*/ := range []string{} { fmt.Println(k, v) } +``` **Disabled by default. Enable it by setting `"hints": {"rangeVariableTypes": true}`.** diff --git a/internal/lsp/source/api_json.go b/internal/lsp/source/api_json.go index 2493da25d8c..94d0f12b4a8 100755 --- a/internal/lsp/source/api_json.go +++ b/internal/lsp/source/api_json.go @@ -517,37 +517,37 @@ var GeneratedAPIJSON = &APIJSON{ EnumKeys: EnumKeys{Keys: []EnumKey{ { Name: "\"assignVariableTypes\"", - Doc: "Enable/disable inlay hints for variable types in assign statements:\n\n\ti/* int/*, j/* int/* := 0, len(r)-1", + Doc: "Enable/disable inlay hints for variable types in assign statements:\n```go\n\ti/* int*/, j/* int*/ := 0, len(r)-1\n```", Default: "false", }, { Name: "\"compositeLiteralFields\"", - Doc: "Enable/disable inlay hints for composite literal field names:\n\n\t{in: \"Hello, world\", want: \"dlrow ,olleH\"}", + Doc: "Enable/disable inlay hints for composite literal field names:\n```go\n\t{/*in: */\"Hello, world\", /*want: */\"dlrow ,olleH\"}\n```", Default: "false", }, { Name: "\"compositeLiteralTypes\"", - Doc: "Enable/disable inlay hints for composite literal types:\n\n\tfor _, c := range []struct {\n\t\tin, want string\n\t}{\n\t\t/*struct{ in string; want string }*/{\"Hello, world\", \"dlrow ,olleH\"},\n\t}", + Doc: "Enable/disable inlay hints for composite literal types:\n```go\n\tfor _, c := range []struct {\n\t\tin, want string\n\t}{\n\t\t/*struct{ in string; want string }*/{\"Hello, world\", \"dlrow ,olleH\"},\n\t}\n```", Default: "false", }, { Name: "\"constantValues\"", - Doc: "Enable/disable inlay hints for constant values:\n\n\tconst (\n\t\tKindNone Kind = iota/* = 0*/\n\t\tKindPrint/* = 1*/\n\t\tKindPrintf/* = 2*/\n\t\tKindErrorf/* = 3*/\n\t)", + Doc: "Enable/disable inlay hints for constant values:\n```go\n\tconst (\n\t\tKindNone Kind = iota/* = 0*/\n\t\tKindPrint/* = 1*/\n\t\tKindPrintf/* = 2*/\n\t\tKindErrorf/* = 3*/\n\t)\n```", Default: "false", }, { Name: "\"functionTypeParameters\"", - Doc: "Enable/disable inlay hints for implicit type parameters on generic functions:\n\n\tmyFoo/*[int, string]*/(1, \"hello\")", + Doc: "Enable/disable inlay hints for implicit type parameters on generic functions:\n```go\n\tmyFoo/*[int, string]*/(1, \"hello\")\n```", 
Default: "false", }, { Name: "\"parameterNames\"", - Doc: "Enable/disable inlay hints for parameter names:\n\n\tparseInt(/* str: */ \"123\", /* radix: */ 8)", + Doc: "Enable/disable inlay hints for parameter names:\n```go\n\tparseInt(/* str: */ \"123\", /* radix: */ 8)\n```", Default: "false", }, { Name: "\"rangeVariableTypes\"", - Doc: "Enable/disable inlay hints for variable types in range statements:\n\n\tfor k/* int*/, v/* string/* := range []string{} {\n\t\tfmt.Println(k, v)\n\t}", + Doc: "Enable/disable inlay hints for variable types in range statements:\n```go\n\tfor k/* int*/, v/* string*/ := range []string{} {\n\t\tfmt.Println(k, v)\n\t}\n```", Default: "false", }, }}, @@ -1036,31 +1036,31 @@ var GeneratedAPIJSON = &APIJSON{ Hints: []*HintJSON{ { Name: "assignVariableTypes", - Doc: "Enable/disable inlay hints for variable types in assign statements:\n\n\ti/* int/*, j/* int/* := 0, len(r)-1", + Doc: "Enable/disable inlay hints for variable types in assign statements:\n```go\n\ti/* int*/, j/* int*/ := 0, len(r)-1\n```", }, { Name: "compositeLiteralFields", - Doc: "Enable/disable inlay hints for composite literal field names:\n\n\t{in: \"Hello, world\", want: \"dlrow ,olleH\"}", + Doc: "Enable/disable inlay hints for composite literal field names:\n```go\n\t{/*in: */\"Hello, world\", /*want: */\"dlrow ,olleH\"}\n```", }, { Name: "compositeLiteralTypes", - Doc: "Enable/disable inlay hints for composite literal types:\n\n\tfor _, c := range []struct {\n\t\tin, want string\n\t}{\n\t\t/*struct{ in string; want string }*/{\"Hello, world\", \"dlrow ,olleH\"},\n\t}", + Doc: "Enable/disable inlay hints for composite literal types:\n```go\n\tfor _, c := range []struct {\n\t\tin, want string\n\t}{\n\t\t/*struct{ in string; want string }*/{\"Hello, world\", \"dlrow ,olleH\"},\n\t}\n```", }, { Name: "constantValues", - Doc: "Enable/disable inlay hints for constant values:\n\n\tconst (\n\t\tKindNone Kind = iota/* = 0*/\n\t\tKindPrint/* = 1*/\n\t\tKindPrintf/* = 2*/\n\t\tKindErrorf/* = 3*/\n\t)", + Doc: "Enable/disable inlay hints for constant values:\n```go\n\tconst (\n\t\tKindNone Kind = iota/* = 0*/\n\t\tKindPrint/* = 1*/\n\t\tKindPrintf/* = 2*/\n\t\tKindErrorf/* = 3*/\n\t)\n```", }, { Name: "functionTypeParameters", - Doc: "Enable/disable inlay hints for implicit type parameters on generic functions:\n\n\tmyFoo/*[int, string]*/(1, \"hello\")", + Doc: "Enable/disable inlay hints for implicit type parameters on generic functions:\n```go\n\tmyFoo/*[int, string]*/(1, \"hello\")\n```", }, { Name: "parameterNames", - Doc: "Enable/disable inlay hints for parameter names:\n\n\tparseInt(/* str: */ \"123\", /* radix: */ 8)", + Doc: "Enable/disable inlay hints for parameter names:\n```go\n\tparseInt(/* str: */ \"123\", /* radix: */ 8)\n```", }, { Name: "rangeVariableTypes", - Doc: "Enable/disable inlay hints for variable types in range statements:\n\n\tfor k/* int*/, v/* string/* := range []string{} {\n\t\tfmt.Println(k, v)\n\t}", + Doc: "Enable/disable inlay hints for variable types in range statements:\n```go\n\tfor k/* int*/, v/* string*/ := range []string{} {\n\t\tfmt.Println(k, v)\n\t}\n```", }, }, } diff --git a/internal/lsp/source/inlay_hint.go b/internal/lsp/source/inlay_hint.go index 967752b5c51..6ca51930a7f 100644 --- a/internal/lsp/source/inlay_hint.go +++ b/internal/lsp/source/inlay_hint.go @@ -44,63 +44,38 @@ const ( var AllInlayHints = map[string]*Hint{ AssignVariableTypes: { Name: AssignVariableTypes, - Doc: `Enable/disable inlay hints for variable types in assign statements: - - i/* 
int/*, j/* int/* := 0, len(r)-1`, - Run: assignVariableTypes, + Doc: "Enable/disable inlay hints for variable types in assign statements:\n```go\n\ti/* int*/, j/* int*/ := 0, len(r)-1\n```", + Run: assignVariableTypes, }, ParameterNames: { Name: ParameterNames, - Doc: `Enable/disable inlay hints for parameter names: - - parseInt(/* str: */ "123", /* radix: */ 8)`, - Run: parameterNames, + Doc: "Enable/disable inlay hints for parameter names:\n```go\n\tparseInt(/* str: */ \"123\", /* radix: */ 8)\n```", + Run: parameterNames, }, ConstantValues: { Name: ConstantValues, - Doc: `Enable/disable inlay hints for constant values: - - const ( - KindNone Kind = iota/* = 0*/ - KindPrint/* = 1*/ - KindPrintf/* = 2*/ - KindErrorf/* = 3*/ - )`, - Run: constantValues, + Doc: "Enable/disable inlay hints for constant values:\n```go\n\tconst (\n\t\tKindNone Kind = iota/* = 0*/\n\t\tKindPrint/* = 1*/\n\t\tKindPrintf/* = 2*/\n\t\tKindErrorf/* = 3*/\n\t)\n```", + Run: constantValues, }, RangeVariableTypes: { Name: RangeVariableTypes, - Doc: `Enable/disable inlay hints for variable types in range statements: - - for k/* int*/, v/* string/* := range []string{} { - fmt.Println(k, v) - }`, - Run: rangeVariableTypes, + Doc: "Enable/disable inlay hints for variable types in range statements:\n```go\n\tfor k/* int*/, v/* string*/ := range []string{} {\n\t\tfmt.Println(k, v)\n\t}\n```", + Run: rangeVariableTypes, }, CompositeLiteralTypes: { Name: CompositeLiteralTypes, - Doc: `Enable/disable inlay hints for composite literal types: - - for _, c := range []struct { - in, want string - }{ - /*struct{ in string; want string }*/{"Hello, world", "dlrow ,olleH"}, - }`, - Run: compositeLiteralTypes, + Doc: "Enable/disable inlay hints for composite literal types:\n```go\n\tfor _, c := range []struct {\n\t\tin, want string\n\t}{\n\t\t/*struct{ in string; want string }*/{\"Hello, world\", \"dlrow ,olleH\"},\n\t}\n```", + Run: compositeLiteralTypes, }, CompositeLiteralFieldNames: { Name: CompositeLiteralFieldNames, - Doc: `Enable/disable inlay hints for composite literal field names: - - {in: "Hello, world", want: "dlrow ,olleH"}`, - Run: compositeLiteralFields, + Doc: "Enable/disable inlay hints for composite literal field names:\n```go\n\t{/*in: */\"Hello, world\", /*want: */\"dlrow ,olleH\"}\n```", + Run: compositeLiteralFields, }, FunctionTypeParameters: { Name: FunctionTypeParameters, - Doc: `Enable/disable inlay hints for implicit type parameters on generic functions: - - myFoo/*[int, string]*/(1, "hello")`, - Run: funcTypeParams, + Doc: "Enable/disable inlay hints for implicit type parameters on generic functions:\n```go\n\tmyFoo/*[int, string]*/(1, \"hello\")\n```", + Run: funcTypeParams, }, } From 6c8a6c40933532ebe1271a3a5c8ff53bfdb8f1d5 Mon Sep 17 00:00:00 2001 From: Suzy Mueller Date: Tue, 26 Jul 2022 13:16:49 -0400 Subject: [PATCH 132/136] internal/lsp: suppress parameter hint when argument matches parameter Suppress the parameter hint when it would present redundant information. 
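By way of illustration only (this snippet is not part of the CL; parseInt is a stand-in mirroring the example already used in the parameterNames documentation), the user-visible effect is roughly the following:

```go
package main

import "strconv"

// parseInt mirrors the example from the parameterNames documentation.
func parseInt(str string, radix int) int64 {
	n, _ := strconv.ParseInt(str, radix, 64)
	return n
}

func main() {
	// With "parameterNames" inlay hints enabled, gopls annotates arguments:
	//     parseInt(/* str: */ "123", /* radix: */ 8)
	parseInt("123", 8)

	// After this change, an identifier argument whose name matches the
	// parameter name is no longer annotated, since the hint would only
	// repeat what the argument already says:
	str, radix := "123", 8
	parseInt(str, radix) // no hints
	parseInt(str, 8)     // only the literal 8 gets a "radix:" hint
}
```
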
Fixes golang/go#2361 Change-Id: I4340a903046f212f8a035eab847da665e2692f1a Reviewed-on: https://go-review.googlesource.com/c/tools/+/419497 gopls-CI: kokoro TryBot-Result: Gopher Robot Run-TryBot: Suzy Mueller Reviewed-by: Robert Findley --- internal/lsp/source/inlay_hint.go | 15 +++++++++++---- .../lsp/testdata/inlay_hint/parameter_names.go | 5 +++++ .../testdata/inlay_hint/parameter_names.go.golden | 5 +++++ 3 files changed, 21 insertions(+), 4 deletions(-) diff --git a/internal/lsp/source/inlay_hint.go b/internal/lsp/source/inlay_hint.go index 6ca51930a7f..4fb1cfb44f8 100644 --- a/internal/lsp/source/inlay_hint.go +++ b/internal/lsp/source/inlay_hint.go @@ -154,17 +154,24 @@ func parameterNames(node ast.Node, tmap *lsppos.TokenMapper, info *types.Info, _ if i > params.Len()-1 { break } - value := params.At(i).Name() + param := params.At(i) // param.Name is empty for built-ins like append - if value == "" { + if param.Name() == "" { continue } + // Skip the parameter name hint if the arg matches the + // the parameter name. + if i, ok := v.(*ast.Ident); ok && i.Name == param.Name() { + continue + } + + label := param.Name() if signature.Variadic() && i == params.Len()-1 { - value = value + "..." + label = label + "..." } hints = append(hints, protocol.InlayHint{ Position: &start, - Label: buildLabel(value + ":"), + Label: buildLabel(label + ":"), Kind: protocol.Parameter, PaddingRight: true, }) diff --git a/internal/lsp/testdata/inlay_hint/parameter_names.go b/internal/lsp/testdata/inlay_hint/parameter_names.go index 6fba23530aa..0d930e5d426 100644 --- a/internal/lsp/testdata/inlay_hint/parameter_names.go +++ b/internal/lsp/testdata/inlay_hint/parameter_names.go @@ -42,4 +42,9 @@ func foobar() { kipp("a", "b", "c") plex("a", "b", "c") tars("a", "b", "c") + foo, bar, baz := "a", "b", "c" + kipp(foo, bar, baz) + plex("a", bar, baz) + tars(foo+foo, (bar), "c") + } diff --git a/internal/lsp/testdata/inlay_hint/parameter_names.go.golden b/internal/lsp/testdata/inlay_hint/parameter_names.go.golden index 46d3ea4e9bf..4e93a4f9268 100644 --- a/internal/lsp/testdata/inlay_hint/parameter_names.go.golden +++ b/internal/lsp/testdata/inlay_hint/parameter_names.go.golden @@ -43,5 +43,10 @@ func foobar() { kipp("a", "b", "c") plex("a", "b", "c") tars("a", "b", "c") + foo< string>, bar< string>, baz< string> := "a", "b", "c" + kipp(foo, bar, baz) + plex("a", bar, baz) + tars(foo+foo, (bar), "c") + } From 8ccb25c9a3d7c598b204d612763dc0192a01952c Mon Sep 17 00:00:00 2001 From: "Hana (Hyang-Ah) Kim" Date: Tue, 26 Jul 2022 14:55:33 -0400 Subject: [PATCH 133/136] internal/lsp: treat struct tags as string type For golang/go#54066 Change-Id: Ia4f0bf0b4d76743a7f4fafc375859db7184753fb Reviewed-on: https://go-review.googlesource.com/c/tools/+/419498 Reviewed-by: Peter Weinberger TryBot-Result: Gopher Robot gopls-CI: kokoro Run-TryBot: Hyang-Ah Hana Kim --- internal/lsp/semantic.go | 5 ----- internal/lsp/testdata/semantic/a.go.golden | 2 +- 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/internal/lsp/semantic.go b/internal/lsp/semantic.go index 429dc0660b2..f0c4a11a4e9 100644 --- a/internal/lsp/semantic.go +++ b/internal/lsp/semantic.go @@ -299,11 +299,6 @@ func (e *encoded) inspector(n ast.Node) bool { what := tokNumber if x.Kind == token.STRING { what = tokString - if _, ok := e.stack[len(e.stack)-2].(*ast.Field); ok { - // struct tags (this is probably pointless, as the - // TextMate grammar will treat all the other comments the same) - what = tokComment - } } e.token(x.Pos(), ln, what, nil) case 
*ast.BinaryExpr: diff --git a/internal/lsp/testdata/semantic/a.go.golden b/internal/lsp/testdata/semantic/a.go.golden index 19dd412407d..071dd171c84 100644 --- a/internal/lsp/testdata/semantic/a.go.golden +++ b/internal/lsp/testdata/semantic/a.go.golden @@ -27,7 +27,7 @@ ) /*⇒4,keyword,[]*/type /*⇒1,type,[definition]*/A /*⇒6,keyword,[]*/struct { - /*⇒1,variable,[definition]*/X /*⇒3,type,[defaultLibrary]*/int /*⇒6,comment,[]*/`foof` + /*⇒1,variable,[definition]*/X /*⇒3,type,[defaultLibrary]*/int /*⇒6,string,[]*/`foof` } /*⇒4,keyword,[]*/type /*⇒1,type,[definition]*/B /*⇒9,keyword,[]*/interface { /*⇒1,type,[]*/A From f157068c1bcac39caec6a43bfd495c40845cf542 Mon Sep 17 00:00:00 2001 From: Robert Findley Date: Fri, 15 Jul 2022 14:51:24 -0400 Subject: [PATCH 134/136] internal/lsp/regtest: allow sharing memoized results across regtests Each regtest does a significant amount of extra work re-doing things like parsing and type-checking the runtime package. We can share this work across regtests by using a shared cache, significantly speeding them up at the cost of potentially hiding bugs related to timing. Sharing this work still retains most of the benefit of the regtests, so implement this in the default mode (formerly called "singleton" and now renamed to "default"). In a subsequent CL, modes will be cleaned up so that "default" is the only mode that runs with -short. Making this change actually revealed a caching bug: our cached package stores error messages extracted from go/packages errors, but does not include these errors in the cache key. Fix this by hashing all metadata errors into the package cache key. Updates golang/go#39384 Change-Id: I37ab9604149d34c9a79fc02b0e1bc23fcb17c454 Reviewed-on: https://go-review.googlesource.com/c/tools/+/417587 TryBot-Result: Gopher Robot gopls-CI: kokoro Run-TryBot: Robert Findley Reviewed-by: Bryan Mills --- gopls/internal/regtest/bench/bench_test.go | 2 +- gopls/internal/regtest/debug/debug_test.go | 2 +- .../regtest/diagnostics/diagnostics_test.go | 14 +- .../regtest/misc/semantictokens_test.go | 2 +- gopls/internal/regtest/misc/vendor_test.go | 2 +- .../internal/regtest/modfile/modfile_test.go | 8 +- .../regtest/workspace/workspace_test.go | 4 +- gopls/internal/vulncheck/command_test.go | 2 +- internal/lsp/cache/cache.go | 33 ++++- internal/lsp/cache/check.go | 10 ++ internal/lsp/cache/session.go | 2 +- internal/lsp/cmd/capabilities_test.go | 2 +- internal/lsp/cmd/cmd.go | 2 +- internal/lsp/cmd/serve.go | 2 +- internal/lsp/cmd/test/cmdtest.go | 2 +- internal/lsp/lsp_test.go | 2 +- internal/lsp/lsprpc/lsprpc_test.go | 6 +- internal/lsp/mod/mod_test.go | 2 +- internal/lsp/regtest/regtest.go | 14 +- internal/lsp/regtest/runner.go | 121 ++++++++++++------ internal/lsp/source/source_test.go | 2 +- internal/memoize/memoize.go | 26 +++- 22 files changed, 182 insertions(+), 80 deletions(-) diff --git a/gopls/internal/regtest/bench/bench_test.go b/gopls/internal/regtest/bench/bench_test.go index 7f0da83fb37..090a51cc968 100644 --- a/gopls/internal/regtest/bench/bench_test.go +++ b/gopls/internal/regtest/bench/bench_test.go @@ -33,7 +33,7 @@ func benchmarkOptions(dir string) []RunOption { // Skip logs as they buffer up memory unnaturally. SkipLogs(), // The Debug server only makes sense if running in singleton mode. - Modes(Singleton), + Modes(Default), // Remove the default timeout. Individual tests should control their // own graceful termination. 
NoDefaultTimeout(), diff --git a/gopls/internal/regtest/debug/debug_test.go b/gopls/internal/regtest/debug/debug_test.go index d01d44ed980..bae14802eef 100644 --- a/gopls/internal/regtest/debug/debug_test.go +++ b/gopls/internal/regtest/debug/debug_test.go @@ -20,7 +20,7 @@ func TestBugNotification(t *testing.T) { // Verify that a properly configured session gets notified of a bug on the // server. WithOptions( - Modes(Singleton), // must be in-process to receive the bug report below + Modes(Default), // must be in-process to receive the bug report below Settings{"showBugReports": true}, ).Run(t, "", func(t *testing.T, env *Env) { const desc = "got a bug" diff --git a/gopls/internal/regtest/diagnostics/diagnostics_test.go b/gopls/internal/regtest/diagnostics/diagnostics_test.go index 9c9ad368cf3..ac5307e5534 100644 --- a/gopls/internal/regtest/diagnostics/diagnostics_test.go +++ b/gopls/internal/regtest/diagnostics/diagnostics_test.go @@ -298,7 +298,7 @@ func Hello() { t.Run("without workspace module", func(t *testing.T) { WithOptions( - Modes(Singleton), + Modes(Default), ).Run(t, noMod, func(t *testing.T, env *Env) { env.Await( env.DiagnosticAtRegexp("main.go", `"mod.com/bob"`), @@ -1678,7 +1678,7 @@ import ( WithOptions( InGOPATH(), EnvVars{"GO111MODULE": "off"}, - Modes(Singleton), + Modes(Default), ).Run(t, mod, func(t *testing.T, env *Env) { env.Await( env.DiagnosticAtRegexpWithMessage("main.go", `"nosuchpkg"`, `cannot find package "nosuchpkg" in any of`), @@ -1705,7 +1705,7 @@ package b for _, go111module := range []string{"on", "auto"} { t.Run("GO111MODULE="+go111module, func(t *testing.T) { WithOptions( - Modes(Singleton), + Modes(Default), EnvVars{"GO111MODULE": go111module}, ).Run(t, modules, func(t *testing.T, env *Env) { env.OpenFile("a/a.go") @@ -1722,7 +1722,7 @@ package b // Expect no warning if GO111MODULE=auto in a directory in GOPATH. t.Run("GOPATH_GO111MODULE_auto", func(t *testing.T) { WithOptions( - Modes(Singleton), + Modes(Default), EnvVars{"GO111MODULE": "auto"}, InGOPATH(), ).Run(t, modules, func(t *testing.T, env *Env) { @@ -1784,7 +1784,7 @@ func helloHelper() {} ` WithOptions( ProxyFiles(proxy), - Modes(Singleton), + Modes(Default), ).Run(t, nested, func(t *testing.T, env *Env) { // Expect a diagnostic in a nested module. env.OpenFile("nested/hello/hello.go") @@ -1996,7 +1996,7 @@ func Hello() {} ` WithOptions( Settings{"experimentalUseInvalidMetadata": true}, - Modes(Singleton), + Modes(Default), ).Run(t, mod, func(t *testing.T, env *Env) { env.OpenFile("go.mod") env.RegexpReplace("go.mod", "module mod.com", "modul mod.com") // break the go.mod file @@ -2052,7 +2052,7 @@ func _() {} Settings{"experimentalUseInvalidMetadata": true}, // ExperimentalWorkspaceModule has a different failure mode for this // case. 
- Modes(Singleton), + Modes(Default), ).Run(t, mod, func(t *testing.T, env *Env) { env.Await( OnceMet( diff --git a/gopls/internal/regtest/misc/semantictokens_test.go b/gopls/internal/regtest/misc/semantictokens_test.go index dca2b8e7514..4437d402d46 100644 --- a/gopls/internal/regtest/misc/semantictokens_test.go +++ b/gopls/internal/regtest/misc/semantictokens_test.go @@ -25,7 +25,7 @@ func main() {} ` WithOptions( - Modes(Singleton), + Modes(Default), Settings{"allExperiments": true}, ).Run(t, src, func(t *testing.T, env *Env) { params := &protocol.SemanticTokensParams{} diff --git a/gopls/internal/regtest/misc/vendor_test.go b/gopls/internal/regtest/misc/vendor_test.go index 4e02799b47a..b0f507aaf32 100644 --- a/gopls/internal/regtest/misc/vendor_test.go +++ b/gopls/internal/regtest/misc/vendor_test.go @@ -49,7 +49,7 @@ func _() { } ` WithOptions( - Modes(Singleton), + Modes(Default), ProxyFiles(basicProxy), ).Run(t, pkgThatUsesVendoring, func(t *testing.T, env *Env) { env.OpenFile("a/a1.go") diff --git a/gopls/internal/regtest/modfile/modfile_test.go b/gopls/internal/regtest/modfile/modfile_test.go index c0bef833f44..e6f76d4d514 100644 --- a/gopls/internal/regtest/modfile/modfile_test.go +++ b/gopls/internal/regtest/modfile/modfile_test.go @@ -742,7 +742,7 @@ func main() { WithOptions( EnvVars{"GOFLAGS": "-mod=readonly"}, ProxyFiles(proxy), - Modes(Singleton), + Modes(Default), ).Run(t, mod, func(t *testing.T, env *Env) { env.OpenFile("main.go") original := env.ReadWorkspaceFile("go.mod") @@ -922,7 +922,7 @@ func hello() {} // TODO(rFindley) this doesn't work in multi-module workspace mode, because // it keeps around the last parsing modfile. Update this test to also // exercise the workspace module. - Modes(Singleton), + Modes(Default), ).Run(t, mod, func(t *testing.T, env *Env) { env.OpenFile("go.mod") env.Await(env.DoneWithOpen()) @@ -1090,7 +1090,7 @@ func main() { ` WithOptions( ProxyFiles(workspaceProxy), - Modes(Singleton), + Modes(Default), ).Run(t, mod, func(t *testing.T, env *Env) { env.OpenFile("go.mod") params := &protocol.PublishDiagnosticsParams{} @@ -1159,7 +1159,7 @@ func main() { ` WithOptions( ProxyFiles(proxy), - Modes(Singleton), + Modes(Default), ).Run(t, mod, func(t *testing.T, env *Env) { env.OpenFile("main.go") d := &protocol.PublishDiagnosticsParams{} diff --git a/gopls/internal/regtest/workspace/workspace_test.go b/gopls/internal/regtest/workspace/workspace_test.go index 7eafaf191dd..e4a1c4b4494 100644 --- a/gopls/internal/regtest/workspace/workspace_test.go +++ b/gopls/internal/regtest/workspace/workspace_test.go @@ -1205,7 +1205,7 @@ package main ` WithOptions( EnvVars{"GOPATH": filepath.FromSlash("$SANDBOX_WORKDIR/gopath")}, - Modes(Singleton), + Modes(Default), ).Run(t, mod, func(t *testing.T, env *Env) { env.Await( // Confirm that the build configuration is seen as valid, @@ -1236,7 +1236,7 @@ package main func main() {} ` WithOptions( - Modes(Singleton), + Modes(Default), ).Run(t, nomod, func(t *testing.T, env *Env) { env.OpenFile("a/main.go") env.OpenFile("b/main.go") diff --git a/gopls/internal/vulncheck/command_test.go b/gopls/internal/vulncheck/command_test.go index e7bf7085f88..71eaf4a580f 100644 --- a/gopls/internal/vulncheck/command_test.go +++ b/gopls/internal/vulncheck/command_test.go @@ -293,7 +293,7 @@ func runTest(t *testing.T, workspaceData, proxyData string, test func(context.Co t.Fatal(err) } - cache := cache.New(nil) + cache := cache.New(nil, nil, nil) session := cache.NewSession(ctx) options := source.DefaultOptions().Clone() 
tests.DefaultOptions(options) diff --git a/internal/lsp/cache/cache.go b/internal/lsp/cache/cache.go index a59c8908d5a..c002850653b 100644 --- a/internal/lsp/cache/cache.go +++ b/internal/lsp/cache/cache.go @@ -28,23 +28,46 @@ import ( "golang.org/x/tools/internal/span" ) -func New(options func(*source.Options)) *Cache { +// New Creates a new cache for gopls operation results, using the given file +// set, shared store, and session options. +// +// All of the fset, store and options may be nil, but if store is non-nil so +// must be fset (and they must always be used together), otherwise it may be +// possible to get cached data referencing token.Pos values not mapped by the +// FileSet. +func New(fset *token.FileSet, store *memoize.Store, options func(*source.Options)) *Cache { index := atomic.AddInt64(&cacheIndex, 1) + + if store != nil && fset == nil { + panic("non-nil store with nil fset") + } + if fset == nil { + fset = token.NewFileSet() + } + if store == nil { + store = &memoize.Store{} + } + c := &Cache{ id: strconv.FormatInt(index, 10), - fset: token.NewFileSet(), + fset: fset, options: options, + store: store, fileContent: map[span.URI]*fileHandle{}, } return c } type Cache struct { - id string - fset *token.FileSet + id string + fset *token.FileSet + + // TODO(rfindley): it doesn't make sense that cache accepts LSP options, just + // so that it can create a session: the cache does not (and should not) + // depend on options. Invert this relationship to remove options from Cache. options func(*source.Options) - store memoize.Store + store *memoize.Store fileMu sync.Mutex fileContent map[span.URI]*fileHandle diff --git a/internal/lsp/cache/check.go b/internal/lsp/cache/check.go index 4caf4ba6fa7..6c02d5348f8 100644 --- a/internal/lsp/cache/check.go +++ b/internal/lsp/cache/check.go @@ -249,6 +249,16 @@ func computePackageKey(id PackageID, files []source.FileHandle, m *KnownMetadata for _, file := range files { b.WriteString(file.FileIdentity().String()) } + // Metadata errors are interpreted and memoized on the computed package, so + // we must hash them into the key here. + // + // TODO(rfindley): handle metadata diagnostics independently from + // type-checking diagnostics. + for _, err := range m.Errors { + b.WriteString(err.Msg) + b.WriteString(err.Pos) + b.WriteRune(rune(err.Kind)) + } return packageHandleKey(source.HashOf(b.Bytes())) } diff --git a/internal/lsp/cache/session.go b/internal/lsp/cache/session.go index 2374a528757..a27efe334fd 100644 --- a/internal/lsp/cache/session.go +++ b/internal/lsp/cache/session.go @@ -231,7 +231,7 @@ func (s *Session) createView(ctx context.Context, name string, folder span.URI, backgroundCtx: backgroundCtx, cancel: cancel, initializeOnce: &sync.Once{}, - store: &s.cache.store, + store: s.cache.store, packages: persistent.NewMap(packageKeyLessInterface), meta: &metadataGraph{}, files: newFilesMap(), diff --git a/internal/lsp/cmd/capabilities_test.go b/internal/lsp/cmd/capabilities_test.go index 1d01b4bd0d7..930621bf307 100644 --- a/internal/lsp/cmd/capabilities_test.go +++ b/internal/lsp/cmd/capabilities_test.go @@ -43,7 +43,7 @@ func TestCapabilities(t *testing.T) { params.Capabilities.Workspace.Configuration = true // Send an initialize request to the server. 
- c.Server = lsp.NewServer(cache.New(app.options).NewSession(ctx), c.Client) + c.Server = lsp.NewServer(cache.New(nil, nil, app.options).NewSession(ctx), c.Client) result, err := c.Server.Initialize(ctx, params) if err != nil { t.Fatal(err) diff --git a/internal/lsp/cmd/cmd.go b/internal/lsp/cmd/cmd.go index a81eb839535..5911f97d1c1 100644 --- a/internal/lsp/cmd/cmd.go +++ b/internal/lsp/cmd/cmd.go @@ -286,7 +286,7 @@ func (app *Application) connect(ctx context.Context) (*connection, error) { switch { case app.Remote == "": connection := newConnection(app) - connection.Server = lsp.NewServer(cache.New(app.options).NewSession(ctx), connection.Client) + connection.Server = lsp.NewServer(cache.New(nil, nil, app.options).NewSession(ctx), connection.Client) ctx = protocol.WithClient(ctx, connection.Client) return connection, connection.initialize(ctx, app.options) case strings.HasPrefix(app.Remote, "internal@"): diff --git a/internal/lsp/cmd/serve.go b/internal/lsp/cmd/serve.go index 1c229a422b4..10730fd89cd 100644 --- a/internal/lsp/cmd/serve.go +++ b/internal/lsp/cmd/serve.go @@ -101,7 +101,7 @@ func (s *Serve) Run(ctx context.Context, args ...string) error { return fmt.Errorf("creating forwarder: %w", err) } } else { - ss = lsprpc.NewStreamServer(cache.New(s.app.options), isDaemon) + ss = lsprpc.NewStreamServer(cache.New(nil, nil, s.app.options), isDaemon) } var network, addr string diff --git a/internal/lsp/cmd/test/cmdtest.go b/internal/lsp/cmd/test/cmdtest.go index ff0461b333f..5342e9b7faf 100644 --- a/internal/lsp/cmd/test/cmdtest.go +++ b/internal/lsp/cmd/test/cmdtest.go @@ -50,7 +50,7 @@ func TestCommandLine(t *testing.T, testdata string, options func(*source.Options func NewTestServer(ctx context.Context, options func(*source.Options)) *servertest.TCPServer { ctx = debug.WithInstance(ctx, "", "") - cache := cache.New(options) + cache := cache.New(nil, nil, options) ss := lsprpc.NewStreamServer(cache, false) return servertest.NewTCPServer(ctx, ss, nil) } diff --git a/internal/lsp/lsp_test.go b/internal/lsp/lsp_test.go index 8e0e628dfea..53890dc616b 100644 --- a/internal/lsp/lsp_test.go +++ b/internal/lsp/lsp_test.go @@ -49,7 +49,7 @@ type runner struct { func testLSP(t *testing.T, datum *tests.Data) { ctx := tests.Context(t) - cache := cache.New(nil) + cache := cache.New(nil, nil, nil) session := cache.NewSession(ctx) options := source.DefaultOptions().Clone() tests.DefaultOptions(options) diff --git a/internal/lsp/lsprpc/lsprpc_test.go b/internal/lsp/lsprpc/lsprpc_test.go index cde641c920b..498566d1bbd 100644 --- a/internal/lsp/lsprpc/lsprpc_test.go +++ b/internal/lsp/lsprpc/lsprpc_test.go @@ -58,7 +58,7 @@ func TestClientLogging(t *testing.T) { client := FakeClient{Logs: make(chan string, 10)} ctx = debug.WithInstance(ctx, "", "") - ss := NewStreamServer(cache.New(nil), false) + ss := NewStreamServer(cache.New(nil, nil, nil), false) ss.serverForTest = server ts := servertest.NewPipeServer(ss, nil) defer checkClose(t, ts.Close) @@ -121,7 +121,7 @@ func checkClose(t *testing.T, closer func() error) { func setupForwarding(ctx context.Context, t *testing.T, s protocol.Server) (direct, forwarded servertest.Connector, cleanup func()) { t.Helper() serveCtx := debug.WithInstance(ctx, "", "") - ss := NewStreamServer(cache.New(nil), false) + ss := NewStreamServer(cache.New(nil, nil, nil), false) ss.serverForTest = s tsDirect := servertest.NewTCPServer(serveCtx, ss, nil) @@ -216,7 +216,7 @@ func TestDebugInfoLifecycle(t *testing.T) { clientCtx := debug.WithInstance(baseCtx, "", "") serverCtx := 
debug.WithInstance(baseCtx, "", "") - cache := cache.New(nil) + cache := cache.New(nil, nil, nil) ss := NewStreamServer(cache, false) tsBackend := servertest.NewTCPServer(serverCtx, ss, nil) diff --git a/internal/lsp/mod/mod_test.go b/internal/lsp/mod/mod_test.go index 09a182d16d7..56af9860b9d 100644 --- a/internal/lsp/mod/mod_test.go +++ b/internal/lsp/mod/mod_test.go @@ -26,7 +26,7 @@ func TestModfileRemainsUnchanged(t *testing.T) { testenv.NeedsGo1Point(t, 14) ctx := tests.Context(t) - cache := cache.New(nil) + cache := cache.New(nil, nil, nil) session := cache.NewSession(ctx) options := source.DefaultOptions().Clone() tests.DefaultOptions(options) diff --git a/internal/lsp/regtest/regtest.go b/internal/lsp/regtest/regtest.go index 9ebc673f8c0..b3a543d531e 100644 --- a/internal/lsp/regtest/regtest.go +++ b/internal/lsp/regtest/regtest.go @@ -8,6 +8,7 @@ import ( "context" "flag" "fmt" + "go/token" "io/ioutil" "os" "runtime" @@ -16,6 +17,7 @@ import ( "golang.org/x/tools/internal/lsp/cmd" "golang.org/x/tools/internal/lsp/source" + "golang.org/x/tools/internal/memoize" "golang.org/x/tools/internal/testenv" "golang.org/x/tools/internal/tool" ) @@ -87,9 +89,11 @@ var slowGOOS = map[string]bool{ } func DefaultModes() Mode { - normal := Singleton | Experimental + // TODO(rfindley): these modes should *not* depend on GOOS. Depending on + // testing.Short() should be sufficient. + normal := Default | Experimental if slowGOOS[runtime.GOOS] && testing.Short() { - normal = Singleton + normal = Default } if *runSubprocessTests { return normal | SeparateProcess @@ -116,6 +120,8 @@ func Main(m *testing.M, hook func(*source.Options)) { PrintGoroutinesOnFailure: *printGoroutinesOnFailure, SkipCleanup: *skipCleanup, OptionsHook: hook, + fset: token.NewFileSet(), + store: memoize.NewStore(memoize.NeverEvict), } if *runSubprocessTests { goplsPath := *goplsBinaryPath @@ -126,13 +132,13 @@ func Main(m *testing.M, hook func(*source.Options)) { panic(fmt.Sprintf("finding test binary path: %v", err)) } } - runner.GoplsPath = goplsPath + runner.goplsPath = goplsPath } dir, err := ioutil.TempDir("", "gopls-regtest-") if err != nil { panic(fmt.Errorf("creating regtest temp directory: %v", err)) } - runner.TempDir = dir + runner.tempDir = dir code := m.Run() if err := runner.Close(); err != nil { diff --git a/internal/lsp/regtest/runner.go b/internal/lsp/regtest/runner.go index b2992e99392..6c96e61dbc6 100644 --- a/internal/lsp/regtest/runner.go +++ b/internal/lsp/regtest/runner.go @@ -8,6 +8,7 @@ import ( "bytes" "context" "fmt" + "go/token" "io" "io/ioutil" "net" @@ -29,24 +30,58 @@ import ( "golang.org/x/tools/internal/lsp/lsprpc" "golang.org/x/tools/internal/lsp/protocol" "golang.org/x/tools/internal/lsp/source" + "golang.org/x/tools/internal/memoize" "golang.org/x/tools/internal/testenv" "golang.org/x/tools/internal/xcontext" ) // Mode is a bitmask that defines for which execution modes a test should run. +// +// Each mode controls several aspects of gopls' configuration: +// - Which server options to use for gopls sessions +// - Whether to use a shared cache +// - Whether to use a shared server +// - Whether to run the server in-process or in a separate process +// +// The behavior of each mode with respect to these aspects is summarized below. +// TODO(rfindley, cleanup): rather than using arbitrary names for these modes, +// we can compose them explicitly out of the features described here, allowing +// individual tests more freedom in constructing problematic execution modes. 
+// For example, a test could assert on a certain behavior when running with +// experimental options on a separate process. Moreover, we could unify 'Modes' +// with 'Options', and use RunMultiple rather than a hard-coded loop through +// modes. +// +// Mode | Options | Shared Cache? | Shared Server? | In-process? +// --------------------------------------------------------------------------- +// Default | Default | Y | N | Y +// Forwarded | Default | Y | Y | Y +// SeparateProcess | Default | Y | Y | N +// Experimental | Experimental | N | N | Y type Mode int const ( - // Singleton mode uses a separate in-process gopls instance for each test, - // and communicates over pipes to mimic the gopls sidecar execution mode, - // which communicates over stdin/stderr. - Singleton Mode = 1 << iota - // Forwarded forwards connections to a shared in-process gopls instance. + // Default mode runs gopls with the default options, communicating over pipes + // to emulate the lsp sidecar execution mode, which communicates over + // stdin/stdout. + // + // It uses separate servers for each test, but a shared cache, to avoid + // duplicating work when processing GOROOT. + Default Mode = 1 << iota + + // Forwarded uses the default options, but forwards connections to a shared + // in-process gopls server. Forwarded - // SeparateProcess forwards connection to a shared separate gopls process. + + // SeparateProcess uses the default options, but forwards connection to an + // external gopls daemon. SeparateProcess + // Experimental enables all of the experimental configurations that are - // being developed. + // being developed, and runs gopls in sidecar mode. + // + // It uses a separate cache for each test, to exercise races that may only + // appear with cache misses. Experimental ) @@ -55,14 +90,20 @@ const ( // remote), any tests that execute on the same Runner will share the same // state. type Runner struct { - DefaultModes Mode - Timeout time.Duration - GoplsPath string - PrintGoroutinesOnFailure bool - TempDir string - SkipCleanup bool - OptionsHook func(*source.Options) - + // Configuration + DefaultModes Mode // modes to run for each test + Timeout time.Duration // per-test timeout, if set + PrintGoroutinesOnFailure bool // whether to dump goroutines on test failure + SkipCleanup bool // if set, don't delete test data directories when the test exits + OptionsHook func(*source.Options) // if set, use these options when creating gopls sessions + + // Immutable state shared across test invocations + goplsPath string // path to the gopls executable (for SeparateProcess mode) + tempDir string // shared parent temp directory + fset *token.FileSet // shared FileSet + store *memoize.Store // shared store + + // Lazily allocated resources mu sync.Mutex ts *servertest.TCPServer socketDir string @@ -193,7 +234,7 @@ func InGOPATH() RunOption { } // DebugAddress configures a debug server bound to addr. This option is -// currently only supported when executing in Singleton mode. It is intended to +// currently only supported when executing in Default mode. It is intended to // be used for long-running stress tests. 
func DebugAddress(addr string) RunOption { return optionSetter(func(opts *runConfig) { @@ -252,10 +293,10 @@ func (r *Runner) Run(t *testing.T, files string, test TestFunc, opts ...RunOptio mode Mode getServer func(*testing.T, func(*source.Options)) jsonrpc2.StreamServer }{ - {"singleton", Singleton, singletonServer}, + {"default", Default, r.defaultServer}, {"forwarded", Forwarded, r.forwardedServer}, {"separate_process", SeparateProcess, r.separateProcessServer}, - {"experimental", Experimental, experimentalServer}, + {"experimental", Experimental, r.experimentalServer}, } for _, tc := range tests { @@ -267,10 +308,10 @@ func (r *Runner) Run(t *testing.T, files string, test TestFunc, opts ...RunOptio if config.modes&tc.mode == 0 { continue } - if config.debugAddr != "" && tc.mode != Singleton { + if config.debugAddr != "" && tc.mode != Default { // Debugging is useful for running stress tests, but since the daemon has // likely already been started, it would be too late to debug. - t.Fatalf("debugging regtest servers only works in Singleton mode, "+ + t.Fatalf("debugging regtest servers only works in Default mode, "+ "got debug addr %q and mode %v", config.debugAddr, tc.mode) } @@ -298,7 +339,7 @@ func (r *Runner) Run(t *testing.T, files string, test TestFunc, opts ...RunOptio di.MonitorMemory(ctx) } - rootDir := filepath.Join(r.TempDir, filepath.FromSlash(t.Name())) + rootDir := filepath.Join(r.tempDir, filepath.FromSlash(t.Name())) if err := os.MkdirAll(rootDir, 0755); err != nil { t.Fatal(err) } @@ -434,11 +475,13 @@ func (s *loggingFramer) printBuffers(testname string, w io.Writer) { fmt.Fprintf(os.Stderr, "#### End Gopls Test Logs for %q\n", testname) } -func singletonServer(t *testing.T, optsHook func(*source.Options)) jsonrpc2.StreamServer { - return lsprpc.NewStreamServer(cache.New(optsHook), false) +// defaultServer handles the Default execution mode. +func (r *Runner) defaultServer(t *testing.T, optsHook func(*source.Options)) jsonrpc2.StreamServer { + return lsprpc.NewStreamServer(cache.New(r.fset, r.store, optsHook), false) } -func experimentalServer(t *testing.T, optsHook func(*source.Options)) jsonrpc2.StreamServer { +// experimentalServer handles the Experimental execution mode. +func (r *Runner) experimentalServer(t *testing.T, optsHook func(*source.Options)) jsonrpc2.StreamServer { options := func(o *source.Options) { optsHook(o) o.EnableAllExperiments() @@ -446,28 +489,23 @@ func experimentalServer(t *testing.T, optsHook func(*source.Options)) jsonrpc2.S // source.Options.EnableAllExperiments, but we want to test it. o.ExperimentalWorkspaceModule = true } - return lsprpc.NewStreamServer(cache.New(options), false) -} - -func (r *Runner) forwardedServer(t *testing.T, optsHook func(*source.Options)) jsonrpc2.StreamServer { - ts := r.getTestServer(optsHook) - return newForwarder("tcp", ts.Addr) + return lsprpc.NewStreamServer(cache.New(nil, nil, options), false) } -// getTestServer gets the shared test server instance to connect to, or creates -// one if it doesn't exist. -func (r *Runner) getTestServer(optsHook func(*source.Options)) *servertest.TCPServer { - r.mu.Lock() - defer r.mu.Unlock() +// forwardedServer handles the Forwarded execution mode. 
+func (r *Runner) forwardedServer(_ *testing.T, optsHook func(*source.Options)) jsonrpc2.StreamServer { if r.ts == nil { + r.mu.Lock() ctx := context.Background() ctx = debug.WithInstance(ctx, "", "off") - ss := lsprpc.NewStreamServer(cache.New(optsHook), false) + ss := lsprpc.NewStreamServer(cache.New(nil, nil, optsHook), false) r.ts = servertest.NewTCPServer(ctx, ss, nil) + r.mu.Unlock() } - return r.ts + return newForwarder("tcp", r.ts.Addr) } +// separateProcessServer handles the SeparateProcess execution mode. func (r *Runner) separateProcessServer(t *testing.T, optsHook func(*source.Options)) jsonrpc2.StreamServer { // TODO(rfindley): can we use the autostart behavior here, instead of // pre-starting the remote? @@ -498,21 +536,22 @@ func (r *Runner) getRemoteSocket(t *testing.T) string { return filepath.Join(r.socketDir, daemonFile) } - if r.GoplsPath == "" { + if r.goplsPath == "" { t.Fatal("cannot run tests with a separate process unless a path to a gopls binary is configured") } var err error - r.socketDir, err = ioutil.TempDir(r.TempDir, "gopls-regtest-socket") + r.socketDir, err = ioutil.TempDir(r.tempDir, "gopls-regtest-socket") if err != nil { t.Fatalf("creating tempdir: %v", err) } socket := filepath.Join(r.socketDir, daemonFile) args := []string{"serve", "-listen", "unix;" + socket, "-listen.timeout", "10s"} - cmd := exec.Command(r.GoplsPath, args...) + cmd := exec.Command(r.goplsPath, args...) cmd.Env = append(os.Environ(), runTestAsGoplsEnvvar+"=true") var stderr bytes.Buffer cmd.Stderr = &stderr go func() { + // TODO(rfindley): this is racy; we're returning before we know that the command is running. if err := cmd.Run(); err != nil { panic(fmt.Sprintf("error running external gopls: %v\nstderr:\n%s", err, stderr.String())) } @@ -537,7 +576,7 @@ func (r *Runner) Close() error { } } if !r.SkipCleanup { - if err := os.RemoveAll(r.TempDir); err != nil { + if err := os.RemoveAll(r.tempDir); err != nil { errmsgs = append(errmsgs, err.Error()) } } diff --git a/internal/lsp/source/source_test.go b/internal/lsp/source/source_test.go index c670bdeb0b2..5fdcc0f86ea 100644 --- a/internal/lsp/source/source_test.go +++ b/internal/lsp/source/source_test.go @@ -49,7 +49,7 @@ type runner struct { func testSource(t *testing.T, datum *tests.Data) { ctx := tests.Context(t) - cache := cache.New(nil) + cache := cache.New(nil, nil, nil) session := cache.NewSession(ctx) options := source.DefaultOptions().Clone() tests.DefaultOptions(options) diff --git a/internal/memoize/memoize.go b/internal/memoize/memoize.go index aa4d58d2f26..150c79af852 100644 --- a/internal/memoize/memoize.go +++ b/internal/memoize/memoize.go @@ -236,12 +236,34 @@ func (p *Promise) wait(ctx context.Context) (interface{}, error) { } } +// An EvictionPolicy controls the eviction behavior of keys in a Store when +// they no longer have any references. +type EvictionPolicy int + +const ( + // ImmediatelyEvict evicts keys as soon as they no longer have references. + ImmediatelyEvict EvictionPolicy = iota + + // NeverEvict does not evict keys. + NeverEvict +) + // A Store maps arbitrary keys to reference-counted promises. +// +// The zero value is a valid Store, though a store may also be created via +// NewStore if a custom EvictionPolicy is required. type Store struct { + evictionPolicy EvictionPolicy + promisesMu sync.Mutex promises map[interface{}]*Promise } +// NewStore creates a new store with the given eviction policy. 
+func NewStore(policy EvictionPolicy) *Store { + return &Store{evictionPolicy: policy} +} + // Promise returns a reference-counted promise for the future result of // calling the specified function. // @@ -264,7 +286,9 @@ func (store *Store) Promise(key interface{}, function Function) (*Promise, func( store.promisesMu.Unlock() release := func() { - if atomic.AddInt32(&p.refcount, -1) == 0 { + // TODO(rfindley): this looks racy: it's possible that the refcount is + // incremented before we grab the lock. + if atomic.AddInt32(&p.refcount, -1) == 0 && store.evictionPolicy != NeverEvict { store.promisesMu.Lock() delete(store.promises, key) store.promisesMu.Unlock() From 39a4e36475d99fd719f5d62f870e662e057c7ad2 Mon Sep 17 00:00:00 2001 From: Robert Findley Date: Fri, 15 Jul 2022 14:58:24 -0400 Subject: [PATCH 135/136] internal/lsp/regtest: only run /default tests with -short Due to shared caching, the "default" tests can run faster than other execution modes and still retain most of the test coverage. Update the test runner to only run the singleton tests if testing.Short() is true, independent of GOOS. On the other hand, we lost noticeable coverage when disabling the Forwarded testing mode. Now that the regtests are lighter weight in general, reenable it on the longtests builders. While at it, clean up tests that used the server-side Options hook to instead use Settings, since clients would configure their server via Settings and Options can't work with a shared daemon. Updates golang/go#39384 Change-Id: I33e8b746188d795e88841727e6b7116cd4851dc2 Reviewed-on: https://go-review.googlesource.com/c/tools/+/418774 Reviewed-by: Bryan Mills TryBot-Result: Gopher Robot Run-TryBot: Robert Findley gopls-CI: kokoro --- .../completion/postfix_snippet_test.go | 9 +-- gopls/internal/regtest/misc/shared_test.go | 33 ++++++----- .../regtest/workspace/workspace_test.go | 43 +++++--------- internal/lsp/regtest/regtest.go | 23 +++----- internal/lsp/regtest/runner.go | 58 +++++++++++-------- 5 files changed, 79 insertions(+), 87 deletions(-) diff --git a/gopls/internal/regtest/completion/postfix_snippet_test.go b/gopls/internal/regtest/completion/postfix_snippet_test.go index 5a7ffb80d26..54860474e73 100644 --- a/gopls/internal/regtest/completion/postfix_snippet_test.go +++ b/gopls/internal/regtest/completion/postfix_snippet_test.go @@ -9,7 +9,6 @@ import ( "testing" . "golang.org/x/tools/internal/lsp/regtest" - "golang.org/x/tools/internal/lsp/source" ) func TestPostfixSnippetCompletion(t *testing.T) { @@ -433,9 +432,11 @@ func foo() string { }, } - r := WithOptions(Options(func(o *source.Options) { - o.ExperimentalPostfixCompletions = true - })) + r := WithOptions( + Settings{ + "experimentalPostfixCompletions": true, + }, + ) r.Run(t, mod, func(t *testing.T, env *Env) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { diff --git a/gopls/internal/regtest/misc/shared_test.go b/gopls/internal/regtest/misc/shared_test.go index 6b5acd02f71..170e164c94c 100644 --- a/gopls/internal/regtest/misc/shared_test.go +++ b/gopls/internal/regtest/misc/shared_test.go @@ -24,7 +24,10 @@ func main() { fmt.Println("Hello World.") }` -func runShared(t *testing.T, testFunc func(env1 *Env, env2 *Env)) { +// runShared is a helper to run a test in the same directory using both the +// original env, and an additional other environment connected to the same +// server. +func runShared(t *testing.T, testFunc func(origEnv *Env, otherEnv *Env)) { // Only run these tests in forwarded modes. 
modes := DefaultModes() & (Forwarded | SeparateProcess) WithOptions(Modes(modes)).Run(t, sharedProgram, func(t *testing.T, env1 *Env) { @@ -38,28 +41,32 @@ func runShared(t *testing.T, testFunc func(env1 *Env, env2 *Env)) { } func TestSimultaneousEdits(t *testing.T) { - runShared(t, func(env1 *Env, env2 *Env) { + runShared(t, func(origEnv *Env, otherEnv *Env) { // In editor #1, break fmt.Println as before. - env1.OpenFile("main.go") - env1.RegexpReplace("main.go", "Printl(n)", "") + origEnv.OpenFile("main.go") + origEnv.RegexpReplace("main.go", "Printl(n)", "") // In editor #2 remove the closing brace. - env2.OpenFile("main.go") - env2.RegexpReplace("main.go", "\\)\n(})", "") + otherEnv.OpenFile("main.go") + otherEnv.RegexpReplace("main.go", "\\)\n(})", "") // Now check that we got different diagnostics in each environment. - env1.Await(env1.DiagnosticAtRegexp("main.go", "Printl")) - env2.Await(env2.DiagnosticAtRegexp("main.go", "$")) + origEnv.Await(origEnv.DiagnosticAtRegexp("main.go", "Printl")) + otherEnv.Await(otherEnv.DiagnosticAtRegexp("main.go", "$")) }) } func TestShutdown(t *testing.T) { - runShared(t, func(env1 *Env, env2 *Env) { - if err := env1.Editor.Close(env1.Ctx); err != nil { + runShared(t, func(origEnv *Env, otherEnv *Env) { + // Close otherEnv, and verify that operation in the original environment is + // unaffected. Note: 'otherEnv' must be the environment being closed here. + // If we were to instead close 'env' here, we'd run into a duplicate + // shutdown when the test runner closes the original env. + if err := otherEnv.Editor.Close(otherEnv.Ctx); err != nil { t.Errorf("closing first editor: %v", err) } // Now make an edit in editor #2 to trigger diagnostics. - env2.OpenFile("main.go") - env2.RegexpReplace("main.go", "\\)\n(})", "") - env2.Await(env2.DiagnosticAtRegexp("main.go", "$")) + origEnv.OpenFile("main.go") + origEnv.RegexpReplace("main.go", "\\)\n(})", "") + origEnv.Await(origEnv.DiagnosticAtRegexp("main.go", "$")) }) } diff --git a/gopls/internal/regtest/workspace/workspace_test.go b/gopls/internal/regtest/workspace/workspace_test.go index e4a1c4b4494..deb8a83695f 100644 --- a/gopls/internal/regtest/workspace/workspace_test.go +++ b/gopls/internal/regtest/workspace/workspace_test.go @@ -15,7 +15,6 @@ import ( "golang.org/x/tools/internal/lsp/bug" "golang.org/x/tools/internal/lsp/fake" "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/lsp/source" "golang.org/x/tools/internal/testenv" . 
"golang.org/x/tools/internal/lsp/regtest" @@ -138,36 +137,22 @@ func TestReferences(t *testing.T) { } } -// make sure that directory filters work -func TestFilters(t *testing.T) { - for _, tt := range []struct { - name, rootPath string - }{ - { - name: "module root", - rootPath: "pkg", +func TestDirectoryFilters(t *testing.T) { + WithOptions( + ProxyFiles(workspaceProxy), + WorkspaceFolders("pkg"), + Settings{ + "directoryFilters": []string{"-inner"}, }, - } { - t.Run(tt.name, func(t *testing.T) { - opts := []RunOption{ProxyFiles(workspaceProxy)} - if tt.rootPath != "" { - opts = append(opts, WorkspaceFolders(tt.rootPath)) - } - f := func(o *source.Options) { - o.DirectoryFilters = append(o.DirectoryFilters, "-inner") + ).Run(t, workspaceModule, func(t *testing.T, env *Env) { + syms := env.WorkspaceSymbol("Hi") + sort.Slice(syms, func(i, j int) bool { return syms[i].ContainerName < syms[j].ContainerName }) + for _, s := range syms { + if strings.Contains(s.ContainerName, "inner") { + t.Errorf("WorkspaceSymbol: found symbol %q with container %q, want \"inner\" excluded", s.Name, s.ContainerName) } - opts = append(opts, Options(f)) - WithOptions(opts...).Run(t, workspaceModule, func(t *testing.T, env *Env) { - syms := env.WorkspaceSymbol("Hi") - sort.Slice(syms, func(i, j int) bool { return syms[i].ContainerName < syms[j].ContainerName }) - for i, s := range syms { - if strings.Contains(s.ContainerName, "/inner") { - t.Errorf("%s %v %s %s %d\n", s.Name, s.Kind, s.ContainerName, tt.name, i) - } - } - }) - }) - } + } + }) } // Make sure that analysis diagnostics are cleared for the whole package when diff --git a/internal/lsp/regtest/regtest.go b/internal/lsp/regtest/regtest.go index b3a543d531e..cfd999dfa59 100644 --- a/internal/lsp/regtest/regtest.go +++ b/internal/lsp/regtest/regtest.go @@ -79,26 +79,17 @@ func (r RunMultiple) Run(t *testing.T, files string, f TestFunc) { } } -// The regtests run significantly slower on these operating systems, due to (we -// believe) kernel locking behavior. Only run in singleton mode on these -// operating system when using -short. -var slowGOOS = map[string]bool{ - "darwin": true, - "openbsd": true, - "plan9": true, -} - +// DefaultModes returns the default modes to run for each regression test (they +// may be reconfigured by the tests themselves). func DefaultModes() Mode { - // TODO(rfindley): these modes should *not* depend on GOOS. Depending on - // testing.Short() should be sufficient. - normal := Default | Experimental - if slowGOOS[runtime.GOOS] && testing.Short() { - normal = Default + modes := Default + if !testing.Short() { + modes |= Experimental | Forwarded } if *runSubprocessTests { - return normal | SeparateProcess + modes |= SeparateProcess } - return normal + return modes } // Main sets up and tears down the shared regtest state. diff --git a/internal/lsp/regtest/runner.go b/internal/lsp/regtest/runner.go index 6c96e61dbc6..1d22aaa9d85 100644 --- a/internal/lsp/regtest/runner.go +++ b/internal/lsp/regtest/runner.go @@ -85,6 +85,21 @@ const ( Experimental ) +func (m Mode) String() string { + switch m { + case Default: + return "default" + case Forwarded: + return "forwarded" + case SeparateProcess: + return "separate process" + case Experimental: + return "experimental" + default: + return "unknown mode" + } +} + // A Runner runs tests in gopls execution environments, as specified by its // modes. 
For modes that share state (for example, a shared cache or common // remote), any tests that execute on the same Runner will share the same @@ -117,14 +132,6 @@ type runConfig struct { debugAddr string skipLogs bool skipHooks bool - optionsHook func(*source.Options) -} - -func (r *Runner) defaultConfig() *runConfig { - return &runConfig{ - modes: r.DefaultModes, - optionsHook: r.OptionsHook, - } } // A RunOption augments the behavior of the test runner. @@ -155,22 +162,16 @@ func ProxyFiles(txt string) RunOption { } // Modes configures the execution modes that the test should run in. +// +// By default, modes are configured by the test runner. If this option is set, +// it overrides the set of default modes and the test runs in exactly these +// modes. func Modes(modes Mode) RunOption { return optionSetter(func(opts *runConfig) { - opts.modes = modes - }) -} - -// Options configures the various server and user options. -func Options(hook func(*source.Options)) RunOption { - return optionSetter(func(opts *runConfig) { - old := opts.optionsHook - opts.optionsHook = func(o *source.Options) { - if old != nil { - old(o) - } - hook(o) + if opts.modes != 0 { + panic("modes set more than once") } + opts.modes = modes }) } @@ -301,13 +302,18 @@ func (r *Runner) Run(t *testing.T, files string, test TestFunc, opts ...RunOptio for _, tc := range tests { tc := tc - config := r.defaultConfig() + var config runConfig for _, opt := range opts { - opt.set(config) + opt.set(&config) } - if config.modes&tc.mode == 0 { + modes := r.DefaultModes + if config.modes != 0 { + modes = config.modes + } + if modes&tc.mode == 0 { continue } + if config.debugAddr != "" && tc.mode != Default { // Debugging is useful for running stress tests, but since the daemon has // likely already been started, it would be too late to debug. @@ -364,7 +370,9 @@ func (r *Runner) Run(t *testing.T, files string, test TestFunc, opts ...RunOptio } } }() - ss := tc.getServer(t, config.optionsHook) + + ss := tc.getServer(t, r.OptionsHook) + framer := jsonrpc2.NewRawStream ls := &loggingFramer{} if !config.skipLogs { From b3b5c13b291f9653da6f31b95db100a2e26bd186 Mon Sep 17 00:00:00 2001 From: Robert Findley Date: Tue, 26 Jul 2022 17:01:06 -0400 Subject: [PATCH 136/136] internal/lsp/cache: invalidate packages with missing deps when files are added Add logic to invalidate any packages with missing dependencies when a file is added. Also fix a latent bug overwriting 'anyFileOpenenedOrClosed' for each loop iteration. 
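The latent bug mentioned above is the familiar pattern of assigning a per-iteration result to an accumulator flag instead of OR-ing into it. A minimal standalone sketch of the difference (isOpenChange is a hypothetical helper, not the actual snapshot code):

```go
package main

import "fmt"

// isOpenChange is a hypothetical stand-in for "this change opened or closed a file".
func isOpenChange(c string) bool { return c == "open" || c == "close" }

func main() {
	changes := []string{"open", "edit", "edit"}

	// Buggy: the flag is overwritten on every iteration, so only the last
	// change decides its final value (false here, despite the "open").
	buggy := false
	for _, c := range changes {
		buggy = isOpenChange(c)
	}

	// Fixed: accumulate across iterations, as the CL does for
	// anyFileOpenedOrClosed and anyFileAdded.
	fixed := false
	for _, c := range changes {
		fixed = fixed || isOpenChange(c)
	}

	fmt.Println(buggy, fixed) // false true
}
```
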
Fixes golang/go#54073 Change-Id: I000ceb354885bd4863a1dfdda09e4d5f0e5481f3 Reviewed-on: https://go-review.googlesource.com/c/tools/+/419501 Run-TryBot: Robert Findley Reviewed-by: Suzy Mueller gopls-CI: kokoro TryBot-Result: Gopher Robot --- gopls/internal/regtest/watch/watch_test.go | 4 +--- internal/lsp/cache/snapshot.go | 22 +++++++++++++++++++--- 2 files changed, 20 insertions(+), 6 deletions(-) diff --git a/gopls/internal/regtest/watch/watch_test.go b/gopls/internal/regtest/watch/watch_test.go index 04414f6b744..2890f401a90 100644 --- a/gopls/internal/regtest/watch/watch_test.go +++ b/gopls/internal/regtest/watch/watch_test.go @@ -199,14 +199,12 @@ func _() { } ` Run(t, missing, func(t *testing.T, env *Env) { - t.Skip("the initial workspace load fails and never retries") - env.Await( env.DiagnosticAtRegexp("a/a.go", "\"mod.com/c\""), ) env.WriteWorkspaceFile("c/c.go", `package c; func C() {};`) env.Await( - EmptyDiagnostics("c/c.go"), + EmptyDiagnostics("a/a.go"), ) }) } diff --git a/internal/lsp/cache/snapshot.go b/internal/lsp/cache/snapshot.go index 64e7f17c994..b183bc5f88f 100644 --- a/internal/lsp/cache/snapshot.go +++ b/internal/lsp/cache/snapshot.go @@ -1695,8 +1695,10 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC // Compute invalidations based on file changes. changedPkgFiles := map[PackageID]bool{} // packages whose file set may have changed - anyImportDeleted := false - anyFileOpenedOrClosed := false + anyImportDeleted := false // import deletions can resolve cycles + anyFileOpenedOrClosed := false // opened files affect workspace packages + anyFileAdded := false // adding a file can resolve missing dependencies + for uri, change := range changes { // Maybe reinitialize the view if we see a change in the vendor // directory. @@ -1709,7 +1711,8 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC var originalOpen, newOpen bool _, originalOpen = originalFH.(*overlay) _, newOpen = change.fileHandle.(*overlay) - anyFileOpenedOrClosed = originalOpen != newOpen + anyFileOpenedOrClosed = anyFileOpenedOrClosed || (originalOpen != newOpen) + anyFileAdded = anyFileAdded || (originalFH == nil && change.fileHandle != nil) // If uri is a Go file, check if it has changed in a way that would // invalidate metadata. Note that we can't use s.view.FileKind here, @@ -1779,6 +1782,19 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC } } + // Adding a file can resolve missing dependencies from existing packages. + // + // We could be smart here and try to guess which packages may have been + // fixed, but until that proves necessary, just invalidate metadata for any + // package with missing dependencies. + if anyFileAdded { + for id, metadata := range s.meta.metadata { + if len(metadata.MissingDeps) > 0 { + directIDs[id] = true + } + } + } + // Invalidate reverse dependencies too. // idsToInvalidate keeps track of transitive reverse dependencies. // If an ID is present in the map, invalidate its types.