diff --git a/.golangci.yml b/.golangci.yml index 221bb82007..e238275bb5 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -100,6 +100,11 @@ issues: # trip this off. path: private/buf/bufcli/env.go text: "G101:" + - linters: + - gosec + # G404 checks for use of the ordinary non-CPRNG. + path: private/buf/buflsp/progress.go + text: "G404:" - linters: - containedctx # Type must implement an interface whose methods do not accept context. But this diff --git a/CHANGELOG.md b/CHANGELOG.md index 238176ed97..72af014b74 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,7 +2,7 @@ ## [Unreleased] -- No changes yet. +- Add new experimental LSP support under `buf beta lsp`. ## [v1.41.0] - 2024-09-11 diff --git a/go.mod b/go.mod index 4480c4e7fa..35b64507b8 100644 --- a/go.mod +++ b/go.mod @@ -34,6 +34,8 @@ require ( github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.9.0 + go.lsp.dev/jsonrpc2 v0.10.0 + go.lsp.dev/protocol v0.12.0 go.opentelemetry.io/otel v1.30.0 go.opentelemetry.io/otel/sdk v1.30.0 go.opentelemetry.io/otel/trace v1.30.0 @@ -106,9 +108,13 @@ require ( github.com/quic-go/qpack v0.5.1 // indirect github.com/rogpeppe/go-internal v1.12.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/segmentio/asm v1.1.3 // indirect + github.com/segmentio/encoding v0.3.4 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/stoewer/go-strcase v1.3.0 // indirect github.com/vbatts/tar-split v0.11.5 // indirect + go.lsp.dev/pkg v0.0.0-20210717090340-384b27a52fb2 // indirect + go.lsp.dev/uri v0.3.0 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect go.opentelemetry.io/otel/metric v1.30.0 // indirect diff --git a/go.sum b/go.sum index 42698b1694..2ea38e04c3 100644 --- a/go.sum +++ b/go.sum @@ -230,6 +230,10 @@ github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA= github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/segmentio/asm v1.1.3 h1:WM03sfUOENvvKexOLp+pCqgb/WDjsi7EK8gIsICtzhc= +github.com/segmentio/asm v1.1.3/go.mod h1:Ld3L4ZXGNcSLRg4JBsZ3//1+f/TjYl0Mzen/DQy1EJg= +github.com/segmentio/encoding v0.3.4 h1:WM4IBnxH8B9TakiM2QD5LyNl9JSndh88QbHqVC+Pauc= +github.com/segmentio/encoding v0.3.4/go.mod h1:n0JeuIqEQrQoPDGsjo8UNd1iA0U8d8+oHAA4E3G3OxM= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= @@ -251,6 +255,14 @@ github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinC github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.lsp.dev/jsonrpc2 v0.10.0 h1:Pr/YcXJoEOTMc/b6OTmcR1DPJ3mSWl/SWiU1Cct6VmI= +go.lsp.dev/jsonrpc2 v0.10.0/go.mod h1:fmEzIdXPi/rf6d4uFcayi8HpFP1nBF99ERP1htC72Ac= +go.lsp.dev/pkg v0.0.0-20210717090340-384b27a52fb2 h1:hCzQgh6UcwbKgNSRurYWSqh8MufqRRPODRBblutn4TE= +go.lsp.dev/pkg v0.0.0-20210717090340-384b27a52fb2/go.mod 
h1:gtSHRuYfbCT0qnbLnovpie/WEmqyJ7T4n6VXiFMBtcw= +go.lsp.dev/protocol v0.12.0 h1:tNprUI9klQW5FAFVM4Sa+AbPFuVQByWhP1ttNUAjIWg= +go.lsp.dev/protocol v0.12.0/go.mod h1:Qb11/HgZQ72qQbeyPfJbu3hZBH23s1sr4st8czGeDMQ= +go.lsp.dev/uri v0.3.0 h1:KcZJmh6nFIBeJzTugn5JTU6OOyG0lDOo3R9KwTxTYbo= +go.lsp.dev/uri v0.3.0/go.mod h1:P5sbO1IQR+qySTWOCnhnK7phBx+W3zbLqSMDJNTw88I= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= @@ -321,6 +333,7 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211110154304-99a53858aa08/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= diff --git a/private/buf/buflsp/buflsp.go b/private/buf/buflsp/buflsp.go new file mode 100644 index 0000000000..cc79544593 --- /dev/null +++ b/private/buf/buflsp/buflsp.go @@ -0,0 +1,187 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package buflsp implements a language server for Protobuf. +// +// The main entry-point of this package is the Serve() function, which creates a new LSP server. +package buflsp + +import ( + "context" + "fmt" + "sync/atomic" + + "github.com/bufbuild/buf/private/buf/bufctl" + "github.com/bufbuild/buf/private/bufpkg/bufcheck" + "github.com/bufbuild/buf/private/bufpkg/bufimage" + "github.com/bufbuild/buf/private/pkg/app/appext" + "github.com/bufbuild/buf/private/pkg/command" + "github.com/bufbuild/buf/private/pkg/pluginrpcutil" + "github.com/bufbuild/buf/private/pkg/storage" + "github.com/bufbuild/buf/private/pkg/storage/storageos" + "github.com/bufbuild/buf/private/pkg/tracing" + "go.lsp.dev/jsonrpc2" + "go.lsp.dev/protocol" + "go.opentelemetry.io/otel/attribute" + "go.uber.org/zap" +) + +// Serve spawns a new LSP server, listening on the given stream. +// +// Returns a context for managing the server. +func Serve( + ctx context.Context, + container appext.Container, + controller bufctl.Controller, + stream jsonrpc2.Stream, +) (jsonrpc2.Conn, error) { + // The LSP protocol deals with absolute filesystem paths. This requires us to + // bypass the bucket API completely, so we create a bucket pointing at the filesystem + // root. 
+	bucketProvider := storageos.NewProvider(storageos.ProviderWithSymlinks())
+	bucket, err := bucketProvider.NewReadWriteBucket(
+		"/", // TODO: This is not correct for Windows.
+		storageos.ReadWriteBucketWithSymlinksIfSupported(),
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	tracer := tracing.NewTracer(container.Tracer())
+	checkClient, err := bufcheck.NewClient(container.Logger(), tracer, pluginrpcutil.NewRunnerProvider(command.NewRunner()), bufcheck.ClientWithStderr(container.Stderr()))
+	if err != nil {
+		return nil, err
+	}
+
+	conn := jsonrpc2.NewConn(stream)
+	lsp := &lsp{
+		conn: conn,
+		client: protocol.ClientDispatcher(
+			&connWrapper{Conn: conn, logger: container.Logger()},
+			zap.NewNop(), // The logging from protocol itself isn't very good; we've replaced it with connWrapper here.
+		),
+		logger:      container.Logger(),
+		tracer:      tracer,
+		controller:  controller,
+		checkClient: checkClient,
+		rootBucket:  bucket,
+	}
+	lsp.fileManager = newFileManager(lsp)
+	off := protocol.TraceOff
+	lsp.traceValue.Store(&off)
+
+	conn.Go(ctx, lsp.newHandler())
+	return conn, nil
+}
+
+// *** PRIVATE ***
+
+// lsp contains all of the LSP server's state. (I.e., it is the "god class" the protocol requires
+// that we implement.)
+//
+// This type does not implement protocol.Server; see server.go for that.
+// This type contains all the necessary book-keeping for keeping the server running.
+// Its handler methods are not defined in buflsp.go; they are defined in other files, grouped
+// according to the groupings in the LSP specification.
+type lsp struct {
+	conn   jsonrpc2.Conn
+	client protocol.Client
+
+	logger      *zap.Logger
+	tracer      tracing.Tracer
+	controller  bufctl.Controller
+	checkClient bufcheck.Client
+	rootBucket  storage.ReadBucket
+	fileManager *fileManager
+
+	// These are atomics, because they are read often and written to
+	// almost never, but potentially concurrently. Having them side-by-side
+	// is fine; they are almost never written to so false sharing is not a
+	// concern.
+	initParams atomic.Pointer[protocol.InitializeParams]
+	traceValue atomic.Pointer[protocol.TraceValue]
+}
+
+// init performs *actual* initialization of the server. This is called by Initialize().
+//
+// It may only be called once for a given server.
+func (l *lsp) init(params *protocol.InitializeParams) error {
+	if l.initParams.Load() != nil {
+		return fmt.Errorf("called the %q method more than once", protocol.MethodInitialize)
+	}
+	l.initParams.Store(params)
+
+	// TODO: set up logging. We need to forward everything from server.logger through to
+	// the client, if tracing is turned on. The right way to do this is with an extra
+	// goroutine and some channels.
+
+	return nil
+}
+
+// findImportable finds all files that can potentially be imported by the proto file at
+// uri. This returns a map from potential Protobuf import path to the URI of the file it would import.
+//
+// Note that this performs no validation on these files, because those files might be open in the
+// editor and might contain invalid syntax at the moment. We only want to get their paths and nothing
+// more.
+func (l *lsp) findImportable( + ctx context.Context, + uri protocol.URI, +) (map[string]bufimage.ImageFileInfo, error) { + fileInfos, err := l.controller.GetImportableImageFileInfos(ctx, uri.Filename()) + if err != nil { + return nil, err + } + + imports := make(map[string]bufimage.ImageFileInfo) + for _, fileInfo := range fileInfos { + imports[fileInfo.Path()] = fileInfo + } + + l.logger.Sugar().Debugf("found imports for %q: %#v", uri, imports) + + return imports, nil +} + +// newHandler constructs an RPC handler that wraps the default one from jsonrpc2. This allows us +// to inject debug logging, tracing, and timeouts to requests. +func (l *lsp) newHandler() jsonrpc2.Handler { + actual := protocol.ServerHandler(newServer(l), nil) + return func(ctx context.Context, reply jsonrpc2.Replier, req jsonrpc2.Request) (retErr error) { + ctx, span := l.tracer.Start( + ctx, + tracing.WithErr(&retErr), + tracing.WithAttributes(attribute.String("method", req.Method())), + ) + defer span.End() + + l.logger.Debug( + "processing request", + zap.String("method", req.Method()), + zap.ByteString("params", req.Params()), + ) + + ctx = withRequestID(ctx) + + replier := l.wrapReplier(reply, req) + + // Verify that the server has been initialized if this isn't the initialization + // request. + if req.Method() != protocol.MethodInitialize && l.initParams.Load() == nil { + return replier(ctx, nil, fmt.Errorf("the first call to the server must be the %q method", protocol.MethodInitialize)) + } + + return actual(ctx, replier, req) + } +} diff --git a/private/buf/buflsp/builtin.go b/private/buf/buflsp/builtin.go new file mode 100644 index 0000000000..e9d78131d6 --- /dev/null +++ b/private/buf/buflsp/builtin.go @@ -0,0 +1,108 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Data for the built-in types. + +package buflsp + +// builtinDocs contains documentation for the built-in types, to display in hover inlays. 
+var builtinDocs = map[string][]string{ + "int32": { + "A 32-bit integer (varint encoding).", + "", + "Values of this type range between `-2147483648` and `2147483647`.", + "Beware that negative values are encoded as five bytes on the wire!", + }, + "int64": { + "A 64-bit integer (varint encoding).", + "", + "Values of this type range between `-9223372036854775808` and `9223372036854775807`.", + "Beware that negative values are encoded as ten bytes on the wire!", + }, + + "uint32": { + "A 32-bit unsigned integer (varint encoding).", + "", + "Values of this type range between `0` and `4294967295`.", + }, + "uint64": { + "A 64-bit unsigned integer (varint encoding).", + "", + "Values of this type range between `0` and `18446744073709551615`.", + }, + + "sint32": { + "A 32-bit integer (ZigZag encoding).", + "", + "Values of this type range between `-2147483648` and `2147483647`.", + }, + "sint64": { + "A 64-bit integer (ZigZag encoding).", + "", + "Values of this type range between `-9223372036854775808` and `9223372036854775807`.", + }, + + "fixed32": { + "A 32-bit unsigned integer (4-byte encoding).", + "", + "Values of this type range between `0` and `4294967295`.", + }, + "fixed64": { + "A 64-bit unsigned integer (8-byte encoding).", + "", + "Values of this type range between `0` and `18446744073709551615`.", + }, + + "sfixed32": { + "A 32-bit integer (4-byte encoding).", + "", + "Values of this type range between `-2147483648` and `2147483647`.", + }, + "sfixed64": { + "A 64-bit integer (8-byte encoding).", + "", + "Values of this type range between `-9223372036854775808` and `9223372036854775807`.", + }, + + "float": { + "A single-precision floating point number (IEEE-745.2008 binary32).", + }, + "double": { + "A double-precision floating point number (IEEE-745.2008 binary64).", + }, + + "string": { + "A string of text.", + "", + "Stores at most 4GB of text. Intended to be UTF-8 encoded Unicode; use `bytes` if you need other encodings.", + }, + "bytes": { + "A blob of arbitrary bytes.", + "", + "Stores at most 4GB of binary data. Encoded as base64 in JSON.", + }, + + "bool": { + "A Boolean value: `true` or `false`.", + "", + "Encoded as a single byte: `0x00` or `0xff` (all non-zero bytes decode to `true`).", + }, + + "default": { + "A magic option that specifies the field's default value.", + "", + "Unlike every other option on a field, this does not have a corresponding field in", + "`google.protobuf.FieldOptions`; it is implemented by compiler magic.", + }, +} diff --git a/private/buf/buflsp/file.go b/private/buf/buflsp/file.go new file mode 100644 index 0000000000..4f89cf2b0b --- /dev/null +++ b/private/buf/buflsp/file.go @@ -0,0 +1,675 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file defines file manipulation operations. 
+
+package buflsp
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"slices"
+	"strings"
+
+	"github.com/bufbuild/buf/private/buf/bufworkspace"
+	"github.com/bufbuild/buf/private/bufpkg/bufanalysis"
+	"github.com/bufbuild/buf/private/bufpkg/bufcheck"
+	"github.com/bufbuild/buf/private/bufpkg/bufimage"
+	"github.com/bufbuild/buf/private/bufpkg/bufmodule"
+	"github.com/bufbuild/buf/private/pkg/ioext"
+	"github.com/bufbuild/buf/private/pkg/tracing"
+	"github.com/bufbuild/protocompile"
+	"github.com/bufbuild/protocompile/ast"
+	"github.com/bufbuild/protocompile/linker"
+	"github.com/bufbuild/protocompile/parser"
+	"github.com/bufbuild/protocompile/protoutil"
+	"github.com/bufbuild/protocompile/reporter"
+	"github.com/gofrs/uuid/v5"
+	"go.lsp.dev/protocol"
+	"go.opentelemetry.io/otel/attribute"
+	"go.uber.org/zap"
+	"google.golang.org/protobuf/reflect/protoreflect"
+)
+
+const descriptorPath = "google/protobuf/descriptor.proto"
+
+// file is a file that has been opened by the client.
+//
+// Mutating a file is thread-safe.
+type file struct {
+	// lsp and uri are not protected by file.lock; they are immutable after
+	// file creation!
+	lsp *lsp
+	uri protocol.URI
+
+	// All variables after the lock variable below are protected by file.lock.
+	//
+	// NOTE: this package must NEVER attempt to acquire a lock on a file while
+	// holding a lock on another file. This guarantees that any concurrent operations
+	// on distinct files can always make forward progress, even if the information they
+	// have is incomplete. This trades off up-to-date accuracy for responsiveness.
+	//
+	// For example, suppose g1 locks a.proto, and then attempts to lock b.proto
+	// because it followed a pointer in importMap. However, in the meantime, g2
+	// has acquired b.proto's lock already, and attempts to acquire a lock to a.proto,
+	// again because of a pointer in importMap. This will deadlock, and it will
+	// deadlock in such a way that will be undetectable to the Go scheduler, so the
+	// LSP will hang forever.
+	//
+	// This seems like a contrived scenario, but it can happen if a user creates two
+	// mutually-recursive Protobuf files. Although this is not permitted by Protobuf,
+	// the LSP must handle this invalid state gracefully.
+	//
+	// This is enforced by mutex.go.
+	lock mutex
+
+	text string
+	// version is an opaque version identifier given to us by the LSP client. This
+	// is used in the protocol to disambiguate which version of a file an operation
+	// (e.g. publishing diagnostics or symbols) refers to.
+	version int32
+	hasText bool // Whether this file has ever had text read into it.
+	// Always set false->true. Once true, never becomes false again.
+
+	workspace     bufworkspace.Workspace
+	module        bufmodule.Module
+	imageFileInfo bufimage.ImageFileInfo
+
+	isWKT bool
+
+	fileNode          *ast.FileNode
+	packageNode       *ast.PackageNode
+	diagnostics       []protocol.Diagnostic
+	importableToImage map[string]bufimage.ImageFileInfo
+	importToFile      map[string]*file
+	symbols           []*symbol
+	image             bufimage.Image
+}
+
+// Manager returns the file manager that owns this file.
+func (f *file) Manager() *fileManager {
+	return f.lsp.fileManager
+}
+
+// Package returns the package of this file, if known.
+func (f *file) Package() []string {
+	if f.packageNode == nil {
+		return nil
+	}
+
+	return strings.Split(string(f.packageNode.Name.AsIdentifier()), ".")
+}
+
+// Reset clears all bookkeeping information on this file.
+func (f *file) Reset(ctx context.Context) { + f.lsp.logger.Sugar().Debugf("resetting file %v", f.uri) + + // Lock and unlock to acquire the import map, then nil everything out + // This map is never mutated after being created, so we only + // need to read the pointer. + // + // We need to lock and unlock because Close() will call Reset() on other + // files, and this will deadlock if cyclic imports exist. + f.lock.Lock(ctx) + imports := f.importToFile + + f.fileNode = nil + f.packageNode = nil + f.diagnostics = nil + f.importableToImage = nil + f.importToFile = nil + f.symbols = nil + f.image = nil + f.lock.Unlock(ctx) + + // Close all imported files while file.mu is not held. + for _, imported := range imports { + imported.Close(ctx) + } +} + +// Close marks a file as closed. +// +// This will not necessarily evict the file, since there may be more than one user +// for this file. +func (f *file) Close(ctx context.Context) { + f.lsp.fileManager.Close(ctx, f.uri) +} + +// ReadFromDisk reads this file from disk if it has never had data loaded into it before. +// +// If it has been read from disk before, or has received updates from the LSP client, this +// function returns nil. +func (f *file) ReadFromDisk(ctx context.Context) (err error) { + f.lock.Lock(ctx) + defer f.lock.Unlock(ctx) + if f.hasText { + return nil + } + + data, err := os.ReadFile(f.uri.Filename()) + if err != nil { + return fmt.Errorf("could not read file %q from disk: %w", f.uri, err) + } + + f.version = -1 + f.text = string(data) + return nil +} + +// Update updates the contents of this file with the given text received from +// the LSP client. +func (f *file) Update(ctx context.Context, version int32, text string) { + f.Reset(ctx) + + f.lock.Lock(ctx) + defer f.lock.Unlock(ctx) + + f.lsp.logger.Sugar().Infof("new file version: %v, %v -> %v", f.uri, f.version, version) + f.version = version + f.text = text + f.hasText = true +} + +// Refresh rebuilds all of a file's internal book-keeping. +// +// If deep is set, this will also load imports and refresh those, too. +func (f *file) Refresh(ctx context.Context) { + progress := newProgress(f.lsp) + progress.Begin(ctx, "Indexing") + + progress.Report(ctx, "Parsing AST", 1.0/6) + hasReport := f.RefreshAST(ctx) + + progress.Report(ctx, "Indexing Imports", 2.0/6) + f.IndexImports(ctx) + + progress.Report(ctx, "Detecting Module", 3.0/6) + f.FindModule(ctx) + + progress.Report(ctx, "Linking Descriptors", 4.0/6) + f.BuildImage(ctx) + hasReport = f.RunLints(ctx) || hasReport // Avoid short-circuit here. + + progress.Report(ctx, "Indexing Symbols", 5.0/6) + f.IndexSymbols(ctx) + + progress.Done(ctx) + if hasReport { + f.PublishDiagnostics(ctx) + } +} + +// RefreshAST reparses the file and generates diagnostics if necessary. +// +// Returns whether a reparse was necessary. +func (f *file) RefreshAST(ctx context.Context) bool { + f.lock.Lock(ctx) + defer f.lock.Unlock(ctx) + if f.fileNode != nil { + return false + } + + // NOTE: We intentionally do not use var report report here, because we need + // report to be non-nil when empty; this is because if it is nil, when calling + // PublishDiagnostics() below it will be serialized as JSON null. + report := report{} + handler := reporter.NewHandler(&report) + + f.lsp.logger.Sugar().Infof("parsing AST for %v, %v", f.uri, f.version) + parsed, err := parser.Parse(f.uri.Filename(), strings.NewReader(f.text), handler) + if err == nil { + // Throw away the error. It doesn't contain anything not in the diagnostic array. 
+ _, _ = parser.ResultFromAST(parsed, true, handler) + } + + f.fileNode = parsed + f.diagnostics = report.diagnostics + f.lsp.logger.Sugar().Debugf("got %v diagnostic(s)", len(f.diagnostics)) + + // Search for a potential package node. + if f.fileNode != nil { + for _, decl := range f.fileNode.Decls { + if pkg, ok := decl.(*ast.PackageNode); ok { + f.packageNode = pkg + break + } + } + } + + return true +} + +// PublishDiagnostics publishes all of this file's diagnostics to the LSP client. +func (f *file) PublishDiagnostics(ctx context.Context) { + ctx, span := f.lsp.tracer.Start(ctx, + tracing.WithAttributes(attribute.String("uri", string(f.uri)))) + defer span.End() + + f.lock.Lock(ctx) + defer f.lock.Unlock(ctx) + + if f.diagnostics == nil { + return + } + + // Publish the diagnostics. This error is automatically logged by the LSP framework. + _ = f.lsp.client.PublishDiagnostics(ctx, &protocol.PublishDiagnosticsParams{ + URI: f.uri, + // NOTE: For some reason, Version is int32 in the document struct, but uint32 here. + // This seems like a bug in the LSP protocol package. + Version: uint32(f.version), + Diagnostics: f.diagnostics, + }) +} + +// FindModule finds the Buf module for this file. +func (f *file) FindModule(ctx context.Context) { + workspace, err := f.lsp.controller.GetWorkspace(ctx, f.uri.Filename()) + if err != nil { + f.lsp.logger.Warn("could not load workspace", zap.String("uri", string(f.uri)), zap.Error(err)) + return + } + + // Figure out which module this file belongs to. + var module bufmodule.Module + for _, mod := range workspace.Modules() { + // We do not care about this error, so we discard it. + _ = mod.WalkFileInfos(ctx, func(fileInfo bufmodule.FileInfo) error { + if fileInfo.LocalPath() == f.uri.Filename() { + module = mod + } + return nil + }) + if module != nil { + break + } + } + if module == nil { + f.lsp.logger.Sugar().Warnf("could not find module for %q", f.uri) + } + + // Determine if this is the WKT module. We do so by checking if this module contains + // descriptor.proto. + file, err := module.GetFile(ctx, descriptorPath) + if err == nil { + defer file.Close() + } + + f.lock.Lock(ctx) + f.workspace = workspace + f.module = module + f.lock.Unlock(ctx) +} + +// IndexImports finds URIs for all of the files imported by this file. +func (f *file) IndexImports(ctx context.Context) { + ctx, span := f.lsp.tracer.Start(ctx, + tracing.WithAttributes(attribute.String("uri", string(f.uri)))) + defer span.End() + + unlock := f.lock.Lock(ctx) + defer unlock() + + if f.fileNode == nil || f.importToFile != nil { + return + } + + importable, err := f.lsp.findImportable(ctx, f.uri) + if err != nil { + f.lsp.logger.Sugar().Warnf("could not compute importable files for %s: %s", f.uri, err) + return + } + f.importableToImage = importable + + // Find the FileInfo for this path. The crazy thing is that it may appear in importable + // multiple times, with different path lengths! We want to pick the one with the longest path + // length. 
+	for _, fileInfo := range importable {
+		if fileInfo.LocalPath() == f.uri.Filename() {
+			if f.imageFileInfo != nil && len(f.imageFileInfo.Path()) > len(fileInfo.Path()) {
+				continue
+			}
+			f.imageFileInfo = fileInfo
+		}
+	}
+
+	f.importToFile = make(map[string]*file)
+	for _, decl := range f.fileNode.Decls {
+		node, ok := decl.(*ast.ImportNode)
+		if !ok {
+			continue
+		}
+
+		name := node.Name.AsString()
+		fileInfo, ok := importable[name]
+		if !ok {
+			f.lsp.logger.Sugar().Warnf("could not find URI for import %q", name)
+			continue
+		}
+
+		var imported *file
+		if fileInfo.LocalPath() == f.uri.Filename() {
+			imported = f
+		} else {
+			imported = f.Manager().Open(ctx, protocol.URI("file://"+fileInfo.LocalPath()))
+		}
+
+		imported.imageFileInfo = fileInfo
+		f.isWKT = strings.HasPrefix(fileInfo.Path(), "google/protobuf/")
+		f.importToFile[node.Name.AsString()] = imported
+	}
+
+	// descriptor.proto is always implicitly imported.
+	if _, ok := f.importToFile[descriptorPath]; !ok {
+		descriptorFile := importable[descriptorPath]
+		descriptorURI := protocol.URI("file://" + descriptorFile.LocalPath())
+		if f.uri == descriptorURI {
+			f.importToFile[descriptorPath] = f
+		} else {
+			imported := f.Manager().Open(ctx, descriptorURI)
+			imported.imageFileInfo = descriptorFile
+			f.importToFile[descriptorPath] = imported
+		}
+		f.isWKT = true
+	}
+
+	// FIXME: This algorithm is not correct: it does not account for `import public`.
+
+	// Drop the lock after copying the pointer to the imports map. This
+	// particular map will not be mutated further, and since we're going to grab the lock of
+	// other files, we need to drop the currently held lock.
+	fileImports := f.importToFile
+	unlock()
+
+	for _, file := range fileImports {
+		if err := file.ReadFromDisk(ctx); err != nil {
+			file.lsp.logger.Sugar().Warnf("could not load import %q from disk: %v",
+				file.uri, err)
+			continue
+		}
+
+		// Parse the imported file and find all symbols in it, but do not
+		// index symbols in the import's imports, otherwise we will recursively
+		// index the universe and that would be quite slow.
+		file.RefreshAST(ctx)
+		file.IndexSymbols(ctx)
+	}
+}
+
+// BuildImage builds a Buf Image for this file. This does not use the controller to build
+// the image, because we need delicate control over the input files: namely, for the case
+// when we depend on a file that has been opened and modified in the editor.
+//
+// This operation requires IndexImports().
+func (f *file) BuildImage(ctx context.Context) { + f.lock.Lock(ctx) + importable := f.importableToImage + fileInfo := f.imageFileInfo + f.lock.Unlock(ctx) + + if importable == nil || fileInfo == nil { + return + } + + var report report + var symbols linker.Symbols + compiler := protocompile.Compiler{ + SourceInfoMode: protocompile.SourceInfoExtraOptionLocations, + Resolver: &protocompile.SourceResolver{ + Accessor: func(path string) (io.ReadCloser, error) { + var uri protocol.URI + fileInfo, ok := importable[path] + if ok { + uri = protocol.URI("file://" + fileInfo.LocalPath()) + } else { + uri = protocol.URI("file://" + path) + } + + if file := f.Manager().Get(uri); file != nil { + return ioext.CompositeReadCloser(strings.NewReader(file.text), ioext.NopCloser), nil + } else if !ok { + return nil, os.ErrNotExist + } + + return os.Open(fileInfo.LocalPath()) + }, + }, + Symbols: &symbols, + Reporter: &report, + } + + compiled, err := compiler.Compile(ctx, fileInfo.Path()) + if err != nil { + f.lock.Lock(ctx) + f.diagnostics = report.diagnostics + f.lock.Unlock(ctx) + } + if compiled[0] == nil { + return + } + + var imageFiles []bufimage.ImageFile + seen := map[string]bool{} + + queue := []protoreflect.FileDescriptor{compiled[0]} + for len(queue) > 0 { + descriptor := queue[len(queue)-1] + queue = queue[:len(queue)-1] + + if seen[descriptor.Path()] { + continue + } + seen[descriptor.Path()] = true + + unused, ok := report.pathToUnusedImports[descriptor.Path()] + var unusedIndices []int32 + if ok { + unusedIndices = make([]int32, 0, len(unused)) + } + + imports := descriptor.Imports() + for i := 0; i < imports.Len(); i++ { + dep := imports.Get(i).FileDescriptor + if dep == nil { + f.lsp.logger.Sugar().Warnf("found nil FileDescriptor for import %s", imports.Get(i).Path()) + continue + } + + queue = append(queue, dep) + + if unused != nil { + if _, ok := unused[dep.Path()]; ok { + unusedIndices = append(unusedIndices, int32(i)) + } + } + } + + descriptorProto := protoutil.ProtoFromFileDescriptor(descriptor) + if descriptorProto == nil { + err = fmt.Errorf("protoutil.ProtoFromFileDescriptor() returned nil for %q", descriptor.Path()) + break + } + + var imageFile bufimage.ImageFile + imageFile, err = bufimage.NewImageFile( + descriptorProto, + nil, + uuid.UUID{}, + "", + descriptor.Path(), + descriptor.Path() != fileInfo.Path(), + report.syntaxMissing[descriptor.Path()], + unusedIndices, + ) + if err != nil { + break + } + + imageFiles = append(imageFiles, imageFile) + f.lsp.logger.Sugar().Debugf("added image file for %s", descriptor.Path()) + } + + if err != nil { + f.lsp.logger.Warn("could not build image", zap.String("uri", string(f.uri)), zap.Error(err)) + return + } + + image, err := bufimage.NewImage(imageFiles) + if err != nil { + f.lsp.logger.Warn("could not build image", zap.String("uri", string(f.uri)), zap.Error(err)) + return + } + + f.lock.Lock(ctx) + f.image = image + f.lock.Unlock(ctx) +} + +// RunLints runs linting on this file. Returns whether any lints failed. +// +// This operation requires BuildImage(). +func (f *file) RunLints(ctx context.Context) bool { + if f.isWKT { + // Well-known types are not linted. 
+ return false + } + + f.lock.Lock(ctx) + workspace := f.workspace + module := f.module + image := f.image + f.lock.Unlock(ctx) + + if module == nil || image == nil { + f.lsp.logger.Sugar().Warnf("could not find image for %q", f.uri) + return false + } + + f.lsp.logger.Sugar().Debugf("running lint for %q in %v", f.uri, module.ModuleFullName()) + + lintConfig := workspace.GetLintConfigForOpaqueID(module.OpaqueID()) + err := f.lsp.checkClient.Lint( + ctx, + lintConfig, + image, + bufcheck.WithPluginConfigs(workspace.PluginConfigs()...), + bufcheck.WithPluginsEnabled(), + ) + + if err == nil { + f.lsp.logger.Sugar().Warnf("lint generated no errors for %s", f.uri) + return false + } + + var annotations bufanalysis.FileAnnotationSet + if !errors.As(err, &annotations) { + f.lsp.logger.Warn("error while linting", zap.String("uri", string(f.uri)), zap.Error(err)) + return false + } + + f.lsp.logger.Sugar().Warnf("lint generated %d error(s) for %s", len(annotations.FileAnnotations()), f.uri) + + f.lock.Lock(ctx) + f.lock.Unlock(ctx) + for _, annotation := range annotations.FileAnnotations() { + f.lsp.logger.Sugar().Info(annotation.FileInfo().Path(), " ", annotation.FileInfo().ExternalPath()) + + f.diagnostics = append(f.diagnostics, protocol.Diagnostic{ + Range: protocol.Range{ + Start: protocol.Position{ + Line: uint32(annotation.StartLine()) - 1, + Character: uint32(annotation.StartColumn()) - 1, + }, + End: protocol.Position{ + Line: uint32(annotation.EndLine()) - 1, + Character: uint32(annotation.EndColumn()) - 1, + }, + }, + Code: annotation.Type(), + Severity: protocol.DiagnosticSeverityError, + Source: "buf lint", + Message: annotation.Message(), + }) + } + return true +} + +// IndexSymbols processes the AST of a file and generates symbols for each symbol in +// the document. +func (f *file) IndexSymbols(ctx context.Context) { + _, span := f.lsp.tracer.Start(ctx, + tracing.WithAttributes(attribute.String("uri", string(f.uri)))) + defer span.End() + + unlock := f.lock.Lock(ctx) + defer unlock() + + // Throw away all the old symbols. Unlike other indexing functions, we rebuild + // symbols unconditionally. + f.symbols = nil + + // Generate new symbols. + newWalker(f).Walk(f.fileNode, f.fileNode) + + // Finally, sort the symbols in position order, with shorter symbols sorting smaller. + slices.SortFunc(f.symbols, func(s1, s2 *symbol) int { + diff := s1.info.Start().Offset - s2.info.Start().Offset + if diff == 0 { + return s1.info.End().Offset - s2.info.End().Offset + } + return diff + }) + + // Now we can drop the lock and search for cross-file references. + symbols := f.symbols + unlock() + for _, symbol := range symbols { + symbol.ResolveCrossFile(ctx) + } + + f.lsp.logger.Sugar().Debugf("symbol indexing complete %s", f.uri) +} + +// SymbolAt finds a symbol in this file at the given cursor position, if one exists. +// +// Returns nil if no symbol is found. +func (f *file) SymbolAt(ctx context.Context, cursor protocol.Position) *symbol { + f.lock.Lock(ctx) + defer f.lock.Unlock(ctx) + + // Binary search for the symbol whose start is before or equal to cursor. + idx, found := slices.BinarySearchFunc(f.symbols, cursor, func(sym *symbol, cursor protocol.Position) int { + return comparePositions(sym.Range().Start, cursor) + }) + if !found { + if idx == 0 { + return nil + } + idx-- + } + + symbol := f.symbols[idx] + f.lsp.logger.Debug("found symbol", zap.Object("symbol", symbol)) + + // Check that cursor is before the end of the symbol. 
+ if comparePositions(symbol.Range().End, cursor) <= 0 { + return nil + } + + return symbol +} diff --git a/private/buf/buflsp/file_manager.go b/private/buf/buflsp/file_manager.go new file mode 100644 index 0000000000..8d526478ae --- /dev/null +++ b/private/buf/buflsp/file_manager.go @@ -0,0 +1,66 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file defines a manager for tracking individual files. + +package buflsp + +import ( + "context" + + "github.com/bufbuild/buf/private/pkg/refcount" + "go.lsp.dev/protocol" +) + +// fileManager tracks all files the LSP is currently handling, whether read from disk or opened +// by the editor. +type fileManager struct { + lsp *lsp + uriToFile refcount.Map[protocol.URI, file] + mutexPool mutexPool +} + +// newFiles creates a new file manager. +func newFileManager(lsp *lsp) *fileManager { + return &fileManager{lsp: lsp} +} + +// Open finds a file with the given URI, or creates one. +// +// This will increment the file's refcount. +func (fm *fileManager) Open(ctx context.Context, uri protocol.URI) *file { + file, found := fm.uriToFile.Insert(uri) + if !found { + file.lsp = fm.lsp + file.uri = uri + file.lock = fm.mutexPool.NewMutex() + } + + return file +} + +// Get finds a file with the given URI, or returns nil. +func (fm *fileManager) Get(uri protocol.URI) *file { + return fm.uriToFile.Get(uri) +} + +// Close marks a file as closed. +// +// This will not necessarily evict the file, since there may be more than one user +// for this file. +func (fm *fileManager) Close(ctx context.Context, uri protocol.URI) { + if deleted := fm.uriToFile.Delete(uri); deleted != nil { + deleted.Reset(ctx) + } +} diff --git a/private/buf/buflsp/jsonrpc_wrappers.go b/private/buf/buflsp/jsonrpc_wrappers.go new file mode 100644 index 0000000000..7b13dfa1de --- /dev/null +++ b/private/buf/buflsp/jsonrpc_wrappers.go @@ -0,0 +1,107 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package buflsp implements a language server for Protobuf. +// +// The main entry-point of this package is the Serve() function, which creates a new LSP server. +package buflsp + +import ( + "context" + + "go.lsp.dev/jsonrpc2" + "go.uber.org/zap" +) + +// wrapReplier wraps a jsonrpc2.Replier, allowing us to inject logging and tracing and so on. 
+func (l *lsp) wrapReplier(reply jsonrpc2.Replier, req jsonrpc2.Request) jsonrpc2.Replier { + return func(ctx context.Context, result any, err error) error { + if err != nil { + l.logger.Warn( + "responding with error", + zap.String("method", req.Method()), + zap.Error(err), + ) + } else { + l.logger.Debug( + "responding", + zap.String("method", req.Method()), + zap.Reflect("params", result), + ) + } + + return reply(ctx, result, err) + } +} + +// connWrapper wraps a connection and logs calls and notifications. +// +// By default, the ClientDispatcher does not log the bodies of requests and responses, making +// for much lower-quality debugging. +type connWrapper struct { + jsonrpc2.Conn + + logger *zap.Logger +} + +func (c *connWrapper) Call( + ctx context.Context, method string, params, result any) (id jsonrpc2.ID, err error) { + c.logger.Debug( + "call", + zap.String("method", method), + zap.Reflect("params", params), + ) + + id, err = c.Conn.Call(ctx, method, params, result) + if err != nil { + c.logger.Warn( + "call returned error", + zap.String("method", method), + zap.Error(err), + ) + } else { + c.logger.Warn( + "call returned", + zap.String("method", method), + zap.Reflect("result", result), + ) + } + + return +} + +func (c *connWrapper) Notify( + ctx context.Context, method string, params any) error { + c.logger.Debug( + "notify", + zap.String("method", method), + zap.Reflect("params", params), + ) + + err := c.Conn.Notify(ctx, method, params) + if err != nil { + c.logger.Warn( + "notify returned error", + zap.String("method", method), + zap.Error(err), + ) + } else { + c.logger.Warn( + "notify returned", + zap.String("method", method), + ) + } + + return err +} diff --git a/private/buf/buflsp/mutex.go b/private/buf/buflsp/mutex.go new file mode 100644 index 0000000000..c377b126b7 --- /dev/null +++ b/private/buf/buflsp/mutex.go @@ -0,0 +1,182 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file defines various concurrency helpers. + +package buflsp + +import ( + "context" + "fmt" + "sync" + "sync/atomic" +) + +const poison = ^uint64(0) + +var nextRequestID atomic.Uint64 + +// mutexPool represents a group of reentrant muteces that cannot be acquired simultaneously. +// +// A zero mutexPool is ready to use. +type mutexPool struct { + lock sync.Mutex + held map[uint64]*mutex +} + +// NewMutex creates a new mutex in this pool. +func (mp *mutexPool) NewMutex() mutex { + return mutex{pool: mp} +} + +// check checks what id is either not holding a lock, or is holding the given +// map, depending on whether isUnlock is set. 
+func (mp *mutexPool) check(id uint64, mu *mutex, isUnlock bool) {
+	if mp == nil {
+		return
+	}
+
+	mp.lock.Lock()
+	defer mp.lock.Unlock()
+
+	if mp.held == nil {
+		mp.held = make(map[uint64]*mutex)
+	}
+
+	if isUnlock {
+		if held := mp.held[id]; held != mu {
+			panic(fmt.Sprintf("buflsp/mutex.go: attempted to unlock incorrect non-reentrant lock: %p -> %p", held, mu))
+		}
+
+		delete(mp.held, id)
+	} else {
+		if held := mp.held[id]; held != nil {
+			panic(fmt.Sprintf("buflsp/mutex.go: attempted to acquire two non-reentrant locks at once: %p -> %p", mu, held))
+		}
+
+		mp.held[id] = mu
+	}
+}
+
+// mutex is a sync.Mutex with some extra features.
+//
+// The main feature is reentrancy-checking. Within the LSP, we need to lock-protect many structures,
+// and it is very easy to deadlock if the same request tries to lock something multiple times.
+// To achieve this, Lock() takes a context, which must be modified by withRequestID().
+type mutex struct {
+	lock sync.Mutex
+	// This is the id of the context currently holding the lock.
+	who  atomic.Uint64
+	pool *mutexPool
+}
+
+// Lock attempts to acquire this mutex or blocks.
+//
+// Unlike [sync.Mutex.Lock], this takes a Context. If that context was updated with withRequestID,
+// this function will panic when attempting to lock the mutex while it is already held by a
+// goroutine using this same context.
+//
+// NOTE: calls to Lock() and Unlock() with the same context DO NOT synchronize with each other. For example,
+// attempting to lock this mutex from two different goroutines with the same context will
+// result in undefined behavior.
+//
+// Also unlike [sync.Mutex.Lock], it returns an idempotent unlocker function. This can be used like
+// defer mu.Lock()(). Note that only the outer function call is deferred: this is part of the
+// definition of defer. See https://go.dev/play/p/RJNKRcoQRo1. This unlocker can also be used to
+// defer unlocking but also unlock before the function returns.
+//
+// The returned unlocker is not thread-safe.
+func (mu *mutex) Lock(ctx context.Context) (unlocker func()) {
+	var unlocked bool
+	unlocker = func() {
+		if unlocked {
+			return
+		}
+		mu.Unlock(ctx)
+		unlocked = true
+	}
+
+	id := getRequestID(ctx)
+
+	if mu.who.Load() == id && id > 0 {
+		// We seem to have tried to lock this lock twice. Panic, and poison the lock.
+		mu.who.Store(poison)
+		panic("buflsp/mutex.go: non-reentrant lock locked twice by the same request")
+	}
+
+	mu.pool.check(id, mu, false)
+
+	// Ok, we're definitely not holding a lock, so we can block until we acquire the lock.
+	mu.lock.Lock()
+	mu.storeWho(id)
+
+	return unlocker
+}
+
+// Unlock releases this mutex.
+//
+// Unlock must be called with the same context that locked it, otherwise this function panics.
+func (mu *mutex) Unlock(ctx context.Context) {
+	id := getRequestID(ctx)
+	if mu.who.Load() != id {
+		panic("buflsp/mutex.go: lock was locked by one request and unlocked by another")
+	}
+
+	mu.storeWho(0)
+
+	mu.pool.check(id, mu, true)
+	mu.lock.Unlock()
+}
+
+func (mu *mutex) storeWho(id uint64) {
+	for {
+		// This has to be a CAS loop to avoid races with a poisoning operation.
+		old := mu.who.Load()
+		if old == poison {
+			panic("buflsp/mutex.go: non-reentrant lock locked twice by the same request")
+		}
+		if mu.who.CompareAndSwap(old, id) {
+			break
+		}
+	}
+}
+
+// withRequestID assigns a unique request ID to the given context, which can be retrieved
+// with getRequestID.
+func withRequestID(ctx context.Context) context.Context {
+	// This will always be unique. It is impossible to increment a uint64 and wrap around before
+	// the heat death of the universe.
+	id := nextRequestID.Add(1)
+	// We need to give the context package a unique identifier for the request; it can be
+	// any value. The address of the global we mint new IDs from is actually great for this,
+	// because users can't access it outside of this package, nor can they extract it out
+	// of the context itself.
+	return context.WithValue(ctx, &nextRequestID, id)
+}
+
+// getRequestID returns the request ID for this context, or 0 if ctx is nil or has no
+// such ID.
+func getRequestID(ctx context.Context) uint64 {
+	if ctx == nil {
+		return 0
+	}
+	id, ok := ctx.Value(&nextRequestID).(uint64)
+	if !ok {
+		return 0
+	}
+
+	// Make sure we don't return 0. This is the only place where the id is actually
+	// witnessed so doing +1 won't affect anything.
+	return id + 1
+}
diff --git a/private/buf/buflsp/nyi.go b/private/buf/buflsp/nyi.go
new file mode 100644
index 0000000000..9b19696d54
--- /dev/null
+++ b/private/buf/buflsp/nyi.go
@@ -0,0 +1,226 @@
+// Copyright 2020-2024 Buf Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file provides an implementation of protocol.Server where every function returns an error.
+
+package buflsp
+
+import (
+	"context"
+	"fmt"
+	"runtime"
+
+	"go.lsp.dev/protocol"
+)
+
+// validate the protocol.Server implementation.
+var _ protocol.Server = nyi{}
+
+// nyi implements protocol.Server, but every function returns an error.
+type nyi struct{}
+
+// NOTE: The functions below were generated using code completion. Do not edit!
+ +func (nyi) CodeAction(ctx context.Context, params *protocol.CodeActionParams) (result []protocol.CodeAction, err error) { + return nil, newNYIError() +} +func (nyi) CodeLens(ctx context.Context, params *protocol.CodeLensParams) (result []protocol.CodeLens, err error) { + return nil, newNYIError() +} +func (nyi) CodeLensRefresh(ctx context.Context) (err error) { + return newNYIError() +} +func (nyi) CodeLensResolve(ctx context.Context, params *protocol.CodeLens) (result *protocol.CodeLens, err error) { + return nil, newNYIError() +} +func (nyi) ColorPresentation(ctx context.Context, params *protocol.ColorPresentationParams) (result []protocol.ColorPresentation, err error) { + return nil, newNYIError() +} +func (nyi) Completion(ctx context.Context, params *protocol.CompletionParams) (result *protocol.CompletionList, err error) { + return nil, newNYIError() +} +func (nyi) CompletionResolve(ctx context.Context, params *protocol.CompletionItem) (result *protocol.CompletionItem, err error) { + return nil, newNYIError() +} +func (nyi) Declaration(ctx context.Context, params *protocol.DeclarationParams) (result []protocol.Location, err error) { + return nil, newNYIError() +} +func (nyi) Definition(ctx context.Context, params *protocol.DefinitionParams) (result []protocol.Location, err error) { + return nil, newNYIError() +} +func (nyi) DidChange(ctx context.Context, params *protocol.DidChangeTextDocumentParams) (err error) { + return newNYIError() +} +func (nyi) DidChangeConfiguration(ctx context.Context, params *protocol.DidChangeConfigurationParams) (err error) { + return newNYIError() +} +func (nyi) DidChangeWatchedFiles(ctx context.Context, params *protocol.DidChangeWatchedFilesParams) (err error) { + return newNYIError() +} +func (nyi) DidChangeWorkspaceFolders(ctx context.Context, params *protocol.DidChangeWorkspaceFoldersParams) (err error) { + return newNYIError() +} +func (nyi) DidClose(ctx context.Context, params *protocol.DidCloseTextDocumentParams) (err error) { + return newNYIError() +} +func (nyi) DidCreateFiles(ctx context.Context, params *protocol.CreateFilesParams) (err error) { + return newNYIError() +} +func (nyi) DidDeleteFiles(ctx context.Context, params *protocol.DeleteFilesParams) (err error) { + return newNYIError() +} +func (nyi) DidOpen(ctx context.Context, params *protocol.DidOpenTextDocumentParams) (err error) { + return newNYIError() +} +func (nyi) DidRenameFiles(ctx context.Context, params *protocol.RenameFilesParams) (err error) { + return newNYIError() +} +func (nyi) DidSave(ctx context.Context, params *protocol.DidSaveTextDocumentParams) (err error) { + return newNYIError() +} +func (nyi) DocumentColor(ctx context.Context, params *protocol.DocumentColorParams) (result []protocol.ColorInformation, err error) { + return nil, newNYIError() +} +func (nyi) DocumentHighlight(ctx context.Context, params *protocol.DocumentHighlightParams) (result []protocol.DocumentHighlight, err error) { + return nil, newNYIError() +} +func (nyi) DocumentLink(ctx context.Context, params *protocol.DocumentLinkParams) (result []protocol.DocumentLink, err error) { + return nil, newNYIError() +} +func (nyi) DocumentLinkResolve(ctx context.Context, params *protocol.DocumentLink) (result *protocol.DocumentLink, err error) { + return nil, newNYIError() +} +func (nyi) DocumentSymbol(ctx context.Context, params *protocol.DocumentSymbolParams) (result []interface{}, err error) { + return nil, newNYIError() +} +func (nyi) ExecuteCommand(ctx context.Context, params *protocol.ExecuteCommandParams) 
(result interface{}, err error) { + return nil, newNYIError() +} +func (nyi) Exit(ctx context.Context) (err error) { + return newNYIError() +} +func (nyi) FoldingRanges(ctx context.Context, params *protocol.FoldingRangeParams) (result []protocol.FoldingRange, err error) { + return nil, newNYIError() +} +func (nyi) Formatting(ctx context.Context, params *protocol.DocumentFormattingParams) (result []protocol.TextEdit, err error) { + return nil, newNYIError() +} +func (nyi) Hover(ctx context.Context, params *protocol.HoverParams) (result *protocol.Hover, err error) { + return nil, newNYIError() +} +func (nyi) Implementation(ctx context.Context, params *protocol.ImplementationParams) (result []protocol.Location, err error) { + return nil, newNYIError() +} +func (nyi) IncomingCalls(ctx context.Context, params *protocol.CallHierarchyIncomingCallsParams) (result []protocol.CallHierarchyIncomingCall, err error) { + return nil, newNYIError() +} +func (nyi) Initialize(ctx context.Context, params *protocol.InitializeParams) (result *protocol.InitializeResult, err error) { + return nil, newNYIError() +} +func (nyi) Initialized(ctx context.Context, params *protocol.InitializedParams) (err error) { + return newNYIError() +} +func (nyi) LinkedEditingRange(ctx context.Context, params *protocol.LinkedEditingRangeParams) (result *protocol.LinkedEditingRanges, err error) { + return nil, newNYIError() +} +func (nyi) LogTrace(ctx context.Context, params *protocol.LogTraceParams) (err error) { + return newNYIError() +} +func (nyi) Moniker(ctx context.Context, params *protocol.MonikerParams) (result []protocol.Moniker, err error) { + return nil, newNYIError() +} +func (nyi) OnTypeFormatting(ctx context.Context, params *protocol.DocumentOnTypeFormattingParams) (result []protocol.TextEdit, err error) { + return nil, newNYIError() +} +func (nyi) OutgoingCalls(ctx context.Context, params *protocol.CallHierarchyOutgoingCallsParams) (result []protocol.CallHierarchyOutgoingCall, err error) { + return nil, newNYIError() +} +func (nyi) PrepareCallHierarchy(ctx context.Context, params *protocol.CallHierarchyPrepareParams) (result []protocol.CallHierarchyItem, err error) { + return nil, newNYIError() +} +func (nyi) PrepareRename(ctx context.Context, params *protocol.PrepareRenameParams) (result *protocol.Range, err error) { + return nil, newNYIError() +} +func (nyi) RangeFormatting(ctx context.Context, params *protocol.DocumentRangeFormattingParams) (result []protocol.TextEdit, err error) { + return nil, newNYIError() +} +func (nyi) References(ctx context.Context, params *protocol.ReferenceParams) (result []protocol.Location, err error) { + return nil, newNYIError() +} +func (nyi) Rename(ctx context.Context, params *protocol.RenameParams) (result *protocol.WorkspaceEdit, err error) { + return nil, newNYIError() +} +func (nyi) Request(ctx context.Context, method string, params interface{}) (result interface{}, err error) { + return nil, newNYIError() +} +func (nyi) SemanticTokensFull(ctx context.Context, params *protocol.SemanticTokensParams) (result *protocol.SemanticTokens, err error) { + return nil, newNYIError() +} +func (nyi) SemanticTokensFullDelta(ctx context.Context, params *protocol.SemanticTokensDeltaParams) (result interface{}, err error) { + return nil, newNYIError() +} +func (nyi) SemanticTokensRange(ctx context.Context, params *protocol.SemanticTokensRangeParams) (result *protocol.SemanticTokens, err error) { + return nil, newNYIError() +} +func (nyi) SemanticTokensRefresh(ctx context.Context) (err error) { + 
return newNYIError() +} +func (nyi) SetTrace(ctx context.Context, params *protocol.SetTraceParams) (err error) { + return newNYIError() +} +func (nyi) ShowDocument(ctx context.Context, params *protocol.ShowDocumentParams) (result *protocol.ShowDocumentResult, err error) { + return nil, newNYIError() +} +func (nyi) Shutdown(ctx context.Context) (err error) { + return newNYIError() +} +func (nyi) SignatureHelp(ctx context.Context, params *protocol.SignatureHelpParams) (result *protocol.SignatureHelp, err error) { + return nil, newNYIError() +} +func (nyi) Symbols(ctx context.Context, params *protocol.WorkspaceSymbolParams) (result []protocol.SymbolInformation, err error) { + return nil, newNYIError() +} +func (nyi) TypeDefinition(ctx context.Context, params *protocol.TypeDefinitionParams) (result []protocol.Location, err error) { + return nil, newNYIError() +} +func (nyi) WillCreateFiles(ctx context.Context, params *protocol.CreateFilesParams) (result *protocol.WorkspaceEdit, err error) { + return nil, newNYIError() +} +func (nyi) WillDeleteFiles(ctx context.Context, params *protocol.DeleteFilesParams) (result *protocol.WorkspaceEdit, err error) { + return nil, newNYIError() +} +func (nyi) WillRenameFiles(ctx context.Context, params *protocol.RenameFilesParams) (result *protocol.WorkspaceEdit, err error) { + return nil, newNYIError() +} +func (nyi) WillSave(ctx context.Context, params *protocol.WillSaveTextDocumentParams) (err error) { + return newNYIError() +} +func (nyi) WillSaveWaitUntil(ctx context.Context, params *protocol.WillSaveTextDocumentParams) (result []protocol.TextEdit, err error) { + return nil, newNYIError() +} +func (nyi) WorkDoneProgressCancel(ctx context.Context, params *protocol.WorkDoneProgressCancelParams) (err error) { + return newNYIError() +} + +// nyi returns a "not yet implemented" error containing the name of the function that called it. +func newNYIError() error { + caller := "" + if pc, _, _, ok := runtime.Caller(1); ok { + if fn := runtime.FuncForPC(pc); fn != nil { + caller = fn.Name() + } + } + + return fmt.Errorf("not yet implemented, sorry: %s", caller) +} diff --git a/private/buf/buflsp/progress.go b/private/buf/buflsp/progress.go new file mode 100644 index 0000000000..276c8c0387 --- /dev/null +++ b/private/buf/buflsp/progress.go @@ -0,0 +1,100 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package buflsp + +import ( + "context" + "fmt" + "math/rand/v2" + + "go.lsp.dev/protocol" +) + +// progress is a client-side progress bar. +// +// This type manages all the necessary state with the client to show the +// progress bar. +type progress struct { + lsp *lsp + token string +} + +// Creates new server-initiated progress. +func newProgress(lsp *lsp) *progress { + return &progress{ + lsp: lsp, + token: fmt.Sprintf("%016x", rand.Uint64()), + } +} + +// Creates progress to track client-initiated progress. 
+//
+// If params is nil (i.e., the client doesn't want progress) this returns a nil progress
+// that will do nothing when notified.
+func newProgressFromClient(lsp *lsp, params *protocol.WorkDoneProgressParams) *progress {
+	if params == nil || params.WorkDoneToken == nil {
+		return nil
+	}
+
+	return &progress{
+		lsp:   lsp,
+		token: params.WorkDoneToken.String(),
+	}
+}
+
+func (p *progress) Begin(ctx context.Context, title string) {
+	if p == nil {
+		return
+	}
+
+	// NOTE: The error is automatically logged by the client binding.
+	_ = p.lsp.client.Progress(ctx, &protocol.ProgressParams{
+		Token: *protocol.NewProgressToken(p.token),
+		Value: &protocol.WorkDoneProgressBegin{
+			Kind:  protocol.WorkDoneProgressKindBegin,
+			Title: title,
+		},
+	})
+}
+
+func (p *progress) Report(ctx context.Context, message string, percent float64) {
+	if p == nil {
+		return
+	}
+
+	// NOTE: The error is automatically logged by the client binding.
+	_ = p.lsp.client.Progress(ctx, &protocol.ProgressParams{
+		Token: *protocol.NewProgressToken(p.token),
+		Value: &protocol.WorkDoneProgressReport{
+			Kind:       protocol.WorkDoneProgressKindReport,
+			Message:    message,
+			Percentage: uint32(percent * 100),
+		},
+	})
+}
+
+func (p *progress) Done(ctx context.Context) {
+	if p == nil {
+		return
+	}
+
+	// NOTE: The error is automatically logged by the client binding.
+	_ = p.lsp.client.Progress(ctx, &protocol.ProgressParams{
+		Token: *protocol.NewProgressToken(p.token),
+		Value: &protocol.WorkDoneProgressEnd{
+			Kind: protocol.WorkDoneProgressKindEnd,
+		},
+	})
+}
diff --git a/private/buf/buflsp/report.go b/private/buf/buflsp/report.go
new file mode 100644
index 0000000000..6e71363546
--- /dev/null
+++ b/private/buf/buflsp/report.go
@@ -0,0 +1,83 @@
+// Copyright 2020-2024 Buf Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file provides helpers for bridging protocompile and LSP diagnostics.
+
+package buflsp
+
+import (
+	"fmt"
+
+	"github.com/bufbuild/protocompile/linker"
+	"github.com/bufbuild/protocompile/parser"
+	"github.com/bufbuild/protocompile/reporter"
+	"go.lsp.dev/protocol"
+)
+
+// report is a reporter.Reporter that captures diagnostic events as
+// protocol.Diagnostic values.
+type report struct {
+	diagnostics         []protocol.Diagnostic
+	syntaxMissing       map[string]bool
+	pathToUnusedImports map[string]map[string]bool
+}
+
+// Error implements reporter.Handler for *report.
+func (r *report) Error(err reporter.ErrorWithPos) error {
+	r.diagnostics = append(r.diagnostics, newDiagnostic(err, false))
+	return nil
+}
+
+// Warning implements reporter.Handler for *report.
+func (r *report) Warning(err reporter.ErrorWithPos) { + r.diagnostics = append(r.diagnostics, newDiagnostic(err, true)) + + if err.Unwrap() == parser.ErrNoSyntax { + r.syntaxMissing[err.GetPosition().Filename] = true + } else if unusedImport, ok := err.Unwrap().(linker.ErrorUnusedImport); ok { + path := err.GetPosition().Filename + unused, ok := r.pathToUnusedImports[path] + if !ok { + unused = map[string]bool{} + r.pathToUnusedImports[path] = unused + } + + unused[unusedImport.UnusedImport()] = true + } +} + +// newDiagnostic converts a protocompile error into a diagnostic. +// +// Unfortunately, protocompile's errors are currently too meagre to provide full code +// spans; that will require a fix in the compiler. +func newDiagnostic(err reporter.ErrorWithPos, isWarning bool) protocol.Diagnostic { + pos := protocol.Position{ + Line: uint32(err.GetPosition().Line - 1), + Character: uint32(err.GetPosition().Col - 1), + } + + diagnostic := protocol.Diagnostic{ + // TODO: The compiler currently does not record spans for diagnostics. This is + // essentially a bug that will result in worse diagnostics until fixed. + Range: protocol.Range{Start: pos, End: pos}, + Severity: protocol.DiagnosticSeverityError, + Message: fmt.Sprintf("%s:%d:%d: %s", err.GetPosition().Filename, err.GetPosition().Line, err.GetPosition().Col, err.Unwrap().Error()), + } + + if isWarning { + diagnostic.Severity = protocol.DiagnosticSeverityWarning + } + + return diagnostic +} diff --git a/private/buf/buflsp/server.go b/private/buf/buflsp/server.go new file mode 100644 index 0000000000..ada3cea0a7 --- /dev/null +++ b/private/buf/buflsp/server.go @@ -0,0 +1,399 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package buflsp implements a language server for Protobuf. +// +// The main entry-point of this package is the Serve() function, which creates a new LSP server. +package buflsp + +import ( + "context" + "fmt" + "runtime/debug" + "strings" + "time" + + "github.com/bufbuild/buf/private/buf/bufformat" + "github.com/bufbuild/protocompile/ast" + "go.lsp.dev/protocol" +) + +const ( + semanticTypeType = iota + semanticTypeStruct + semanticTypeVariable + semanticTypeEnum + semanticTypeEnumMember + semanticTypeInterface + semanticTypeMethod + semanticTypeDecorator +) + +var ( + // These slices must match the order of the indices in the above const block. + semanticTypeLegend = []string{ + "type", "struct", "variable", "enum", + "enumMember", "interface", "method", "decorator", + } + semanticModifierLegend = []string{} +) + +// server is an implementation of protocol.Server. +// +// This is a separate type from buflsp.lsp so that the dozens of handler methods for this +// type are kept separate from the rest of the logic. +// +// See https://microsoft.github.io/language-server-protocol/specifications/lsp/3.17/specification. +type server struct { + // This automatically implements all of protocol.Server for us. By default, + // every method returns an error. 
+	nyi
+
+	// We embed the LSP pointer as well, since it only has private members.
+	*lsp
+}
+
+// newServer creates a protocol.Server implementation out of an lsp.
+func newServer(lsp *lsp) protocol.Server {
+	return &server{lsp: lsp}
+}
+
+// Methods for server are grouped according to the groups in the LSP protocol specification.
+
+// -- Lifecycle Methods
+
+// Initialize is the first message the LSP receives from the client. This is where all
+// initialization of the server with respect to the project it is invoked on must occur.
+func (s *server) Initialize(
+	ctx context.Context,
+	params *protocol.InitializeParams,
+) (*protocol.InitializeResult, error) {
+	if err := s.init(params); err != nil {
+		return nil, err
+	}
+
+	info := &protocol.ServerInfo{Name: "buf-lsp"}
+	if buildInfo, ok := debug.ReadBuildInfo(); ok {
+		info.Version = buildInfo.Main.Version
+	}
+
+	// The LSP protocol library doesn't actually provide SemanticTokensOptions
+	// correctly.
+	type SemanticTokensLegend struct {
+		TokenTypes     []string `json:"tokenTypes"`
+		TokenModifiers []string `json:"tokenModifiers"`
+	}
+	type SemanticTokensOptions struct {
+		protocol.WorkDoneProgressOptions
+
+		Legend SemanticTokensLegend `json:"legend"`
+		Full   bool                 `json:"full"`
+	}
+
+	return &protocol.InitializeResult{
+		Capabilities: protocol.ServerCapabilities{
+			// These are all the things we advertise to the client we can do.
+			// For now, incomplete features are explicitly disabled here as TODOs.
+			TextDocumentSync: &protocol.TextDocumentSyncOptions{
+				OpenClose: true,
+				// Request that whole files be sent to us. Protobuf IDL files don't
+				// usually get especially huge, so this simplifies our logic without
+				// necessarily making the LSP slow.
+				Change: protocol.TextDocumentSyncKindFull,
+			},
+			DefinitionProvider: &protocol.DefinitionOptions{
+				WorkDoneProgressOptions: protocol.WorkDoneProgressOptions{WorkDoneProgress: true},
+			},
+			DocumentFormattingProvider: true,
+			HoverProvider:              true,
+			SemanticTokensProvider: &SemanticTokensOptions{
+				WorkDoneProgressOptions: protocol.WorkDoneProgressOptions{WorkDoneProgress: true},
+				Legend: SemanticTokensLegend{
+					TokenTypes:     semanticTypeLegend,
+					TokenModifiers: semanticModifierLegend,
+				},
+				Full: true,
+			},
+		},
+		ServerInfo: info,
+	}, nil
+}
+
+// Initialized is sent by the client after it receives the Initialize response and has
+// initialized itself. This is only a notification.
+func (s *server) Initialized(
+	ctx context.Context,
+	params *protocol.InitializedParams,
+) error {
+	return nil
+}
+
+func (s *server) SetTrace(
+	ctx context.Context,
+	params *protocol.SetTraceParams,
+) error {
+	s.lsp.traceValue.Store(&params.Value)
+	return nil
+}
+
+// Shutdown is sent by the client when it wants the server to shut down and exit.
+// The client will wait until Shutdown returns, and then call Exit.
+func (s *server) Shutdown(ctx context.Context) error {
+	return nil
+}
+
+// Exit is a notification that the client has seen shutdown complete, and that the
+// server should now exit.
+func (s *server) Exit(ctx context.Context) error {
+	// TODO: return an error if Shutdown() has not been called yet.
+
+	// Close the connection. This will let the server shut down gracefully once this
+	// notification is replied to.
+	return s.lsp.conn.Close()
+}
+
+// -- File synchronization methods.
+
+// DidOpen is called whenever the client opens a document. This is our signal to parse
+// the file.
+func (s *server) DidOpen(
+	ctx context.Context,
+	params *protocol.DidOpenTextDocumentParams,
+) error {
+	file := s.fileManager.Open(ctx, params.TextDocument.URI)
+	file.Update(ctx, params.TextDocument.Version, params.TextDocument.Text)
+	go file.Refresh(context.WithoutCancel(ctx))
+	return nil
+}
+
+// DidChange is called whenever the client changes an open document. This is our signal to
+// re-parse the file.
+func (s *server) DidChange(
+	ctx context.Context,
+	params *protocol.DidChangeTextDocumentParams,
+) error {
+	file := s.fileManager.Get(params.TextDocument.URI)
+	if file == nil {
+		// Update for a file we don't know about? Seems bad!
+		return fmt.Errorf("received update for file that was not open: %q", params.TextDocument.URI)
+	}
+
+	file.Update(ctx, params.TextDocument.Version, params.ContentChanges[0].Text)
+	go file.Refresh(context.WithoutCancel(ctx))
+	return nil
+}
+
+// Formatting is called whenever the user explicitly requests formatting.
+func (s *server) Formatting(
+	ctx context.Context,
+	params *protocol.DocumentFormattingParams,
+) ([]protocol.TextEdit, error) {
+	file := s.fileManager.Get(params.TextDocument.URI)
+	if file == nil {
+		// Format for a file we don't know about? Seems bad!
+		return nil, fmt.Errorf("received formatting request for file that was not open: %q", params.TextDocument.URI)
+	}
+
+	// Currently we have no way to honor any of the parameters.
+	_ = params
+
+	file.lock.Lock(ctx)
+	defer file.lock.Unlock(ctx)
+	if file.fileNode == nil {
+		return nil, nil
+	}
+
+	var out strings.Builder
+	if err := bufformat.FormatFileNode(&out, file.fileNode); err != nil {
+		return nil, err
+	}
+
+	return []protocol.TextEdit{
+		{
+			Range:   infoToRange(file.fileNode.NodeInfo(file.fileNode)),
+			NewText: out.String(),
+		},
+	}, nil
+}
+
+// DidClose is called whenever the client closes a document.
+func (s *server) DidClose(
+	ctx context.Context,
+	params *protocol.DidCloseTextDocumentParams,
+) error {
+	s.fileManager.Close(ctx, params.TextDocument.URI)
+	return nil
+}
+
+// -- Language functionality methods.
+
+// Hover is the entry point for hover inlays.
+func (s *server) Hover(
+	ctx context.Context,
+	params *protocol.HoverParams,
+) (*protocol.Hover, error) {
+	file := s.fileManager.Get(params.TextDocument.URI)
+	if file == nil {
+		return nil, nil
+	}
+
+	symbol := file.SymbolAt(ctx, params.Position)
+	if symbol == nil {
+		return nil, nil
+	}
+
+	docs := symbol.FormatDocs(ctx)
+	if docs == "" {
+		return nil, nil
+	}
+
+	// Escape < and > occurrences in the docs.
+	replacer := strings.NewReplacer("<", "&lt;", ">", "&gt;")
+	docs = replacer.Replace(docs)
+
+	range_ := symbol.Range() // Need to spill this here because Hover.Range is a pointer.
+	return &protocol.Hover{
+		Contents: protocol.MarkupContent{
+			Kind:  protocol.Markdown,
+			Value: docs,
+		},
+		Range: &range_,
+	}, nil
+}
+
+// Definition is the entry point for go-to-definition.
+func (s *server) Definition(
+	ctx context.Context,
+	params *protocol.DefinitionParams,
+) ([]protocol.Location, error) {
+	file := s.fileManager.Get(params.TextDocument.URI)
+	if file == nil {
+		return nil, nil
+	}
+
+	progress := newProgressFromClient(s.lsp, &params.WorkDoneProgressParams)
+	progress.Begin(ctx, "Searching")
+	defer progress.Done(ctx)
+
+	symbol := file.SymbolAt(ctx, params.Position)
+	if symbol == nil {
+		return nil, nil
+	}
+
+	if imp, ok := symbol.kind.(*import_); ok {
+		// This is an import, we just want to jump to the file.
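+		// Since Range is left at its zero value, the location points at the very
+		// start of the imported file, which is where the editor will jump.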
+		return []protocol.Location{{URI: imp.file.uri}}, nil
+	}
+
+	def, _ := symbol.Definition(ctx)
+	if def != nil {
+		return []protocol.Location{{
+			URI:   def.file.uri,
+			Range: def.Range(),
+		}}, nil
+	}
+
+	return nil, nil
+}
+
+// SemanticTokensFull is called to render semantic token information on the client.
+func (s *server) SemanticTokensFull(
+	ctx context.Context,
+	params *protocol.SemanticTokensParams,
+) (*protocol.SemanticTokens, error) {
+	file := s.fileManager.Get(params.TextDocument.URI)
+	if file == nil {
+		return nil, nil
+	}
+
+	progress := newProgressFromClient(s.lsp, &params.WorkDoneProgressParams)
+	progress.Begin(ctx, "Processing Tokens")
+	defer progress.Done(ctx)
+
+	var symbols []*symbol
+	for {
+		file.lock.Lock(ctx)
+		symbols = file.symbols
+		file.lock.Unlock(ctx)
+		if symbols != nil {
+			break
+		}
+		time.Sleep(1 * time.Millisecond)
+	}
+
+	var (
+		encoded           []uint32
+		prevLine, prevCol uint32
+	)
+	for i, symbol := range symbols {
+		progress.Report(ctx, fmt.Sprintf("%d/%d", i+1, len(symbols)), float64(i)/float64(len(symbols)))
+
+		var semanticType uint32
+
+		if symbol.isOption {
+			semanticType = semanticTypeDecorator
+		} else if def, defNode := symbol.Definition(ctx); def != nil {
+			switch defNode.(type) {
+			case *ast.FileNode:
+				continue
+			case *ast.MessageNode, *ast.GroupNode:
+				semanticType = semanticTypeStruct
+			case *ast.FieldNode, *ast.MapFieldNode, *ast.OneofNode:
+				semanticType = semanticTypeVariable
+			case *ast.EnumNode:
+				semanticType = semanticTypeEnum
+			case *ast.EnumValueNode:
+				semanticType = semanticTypeEnumMember
+			case *ast.ServiceNode:
+				semanticType = semanticTypeInterface
+			case *ast.RPCNode:
+				semanticType = semanticTypeMethod
+			}
+		} else if _, ok := symbol.kind.(*builtin); ok {
+			semanticType = semanticTypeType
+		} else {
+			continue
+		}
+
+		// This fairly painful encoding is described in detail here:
+		// https://microsoft.github.io/language-server-protocol/specifications/lsp/3.17/specification/#textDocument_semanticTokens
+		start, end := symbol.info.Start(), symbol.info.End()
+		for i := start.Line; i <= end.Line; i++ {
+			newLine := uint32(i - 1)
+			var newCol uint32
+			if i == start.Line {
+				newCol = uint32(start.Col - 1)
+				if prevLine == newLine {
+					newCol -= prevCol
+				}
+			}
+
+			symbolLen := uint32(end.Col - 1)
+			if i == start.Line {
+				symbolLen -= uint32(start.Col - 1)
+			}
+
+			encoded = append(encoded, newLine-prevLine, newCol, symbolLen, semanticType, 0)
+			prevLine = newLine
+			if i == start.Line {
+				prevCol = uint32(start.Col - 1)
+			} else {
+				prevCol = 0
+			}
+		}
+	}
+
+	return &protocol.SemanticTokens{Data: encoded}, nil
+}
diff --git a/private/buf/buflsp/symbol.go b/private/buf/buflsp/symbol.go
new file mode 100644
index 0000000000..46bddee5ab
--- /dev/null
+++ b/private/buf/buflsp/symbol.go
@@ -0,0 +1,870 @@
+// Copyright 2020-2024 Buf Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file defines all of the message handlers that involve symbols.
+// +// In particular, this file handles semantic information in fileManager that have been +// *opened by the editor*, and thus do not need references to Buf modules to find. +// See imports.go for that part of the LSP. + +package buflsp + +import ( + "context" + "fmt" + "slices" + "strings" + + "github.com/bufbuild/buf/private/pkg/slicesext" + "github.com/bufbuild/protocompile/ast" + "go.lsp.dev/protocol" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +// symbol represents a named symbol inside of a buflsp.file +type symbol struct { + // The file this symbol sits in. + file *file + + // The node containing the symbol's name. + name ast.Node + // Node info for the symbol itself. This specifies the region of the file + // that contains this symbol. + info ast.NodeInfo + // What kind of symbol this is. + kind symbolKind + + // Whether this symbol came from an option node. + isOption bool +} + +// symbolKind is a kind of symbol. It is implemented by *definition, *reference, and *import_. +type symbolKind interface { + isSymbolKind() +} + +// definition is a symbol that is a definition. +type definition struct { + // The node of the overall definition. E.g. for a message this is the whole message node. + node ast.Node + // The fully qualified path of this symbol, not including its package (which is implicit from + // its file.) + path []string +} + +// reference is a reference to a symbol in some other file. +type reference struct { + // The file this symbol is defined in. Nil if this reference is unresolved. + file *file + // The fully qualified path of this symbol, not including its package (which is implicit from + // its definition file.) + path []string + + // If this is nonnil, this is a reference symbol to a field inside of an option path + // or composite textproto literal. For example, consider the code + // + // [(foo.bar).baz = xyz] + // + // baz is a symbol, whose reference depends on the type of foo.bar, which depends on the + // imports of the file foo.bar is defined in. + seeTypeOf *symbol + + // If this is nonnil, this is a non-custom option reference defined in the given node. + isNonCustomOptionIn ast.Node +} + +// import_ is a symbol representing an import. +type import_ struct { + // The imported file. Nil if this reference is unresolved. + file *file +} + +// builtin is a built-in symbol. +type builtin struct { + name string +} + +func (*definition) isSymbolKind() {} +func (*reference) isSymbolKind() {} +func (*import_) isSymbolKind() {} +func (*builtin) isSymbolKind() {} + +// Range constructs an LSP protocol code range for this symbol. +func (s *symbol) Range() protocol.Range { + return infoToRange(s.info) +} + +// Definition looks up the definition of this symbol, if known. +func (s *symbol) Definition(ctx context.Context) (*symbol, ast.Node) { + switch kind := s.kind.(type) { + case *definition: + return s, kind.node + case *reference: + if kind.file == nil { + return nil, nil + } + + kind.file.lock.Lock(ctx) + defer kind.file.lock.Unlock(ctx) + for _, symbol := range kind.file.symbols { + def, ok := symbol.kind.(*definition) + if ok && slices.Equal(kind.path, def.path) { + return symbol, def.node + } + } + } + + return nil, nil +} + +// ReferencePath returns the reference path of this string, i.e., the components of +// a path like foo.bar.Baz. +// +// Returns nil if the name of this symbol is not a path. 
+func (s *symbol) ReferencePath() (path []string, absolute bool) { + switch name := s.name.(type) { + case *ast.IdentNode: + path = []string{name.Val} + case *ast.CompoundIdentNode: + path = slicesext.Map(name.Components, func(name *ast.IdentNode) string { return name.Val }) + absolute = name.LeadingDot != nil + } + return +} + +// Resolve attempts to resolve an unresolved reference across fileManager. +func (s *symbol) ResolveCrossFile(ctx context.Context) { + switch kind := s.kind.(type) { + case *definition: + case *builtin: + case *import_: + // These symbols do not require resolution. + + case *reference: + if kind.file != nil { + // Already resolved, not our problem! + return + } + + components, _ := s.ReferencePath() + + // This is a field of some foreign type. We need to track down where this is. + if kind.seeTypeOf != nil { + ref, ok := kind.seeTypeOf.kind.(*reference) + if !ok || ref.file == nil { + s.file.lsp.logger.Debug( + "unexpected unresolved or non-reference symbol for seeTypeOf", + zap.Object("symbol", s)) + return + } + + // Fully index the file this reference is in, if different from the current. + if s.file != ref.file { + ref.file.Refresh(ctx) + } + + // Find the definition that contains the type we want. + def, node := kind.seeTypeOf.Definition(ctx) + if def == nil { + s.file.lsp.logger.Debug( + "could not resolve dependent symbol definition", + zap.Object("symbol", s), + zap.Object("dep", kind.seeTypeOf)) + return + } + + // Node here should be some kind of field. + // TODO: Support more exotic field types. + field, ok := node.(*ast.FieldNode) + if !ok { + s.file.lsp.logger.Debug( + "dependent symbol definition was not a field", + zap.Object("symbol", s), + zap.Object("dep", kind.seeTypeOf), + zap.Object("def", def)) + return + } + + // Now, find the symbol for the field's type in the file's symbol table. + // Searching by offset is faster. + info := def.file.fileNode.NodeInfo(field.FldType) + ty := def.file.SymbolAt(ctx, protocol.Position{ + Line: uint32(info.Start().Line) - 1, + Character: uint32(info.Start().Col) - 1, + }) + if ty == nil { + s.file.lsp.logger.Debug( + "dependent symbol's field type didn't resolve", + zap.Object("symbol", s), + zap.Object("dep", kind.seeTypeOf), + zap.Object("def", def)) + return + } + + // This will give us enough information to figure out the path of this + // symbol, namely, the name of the thing the symbol is inside of. We don't + // actually validate if the dependent symbol exists, because that will happen for us + // when we go to hover over the symbol. + ref, ok = ty.kind.(*reference) + if !ok || ty.file == nil { + s.file.lsp.logger.Debug( + "dependent symbol's field type didn't resolve to a reference", + zap.Object("symbol", s), + zap.Object("dep", kind.seeTypeOf), + zap.Object("def", def), + zap.Object("resolved", ty)) + return + } + + // Done. + kind.file = def.file + kind.path = append(slicesext.Copy(ref.path), components...) 
+ return + } + + if kind.isNonCustomOptionIn != nil { + var optionsType []string + switch kind.isNonCustomOptionIn.(type) { + case *ast.FileNode: + optionsType = []string{"FileOptions"} + case *ast.MessageNode: + optionsType = []string{"MessageOptions"} + case *ast.FieldNode, *ast.MapFieldNode: + optionsType = []string{"FieldOptions"} + case *ast.OneofNode: + optionsType = []string{"OneofOptions"} + case *ast.EnumNode: + optionsType = []string{"EnumOptions"} + case *ast.EnumValueNode: + optionsType = []string{"EnumValueOptions"} + case *ast.ServiceNode: + optionsType = []string{"ServiceOptions"} + case *ast.RPCNode: + optionsType = []string{"MethodOptions"} + case *ast.ExtensionRangeNode: + optionsType = []string{"DescriptorProto", "ExtensionRangeOptions"} + default: + // This node cannot contain options. + return + } + + fieldPath := append(optionsType, kind.path...) + + if slices.Equal(fieldPath, []string{"FieldOptions", "default"}) { + // This one is a bit magical. + s.kind = &builtin{name: "default"} + return + } + + // Make a copy of the import table pointer and then drop the lock, + // since searching inside of the imports will need to acquire other + // fileManager' locks. + s.file.lock.Lock(ctx) + descriptorProto := s.file.importToFile[descriptorPath] + s.file.lock.Unlock(ctx) + + if descriptorProto == nil { + return + } + + // Look for a symbol with this exact path in descriptor proto. + + descriptorProto.lock.Lock(ctx) + defer descriptorProto.lock.Unlock(ctx) + + var fieldSymbol *symbol + for _, symbol := range descriptorProto.symbols { + if def, ok := symbol.kind.(*definition); ok && slices.Equal(def.path, fieldPath) { + fieldSymbol = symbol + break + } + } + if fieldSymbol == nil { + return + } + + kind.file = descriptorProto + kind.path = fieldPath + return + } + + // Make a copy of the import table pointer and then drop the lock, + // since searching inside of the imports will need to acquire other + // fileManager' locks. + s.file.lock.Lock(ctx) + imports := s.file.importToFile + s.file.lock.Unlock(ctx) + + if imports == nil { + // Hopeless. We'll have to try again once we have imports! + return + } + + for _, imported := range imports { + // Remove a leading pkg from components. + path, ok := slicesext.TrimPrefix(components, imported.Package()) + if !ok { + continue + } + + if findDeclByPath(imported.fileNode.Decls, path) != nil { + kind.file = imported + kind.path = path + break + } + } + } +} + +func (s *symbol) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { + enc.AddString("file", s.file.uri.Filename()) + + // zapPos converts an ast.SourcePos into a zap marshaller. 
+ zapPos := func(pos ast.SourcePos) zapcore.ObjectMarshalerFunc { + return func(enc zapcore.ObjectEncoder) error { + enc.AddInt("offset", pos.Offset) + enc.AddInt("line", pos.Line) + enc.AddInt("col", pos.Col) + return nil + } + } + + err = enc.AddObject("start", zapPos(s.info.Start())) + if err != nil { + return err + } + + err = enc.AddObject("end", zapPos(s.info.End())) + if err != nil { + return err + } + + switch kind := s.kind.(type) { + case *builtin: + enc.AddString("builtin", kind.name) + + case *import_: + if kind.file != nil { + enc.AddString("imports", kind.file.uri.Filename()) + } + + case *definition: + enc.AddString("defines", strings.Join(kind.path, ".")) + + case *reference: + if kind.file != nil { + enc.AddString("imports", kind.file.uri.Filename()) + } + if kind.path != nil { + enc.AddString("references", strings.Join(kind.path, ".")) + } + if kind.seeTypeOf != nil { + err = enc.AddObject("see_type_of", kind.seeTypeOf) + if err != nil { + return err + } + } + } + + return nil +} + +// FormatDocs finds appropriate documentation for the given s and constructs a Markdown +// string for showing to the client. +// +// Returns the empty string if no docs are available. +func (s *symbol) FormatDocs(ctx context.Context) string { + var ( + tooltip strings.Builder + def *symbol + node ast.Node + path []string + ) + + switch kind := s.kind.(type) { + case *builtin: + fmt.Fprintf(&tooltip, "```proto\nbuiltin %s\n```\n", kind.name) + for _, line := range builtinDocs[kind.name] { + fmt.Fprintln(&tooltip, line) + } + + fmt.Fprintln(&tooltip) + fmt.Fprintf( + &tooltip, + "This symbol is a Protobuf builtin. [Learn more on protobuf.com.](https://protobuf.com/docs/language-spec#field-types)", + ) + return tooltip.String() + + case *reference: + def, node = s.Definition(ctx) + path = kind.path + + case *definition: + def = s + node = kind.node + path = kind.path + + default: + return "" + } + + pkg := "" + if pkgNode := def.file.packageNode; pkgNode != nil { + pkg = string(pkgNode.Name.AsIdentifier()) + } + + what := "unresolved" + switch node := node.(type) { + case *ast.FileNode: + what = "file" + case *ast.MessageNode: + what = "message" + case *ast.FieldNode: + what = "field" + if node.FieldExtendee() != nil { + what = "extension" + } + case *ast.MapFieldNode: + what = "field" + if node.FieldExtendee() != nil { + what = "extension" + } + case *ast.GroupNode: + what = "group" + case *ast.OneofNode: + what = "oneof" + case *ast.EnumNode: + what = "enum" + case *ast.EnumValueNode: + what = "const" + case *ast.ServiceNode: + what = "service" + case *ast.RPCNode: + what = "rpc" + } + + fmt.Fprintf(&tooltip, "```proto-decl\n%s %s.%s\n```\n\n", what, pkg, strings.Join(path, ".")) + + if node == nil { + fmt.Fprintln(&tooltip, "") + return tooltip.String() + } + + if def.file.imageFileInfo != nil { + path := strings.Join(path, ".") + + fmt.Fprintf( + &tooltip, + "[`%s.%s` on the Buf Schema Registry](https://%s/docs/main:%s#%s.%s)\n\n", + pkg, + path, + def.file.imageFileInfo.ModuleFullName(), + pkg, + pkg, + path, + ) + } + + // Dump all of the comments into the tooltip. These will be rendered as Markdown automatically + // by the client. 
+ info := def.file.fileNode.NodeInfo(node) + allComments := []ast.Comments{info.LeadingComments(), info.TrailingComments()} + var printed bool + for _, comments := range allComments { + for i := 0; i < comments.Len(); i++ { + comment := comments.Index(i).RawText() + + // The compiler does not currently provide comments without their + // delimited removed, so we have to do this ourselves. + if strings.HasPrefix(comment, "//") { + comment = strings.TrimSpace(strings.TrimPrefix(comment, "//")) + } else { + comment = strings.TrimSpace(strings.TrimSuffix(strings.TrimPrefix(comment, "/*"), "*/")) + } + + if comment != "" { + printed = true + } + + // No need to process Markdown in comment; this Just Works! + fmt.Fprintln(&tooltip, comment) + } + } + + if !printed { + fmt.Fprintln(&tooltip, "") + } + + return tooltip.String() +} + +// symbolWalker is an AST walker that generates the symbol table for a file in IndexSymbols(). +type symbolWalker struct { + file *file + + // This is the set of *ast.MessageNode, *ast.EnumNode, and *ast.ServiceNode that + // we have traversed. They are used for same-file symbol resolution, and for constructing + // the full paths of symbols. + path []ast.Node + + // This is a prefix sum of the length of each line in file.text. This is + // necessary for mapping a line+col value in a source position to byte offsets. + // + // lineSum[n] is the number of bytes on every line up to line n, including the \n + // byte on the current line. + lineSum []int +} + +// newWalker constructs a new walker from a file, constructing any necessary book-keeping. +func newWalker(file *file) *symbolWalker { + walker := &symbolWalker{ + file: file, + } + + // NOTE: Don't use range here, that produces runes, not bytes. + for i := 0; i < len(file.text); i++ { + if file.text[i] == '\n' { + walker.lineSum = append(walker.lineSum, i+1) + } + } + walker.lineSum = append(walker.lineSum, len(file.text)) + + return walker +} + +func (w *symbolWalker) Walk(node, parent ast.Node) { + if node == nil { + return + } + + // Save the stack depth on entry, so we can undo it on exit. + top := len(w.path) + defer func() { w.path = w.path[:top] }() + + switch node := node.(type) { + case *ast.FileNode: + for _, decl := range node.Decls { + w.Walk(decl, node) + } + + case *ast.ImportNode: + // Generate a symbol for the import string. This symbol points to a file, + // not another symbol. + symbol := w.newSymbol(node.Name) + import_ := new(import_) + symbol.kind = import_ + if imported, ok := w.file.importToFile[node.Name.AsString()]; ok { + import_.file = imported + } + + case *ast.MessageNode: + w.newDef(node, node.Name) + w.path = append(w.path, node) + for _, decl := range node.Decls { + w.Walk(decl, node) + } + + case *ast.ExtendNode: + w.newRef(node.Extendee) + for _, decl := range node.Decls { + w.Walk(decl, node) + } + + case *ast.GroupNode: + w.newDef(node, node.Name) + // TODO: also do the name of the generated field. 
+ for _, decl := range node.Decls { + w.Walk(decl, node) + } + + case *ast.FieldNode: + w.newDef(node, node.Name) + w.newRef(node.FldType) + if node.Options != nil { + for _, option := range node.Options.Options { + w.Walk(option, node) + } + } + + case *ast.MapFieldNode: + w.newDef(node, node.Name) + w.newRef(node.MapType.KeyType) + w.newRef(node.MapType.ValueType) + if node.Options != nil { + for _, option := range node.Options.Options { + w.Walk(option, node) + } + } + + case *ast.OneofNode: + w.newDef(node, node.Name) + // NOTE: oneof fields are not scoped to their oneof's name, so we can skip + // pushing to w.path. + // w.path = append(w.path, node.Name.Val) + for _, decl := range node.Decls { + w.Walk(decl, node) + } + + case *ast.EnumNode: + w.newDef(node, node.Name) + w.path = append(w.path, node) + for _, decl := range node.Decls { + w.Walk(decl, node) + } + + case *ast.EnumValueNode: + w.newDef(node, node.Name) + if node.Options != nil { + for _, option := range node.Options.Options { + w.Walk(option, node) + } + } + + case *ast.ServiceNode: + w.newDef(node, node.Name) + w.path = append(w.path, node) + for _, decl := range node.Decls { + w.Walk(decl, node) + } + + case *ast.RPCNode: + w.newDef(node, node.Name) + w.newRef(node.Input.MessageType) + w.newRef(node.Output.MessageType) + for _, decl := range node.Decls { + w.Walk(decl, node) + } + + case *ast.OptionNode: + for i, part := range node.Name.Parts { + var next *symbol + if part.IsExtension() { + next = w.newRef(part.Name) + } else if i == 0 { + // This lies in descriptor.proto and has to wait until we're resolving + // cross-file references. + next = w.newSymbol(part.Name) + next.kind = &reference{ + path: []string{part.Value()}, + isNonCustomOptionIn: parent, + } + } else { + // This depends on the type of the previous symbol. + prev := w.file.symbols[len(w.file.symbols)-1] + next = w.newSymbol(part.Name) + next.kind = &reference{seeTypeOf: prev} + } + next.isOption = true + } + + // TODO: node.Val + } +} + +// newSymbol creates a new symbol and adds it to the running list. +// +// name is the node representing the name of the symbol that can be go-to-definition'd. +func (w *symbolWalker) newSymbol(name ast.Node) *symbol { + symbol := &symbol{ + file: w.file, + name: name, + info: w.file.fileNode.NodeInfo(name), + } + + w.file.symbols = append(w.file.symbols, symbol) + return symbol +} + +// newDef creates a new symbol for a definition, and adds it to the running list. +// +// Returns a new symbol for that definition. +func (w *symbolWalker) newDef(node ast.Node, name *ast.IdentNode) *symbol { + symbol := w.newSymbol(name) + symbol.kind = &definition{ + node: node, + path: append(makeNestingPath(w.path), name.Val), + } + return symbol +} + +// newDef creates a new symbol for a name reference, and adds it to the running list. +// +// newRef performs same-file Protobuf name resolution. It searches for a partial package +// name in each enclosing scope (per w.path). Cross-file resolution is done by +// ResolveCrossFile(). +// +// Returns a new symbol for that reference. +func (w *symbolWalker) newRef(name ast.IdentValueNode) *symbol { + symbol := w.newSymbol(name) + components, absolute := symbol.ReferencePath() + + // Handle the built-in types. 
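+	// A bare, non-absolute identifier that matches one of the scalar type names is
+	// treated as a builtin rather than a reference, so no name resolution is needed.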
+ if !absolute && len(components) == 1 { + switch components[0] { + case "int32", "int64", "uint32", "uint64", "sint32", "sint64", + "fixed32", "fixed64", "sfixed32", "sfixed64", + "float", "double", "bool", "string", "bytes": + symbol.kind = &builtin{components[0]} + return symbol + } + } + + ref := new(reference) + symbol.kind = ref + + // First, search the containing messages. + if !absolute { + for i := len(w.path) - 1; i >= 0; i-- { + message, ok := w.path[i].(*ast.MessageNode) + if !ok { + continue + } + + if findDeclByPath(message.Decls, components) != nil { + ref.file = w.file + ref.path = append(makeNestingPath(w.path[:i+1]), components...) + return symbol + } + } + } + + // If we couldn't find it within a nested message, we now try to find it at the top level. + if !absolute && findDeclByPath(w.file.fileNode.Decls, components) != nil { + ref.file = w.file + ref.path = components + return symbol + } + + // Also try with the package removed. + if path, ok := slicesext.TrimPrefix(components, symbol.file.Package()); ok { + if findDeclByPath(w.file.fileNode.Decls, path) != nil { + ref.file = w.file + ref.path = path + return symbol + } + } + + // NOTE: cross-file resolution happens elsewhere, after we have walked the whole + // ast and dropped this file's lock. + + // If we couldn't resolve the symbol, symbol.definedIn will be nil. + // However, for hover, it's necessary to still remember the components. + ref.path = components + return symbol +} + +// findDeclByPath searches for a declaration node that the given path names that is nested +// among decls. This is, in effect, Protobuf name resolution within a file. +// +// Currently, this will only find *ast.MessageNode and *ast.EnumNode values. +func findDeclByPath[N ast.Node](nodes []N, path []string) ast.Node { + if len(path) == 0 { + return nil + } + + for _, node := range nodes { + switch node := ast.Node(node).(type) { + case *ast.MessageNode: + if node.Name.Val == path[0] { + if len(path) == 1 { + return node + } + return findDeclByPath(node.Decls, path[1:]) + } + case *ast.GroupNode: + // TODO: This is incorrect. The name to compare with should have + // its first letter lowercased. + if len(path) == 1 && node.Name.Val == path[0] { + return node + } + + msg := node.AsMessage() + if msg.Name.Val == path[0] { + if len(path) == 1 { + return msg + } + return findDeclByPath(msg.Decls, path[1:]) + } + + case *ast.ExtendNode: + if found := findDeclByPath(node.Decls, path); found != nil { + return found + } + case *ast.OneofNode: + if found := findDeclByPath(node.Decls, path); found != nil { + return found + } + + case *ast.EnumNode: + if len(path) == 1 && node.Name.Val == path[0] { + return node + } + case *ast.FieldNode: + if len(path) == 1 && node.Name.Val == path[0] { + return node + } + case *ast.MapFieldNode: + if len(path) == 1 && node.Name.Val == path[0] { + return node + } + } + } + + return nil +} + +// compareRanges compares two ranges for lexicographic ordering. +func comparePositions(a, b protocol.Position) int { + diff := int(a.Line) - int(b.Line) + if diff == 0 { + return int(a.Character) - int(b.Character) + } + return diff +} + +// makeNestingPath converts a path composed of messages, enums, and services into a path +// composed of their names. 
+func makeNestingPath(path []ast.Node) []string { + return slicesext.Map(path, func(node ast.Node) string { + switch node := node.(type) { + case *ast.MessageNode: + return node.Name.Val + case *ast.EnumNode: + return node.Name.Val + case *ast.ServiceNode: + return node.Name.Val + default: + return "" + } + }) +} + +func infoToRange(info ast.NodeInfo) protocol.Range { + return protocol.Range{ + // NOTE: protocompile uses 1-indexed lines and columns (as most compilers do) but bizarrely + // the LSP protocol wants 0-indexed lines and columns, which is a little weird. + //319 + // FIXME: the LSP protocol defines positions in terms of UTF-16, so we will need + // to sort that out at some point. + Start: protocol.Position{ + Line: uint32(info.Start().Line) - 1, + Character: uint32(info.Start().Col) - 1, + }, + End: protocol.Position{ + Line: uint32(info.End().Line) - 1, + Character: uint32(info.End().Col) - 1, + }, + } +} diff --git a/private/buf/buflsp/usage.gen.go b/private/buf/buflsp/usage.gen.go new file mode 100644 index 0000000000..ff0e51f83a --- /dev/null +++ b/private/buf/buflsp/usage.gen.go @@ -0,0 +1,19 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Generated. DO NOT EDIT. + +package buflsp + +import _ "github.com/bufbuild/buf/private/usage" diff --git a/private/buf/cmd/buf/buf.go b/private/buf/cmd/buf/buf.go index c7cfd144db..1cfaf1d819 100644 --- a/private/buf/cmd/buf/buf.go +++ b/private/buf/cmd/buf/buf.go @@ -33,6 +33,7 @@ import ( "github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/bufpluginv1" "github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/bufpluginv1beta1" "github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/bufpluginv2" + "github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/lsp" "github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/price" "github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/plugin/plugindelete" "github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/plugin/pluginpush" @@ -229,6 +230,7 @@ func NewRootCommand(name string) *appcmd.Command { Use: "beta", Short: "Beta commands. Unstable and likely to change", SubCommands: []*appcmd.Command{ + lsp.NewCommand("lsp", builder), price.NewCommand("price", builder), stats.NewCommand("stats", builder), bufpluginv1beta1.NewCommand("buf-plugin-v1beta1", builder), diff --git a/private/buf/cmd/buf/command/beta/lsp/lsp.go b/private/buf/cmd/buf/command/beta/lsp/lsp.go new file mode 100644 index 0000000000..e20dcf229d --- /dev/null +++ b/private/buf/cmd/buf/command/beta/lsp/lsp.go @@ -0,0 +1,121 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package lsp defines the entry-point for the Buf LSP within the CLI. +// +// The actual implementation of the LSP lives under private/buf/buflsp +package lsp + +import ( + "context" + "fmt" + "io" + "net" + + "github.com/bufbuild/buf/private/buf/bufcli" + "github.com/bufbuild/buf/private/buf/buflsp" + "github.com/bufbuild/buf/private/pkg/app/appcmd" + "github.com/bufbuild/buf/private/pkg/app/appext" + "github.com/bufbuild/buf/private/pkg/ioext" + "github.com/spf13/pflag" + "go.lsp.dev/jsonrpc2" +) + +const ( + // pipe is chosen because that's what the vscode LSP client expects. + pipeFlagName = "pipe" +) + +// NewCommand constructs the CLI command for executing the LSP. +func NewCommand(name string, builder appext.Builder) *appcmd.Command { + flags := newFlags() + return &appcmd.Command{ + Use: name, + Short: "Start the language server", + Args: appcmd.NoArgs, + Run: builder.NewRunFunc( + func(ctx context.Context, container appext.Container) error { + return run(ctx, container, flags) + }, + ), + BindFlags: flags.Bind, + } +} + +type flags struct { + // A file path to a UNIX socket to use for IPC. If empty, stdio is used instead. + PipePath string +} + +// Bind sets up the CLI flags that the LSP needs. +func (f *flags) Bind(flagSet *pflag.FlagSet) { + flagSet.StringVar( + &f.PipePath, + pipeFlagName, + "", + "path to a UNIX socket to listen on; uses stdio if not specified", + ) +} + +func newFlags() *flags { + return &flags{} +} + +// run starts the LSP server and listens on the configured. +func run( + ctx context.Context, + container appext.Container, + flags *flags, +) error { + bufcli.WarnBetaCommand(ctx, container) + + transport, err := dial(container, flags) + if err != nil { + return err + } + + controller, err := bufcli.NewController(container) + if err != nil { + return err + } + + conn, err := buflsp.Serve(ctx, container, controller, jsonrpc2.NewStream(transport)) + if err != nil { + return err + } + <-conn.Done() + return conn.Err() +} + +// dial opens a connection to the LSP client. +func dial(container appext.Container, flags *flags) (io.ReadWriteCloser, error) { + switch { + case flags.PipePath != "": + conn, err := net.Dial("unix", flags.PipePath) + if err != nil { + return nil, fmt.Errorf("could not open IPC socket %q: %w", flags.PipePath, err) + } + return conn, nil + + // TODO: Add other transport implementations, such as TCP, here! + + default: + // Fall back to stdio by default. + return ioext.CompositeReadWriteCloser( + container.Stdin(), + container.Stdout(), + ioext.NopCloser, + ), nil + } +} diff --git a/private/buf/cmd/buf/command/beta/lsp/usage.gen.go b/private/buf/cmd/buf/command/beta/lsp/usage.gen.go new file mode 100644 index 0000000000..dddecf0e34 --- /dev/null +++ b/private/buf/cmd/buf/command/beta/lsp/usage.gen.go @@ -0,0 +1,19 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Generated. DO NOT EDIT. + +package lsp + +import _ "github.com/bufbuild/buf/private/usage" diff --git a/private/pkg/refcount/refcount.go b/private/pkg/refcount/refcount.go new file mode 100644 index 0000000000..cdbdf3b63b --- /dev/null +++ b/private/pkg/refcount/refcount.go @@ -0,0 +1,115 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package refcount provides utilities for working with reference-counted objects. +// +// Why would you need refcounting in a language that already has GC? The GC +// can't always tell that all references to some object are gone. For example, +// suppose that we have a map[string]*T for looking up values based on some +// string key, but we want to evict elements of that map if no structure holds +// the key to it anymore. Doing this correctly requires a separately managed +// refcount. For this, you would use [refcount.Map]. +package refcount + +import "sync" + +// Map is a map from keys of type K to values of type *V. +// +// Unlike a built-in map, refcount.Map allows for a key to be inserted multiple +// times concurrently, and deleted multiple times. A key that is inserted n times +// will only be evicted from the map once it is deleted n times. +// +// A zero map is empty and ready to use. Like other Go concurrency primitives, it +// must not be copied after first use. +// +// recount.Map is thread-safe: insertions synchronize-before deletions. +type Map[K comparable, V any] struct { + lock sync.RWMutex + table map[K]*counted[V] +} + +// Insert inserts a key into the map. +// +// If the value is already present in the map, its count is incremented by one; +// otherwise, the zero value is inserted and returned. This function returns whether +// an existing entry was found. +// +// The returned pointer is never nil. +func (m *Map[K, V]) Insert(key K) (value *V, found bool) { + // NOTE: By replacing counted[V].count with an atomic.Int64, this + // can be downgraded to a read lock, with an upgrade only in the case + // we are inserting a new entry. + // + // This optimization is not performed in the name of expediency, I have + // only recorded it as potential future work + m.lock.Lock() + defer m.lock.Unlock() + + if m.table == nil { + m.table = make(map[K]*counted[V]) + } + + v, found := m.table[key] + if !found { + v = &counted[V]{} + m.table[key] = v + } + v.count++ + return &v.value, found +} + +// Get looks up a key in the map. +// +// This is identical to ordinary map lookup: if they key is not present, it does not +// insert and returns nil. 
+func (m *Map[K, V]) Get(key K) *V { + m.lock.RLock() + defer m.lock.RUnlock() + value := m.table[key] + if value == nil { + return nil + } + return &value.value +} + +// Delete deletes a key from the map. +// +// The key will only be evicted once [Map.Delete] has been called an equal number of times +// to prior calls to [Map.Insert] for this key. +// +// If the key is present and was actually evicted, the element it maps to is returned. Otherwise, +// this function returns nil. +func (m *Map[K, V]) Delete(key K) *V { + m.lock.Lock() + defer m.lock.Unlock() + + v := m.table[key] + if v == nil { + return nil + } + + v.count-- + if v.count > 0 { + return nil + } + + delete(m.table, key) + return &v.value +} + +// counted is a reference-counted value. +type counted[T any] struct { + count int32 // Protected by Map.lock. + value T +} diff --git a/private/pkg/refcount/refcount_test.go b/private/pkg/refcount/refcount_test.go new file mode 100644 index 0000000000..f42d890c92 --- /dev/null +++ b/private/pkg/refcount/refcount_test.go @@ -0,0 +1,42 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package refcount + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestMap(t *testing.T) { + t.Parallel() + + table := &Map[string, int]{} + + value, found := table.Insert("foo") + assert.Equal(t, *value, 0) + assert.Equal(t, found, false) + *value = 42 + + value, found = table.Insert("foo") + assert.Equal(t, *value, 42) + assert.Equal(t, found, true) + + assert.Equal(t, *table.Get("foo"), 42) + assert.Nil(t, table.Get("bar")) + + assert.Nil(t, table.Delete("foo")) + assert.Equal(t, *table.Delete("foo"), 42) +} diff --git a/private/pkg/refcount/usage.gen.go b/private/pkg/refcount/usage.gen.go new file mode 100644 index 0000000000..666430e112 --- /dev/null +++ b/private/pkg/refcount/usage.gen.go @@ -0,0 +1,19 @@ +// Copyright 2020-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Generated. DO NOT EDIT. + +package refcount + +import _ "github.com/bufbuild/buf/private/usage" diff --git a/private/pkg/slicesext/slicesext.go b/private/pkg/slicesext/slicesext.go index fc787d91ec..38b12f3ee9 100644 --- a/private/pkg/slicesext/slicesext.go +++ b/private/pkg/slicesext/slicesext.go @@ -491,3 +491,23 @@ func ElementsContained[T comparable](superset []T, subset []T) bool { } return true } + +// TrimPrefix removes a leading prefix from s, otherwise leaves s as-is. 
+// +// A slice s is considered to have a prefix p if the elements of p are equal +// to the first len(p) elements of s. +// +// Returns false if p was not a prefix of s. +func TrimPrefix[T comparable](s []T, p []T) ([]T, bool) { + if len(s) < len(p) { + return s, false + } + + for i, x := range p { + if s[i] != x { + return s, false + } + } + + return s[len(p):], true +}
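For illustration only (the values below are hypothetical, not part of this change), a minimal sketch of how the new slicesext.TrimPrefix helper behaves; symbol.go applies it in this way to strip an enclosing package from a reference path:

package main

import (
	"fmt"

	"github.com/bufbuild/buf/private/pkg/slicesext"
)

func main() {
	components := []string{"foo", "bar", "Baz"}

	// {"foo", "bar"} is a prefix of {"foo", "bar", "Baz"}, so it is removed.
	trimmed, ok := slicesext.TrimPrefix(components, []string{"foo", "bar"})
	fmt.Println(trimmed, ok) // [Baz] true

	// Not a prefix: the original slice is returned unchanged.
	same, ok := slicesext.TrimPrefix(components, []string{"foo", "qux"})
	fmt.Println(same, ok) // [foo bar Baz] false
}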