diff --git a/cmd/influxd/launcher/cmd.go b/cmd/influxd/launcher/cmd.go index 12b92b8a36e..8c3b225d10f 100644 --- a/cmd/influxd/launcher/cmd.go +++ b/cmd/influxd/launcher/cmd.go @@ -190,8 +190,11 @@ type InfluxdOpts struct { Viper *viper.Viper + // HardeningEnabled toggles multiple best-practice hardening options on. HardeningEnabled bool - StrongPasswords bool + // TemplateFileUrlsDisabled disables file protocol URIs in templates. + TemplateFileUrlsDisabled bool + StrongPasswords bool } // NewOpts constructs options with default values. @@ -243,8 +246,9 @@ func NewOpts(viper *viper.Viper) *InfluxdOpts { Testing: false, TestingAlwaysAllowSetup: false, - HardeningEnabled: false, - StrongPasswords: false, + HardeningEnabled: false, + TemplateFileUrlsDisabled: false, + StrongPasswords: false, } } @@ -643,9 +647,10 @@ func (o *InfluxdOpts) BindCliOpts() []cli.Opt { }, // hardening options - // --hardening-enabled is meant to enable all hardending + // --hardening-enabled is meant to enable all hardening // options in one go. Today it enables the IP validator for - // flux and pkger templates HTTP requests. In the future, + // flux and pkger templates HTTP requests, and disables file:// + // protocol for pkger templates. In the future, // --hardening-enabled might be used to enable other security // features, at which point we can add per-feature flags so // that users can either opt into all features @@ -657,7 +662,16 @@ func (o *InfluxdOpts) BindCliOpts() []cli.Opt { DestP: &o.HardeningEnabled, Flag: "hardening-enabled", Default: o.HardeningEnabled, - Desc: "enable hardening options (disallow private IPs within flux and templates HTTP requests)", + Desc: "enable hardening options (disallow private IPs within flux and templates HTTP requests; disable file URLs in templates)", + }, + + // --template-file-urls-disabled prevents file protocol URIs + // from being used for templates. + { + DestP: &o.TemplateFileUrlsDisabled, + Flag: "template-file-urls-disabled", + Default: o.TemplateFileUrlsDisabled, + Desc: "disable template file URLs", }, { DestP: &o.StrongPasswords, diff --git a/cmd/influxd/launcher/launcher.go b/cmd/influxd/launcher/launcher.go index 78e467ee7b9..215788f5d7b 100644 --- a/cmd/influxd/launcher/launcher.go +++ b/cmd/influxd/launcher/launcher.go @@ -752,8 +752,10 @@ func (m *Launcher) run(ctx context.Context, opts *InfluxdOpts) (err error) { authedOrgSVC := authorizer.NewOrgService(b.OrganizationService) authedUrmSVC := authorizer.NewURMService(b.OrgLookupService, b.UserResourceMappingService) pkgerLogger := m.log.With(zap.String("service", "pkger")) + disableFileUrls := opts.HardeningEnabled || opts.TemplateFileUrlsDisabled pkgSVC = pkger.NewService( pkger.WithHTTPClient(pkger.NewDefaultHTTPClient(urlValidator)), + pkger.WithFileUrlsDisabled(disableFileUrls), pkger.WithLogger(pkgerLogger), pkger.WithStore(pkger.NewStoreKV(m.kvStore)), pkger.WithBucketSVC(authorizer.NewBucketService(b.BucketService)), diff --git a/pkg/fs/special.go b/pkg/fs/special.go new file mode 100644 index 00000000000..e77e308b247 --- /dev/null +++ b/pkg/fs/special.go @@ -0,0 +1,13 @@ +//go:build !linux +// +build !linux + +package fs + +import "io/fs" + +// IsSpecialFSFromFileInfo determines if a file resides on a special file +// system (e.g. /proc, /dev/, /sys) based on its fs.FileInfo. +// The bool return value should be ignored if err is not nil. 
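+// Non-Linux builds use this stub, which always reports false with a nil error.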
+func IsSpecialFSFromFileInfo(st fs.FileInfo) (bool, error) { + return false, nil +} diff --git a/pkg/fs/special_linux.go b/pkg/fs/special_linux.go new file mode 100644 index 00000000000..b40c2e1c40a --- /dev/null +++ b/pkg/fs/special_linux.go @@ -0,0 +1,91 @@ +package fs + +import ( + "errors" + "io/fs" + "math" + "os" + "syscall" + + "golang.org/x/sys/unix" +) + +// IsSpecialFSFromFileInfo determines if a file resides on a special file +// system (e.g. /proc, /dev/, /sys) based on its fs.FileInfo. +// The bool return value should be ignored if err is not nil. +func IsSpecialFSFromFileInfo(st fs.FileInfo) (bool, error) { + // On Linux, special file systems like /proc, /dev/, and /sys are + // considered unnamed devices (non-device mounts). These devices + // will always have a major device number of 0 per the kernels + // Documentation/admin-guide/devices.txt file. + + getDevId := func(st fs.FileInfo) (uint64, error) { + st_sys_any := st.Sys() + if st_sys_any == nil { + return 0, errors.New("nil returned by fs.FileInfo.Sys") + } + + st_sys, ok := st_sys_any.(*syscall.Stat_t) + if !ok { + return 0, errors.New("could not convert st.sys() to a *syscall.Stat_t") + } + return st_sys.Dev, nil + } + + devId, err := getDevId(st) + if err != nil { + return false, err + } + if unix.Major(devId) != 0 { + // This file is definitely not on a special file system. + return false, nil + } + + // We know the file is in a special file system, but we'll make an + // exception for tmpfs, which might be used at a variety of mount points. + // Since the minor IDs are assigned dynamically, we'll find the device ID + // for each common tmpfs mount point. If the mount point's device ID matches this st's, + // then it is reasonable to assume the file is in tmpfs. If the device ID + // does not match, then st is not located in that special file system so we + // can't give an exception based on that file system root. This check is still + // valid even if the directory we check against isn't mounted as tmpfs, because + // the device ID won't match so we won't grant a tmpfs exception based on it. + // On Linux, every tmpfs mount has a different device ID, so we need to check + // against all common ones that might be in use. + tmpfsMounts := []string{"/tmp", "/run", "/dev/shm"} + if tmpdir := os.TempDir(); tmpdir != "/tmp" { + tmpfsMounts = append(tmpfsMounts, tmpdir) + } + if xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR"); xdgRuntimeDir != "" { + tmpfsMounts = append(tmpfsMounts, xdgRuntimeDir) + } + getFileDevId := func(n string) (uint64, error) { + fSt, err := os.Stat(n) + if err != nil { + return math.MaxUint64, err + } + fDevId, err := getDevId(fSt) + if err != nil { + return math.MaxUint64, err + } + return fDevId, nil + } + var errs []error + for _, fn := range tmpfsMounts { + // Don't stop if getFileDevId returns an error. It could + // be because the tmpfsMount we're checking doesn't exist, + // which shouldn't prevent us from checking the other + // potential mount points. + if fnDevId, err := getFileDevId(fn); err == nil { + if fnDevId == devId { + return false, nil + } + } else if !errors.Is(err, os.ErrNotExist) { + // Ignore errors for missing mount points. + errs = append(errs, err) + } + } + + // We didn't find any a reason to give st a special file system exception. + return true, errors.Join(errs...) 
+} diff --git a/pkger/parser.go b/pkger/parser.go index dce50ab9424..6739c856276 100644 --- a/pkger/parser.go +++ b/pkger/parser.go @@ -23,6 +23,8 @@ import ( fluxurl "github.com/influxdata/flux/dependencies/url" "github.com/influxdata/flux/parser" errors2 "github.com/influxdata/influxdb/v2/kit/platform/errors" + caperr "github.com/influxdata/influxdb/v2/pkg/errors" + "github.com/influxdata/influxdb/v2/pkg/fs" "github.com/influxdata/influxdb/v2/pkg/jsonnet" "github.com/influxdata/influxdb/v2/task/options" "gopkg.in/yaml.v3" @@ -102,8 +104,65 @@ func Parse(encoding Encoding, readerFn ReaderFn, opts ...ValidateOptFn) (*Templa return pkg, nil } -// FromFile reads a file from disk and provides a reader from it. -func FromFile(filePath string) ReaderFn { +// limitReadFileMaxSize is the maximum file size that limitReadFile will read. +const limitReadFileMaxSize int64 = 2 * 1024 * 1024 + +// limitReadFile operates like ioutil.ReadFile() in that it reads the contents +// of a file into RAM, but will only read regular files up to the specified +// max. limitReadFile reads the file named by filename and returns the +// contents. A successful call returns err == nil, not err == EOF. Because +// limitReadFile reads the whole file, it does not treat an EOF from Read as an +// error to be reported. +func limitReadFile(name string) (buf []byte, rErr error) { + // use os.Open() to avoid TOCTOU + f, err := os.Open(name) + if err != nil { + return nil, err + } + defer caperr.Capture(&rErr, f.Close)() + + // Check that properties of file are OK. + st, err := f.Stat() + if err != nil { + return nil, err + } + + // Disallow reading from special file systems (e.g. /proc, /sys/, /dev). + if special, err := fs.IsSpecialFSFromFileInfo(st); err != nil { + return nil, fmt.Errorf("%w: %q", err, name) + } else if special { + return nil, fmt.Errorf("file in special file system: %q", name) + } + + // only support reading regular files + if st.Mode()&os.ModeType != 0 { + return nil, fmt.Errorf("not a regular file: %q", name) + } + + // limit how much we read into RAM + var size int + size64 := st.Size() + if limitReadFileMaxSize > 0 && size64 > limitReadFileMaxSize { + return nil, fmt.Errorf("file too big: %q", name) + } else if size64 == 0 { + return nil, fmt.Errorf("file empty: %q", name) + } + size = int(size64) + + // Read file + data := make([]byte, size) + b, err := f.Read(data) + if err != nil { + return nil, err + } else if b != size { + return nil, fmt.Errorf("short read: %q", name) + } + + return data, nil +} + +// fromFile reads a file from disk and provides a reader from it. 
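+// When extraFileChecks is true, the file is read with limitReadFile, which
+// rejects files on special file systems, non-regular files, empty files, and
+// files larger than limitReadFileMaxSize; otherwise os.ReadFile is used as before.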
+func FromFile(filePath string, extraFileChecks bool) ReaderFn { return func() (io.Reader, string, error) { u, err := url.Parse(filePath) if err != nil { @@ -118,9 +177,15 @@ func FromFile(filePath string) ReaderFn { } // not using os.Open to avoid having to deal with closing the file in here - b, err := os.ReadFile(u.Path) - if err != nil { - return nil, filePath, err + var b []byte + var rerr error + if extraFileChecks { + b, rerr = limitReadFile(u.Path) + } else { + b, rerr = os.ReadFile(u.Path) + } + if rerr != nil { + return nil, filePath, rerr } return bytes.NewBuffer(b), u.String(), nil @@ -260,7 +325,7 @@ func parseSource(r io.Reader, opts ...ValidateOptFn) (*Template, error) { b = bb } - contentType := http.DetectContentType(b[:512]) + contentType := http.DetectContentType(b[:min(len(b), 512)]) switch { case strings.Contains(contentType, "jsonnet"): // highly unlikely to fall in here with supported content type detection as is diff --git a/pkger/parser_test.go b/pkger/parser_test.go index 830160e7470..d4f0f55a350 100644 --- a/pkger/parser_test.go +++ b/pkger/parser_test.go @@ -5,7 +5,10 @@ import ( "errors" "fmt" "net/url" + "os" + "path" "path/filepath" + "runtime" "sort" "strconv" "strings" @@ -13,6 +16,7 @@ import ( "testing" "time" + "github.com/google/uuid" "github.com/influxdata/influxdb/v2" errors2 "github.com/influxdata/influxdb/v2/kit/platform/errors" "github.com/influxdata/influxdb/v2/notification" @@ -4825,6 +4829,249 @@ func Test_validGeometry(t *testing.T) { } } +func Test_FromFile(t *testing.T) { + dir := t.TempDir() + + // create empty test file + emptyFn := filepath.Join(dir, "empty") + fe, err := os.Create(emptyFn) + require.NoError(t, err) + require.NoError(t, fe.Close()) + + // create too big test file + bigFn := filepath.Join(dir, "big") + fb, err := os.Create(bigFn) + require.NoError(t, err) + require.NoError(t, fb.Close()) + require.NoError(t, os.Truncate(bigFn, limitReadFileMaxSize+1)) + + // create symlink to /dev/null (linux only) + devNullSym := filepath.Join(dir, uuid.NewString()) + if runtime.GOOS == "linux" { + require.NoError(t, os.Symlink("/dev/null", devNullSym)) + } + + type testCase struct { + path string + extra bool + expErr string + oses []string // list of OSes to run test on, empty means all. 
+ } + + // Static test suites + tests := []testCase{ + // valid + { + path: "testdata/bucket_schema.yml", + extra: false, + expErr: "", + }, + { + path: "testdata/bucket_schema.yml", + extra: true, + expErr: "", + }, + // invalid + { + path: "i\nvalid:///foo", + extra: false, + expErr: "invalid filepath provided", + }, + { + path: "testdata/nonexistent (darwin|linux)", + extra: false, + expErr: "no such file or directory", + oses: []string{"darwin", "linux"}, + }, + { + path: "testdata/nonexistent (windows)", + extra: false, + expErr: "The system cannot find the file specified.", + oses: []string{"windows"}, + }, + // invalid with extra + { + path: "/dev/null", + extra: true, + expErr: "file in special file system", + oses: []string{"linux"}, + }, + // symlink to /dev/null, invalid with extra + { + path: devNullSym, + extra: true, + expErr: "file in special file system", + oses: []string{"linux"}, + }, + // invalid with extra + { + path: "/dev/null", + extra: true, + expErr: "not a regular file", + oses: []string{"darwin"}, + }, + { + path: "/", + extra: true, + expErr: "not a regular file", + }, + { + path: "testdata/nonexistent (darwin|linux)", + extra: true, + expErr: "no such file or directory", + oses: []string{"darwin", "linux"}, + }, + { + path: "testdata/nonexistent (windows)", + extra: true, + expErr: "The system cannot find the file specified.", + oses: []string{"windows"}, + }, + { + path: emptyFn, + extra: true, + expErr: "file empty", + }, + { + path: bigFn, + extra: true, + expErr: "file too big", + }, + } + + // Add tmpfs special file system exception tests for Linux. + // We don't consider errors creating the tests cases (e.g. no tmpfs mounts, no write + // permissions to any tmpfs mount) errors for the test itself. It's possible we + // can't run these tests in some locked down environments, but we will warn the + // use we couldn't run the tests. + if runtime.GOOS == "linux" { + tmpfsDirs, err := func() ([]string, error) { + t.Helper() + // Quick and dirty parse of /proc/mounts to find tmpfs mount points on system. + mounts, err := os.ReadFile("/proc/mounts") + if err != nil { + return nil, fmt.Errorf("error reading /proc/mounts: %w", err) + } + mountsLines := strings.Split(string(mounts), "\n") + var tmpfsMounts []string + for _, line := range mountsLines { + if line == "" { + continue + } + cols := strings.Split(line, " ") + if len(cols) != 6 { + return nil, fmt.Errorf("unexpected /proc/mounts line format (%d columns): %q", len(cols), line) + } + if cols[0] == "tmpfs" { + tmpfsMounts = append(tmpfsMounts, cols[1]) + } + } + if len(tmpfsMounts) == 0 { + return nil, errors.New("no tmpfs mount points found") + } + + // Find which common tmpfs directories are actually mounted as tmpfs. 
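+			// Keep only the candidates below whose paths fall under one of the
+			// tmpfs mount points parsed above.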
+ candidateDirs := []string{ + "/dev/shm", + "/run", + "/tmp", + os.Getenv("XDG_RUNTIME_DIR"), + os.TempDir(), + } + var actualDirs []string + for _, dir := range candidateDirs { + if dir == "" { + continue + } + for _, mount := range tmpfsMounts { + if strings.HasPrefix(dir, mount) { + actualDirs = append(actualDirs, dir) + } + } + } + if len(actualDirs) == 0 { + return nil, errors.New("no common tmpfs directories on tmpfs mount points") + } + return actualDirs, nil + }() + if err == nil { + // Create test files in the tmpfs directories and create test cases + var tmpfsTests []testCase + var tmpfsErrs []error + contents, err := os.ReadFile("testdata/bucket_schema.yml") + require.NoError(t, err) + require.NotEmpty(t, contents) + for _, dir := range tmpfsDirs { + testFile, err := os.CreateTemp(dir, "fromfile_*") + if err == nil { + testPath := testFile.Name() + defer os.Remove(testPath) + _, err = testFile.Write(contents) + require.NoError(t, err) + require.NoError(t, testFile.Close()) + tmpfsTests = append(tmpfsTests, testCase{ + path: testPath, + extra: true, + expErr: "", + }) + + // Create a test that's a symlink to the tmpfs file + symPath := path.Join(dir, uuid.NewString()) + require.NoError(t, os.Symlink(testPath, symPath)) + tmpfsTests = append(tmpfsTests, testCase{ + path: symPath, + extra: true, + expErr: "", + }) + } else { + tmpfsErrs = append(tmpfsErrs, fmt.Errorf("error tmpfs test file in %q: %w", dir, err)) + } + } + // Ignore errors creating tmpfs files if we got at least one test case from the bunch + if len(tmpfsTests) > 0 { + tests = append(tests, tmpfsTests...) + } else { + t.Logf("WARNING: could not create files for tmpfs special file system tests: %s", errors.Join(tmpfsErrs...)) + } + } else { + t.Logf("WARNING: unable to run tmpfs special file system tests: %s", err) + } + } + + for _, tt := range tests { + fn := func(t *testing.T) { + if len(tt.oses) > 0 { + osFound := false + for _, os := range tt.oses { + if runtime.GOOS == os { + osFound = true + break + } + } + if !osFound { + t.Skipf("skipping test for %q OS", runtime.GOOS) + } + } + + urlPath := pathToURLPath(tt.path) + readFn := FromFile(urlPath, tt.extra) + assert.NotNil(t, readFn) + + reader, path, err := readFn() + if tt.expErr == "" { + assert.NotNil(t, reader) + assert.Nil(t, err) + assert.Equal(t, fmt.Sprintf("file://%s", urlPath), path) + } else { + assert.Nil(t, reader) + assert.NotNil(t, err) + assert.Contains(t, err.Error(), tt.expErr) + } + } + t.Run(tt.path, fn) + } +} + type testTemplateResourceError struct { name string encoding Encoding @@ -4943,15 +5190,21 @@ func nextField(t *testing.T, field string) (string, int) { return "", -1 } +// pathToURL converts file paths to URLs. This is a simple operation on Unix, +// but is complicated by correct handling of drive letters on Windows. 
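+// As implemented it only normalizes path separators with filepath.ToSlash.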
+func pathToURLPath(p string) string { + return filepath.ToSlash(p) +} + func validParsedTemplateFromFile(t *testing.T, path string, encoding Encoding, opts ...ValidateOptFn) *Template { t.Helper() var readFn ReaderFn templateBytes, ok := availableTemplateFiles[path] if ok { - readFn = FromReader(bytes.NewBuffer(templateBytes), "file://"+path) + readFn = FromReader(bytes.NewBuffer(templateBytes), "file://"+pathToURLPath(path)) } else { - readFn = FromFile(path) + readFn = FromFile(path, false) atomic.AddInt64(&missedTemplateCacheCounter, 1) } diff --git a/pkger/service.go b/pkger/service.go index 838132b0390..d44e145a4e4 100644 --- a/pkger/service.go +++ b/pkger/service.go @@ -149,12 +149,13 @@ type SVCMiddleware func(SVC) SVC type serviceOpt struct { logger *zap.Logger - applyReqLimit int - client *http.Client - idGen platform.IDGenerator - nameGen NameGenerator - timeGen influxdb.TimeGenerator - store Store + applyReqLimit int + client *http.Client + fileUrlsDisabled bool + idGen platform.IDGenerator + nameGen NameGenerator + timeGen influxdb.TimeGenerator + store Store bucketSVC influxdb.BucketService checkSVC influxdb.CheckService @@ -186,6 +187,13 @@ func WithLogger(log *zap.Logger) ServiceSetterFn { } } +// WithFileUrlsDisable sets if file URLs are disabled for the service. +func WithFileUrlsDisabled(v bool) ServiceSetterFn { + return func(o *serviceOpt) { + o.fileUrlsDisabled = v + } +} + // WithIDGenerator sets the id generator for the service. func WithIDGenerator(idGen platform.IDGenerator) ServiceSetterFn { return func(opt *serviceOpt) { @@ -305,12 +313,13 @@ type Service struct { log *zap.Logger // internal dependencies - applyReqLimit int - client *http.Client - idGen platform.IDGenerator - nameGen NameGenerator - store Store - timeGen influxdb.TimeGenerator + applyReqLimit int + client *http.Client + fileUrlsDisabled bool + idGen platform.IDGenerator + nameGen NameGenerator + store Store + timeGen influxdb.TimeGenerator // external service dependencies bucketSVC influxdb.BucketService @@ -344,12 +353,13 @@ func NewService(opts ...ServiceSetterFn) *Service { return &Service{ log: opt.logger, - applyReqLimit: opt.applyReqLimit, - client: opt.client, - idGen: opt.idGen, - nameGen: opt.nameGen, - store: opt.store, - timeGen: opt.timeGen, + applyReqLimit: opt.applyReqLimit, + client: opt.client, + fileUrlsDisabled: opt.fileUrlsDisabled, + idGen: opt.idGen, + nameGen: opt.nameGen, + store: opt.store, + timeGen: opt.timeGen, bucketSVC: opt.bucketSVC, checkSVC: opt.checkSVC, @@ -3101,11 +3111,35 @@ func (s *Service) getStackRemoteTemplates(ctx context.Context, stackID platform. readerFn := FromHTTPRequest(u.String(), s.client) if u.Scheme == "file" { - readerFn = FromFile(u.Path) + s.log.Info("file:// specified in call to /api/v2/templates/apply with stack", + zap.String("file", u.Path), + ) + + if s.fileUrlsDisabled { + return nil, &errors2.Error{ + Code: errors2.EInvalid, + Msg: "invalid URL scheme", + Err: errors.New("invalid URL scheme"), + } + } + + readerFn = FromFile(u.Path, true) } template, err := Parse(encoding, readerFn) if err != nil { + if u.Scheme == "file" { + // Prevent leaking information about local files to client. + // Log real error for debugging purposes. + s.log.Error("error parsing file:// specified in call to /api/v2/templates/apply with stack", + zap.Stringer("orgID", stack.OrgID), + zap.String("file", u.Path), + zap.String("err", err.Error()), + ) + + // Send the client a generic error. 
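+			// Including u.Path here is acceptable: the client supplied it via the
+			// stack's TemplateURLs, and the underlying error stays in the server log.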
+ return nil, fmt.Errorf("file:// URL failed to parse: %s", u.Path) + } return nil, err } remotes = append(remotes, template) diff --git a/pkger/service_test.go b/pkger/service_test.go index 5c8b94468ca..e3f6633f3e9 100644 --- a/pkger/service_test.go +++ b/pkger/service_test.go @@ -5,11 +5,16 @@ import ( "context" "errors" "fmt" + "io" "math/rand" "net/url" + "os" + "path/filepath" "regexp" + "runtime" "sort" "strconv" + "strings" "testing" "time" @@ -24,7 +29,10 @@ import ( "github.com/influxdata/influxdb/v2/task/taskmodel" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" "go.uber.org/zap/zaptest" + "go.uber.org/zap/zaptest/observer" ) func TestService(t *testing.T) { @@ -60,6 +68,7 @@ func TestService(t *testing.T) { } applyOpts := []ServiceSetterFn{ + WithFileUrlsDisabled(opt.fileUrlsDisabled), WithStore(opt.store), WithBucketSVC(opt.bucketSVC), WithCheckSVC(opt.checkSVC), @@ -82,6 +91,9 @@ func TestService(t *testing.T) { if opt.nameGen != nil { applyOpts = append(applyOpts, withNameGen(opt.nameGen)) } + if opt.logger != nil { + applyOpts = append(applyOpts, WithLogger(opt.logger)) + } return NewService(applyOpts...) } @@ -770,6 +782,365 @@ func TestService(t *testing.T) { }) }) + t.Run("DryRun - stack", func(t *testing.T) { + dir := t.TempDir() + + // create a valid yaml file + ySrc, err := os.Open("testdata/bucket.yml") + require.NoError(t, err) + validYamlFn := filepath.Join(dir, "bucket.yml") + yDst, err := os.Create(validYamlFn) + require.NoError(t, err) + _, err = io.Copy(yDst, ySrc) + require.NoError(t, err) + require.NoError(t, ySrc.Close()) + require.NoError(t, yDst.Close()) + + // create a valid yaml file + jSrc, err := os.Open("testdata/bucket.json") + require.NoError(t, err) + validJsonFn := filepath.Join(dir, "bucket.json") + jDst, err := os.Create(validJsonFn) + require.NoError(t, err) + _, err = io.Copy(jDst, jSrc) + require.NoError(t, err) + require.NoError(t, jSrc.Close()) + require.NoError(t, jDst.Close()) + + // create an invalid file + iSrc := strings.NewReader("this is invalid") + invalidFn := filepath.Join(dir, "invalid") + iDst, err := os.Create(invalidFn) + require.NoError(t, err) + _, err = io.Copy(iDst, iSrc) + require.NoError(t, err) + require.NoError(t, iDst.Close()) + + // create too big test file + bigFn := filepath.Join(dir, "big") + fb, err := os.Create(bigFn) + require.NoError(t, err) + require.NoError(t, fb.Close()) + err = os.Truncate(bigFn, limitReadFileMaxSize+1) + require.NoError(t, err) + + // create a symlink to /proc/cpuinfo + procLinkFn := filepath.Join(dir, "cpuinfo") + require.NoError(t, os.Symlink("/proc/cpuinfo", procLinkFn)) + + // create tricky symlink to /proc/cpuinfo + trickyProcLinkFn := filepath.Join(dir, "tricky_cpuinfo") + require.NoError(t, os.Symlink("/../proc/cpuinfo", trickyProcLinkFn)) + + now := time.Time{}.Add(10 * 24 * time.Hour) + testOrgID := platform.ID(33) + testStackID := platform.ID(3) + + t.Run("file URL", func(t *testing.T) { + type logm struct { + level zapcore.Level + msg string + err string + } + tests := []struct { + name string + oses []string // list of OSes to run test on, empty means all. 
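+					// path is the local file that gets wrapped in a file:// URL for the
+					// stack's TemplateURLs.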
+ path string + fileUrlsDisabled bool + expErr string + expLog []logm + }{ + { + name: "valid yaml", + path: validYamlFn, + expLog: []logm{ + {level: zapcore.InfoLevel, msg: "file:// specified in call to /api/v2/templates/apply with stack"}, + }, + }, + { + name: "valid json", + path: validJsonFn, + expLog: []logm{ + {level: zapcore.InfoLevel, msg: "file:// specified in call to /api/v2/templates/apply with stack"}, + }, + }, + // invalid + { + name: "invalid yaml", + path: invalidFn, + expErr: "file:// URL failed to parse: ", + expLog: []logm{ + {level: zapcore.InfoLevel, msg: "file:// specified in call to /api/v2/templates/apply with stack"}, + {level: zapcore.ErrorLevel, msg: "error parsing file:// specified in call to /api/v2/templates/apply with stack", err: "line 1: cannot unmarshal"}, + }, + }, + // fileUrlsDisabled always shows error + { + name: "invalid yaml with disable flag", + path: validYamlFn, + fileUrlsDisabled: true, + expErr: "invalid URL scheme", + expLog: []logm{ + {level: zapcore.InfoLevel, msg: "file:// specified in call to /api/v2/templates/apply with stack"}, + }, + }, + { + name: "nonexistent with disable flag", + path: "/nonexistent", + fileUrlsDisabled: true, + expErr: "invalid URL scheme", + expLog: []logm{ + {level: zapcore.InfoLevel, msg: "file:// specified in call to /api/v2/templates/apply with stack"}, + }, + }, + { + name: "big file with disable flag", + path: bigFn, + fileUrlsDisabled: true, + expErr: "invalid URL scheme", + expLog: []logm{ + {level: zapcore.InfoLevel, msg: "file:// specified in call to /api/v2/templates/apply with stack"}, + }, + }, + // invalid 'extra' with generic errors + { + name: "invalid yaml wildcard", + path: invalidFn + "?.yml", + expErr: "file:// URL failed to parse", + expLog: []logm{ + {level: zapcore.InfoLevel, msg: "file:// specified in call to /api/v2/templates/apply with stack"}, + {level: zapcore.ErrorLevel, msg: "error parsing file:// specified in call to /api/v2/templates/apply with stack", err: "line 1: cannot unmarshal"}, + }, + }, + { + name: "directory (/tmp)", + path: "/tmp", + expErr: "file:// URL failed to parse", + expLog: []logm{ + {level: zapcore.InfoLevel, msg: "file:// specified in call to /api/v2/templates/apply with stack"}, + {level: zapcore.ErrorLevel, msg: "error parsing file:// specified in call to /api/v2/templates/apply with stack", err: "not a regular file"}, + }, + }, + { + // /proc/cpuinfo is a regular file with 0 length + name: "/proc/cpuinfo", + path: "/proc/cpuinfo", + oses: []string{"linux"}, + expErr: "file:// URL failed to parse", + expLog: []logm{ + {level: zapcore.InfoLevel, msg: "file:// specified in call to /api/v2/templates/apply with stack"}, + {level: zapcore.ErrorLevel, msg: "error parsing file:// specified in call to /api/v2/templates/apply with stack", err: "file in special file system"}, + }, + }, + { + // try fooling the parser with a tricky path + name: "trick /proc path", + path: "/../proc/cpuinfo", + oses: []string{"linux"}, + expErr: "file:// URL failed to parse", + expLog: []logm{ + {level: zapcore.InfoLevel, msg: "file:// specified in call to /api/v2/templates/apply with stack"}, + {level: zapcore.ErrorLevel, msg: "error parsing file:// specified in call to /api/v2/templates/apply with stack", err: "file in special file system"}, + }, + }, + { + // try fooling the parser with a symlink to /proc + name: "symlink /proc path", + path: procLinkFn, + oses: []string{"linux"}, + expErr: "file:// URL failed to parse", + expLog: []logm{ + {level: zapcore.InfoLevel, msg: "file:// 
specified in call to /api/v2/templates/apply with stack"}, + {level: zapcore.ErrorLevel, msg: "error parsing file:// specified in call to /api/v2/templates/apply with stack", err: "file in special file system"}, + }, + }, + { + // try fooling the parser with a symlink to /../proc + name: "tricky symlink /proc path", + path: trickyProcLinkFn, + oses: []string{"linux"}, + expErr: "file:// URL failed to parse", + expLog: []logm{ + {level: zapcore.InfoLevel, msg: "file:// specified in call to /api/v2/templates/apply with stack"}, + {level: zapcore.ErrorLevel, msg: "error parsing file:// specified in call to /api/v2/templates/apply with stack", err: "file in special file system"}, + }, + }, + { + name: "/dev/core", + path: "/dev/core", + oses: []string{"linux"}, + expErr: "file:// URL failed to parse", + expLog: []logm{ + {level: zapcore.InfoLevel, msg: "file:// specified in call to /api/v2/templates/apply with stack"}, + {level: zapcore.ErrorLevel, msg: "error parsing file:// specified in call to /api/v2/templates/apply with stack", err: "permission denied"}, + }, + }, + { + name: "/dev/zero", + path: "/dev/zero", + oses: []string{"linux"}, + expErr: "file:// URL failed to parse", + expLog: []logm{ + {level: zapcore.InfoLevel, msg: "file:// specified in call to /api/v2/templates/apply with stack"}, + {level: zapcore.ErrorLevel, msg: "error parsing file:// specified in call to /api/v2/templates/apply with stack", err: "file in special file system"}, + }, + }, + { + name: "/dev/zero", + path: "/dev/zero", + oses: []string{"darwin"}, + expErr: "file:// URL failed to parse", + expLog: []logm{ + {level: zapcore.InfoLevel, msg: "file:// specified in call to /api/v2/templates/apply with stack"}, + {level: zapcore.ErrorLevel, msg: "error parsing file:// specified in call to /api/v2/templates/apply with stack", err: "not a regular file"}, + }, + }, + { + name: "/sys/kernel/vmcoreinfo", + path: "/sys/kernel/vmcoreinfo", + oses: []string{"linux"}, + expErr: "file:// URL failed to parse", + expLog: []logm{ + {level: zapcore.InfoLevel, msg: "file:// specified in call to /api/v2/templates/apply with stack"}, + {level: zapcore.ErrorLevel, msg: "error parsing file:// specified in call to /api/v2/templates/apply with stack", err: "file in special file system"}, + }, + }, + { + name: "nonexistent (darwin|linux)", + path: "/nonexistent", + oses: []string{"darwin", "linux"}, + expErr: "file:// URL failed to parse", + expLog: []logm{ + {level: zapcore.InfoLevel, msg: "file:// specified in call to /api/v2/templates/apply with stack"}, + {level: zapcore.ErrorLevel, msg: "error parsing file:// specified in call to /api/v2/templates/apply with stack", err: "open /nonexistent: no such file or directory"}, + }, + }, + { + name: "nonexistent (windows)", + path: "/nonexistent", + oses: []string{"windows"}, + expErr: "file:// URL failed to parse", + expLog: []logm{ + {level: zapcore.InfoLevel, msg: "file:// specified in call to /api/v2/templates/apply with stack"}, + {level: zapcore.ErrorLevel, msg: "error parsing file:// specified in call to /api/v2/templates/apply with stack", err: "open /nonexistent: The system cannot find the file specified."}, + }, + }, + { + name: "big file", + path: bigFn, + expErr: "file:// URL failed to parse", + expLog: []logm{ + {level: zapcore.InfoLevel, msg: "file:// specified in call to /api/v2/templates/apply with stack"}, + {level: zapcore.ErrorLevel, msg: "error parsing file:// specified in call to /api/v2/templates/apply with stack", err: "file too big"}, + }, + }, + } + + for _, tt := 
range tests { + t.Run(tt.name, func(t *testing.T) { + if len(tt.oses) > 0 { + osFound := false + for _, os := range tt.oses { + if runtime.GOOS == os { + osFound = true + break + } + } + if !osFound { + t.Skipf("skipping test for %q OS", runtime.GOOS) + } + } + testStore := &fakeStore{ + createFn: func(ctx context.Context, stack Stack) error { + return nil + }, + readFn: func(ctx context.Context, id platform.ID) (Stack, error) { + return Stack{ + ID: id, + OrgID: testOrgID, + Events: []StackEvent{ + { + Name: "some-file", + Description: "some file with file://", + TemplateURLs: []string{fmt.Sprintf("file://%s", pathToURLPath(tt.path))}, + }, + }, + }, nil + }, + } + + var logger *zap.Logger = nil + var core zapcore.Core + var sink *observer.ObservedLogs + ctx := context.Background() + core, sink = observer.New(zap.InfoLevel) + logger = zap.New(core) + + svc := newTestService( + WithIDGenerator(newFakeIDGen(testStackID)), + WithTimeGenerator(newTimeGen(now)), + WithLogger(logger), + WithFileUrlsDisabled(tt.fileUrlsDisabled), + WithStore(testStore), + ) + + sc := StackCreate{ + OrgID: testOrgID, + TemplateURLs: []string{fmt.Sprintf("file://%s", pathToURLPath(tt.path))}, + } + stack, err := svc.InitStack(ctx, 9000, sc) + require.NoError(t, err) + + assert.Equal(t, testStackID, stack.ID) + assert.Equal(t, testOrgID, stack.OrgID) + assert.Equal(t, now, stack.CreatedAt) + assert.Equal(t, now, stack.LatestEvent().UpdatedAt) + + applyOpts := []ApplyOptFn{ + ApplyWithStackID(testStackID), + } + _, err = svc.DryRun( + ctx, + platform.ID(100), + 0, + applyOpts..., + ) + + entries := sink.TakeAll() // resets to 0 + require.Equal(t, len(tt.expLog), len(entries)) + for idx, exp := range tt.expLog { + actual := entries[idx] + require.Equal(t, exp.msg, actual.Entry.Message) + require.Equal(t, exp.level, actual.Entry.Level) + + // Check for correct err in log context + var errFound bool + for _, lctx := range actual.Context { + if lctx.Key == "err" { + errFound = true + if len(exp.err) > 0 { + require.Contains(t, lctx.String, exp.err) + } else { + require.Fail(t, "unexpected err in log context: %s", lctx.String) + } + } + } + // Make sure we found an err if we expected one + if len(exp.err) > 0 { + require.True(t, errFound, "err not found in log context when expected: %s", exp.err) + } + } + + if tt.expErr == "" { + require.NoError(t, err) + } else { + require.ErrorContains(t, err, tt.expErr) + } + }) + } + }) + }) + t.Run("Apply", func(t *testing.T) { t.Run("buckets", func(t *testing.T) { t.Run("successfully creates template of buckets", func(t *testing.T) {
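
For reviewers trying the new behavior end to end, a minimal sketch of how the pieces added in this diff compose. FromFile, Parse, EncodingYAML, NewService, and WithFileUrlsDisabled are real pkger identifiers (the last two options are wired up here); the template path and the standalone main wrapper are hypothetical, and the CLI equivalents are --template-file-urls-disabled or --hardening-enabled on influxd.

package main

import (
	"fmt"
	"log"

	"github.com/influxdata/influxdb/v2/pkger"
)

func main() {
	// extraFileChecks=true routes the read through limitReadFile: size cap,
	// regular-file check, and the special file system check from pkg/fs.
	readerFn := pkger.FromFile("/home/user/bucket.yml", true)

	tmpl, err := pkger.Parse(pkger.EncodingYAML, readerFn)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("parsed %d bucket(s)\n", len(tmpl.Summary().Buckets))

	// Server-side equivalent of the new flag: a pkger service built with file
	// URLs disabled rejects file:// URLs when resolving stack templates,
	// before any local read happens.
	_ = pkger.NewService(pkger.WithFileUrlsDisabled(true))
}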