diff --git a/.gitignore b/.gitignore index f1c181ec9c..c0f3915ecc 100644 --- a/.gitignore +++ b/.gitignore @@ -10,3 +10,11 @@ # Output of the go coverage tool, specifically when used with LiteIDE *.out + +# Ignore builds +cmd/revad/revad +cmd/reva/reva + + +# For Mac OS +.DS_Store diff --git a/README.md b/README.md index eb00ac2deb..99526ce8e8 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,5 @@ -# reva -Cloud Storage Sync and Share Interoperability Platform +# REVA + +Cloud Storage Sync & Share Interoperability Platform + + diff --git a/cmd/reva/app-provider-get-iframe.go b/cmd/reva/app-provider-get-iframe.go new file mode 100644 index 0000000000..a15d9cf2d9 --- /dev/null +++ b/cmd/reva/app-provider-get-iframe.go @@ -0,0 +1,49 @@ +package main + +import ( + "context" + "fmt" + "os" + + appproviderv0alphapb "github.com/cernbox/go-cs3apis/cs3/appprovider/v0alpha" + rpcpb "github.com/cernbox/go-cs3apis/cs3/rpc" +) + +func appProviderGetIFrameCommand() *command { + cmd := newCommand("app-provider-get-iframe") + cmd.Description = func() string { + return "find iframe UI provider for filename" + } + cmd.Action = func() error { + if cmd.NArg() < 3 { + fmt.Println(cmd.Usage()) + os.Exit(1) + } + + appProvider := cmd.Args()[0] + fn := cmd.Args()[1] + token := cmd.Args()[2] + req := &appproviderv0alphapb.GetIFrameRequest{ + Filename: fn, + AccessToken: token, + } + + client, err := getAppProviderClient(appProvider) + if err != nil { + return err + } + ctx := context.Background() + res, err := client.GetIFrame(ctx, req) + if err != nil { + return err + } + + if res.Status.Code != rpcpb.Code_CODE_OK { + return formatError(res.Status) + } + + fmt.Printf("Load in your browser the following iframe to edit the resource: %s", res.IframeLocation) + return nil + } + return cmd +} diff --git a/cmd/reva/app-registry-find.go b/cmd/reva/app-registry-find.go new file mode 100644 index 0000000000..1d1dd8a099 --- /dev/null +++ b/cmd/reva/app-registry-find.go @@ -0,0 +1,51 @@ +package main + +import ( + "context" + "fmt" + "mime" + "os" + "path" + + appregistryv0alphapb "github.com/cernbox/go-cs3apis/cs3/appregistry/v0alpha" + rpcpb "github.com/cernbox/go-cs3apis/cs3/rpc" +) + +func appRegistryFindCommand() *command { + cmd := newCommand("app-registry-find") + cmd.Description = func() string { + return "find applicaton provider for file extension or mimetype" + } + cmd.Action = func() error { + if cmd.NArg() == 0 { + fmt.Println(cmd.Usage()) + os.Exit(1) + } + + fn := cmd.Args()[0] + ext := path.Ext(fn) + mime := mime.TypeByExtension(ext) + req := &appregistryv0alphapb.FindRequest{ + FilenameExtension: ext, + FilenameMimetype: mime, + } + + client, err := getAppRegistryClient() + if err != nil { + return err + } + ctx := context.Background() + res, err := client.Find(ctx, req) + if err != nil { + return err + } + + if res.Status.Code != rpcpb.Code_CODE_OK { + return formatError(res.Status) + } + + fmt.Printf("application provider can be found at %s\n", res.AppProviderInfo.Location) + return nil + } + return cmd +} diff --git a/cmd/reva/broker-find.go b/cmd/reva/broker-find.go new file mode 100644 index 0000000000..31fc8a267e --- /dev/null +++ b/cmd/reva/broker-find.go @@ -0,0 +1,43 @@ +package main + +import ( + "context" + "fmt" + + rpcpb "github.com/cernbox/go-cs3apis/cs3/rpc" + storagebrokerv0alphapb "github.com/cernbox/go-cs3apis/cs3/storagebroker/v0alpha" +) + +func brokerFindCommand() *command { + cmd := newCommand("broker-find") + cmd.Description = func() string { + return "find storage provider for path" 
+ } + cmd.Action = func() error { + fn := "/" + if cmd.NArg() >= 1 { + fn = cmd.Args()[0] + } + + req := &storagebrokerv0alphapb.FindRequest{ + Filename: fn, + } + client, err := getStorageBrokerClient() + if err != nil { + return err + } + ctx := context.Background() + res, err := client.Find(ctx, req) + if err != nil { + return err + } + + if res.Status.Code != rpcpb.Code_CODE_OK { + return formatError(res.Status) + } + + fmt.Printf("resource can be found at %s\n", res.ProviderInfo.Location) + return nil + } + return cmd +} diff --git a/cmd/reva/command.go b/cmd/reva/command.go new file mode 100644 index 0000000000..9577d0cd92 --- /dev/null +++ b/cmd/reva/command.go @@ -0,0 +1,35 @@ +package main + +import ( + "flag" + "fmt" +) + +// command is the representation to create commands +type command struct { + *flag.FlagSet + Name string + Action func() error + Usage func() string + Description func() string +} + +// newCommand creates a new command +func newCommand(name string) *command { + fs := flag.NewFlagSet(name, flag.ExitOnError) + cmd := &command{ + Name: name, + Usage: func() string { + return fmt.Sprintf("Usage: %s", name) + }, + Action: func() error { + fmt.Println("Hello REVA") + return nil + }, + Description: func() string { + return "TODO description" + }, + FlagSet: fs, + } + return cmd +} diff --git a/cmd/reva/common.go b/cmd/reva/common.go new file mode 100644 index 0000000000..465acc7f42 --- /dev/null +++ b/cmd/reva/common.go @@ -0,0 +1,83 @@ +package main + +import ( + "bufio" + "encoding/json" + "io/ioutil" + gouser "os/user" + "path" + "strings" + + "golang.org/x/crypto/ssh/terminal" +) + +func getConfigFile() string { + user, err := gouser.Current() + if err != nil { + panic(err) + } + + return path.Join(user.HomeDir, ".reva.config") +} + +func getTokenFile() string { + user, err := gouser.Current() + if err != nil { + panic(err) + } + + return path.Join(user.HomeDir, ".reva-token") +} + +func writeToken(token string) { + ioutil.WriteFile(getTokenFile(), []byte(token), 0600) +} + +func readToken() (string, error) { + data, err := ioutil.ReadFile(getTokenFile()) + if err != nil { + return "", err + } + return string(data), nil +} + +func readConfig() (*config, error) { + data, err := ioutil.ReadFile(getConfigFile()) + if err != nil { + return nil, err + } + + c := &config{} + if err := json.Unmarshal(data, c); err != nil { + return nil, err + } + + return c, nil +} + +func writeConfig(c *config) error { + data, err := json.Marshal(c) + if err != nil { + return err + } + return ioutil.WriteFile(getConfigFile(), data, 0600) +} + +type config struct { + Host string `json:"host"` +} + +func read(r *bufio.Reader) (string, error) { + text, err := r.ReadString('\n') + if err != nil { + return "", err + } + return strings.TrimSpace(text), nil +} +func readPassword(fd int) (string, error) { + bytePassword, err := terminal.ReadPassword(fd) + if err != nil { + return "", err + } + return strings.TrimSpace(string(bytePassword)), nil +} diff --git a/cmd/reva/configure.go b/cmd/reva/configure.go new file mode 100644 index 0000000000..2a4a4eaac2 --- /dev/null +++ b/cmd/reva/configure.go @@ -0,0 +1,26 @@ +package main + +import ( + "bufio" + "fmt" + "os" +) + +var configureCommand = func() *command { + cmd := newCommand("configure") + cmd.Description = func() string { return "configure the reva client" } + cmd.Action = func() error { + reader := bufio.NewReader(os.Stdin) + fmt.Print("host: ") + text, err := read(reader) + if err != nil { + return err + } + + c := &config{Host: text} + 
writeConfig(c) + fmt.Println("config saved in ", getConfigFile()) + return nil + } + return cmd +} diff --git a/cmd/reva/download.go b/cmd/reva/download.go new file mode 100644 index 0000000000..d1e51787cd --- /dev/null +++ b/cmd/reva/download.go @@ -0,0 +1,91 @@ +package main + +import ( + "bytes" + "context" + "fmt" + "io" + "os" + + rpcpb "github.com/cernbox/go-cs3apis/cs3/rpc" + storageproviderv0alphapb "github.com/cernbox/go-cs3apis/cs3/storageprovider/v0alpha" + "github.com/cheggaaa/pb" +) + +func downloadCommand() *command { + cmd := newCommand("download") + cmd.Description = func() string { return "download a remote file into the local filesystem" } + cmd.Action = func() error { + fn := "/" + if cmd.NArg() < 3 { + fmt.Println(cmd.Usage()) + os.Exit(1) + } + + provider := cmd.Args()[0] + fn = cmd.Args()[1] + target := cmd.Args()[2] + + client, err := getStorageProviderClient(provider) + if err != nil { + return err + } + + req1 := &storageproviderv0alphapb.StatRequest{Filename: fn} + ctx := context.Background() + res1, err := client.Stat(ctx, req1) + if err != nil { + return err + } + if res1.Status.Code != rpcpb.Code_CODE_OK { + return formatError(res1.Status) + } + + md := res1.Metadata + + fd, err := os.OpenFile(target, os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return err + } + + req2 := &storageproviderv0alphapb.ReadRequest{Filename: fn} + ctx = context.Background() + stream, err := client.Read(ctx, req2) + if err != nil { + return err + } + + bar := pb.New(int(md.Size)).SetUnits(pb.U_BYTES) + bar.Start() + var reader io.Reader + for { + res, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + return err + } + if res.Status.Code != rpcpb.Code_CODE_OK { + return formatError(res.Status) + } + dc := res.DataChunk + + if dc != nil { + if dc.Length > 0 { + reader = bytes.NewReader(dc.Data) + reader = bar.NewProxyReader(reader) + + _, err := io.CopyN(fd, reader, int64(dc.Length)) + if err != nil { + return err + } + } + } + } + bar.Finish() + return nil + + } + return cmd +} diff --git a/cmd/reva/grpc.go b/cmd/reva/grpc.go new file mode 100644 index 0000000000..a2e3e4006c --- /dev/null +++ b/cmd/reva/grpc.go @@ -0,0 +1,73 @@ +package main + +import ( + "fmt" + + "github.com/pkg/errors" + + appproviderv0alphapb "github.com/cernbox/go-cs3apis/cs3/appprovider/v0alpha" + appregistryv0alphapb "github.com/cernbox/go-cs3apis/cs3/appregistry/v0alpha" + authv0alphapb "github.com/cernbox/go-cs3apis/cs3/auth/v0alpha" + rpcpb "github.com/cernbox/go-cs3apis/cs3/rpc" + storagebrokerv0alphapb "github.com/cernbox/go-cs3apis/cs3/storagebroker/v0alpha" + storageproviderv0alphapb "github.com/cernbox/go-cs3apis/cs3/storageprovider/v0alpha" + + "google.golang.org/grpc" +) + +func getAppProviderClient(host string) (appproviderv0alphapb.AppProviderServiceClient, error) { + conn, err := getConnToHost(host) + if err != nil { + return nil, err + } + return appproviderv0alphapb.NewAppProviderServiceClient(conn), nil +} +func getStorageBrokerClient() (storagebrokerv0alphapb.StorageBrokerServiceClient, error) { + conn, err := getConn() + if err != nil { + return nil, err + } + return storagebrokerv0alphapb.NewStorageBrokerServiceClient(conn), nil +} + +func getAppRegistryClient() (appregistryv0alphapb.AppRegistryServiceClient, error) { + conn, err := getConn() + if err != nil { + return nil, err + } + return appregistryv0alphapb.NewAppRegistryServiceClient(conn), nil +} + +func getStorageProviderClient(host string) (storageproviderv0alphapb.StorageProviderServiceClient, error) { + 
conn, err := getConnToHost(host) + if err != nil { + return nil, err + } + return storageproviderv0alphapb.NewStorageProviderServiceClient(conn), nil +} + +func getAuthClient() (authv0alphapb.AuthServiceClient, error) { + conn, err := getConn() + if err != nil { + return nil, err + } + return authv0alphapb.NewAuthServiceClient(conn), nil +} + +func getConn() (*grpc.ClientConn, error) { + return grpc.Dial(conf.Host, grpc.WithInsecure()) +} + +func getConnToHost(host string) (*grpc.ClientConn, error) { + return grpc.Dial(host, grpc.WithInsecure()) +} + +func formatError(status *rpcpb.Status) error { + switch status.Code { + case rpcpb.Code_CODE_NOT_FOUND: + return errors.New("error: not found") + + default: + return errors.New(fmt.Sprintf("apierror: code=%v msg=%s", status.Code, status.Message)) + } +} diff --git a/cmd/reva/login.go b/cmd/reva/login.go new file mode 100644 index 0000000000..f34f35ce4a --- /dev/null +++ b/cmd/reva/login.go @@ -0,0 +1,64 @@ +package main + +import ( + "bufio" + "context" + "fmt" + "os" + + authv0alphapb "github.com/cernbox/go-cs3apis/cs3/auth/v0alpha" + rpcpb "github.com/cernbox/go-cs3apis/cs3/rpc" +) + +var loginCommand = func() *command { + cmd := newCommand("login") + cmd.Description = func() string { return "login into the reva server" } + cmd.Action = func() error { + var username, password string + if cmd.NArg() >= 2 { + username = cmd.Args()[0] + password = cmd.Args()[1] + } else { + reader := bufio.NewReader(os.Stdin) + fmt.Print("username: ") + usernameInput, err := read(reader) + if err != nil { + return err + } + + fmt.Print("password: ") + passwordInput, err := readPassword(0) + if err != nil { + return err + } + + username = usernameInput + password = passwordInput + } + + client, err := getAuthClient() + if err != nil { + return err + } + + req := &authv0alphapb.GenerateAccessTokenRequest{ + Username: username, + Password: password, + } + + ctx := context.Background() + res, err := client.GenerateAccessToken(ctx, req) + if err != nil { + return err + } + + if res.Status.Code != rpcpb.Code_CODE_OK { + return formatError(res.Status) + } + + writeToken(res.AccessToken) + fmt.Println("OK") + return nil + } + return cmd +} diff --git a/cmd/reva/ls.go b/cmd/reva/ls.go new file mode 100644 index 0000000000..3ece66462f --- /dev/null +++ b/cmd/reva/ls.go @@ -0,0 +1,65 @@ +package main + +import ( + "context" + "fmt" + "io" + "os" + + rpcpb "github.com/cernbox/go-cs3apis/cs3/rpc" + storageproviderv0alphapb "github.com/cernbox/go-cs3apis/cs3/storageprovider/v0alpha" +) + +func lsCommand() *command { + cmd := newCommand("ls") + cmd.Description = func() string { return "list a folder contents" } + longFlag := cmd.Bool("l", false, "long listing") + cmd.Action = func() error { + if cmd.NArg() < 2 { + fmt.Println(cmd.Usage()) + os.Exit(1) + } + + provider := cmd.Args()[0] + fn := cmd.Args()[1] + client, err := getStorageProviderClient(provider) + if err != nil { + return err + } + + req := &storageproviderv0alphapb.ListRequest{ + Filename: fn, + } + + ctx := context.Background() + stream, err := client.List(ctx, req) + if err != nil { + return err + } + + mds := []*storageproviderv0alphapb.Metadata{} + for { + res, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + return err + } + if res.Status.Code != rpcpb.Code_CODE_OK { + return formatError(res.Status) + } + mds = append(mds, res.Metadata) + } + + for _, md := range mds { + if *longFlag { + fmt.Printf("%+v %d %d %s\n", md.Permissions, md.Mtime, md.Size, md.Filename) + } else { + 
fmt.Println(md.Filename) + } + } + return nil + } + return cmd +} diff --git a/cmd/reva/main.go b/cmd/reva/main.go new file mode 100644 index 0000000000..917e1394d3 --- /dev/null +++ b/cmd/reva/main.go @@ -0,0 +1,87 @@ +package main + +import ( + "fmt" + "os" + "strings" +) + +var ( + conf *config +) + +func main() { + + cmds := []*command{ + configureCommand(), + loginCommand(), + whoamiCommand(), + lsCommand(), + statCommand(), + uploadCommand(), + downloadCommand(), + rmCommand(), + moveCommand(), + mkdirCommand(), + brokerFindCommand(), + appRegistryFindCommand(), + appProviderGetIFrameCommand(), + } + + mainUsage := createMainUsage(cmds) + + // Verify that a subcommand has been provided + // os.Arg[0] is the main command + // os.Arg[1] will be the subcommand + if len(os.Args) < 2 { + fmt.Println(mainUsage) + os.Exit(1) + } + + // Verify a configuration file exists. + // If if does not, create one + c, err := readConfig() + if err != nil && os.Args[1] != "configure" { + fmt.Println("reva is not initialized, run \"reva configure\"") + os.Exit(1) + } else { + if os.Args[1] != "configure" { + conf = c + } + } + + // Run command + action := os.Args[1] + for _, v := range cmds { + if v.Name == action { + v.Parse(os.Args[2:]) + err := v.Action() + if err != nil { + fmt.Println(err) + os.Exit(1) + } + os.Exit(0) + } + } + + fmt.Println(mainUsage) + os.Exit(1) +} + +func createMainUsage(cmds []*command) string { + n := 0 + for _, cmd := range cmds { + l := len(cmd.Name) + if l > n { + n = l + } + } + + usage := "Command line interface to REVA\n\n" + for _, cmd := range cmds { + usage += fmt.Sprintf("%s%s%s\n", cmd.Name, strings.Repeat(" ", 4+(n-len(cmd.Name))), cmd.Description()) + } + usage += "\nAuthors: hugo.gonzalez.labrador@cern.ch" + usage += "\nCopyright: CERN IT Storage Group" + return usage +} diff --git a/cmd/reva/mkdir.go b/cmd/reva/mkdir.go new file mode 100644 index 0000000000..1aaba35089 --- /dev/null +++ b/cmd/reva/mkdir.go @@ -0,0 +1,43 @@ +package main + +import ( + "context" + "fmt" + "os" + + rpcpb "github.com/cernbox/go-cs3apis/cs3/rpc" + storageproviderv0alphapb "github.com/cernbox/go-cs3apis/cs3/storageprovider/v0alpha" +) + +func mkdirCommand() *command { + cmd := newCommand("mkdir") + cmd.Description = func() string { return "creates a folder" } + cmd.Action = func() error { + if cmd.NArg() < 2 { + fmt.Println(cmd.Usage()) + os.Exit(1) + } + + fn := cmd.Args()[0] + provider := cmd.Args()[1] + + ctx := context.Background() + client, err := getStorageProviderClient(provider) + if err != nil { + return err + } + + req := &storageproviderv0alphapb.CreateDirectoryRequest{Filename: fn} + res, err := client.CreateDirectory(ctx, req) + if err != nil { + return err + } + + if res.Status.Code != rpcpb.Code_CODE_OK { + return formatError(res.Status) + } + + return nil + } + return cmd +} diff --git a/cmd/reva/mv.go b/cmd/reva/mv.go new file mode 100644 index 0000000000..cc5e34caf9 --- /dev/null +++ b/cmd/reva/mv.go @@ -0,0 +1,44 @@ +package main + +import ( + "context" + "fmt" + "os" + + rpcpb "github.com/cernbox/go-cs3apis/cs3/rpc" + storageproviderv0alphapb "github.com/cernbox/go-cs3apis/cs3/storageprovider/v0alpha" +) + +func moveCommand() *command { + cmd := newCommand("mv") + cmd.Description = func() string { return "moves/rename a file/folder" } + cmd.Action = func() error { + if cmd.NArg() < 3 { + fmt.Println(cmd.Usage()) + os.Exit(1) + } + + provider := cmd.Args()[0] + src := cmd.Args()[1] + dst := cmd.Args()[2] + + ctx := context.Background() + client, err := 
getStorageProviderClient(provider) + if err != nil { + return err + } + + req := &storageproviderv0alphapb.MoveRequest{SourceFilename: src, TargetFilename: dst} + res, err := client.Move(ctx, req) + if err != nil { + return err + } + + if res.Status.Code != rpcpb.Code_CODE_OK { + return formatError(res.Status) + } + + return nil + } + return cmd +} diff --git a/cmd/reva/rm.go b/cmd/reva/rm.go new file mode 100644 index 0000000000..5b02238c54 --- /dev/null +++ b/cmd/reva/rm.go @@ -0,0 +1,42 @@ +package main + +import ( + "context" + "fmt" + "os" + + rpcpb "github.com/cernbox/go-cs3apis/cs3/rpc" + storageproviderv0alphapb "github.com/cernbox/go-cs3apis/cs3/storageprovider/v0alpha" +) + +func rmCommand() *command { + cmd := newCommand("rm") + cmd.Description = func() string { return "removes a file or folder" } + cmd.Action = func() error { + if cmd.NArg() < 2 { + fmt.Println(cmd.Usage()) + os.Exit(1) + } + + provider := cmd.Args()[0] + fn := cmd.Args()[1] + ctx := context.Background() + client, err := getStorageProviderClient(provider) + if err != nil { + return err + } + + req := &storageproviderv0alphapb.DeleteRequest{Filename: fn} + res, err := client.Delete(ctx, req) + if err != nil { + return err + } + + if res.Status.Code != rpcpb.Code_CODE_OK { + return formatError(res.Status) + } + + return nil + } + return cmd +} diff --git a/cmd/reva/stat.go b/cmd/reva/stat.go new file mode 100644 index 0000000000..1981add916 --- /dev/null +++ b/cmd/reva/stat.go @@ -0,0 +1,43 @@ +package main + +import ( + "context" + "fmt" + "os" + + rpcpb "github.com/cernbox/go-cs3apis/cs3/rpc" + storageproviderv0alphapb "github.com/cernbox/go-cs3apis/cs3/storageprovider/v0alpha" +) + +func statCommand() *command { + cmd := newCommand("stat") + cmd.Description = func() string { return "get the metadata for a file or folder" } + cmd.Action = func() error { + if cmd.NArg() < 2 { + fmt.Println(cmd.Usage()) + os.Exit(1) + } + + provider := cmd.Args()[0] + fn := cmd.Args()[1] + ctx := context.Background() + client, err := getStorageProviderClient(provider) + if err != nil { + return err + } + + req := &storageproviderv0alphapb.StatRequest{Filename: fn} + res, err := client.Stat(ctx, req) + if err != nil { + return err + } + + if res.Status.Code != rpcpb.Code_CODE_OK { + return formatError(res.Status) + } + + fmt.Println(res.Metadata) + return nil + } + return cmd +} diff --git a/cmd/reva/upload.go b/cmd/reva/upload.go new file mode 100644 index 0000000000..354a1f6268 --- /dev/null +++ b/cmd/reva/upload.go @@ -0,0 +1,130 @@ +package main + +import ( + "context" + "crypto/md5" + "fmt" + "io" + "os" + + "github.com/cheggaaa/pb" + + rpcpb "github.com/cernbox/go-cs3apis/cs3/rpc" + storageproviderv0alphapb "github.com/cernbox/go-cs3apis/cs3/storageprovider/v0alpha" +) + +func uploadCommand() *command { + cmd := newCommand("upload") + cmd.Description = func() string { return "upload a local file to the remote server" } + cmd.Action = func() error { + if cmd.NArg() < 3 { + fmt.Println(cmd.Usage()) + os.Exit(1) + } + + provider := cmd.Args()[0] + fn := cmd.Args()[1] + target := cmd.Args()[2] + + fd, err := os.Open(fn) + if err != nil { + return err + } + md, err := fd.Stat() + if err != nil { + return err + } + defer fd.Close() + + client, err := getStorageProviderClient(provider) + if err != nil { + return err + } + + req1 := &storageproviderv0alphapb.StartWriteSessionRequest{} + ctx := context.Background() + res1, err := client.StartWriteSession(ctx, req1) + if err != nil { + return err + } + + if res1.Status.Code != 
rpcpb.Code_CODE_OK {
+			return formatError(res1.Status)
+		}
+
+		sessID := res1.SessionId
+		fmt.Println("Write session ID: ", sessID)
+
+		ctx = context.Background()
+		stream, err := client.Write(ctx)
+		if err != nil {
+			return err
+		}
+
+		bar := pb.New(int(md.Size())).SetUnits(pb.U_BYTES)
+		xs := md5.New()
+		nchunks, offset := 0, 0
+		// TODO(labkode): change buffer size in configuration
+		bufferSize := 1024 * 1024 * 3
+		buffer := make([]byte, bufferSize)
+		writer := io.MultiWriter(xs, bar)
+		bar.Start()
+		for {
+			n, err := fd.Read(buffer)
+			if n > 0 {
+				writer.Write(buffer[:n])
+				req := &storageproviderv0alphapb.WriteRequest{
+					Data:      buffer[:n],
+					Length:    uint64(n),
+					Offset:    uint64(offset),
+					SessionId: sessID,
+				}
+				if err := stream.Send(req); err != nil {
+					return err
+				}
+				nchunks++
+				offset += n
+			}
+			if err == io.EOF {
+				break
+			}
+			if err != nil {
+				return err
+			}
+		}
+
+		bar.Finish()
+		res2, err := stream.CloseAndRecv()
+		if err != nil {
+			return err
+		}
+
+		if res2.Status.Code != rpcpb.Code_CODE_OK {
+			return formatError(res2.Status)
+		}
+
+		//wb := res2.WrittenBytes
+
+		//fmt.Println("Written bytes: ", wb, " NumChunks: ", nchunks, " MD5: ", fmt.Sprintf("%x", xs.Sum(nil)))
+
+		fmt.Println("Closing write session ...")
+		req3 := &storageproviderv0alphapb.FinishWriteSessionRequest{
+			Filename:  target,
+			SessionId: sessID,
+			Checksum:  fmt.Sprintf("md5:%x", xs.Sum(nil)),
+		}
+		ctx = context.Background()
+		res3, err := client.FinishWriteSession(ctx, req3)
+		if err != nil {
+			return err
+		}
+
+		if res3.Status.Code != rpcpb.Code_CODE_OK {
+			return formatError(res3.Status)
+		}
+
+		fmt.Println("Upload succeeded")
+		return nil
+	}
+	return cmd
+}
diff --git a/cmd/reva/whoami.go b/cmd/reva/whoami.go
new file mode 100644
index 0000000000..975125e7e4
--- /dev/null
+++ b/cmd/reva/whoami.go
@@ -0,0 +1,58 @@
+package main
+
+import (
+	"context"
+	"fmt"
+	"os"
+
+	authv0alphapb "github.com/cernbox/go-cs3apis/cs3/auth/v0alpha"
+	rpcpb "github.com/cernbox/go-cs3apis/cs3/rpc"
+)
+
+func whoamiCommand() *command {
+	cmd := newCommand("whoami")
+	cmd.Description = func() string { return "tells who you are" }
+	tokenFlag := cmd.String("token", "", "access token to use")
+
+	cmd.Action = func() error {
+		if cmd.NArg() != 0 {
+			cmd.PrintDefaults()
+			os.Exit(1)
+		}
+		var token string
+		if *tokenFlag != "" {
+			token = *tokenFlag
+		} else {
+			// read token from file
+			t, err := readToken()
+			if err != nil {
+				fmt.Println("the token cannot be read from file ", getTokenFile())
+				fmt.Println("make sure you have logged in before with \"reva login\"")
+				return err
+			}
+			token = t
+		}
+
+		client, err := getAuthClient()
+		if err != nil {
+			return err
+		}
+
+		req := &authv0alphapb.WhoAmIRequest{AccessToken: token}
+
+		ctx := context.Background()
+		res, err := client.WhoAmI(ctx, req)
+		if err != nil {
+			return err
+		}
+
+		if res.Status.Code != rpcpb.Code_CODE_OK {
+			return formatError(res.Status)
+		}
+
+		me := res.User
+		fmt.Printf("username: %s\ndisplay_name: %s\nmail: %s\ngroups: %v\n", me.Username, me.DisplayName, me.Mail, me.Groups)
+		return nil
+	}
+	return cmd
+}
diff --git a/cmd/revad/config/config.go b/cmd/revad/config/config.go
new file mode 100644
index 0000000000..9e21c12196
--- /dev/null
+++ b/cmd/revad/config/config.go
@@ -0,0 +1,27 @@
+package config
+
+import (
+	"github.com/spf13/viper"
+)
+
+var v *viper.Viper
+
+func init() {
+	v = viper.New()
+}
+
+func SetFile(fn string) {
+	v.SetConfigFile(fn)
+}
+
+func Read() error {
+	return v.ReadInConfig()
+}
+
+func Get(key string) map[string]interface{} {
return v.GetStringMap(key) +} + +func Dump() map[string]interface{} { + return v.AllSettings() +} diff --git a/cmd/revad/grace/grace.go b/cmd/revad/grace/grace.go new file mode 100644 index 0000000000..8088e6f9c2 --- /dev/null +++ b/cmd/revad/grace/grace.go @@ -0,0 +1,287 @@ +package grace + +import ( + "context" + "fmt" + "io/ioutil" + "net" + "os" + "os/signal" + "path/filepath" + "strconv" + "syscall" + "time" + + "github.com/cernbox/reva/pkg/err" + "github.com/cernbox/reva/pkg/log" +) + +var ( + ctx = context.Background() + logger = log.New("grace") + errors = err.New("grace") + graceful = os.Getenv("GRACEFUL") == "true" + parentPID = os.Getppid() + listeners = []net.Listener{} + srvrs = []Server{} + pidFile string + childrenPID = []int{} +) + +func Exit(errc int) { + err := removePIDFile() + if err != nil { + logger.Error(ctx, err) + } else { + logger.Println(ctx, "pidfile got removed") + } + + os.Exit(errc) +} + +func getPIDFromFile(fn string) (int, error) { + piddata, err := ioutil.ReadFile(fn) + if err != nil { + return 0, err + } + // Convert the file contents to an integer. + pid, err := strconv.Atoi(string(piddata)) + if err != nil { + return 0, err + } + return pid, nil +} + +// Write a pid file, but first make sure it doesn't exist with a running pid. +func WritePIDFile(fn string) error { + // Read in the pid file as a slice of bytes. + if piddata, err := ioutil.ReadFile(fn); err == nil { + // Convert the file contents to an integer. + if pid, err := strconv.Atoi(string(piddata)); err == nil { + // Look for the pid in the process list. + if process, err := os.FindProcess(pid); err == nil { + // Send the process a signal zero kill. + if err := process.Signal(syscall.Signal(0)); err == nil { + if !graceful { + // We only get an error if the pid isn't running, or it's not ours. + return fmt.Errorf("pid already running: %d", pid) + } + + if pid != parentPID { // overwrite only if parent pid is pidfile + // We only get an error if the pid isn't running, or it's not ours. + return fmt.Errorf("pid %d is not this process parent", pid) + } + } else { + logger.Error(ctx, err) + } + } else { + logger.Error(ctx, err) + } + } else { + logger.Error(ctx, err) + } + } else { + logger.Error(ctx, err) + } + + // If we get here, then the pidfile didn't exist or we are are in graceful reload and thus we overwrite + // or the pid in it doesn't belong to the user running this app. + err := ioutil.WriteFile(fn, []byte(fmt.Sprintf("%d", os.Getpid())), 0664) + if err != nil { + return err + } + logger.Printf(ctx, "pid file written to %s", fn) + pidFile = fn + return nil +} + +func newListener(network, addr string) (net.Listener, error) { + return net.Listen(network, addr) +} + +// return grpc listener first and http listener second. 
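+// When the GRACEFUL environment variable is set to "true", the listeners are
+// rebuilt from the file descriptors inherited from the parent process
+// (starting at fd 3, see forkChild) and the parent is then asked to stop with
+// SIGQUIT; otherwise fresh listeners are created for each server's
+// network/address pair.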
+func GetListeners(servers []Server) ([]net.Listener, error) { + srvrs = servers + lns := []net.Listener{} + if graceful { + logger.Println(ctx, "graceful restart, inheriting parent ln fds for grpc and http") + count := 3 + for _, s := range servers { + network, addr := s.Network(), s.Address() + fd := os.NewFile(uintptr(count), "") // 3 because ExtraFile passed to new process + count++ + ln, err := net.FileListener(fd) + if err != nil { + logger.Error(ctx, err) + // create new fd + ln, err := newListener(network, addr) + if err != nil { + return nil, err + } + lns = append(lns, ln) + } else { + lns = append(lns, ln) + } + + } + // kill parent + logger.Printf(ctx, "killing parent pid gracefully with SIGQUIT: %d", parentPID) + syscall.Kill(parentPID, syscall.SIGQUIT) + listeners = lns + return lns, nil + } else { + // create two listeners for grpc and http + for _, s := range servers { + network, addr := s.Network(), s.Address() + ln, err := newListener(network, addr) + if err != nil { + return nil, err + } + lns = append(lns, ln) + + } + listeners = lns + return lns, nil + } +} + +type Server interface { + Stop() error + GracefulStop() error + Network() string + Address() string +} + +func removePIDFile() error { + // only remove PID file if the PID written is us + filePID, err := getPIDFromFile(pidFile) + if err != nil { + return err + } + + if filePID != os.Getpid() { + return fmt.Errorf("pid in pidfile is different from running pid") + } + + return os.Remove(pidFile) +} + +func TrapSignals() { + signalCh := make(chan os.Signal, 1024) + signal.Notify(signalCh, syscall.SIGHUP, syscall.SIGINT, syscall.SIGQUIT) + for { + select { + case s := <-signalCh: + logger.Printf(ctx, "%v signal received", s) + switch s { + case syscall.SIGHUP: + logger.Println(ctx, "preparing for a hot-reload, forking child process...") + // Fork a child process. + listeners := getListeners() + p, err := forkChild(listeners...) 
+ if err != nil { + logger.Println(ctx, "unable to fork child process: ", err) + } else { + logger.Printf(ctx, "child forked with new pid %d", p.Pid) + childrenPID = append(childrenPID, p.Pid) + } + + case syscall.SIGQUIT: + logger.Println(ctx, "preparing for a graceful shutdown with deadline of 10 seconds") + go func() { + count := 10 + for range time.Tick(time.Second) { + logger.Printf(ctx, "shuting down in %d seconds", count-1) + count-- + if count <= 0 { + logger.Println(ctx, "deadline reached before draining active conns, hard stoping ...") + for _, s := range srvrs { + s.Stop() + logger.Printf(ctx, "fd to %s:%s abruptly closed", s.Network(), s.Address()) + } + Exit(1) + } + } + }() + for _, s := range srvrs { + logger.Printf(ctx, "fd to %s:%s gracefully closed ", s.Network(), s.Address()) + s.GracefulStop() + } + logger.Println(ctx, "exit with error code 0") + Exit(0) + case syscall.SIGINT, syscall.SIGTERM: + logger.Println(ctx, "preparing for hard shutdown, aborting all conns") + for _, s := range srvrs { + logger.Printf(ctx, "fd to %s:%s abruptly closed", s.Network(), s.Address()) + err := s.Stop() + if err != nil { + err = errors.Wrap(err, "error stopping server") + logger.Error(ctx, err) + } + } + Exit(0) + } + } + } +} + +func getListenerFile(ln net.Listener) (*os.File, error) { + switch t := ln.(type) { + case *net.TCPListener: + return t.File() + case *net.UnixListener: + return t.File() + } + return nil, fmt.Errorf("unsupported listener: %T", ln) +} + +func forkChild(lns ...net.Listener) (*os.Process, error) { + // Get the file descriptor for the listener and marshal the metadata to pass + // to the child in the environment. + fds := []*os.File{} + for _, ln := range lns { + fd, err := getListenerFile(ln) + if err != nil { + return nil, err + } + fds = append(fds, fd) + } + + // Pass stdin, stdout, and stderr along with the listener file to the child + files := []*os.File{ + os.Stdin, + os.Stdout, + os.Stderr, + } + files = append(files, fds...) + + // Get current environment and add in the listener to it. + environment := append(os.Environ(), "GRACEFUL=true") + + // Get current process name and directory. + execName, err := os.Executable() + if err != nil { + return nil, err + } + execDir := filepath.Dir(execName) + + // Spawn child process. 
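+	// The child re-executes the same binary with the same arguments. It inherits
+	// stdin/stdout/stderr plus the listener fds appended above (fds 3 and up),
+	// and GRACEFUL=true in its environment makes GetListeners reuse those fds.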
+ p, err := os.StartProcess(execName, os.Args, &os.ProcAttr{ + Dir: execDir, + Env: environment, + Files: files, + Sys: &syscall.SysProcAttr{}, + }) + + // TODO(labkode): if the process dies (because config changed and is wrong + // we need to return an error + if err != nil { + return nil, err + } + + return p, nil +} + +func getListeners() []net.Listener { + return listeners +} diff --git a/cmd/revad/grpcsvr/grpcsvr.go b/cmd/revad/grpcsvr/grpcsvr.go new file mode 100644 index 0000000000..b25ce8f039 --- /dev/null +++ b/cmd/revad/grpcsvr/grpcsvr.go @@ -0,0 +1,178 @@ +package grpcsvr + +import ( + "context" + "fmt" + "net" + + storagebrokerv0alphapb "github.com/cernbox/go-cs3apis/cs3/storagebroker/v0alpha" + + appproviderv0alphapb "github.com/cernbox/go-cs3apis/cs3/appprovider/v0alpha" + + appregistryv0alphapb "github.com/cernbox/go-cs3apis/cs3/appregistry/v0alpha" + authv0alphapb "github.com/cernbox/go-cs3apis/cs3/auth/v0alpha" + storageproviderv0alphapb "github.com/cernbox/go-cs3apis/cs3/storageprovider/v0alpha" + + "github.com/cernbox/reva/pkg/err" + "github.com/cernbox/reva/pkg/log" + "github.com/cernbox/reva/services/grpcsvc/appprovidersvc" + "github.com/cernbox/reva/services/grpcsvc/appregistrysvc" + + "github.com/cernbox/reva/services/grpcsvc/authsvc" + "github.com/cernbox/reva/services/grpcsvc/interceptors" + "github.com/cernbox/reva/services/grpcsvc/storagebrokersvc" + "github.com/cernbox/reva/services/grpcsvc/storageprovidersvc" + + grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" + grpc_recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery" + grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" + + "github.com/mitchellh/mapstructure" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +var ( + ctx = context.Background() + logger = log.New("grpcsvr") + errors = err.New("grpcsvr") +) + +type config struct { + Network string `mapstructure:"network"` + Address string `mapstructure:"address"` + ShutdownDeadline int `mapstructure:"shutdown_deadline"` + EnabledServices []string `mapstructure:"enabled_services"` + StorageProviderSvc map[string]interface{} `mapstructure:"storage_provider_svc"` + AuthSvc map[string]interface{} `mapstructure:"auth_svc"` + StorageBrokerSvc map[string]interface{} `mapstructure:"storage_broker_svc"` + AppRegistrySvc map[string]interface{} `mapstructure:"app_registry_svc"` + AppProviderSvc map[string]interface{} `mapstructure:"app_provider_svc"` +} + +type Server struct { + s *grpc.Server + conf *config + listener net.Listener +} + +func New(m map[string]interface{}) (*Server, error) { + conf := &config{} + if err := mapstructure.Decode(m, conf); err != nil { + return nil, err + } + + opts := getOpts() + s := grpc.NewServer(opts...) 
+ + return &Server{s: s, conf: conf}, nil +} + +func (s *Server) Start(ln net.Listener) error { + if err := s.registerServices(); err != nil { + err = errors.Wrap(err, "unable to register service") + return err + } + + s.listener = ln + + err := s.s.Serve(s.listener) + if err != nil { + err = errors.Wrap(err, "serve failed") + return err + } else { + return nil + } +} + +func (s *Server) Stop() error { + s.s.Stop() + return nil +} + +func (s *Server) GracefulStop() error { + s.s.GracefulStop() + return nil +} + +func (s *Server) Network() string { + return s.conf.Network +} + +func (s *Server) Address() string { + return s.conf.Address +} + +func (s *Server) registerServices() error { + enabled := []string{} + for _, k := range s.conf.EnabledServices { + switch k { + case "storage_provider_svc": + svc, err := storageprovidersvc.New(s.conf.StorageProviderSvc) + if err != nil { + return errors.Wrap(err, "unable to register service "+k) + } + storageproviderv0alphapb.RegisterStorageProviderServiceServer(s.s, svc) + enabled = append(enabled, k) + case "auth_svc": + svc, err := authsvc.New(s.conf.AuthSvc) + if err != nil { + return errors.Wrap(err, "unable to register service "+k) + } + authv0alphapb.RegisterAuthServiceServer(s.s, svc) + enabled = append(enabled, k) + + case "storage_broker_svc": + svc, err := storagebrokersvc.New(s.conf.StorageBrokerSvc) + if err != nil { + return errors.Wrap(err, "unable to register service "+k) + } + storagebrokerv0alphapb.RegisterStorageBrokerServiceServer(s.s, svc) + enabled = append(enabled, k) + case "app_registry_svc": + svc, err := appregistrysvc.New(s.conf.AppRegistrySvc) + if err != nil { + return errors.Wrap(err, "unable to register service "+k) + } + appregistryv0alphapb.RegisterAppRegistryServiceServer(s.s, svc) + enabled = append(enabled, k) + case "app_provider_svc": + svc, err := appprovidersvc.New(s.conf.AppProviderSvc) + if err != nil { + return errors.Wrap(err, "unable to register service "+k) + } + appproviderv0alphapb.RegisterAppProviderServiceServer(s.s, svc) + enabled = append(enabled, k) + } + } + if len(enabled) == 0 { + logger.Println(ctx, "no services enabled") + } else { + for k := range enabled { + logger.Printf(ctx, "grpc service enabled: %s", enabled[k]) + } + } + return nil +} + +func getOpts() []grpc.ServerOption { + opts := []grpc.ServerOption{ + grpc.UnaryInterceptor( + grpc_middleware.ChainUnaryServer( + grpc_recovery.UnaryServerInterceptor(grpc_recovery.WithRecoveryHandlerContext(recoveryFunc)), + interceptors.TraceUnaryServerInterceptor(), + interceptors.LogUnaryServerInterceptor(), + grpc_prometheus.UnaryServerInterceptor)), + grpc.StreamInterceptor( + grpc_middleware.ChainStreamServer( + grpc_recovery.StreamServerInterceptor(grpc_recovery.WithRecoveryHandlerContext(recoveryFunc)), + interceptors.TraceStreamServerInterceptor(), + grpc_prometheus.StreamServerInterceptor)), + } + return opts +} + +func recoveryFunc(ctx context.Context, p interface{}) (err error) { + logger.Panic(ctx, fmt.Sprintf("%+v", p)) + return grpc.Errorf(codes.Internal, "%s", p) +} diff --git a/cmd/revad/httpsvr/httpsvr.go b/cmd/revad/httpsvr/httpsvr.go new file mode 100644 index 0000000000..f31f2f3b89 --- /dev/null +++ b/cmd/revad/httpsvr/httpsvr.go @@ -0,0 +1,144 @@ +package httpsvr + +import ( + "context" + "net" + "net/http" + "time" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/cernbox/reva/services/httpsvc" + + "github.com/cernbox/reva/pkg/err" + "github.com/cernbox/reva/pkg/log" + 
"github.com/cernbox/reva/services/httpsvc/handlers" + "github.com/cernbox/reva/services/httpsvc/iframeuisvc" + "github.com/cernbox/reva/services/httpsvc/ocdavsvc" + "github.com/cernbox/reva/services/httpsvc/prometheussvc" + "github.com/cernbox/reva/services/httpsvc/webuisvc" + + "github.com/mitchellh/mapstructure" +) + +var ( + ctx = context.Background() + logger = log.New("httpsvr") + errors = err.New("httpsvr") +) + +type config struct { + Network string `mapstructure:"network"` + Address string `mapstructure:"address"` + EnabledServices []string `mapstructure:"enabled_services"` + WebUISvc map[string]interface{} `mapstructure:"webui_svc"` + OCDAVSvc map[string]interface{} `mapstructure:"ocdav_svc"` + PromSvc map[string]interface{} `mapstructure:"prometheus_svc"` + IFrameUISvc map[string]interface{} `mapstructure:"iframe_ui_svc"` +} + +// Server contains the server info. +type Server struct { + httpServer *http.Server + conf *config + listener net.Listener + svcs map[string]http.Handler +} + +// New returns a new server +func New(m map[string]interface{}) (*Server, error) { + conf := &config{} + if err := mapstructure.Decode(m, conf); err != nil { + return nil, err + } + + httpServer := &http.Server{} + return &Server{httpServer: httpServer, conf: conf}, nil +} + +// Start starts the server +func (s *Server) Start(ln net.Listener) error { + if err := s.registerServices(); err != nil { + return err + } + + s.httpServer.Handler = s.getHandler() + s.listener = ln + err := s.httpServer.Serve(s.listener) + if err == nil || err == http.ErrServerClosed { + return nil + } + return err +} + +func (s *Server) Stop() error { + // TODO(labkode): set ctx deadline to zero + ctx, _ = context.WithTimeout(ctx, time.Second) + return s.httpServer.Shutdown(ctx) +} + +func (s *Server) Network() string { + return s.conf.Network +} + +func (s *Server) Address() string { + return s.conf.Address +} + +func (s *Server) GracefulStop() error { + return s.httpServer.Shutdown(ctx) +} + +func (s *Server) registerServices() error { + svcs := map[string]http.Handler{} + var svc httpsvc.Service + var err error + for _, k := range s.conf.EnabledServices { + switch k { + case "webui_svc": + svc, err = webuisvc.New(s.conf.WebUISvc) + case "ocdav_svc": + svc, err = ocdavsvc.New(s.conf.OCDAVSvc) + case "prometheus_svc": + svc, err = prometheussvc.New(s.conf.PromSvc) + case "iframe_ui_svc": + svc, err = iframeuisvc.New(s.conf.IFrameUISvc) + } + + if err != nil { + return errors.Wrap(err, "unable to register service "+k) + } + svcs[svc.Prefix()] = svc.Handler() + } + + if len(svcs) == 0 { + logger.Println(ctx, "no services enabled") + } else { + for k := range s.conf.EnabledServices { + logger.Printf(ctx, "http service enabled: %s", s.conf.EnabledServices[k]) + } + } + + // instrument services with prometheus + for prefix, h := range svcs { + + svcs[prefix] = prometheus.InstrumentHandler(prefix, h) + } + s.svcs = svcs + return nil +} + +func (s *Server) getHandler() http.Handler { + h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var head string + head, r.URL.Path = httpsvc.ShiftPath(r.URL.Path) + //logger.Println(r.Context(), "http routing: head=", head, " tail=", r.URL.Path) + + if h, ok := s.svcs[head]; ok { + h.ServeHTTP(w, r) + return + } + w.WriteHeader(http.StatusNotFound) + }) + return handlers.TraceHandler(handlers.LogHandler(logger, h)) +} diff --git a/cmd/revad/main.go b/cmd/revad/main.go new file mode 100644 index 0000000000..2ad5433c13 --- /dev/null +++ b/cmd/revad/main.go @@ -0,0 +1,210 @@ 
+package main + +import ( + "context" + "flag" + "fmt" + "io" + "os" + "runtime" + "strconv" + "strings" + + "github.com/cernbox/reva/pkg/err" + "github.com/cernbox/reva/pkg/log" + + "github.com/cernbox/reva/cmd/revad/config" + "github.com/cernbox/reva/cmd/revad/grace" + "github.com/cernbox/reva/cmd/revad/grpcsvr" + "github.com/cernbox/reva/cmd/revad/httpsvr" + + "github.com/mitchellh/mapstructure" +) + +var ( + errors = err.New("main") + logger = log.New("main") + ctx = context.Background() + conf *coreConfig + + versionFlag = flag.Bool("v", false, "show version and exit") + testFlag = flag.Bool("t", false, "test configuration and exit") + signalFlag = flag.String("s", "", "send signal to a master process: stop, quit, reopen, reload") + fileFlag = flag.String("c", "/etc/revad/revad.toml", "set configuration file") + pidFlag = flag.String("p", "/var/run/revad.pid", "pid file") + + // provided at compile time + GitCommit, GitBranch, GitState, GitSummary, BuildDate, Version string +) + +func init() { + checkFlags() + writePIDFile() + readConfig() + log.Out = getLogOutput(conf.LogFile) + log.Mode = conf.LogMode + if err := log.EnableAll(); err != nil { + fmt.Fprintln(os.Stderr, err) + grace.Exit(1) + } +} + +func main() { + tweakCPU() + printLoggedPkgs() + + grpcSvr := getGRPCServer() + httpSvr := getHTTPServer() + servers := []grace.Server{grpcSvr, httpSvr} + listeners, err := grace.GetListeners(servers) + if err != nil { + logger.Error(ctx, err) + grace.Exit(1) + } + + go func() { + if err := grpcSvr.Start(listeners[0]); err != nil { + err = errors.Wrap(err, "error starting grpc server") + logger.Error(ctx, err) + grace.Exit(1) + } + }() + + go func() { + if err := httpSvr.Start(listeners[1]); err != nil { + err = errors.Wrap(err, "error starting http server") + logger.Error(ctx, err) + grace.Exit(1) + } + }() + + grace.TrapSignals() +} + +func getGRPCServer() *grpcsvr.Server { + s, err := grpcsvr.New(config.Get("grpc")) + if err != nil { + logger.Error(ctx, err) + grace.Exit(1) + } + return s +} + +func getHTTPServer() *httpsvr.Server { + s, err := httpsvr.New(config.Get("http")) + if err != nil { + logger.Error(ctx, err) + grace.Exit(1) + } + return s +} + +func checkFlags() { + flag.Parse() + + if *versionFlag { + msg := "Version: %s\n" + msg += "GitCommit: %s\n" + msg += "GitBranch: %s\n" + msg += "GitSummary: %s\n" + msg += "BuildDate: %s\n" + fmt.Printf(msg, Version, GitCommit, GitBranch, GitSummary, BuildDate) + grace.Exit(1) + } + + if *fileFlag != "" { + config.SetFile(*fileFlag) + } + + if *testFlag { + err := config.Read() + if err != nil { + fmt.Println("unable to read configuration file: ", *fileFlag, err) + grace.Exit(1) + } + grace.Exit(0) + } + + if *signalFlag != "" { + fmt.Println("signaling master process") + grace.Exit(1) + } +} + +func readConfig() { + err := config.Read() + if err != nil { + fmt.Println("unable to read configuration file:", *fileFlag, err) + grace.Exit(1) + } + + // get core config + + conf = &coreConfig{} + if err := mapstructure.Decode(config.Get("core"), conf); err != nil { + fmt.Fprintln(os.Stderr, "unable to parse core config:", err) + grace.Exit(1) + } +} + +// tweakCPU parses string cpu and sets GOMAXPROCS +// according to its value. It accepts either +// a number (e.g. 3) or a percent (e.g. 50%). 
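+// For example, with max_cpus = "50%" on a machine with 8 logical CPUs,
+// GOMAXPROCS is set to 4; values above the number of available CPUs are capped.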
+func tweakCPU() error { + cpu := conf.MaxCPUs + var numCPU int + + availCPU := runtime.NumCPU() + + if strings.HasSuffix(cpu, "%") { + // Percent + var percent float32 + pctStr := cpu[:len(cpu)-1] + pctInt, err := strconv.Atoi(pctStr) + if err != nil || pctInt < 1 || pctInt > 100 { + return errors.New("invalid CPU value: percentage must be between 1-100") + } + percent = float32(pctInt) / 100 + numCPU = int(float32(availCPU) * percent) + } else { + // Number + num, err := strconv.Atoi(cpu) + if err != nil || num < 1 { + return errors.New("invalid CPU value: provide a number or percent greater than 0") + } + numCPU = num + } + + if numCPU > availCPU { + numCPU = availCPU + } + + logger.Printf(ctx, "running on %d cpus", numCPU) + runtime.GOMAXPROCS(numCPU) + return nil +} + +func writePIDFile() { + err := grace.WritePIDFile(*pidFlag) + if err != nil { + fmt.Fprintln(os.Stderr, err) + grace.Exit(1) + } +} + +type coreConfig struct { + MaxCPUs string `mapstructure:"max_cpus"` + LogFile string `mapstructure:"log_file"` + LogMode string `mapstructure:"log_mode"` +} + +func getLogOutput(val string) io.Writer { + return os.Stderr +} + +func printLoggedPkgs() { + pkgs := log.ListEnabledPackages() + for k := range pkgs { + logger.Printf(ctx, "logging enabled for package: %s", pkgs[k]) + } +} diff --git a/cmd/revad/revad.service b/cmd/revad/revad.service new file mode 100644 index 0000000000..df0dbdaedb --- /dev/null +++ b/cmd/revad/revad.service @@ -0,0 +1,16 @@ +[Unit] +Description=revad +After=syslog.target + +[Service] +Type=simple +User=root +Group=root +ExecStart=/usr/local/bin/revad +StandardOutput=syslog +StandardError=syslog +LimitNOFILE=49152 + +[Install] +WantedBy=multi-user.target + diff --git a/cmd/revad/revad.toml b/cmd/revad/revad.toml new file mode 100644 index 0000000000..e915782537 --- /dev/null +++ b/cmd/revad/revad.toml @@ -0,0 +1,74 @@ +[core] +log_file = "stderr" +log_mode = "dev" +max_cpus = "32" + +[grpc] +network = "tcp" +address = "0.0.0.0:9999" +access_log = "stderr" +tls_enabled = true +tls_cert = "/etc/gridsecurity/host.cert" +tls_key = "/etc/gridsecurity/host.key" +enabled_services = ["storage_provider_svc", "auth_svc", "storage_broker_svc", "app_registry_svc", "app_provider_svc"] + +[http] +enabled_services = ["prometheus_svc", "webui_svc", "ocdav_svc", "iframe_ui_svc"] +network = "tcp" +address = "0.0.0.0:9998" + +[grpc.storage_provider_svc] +driver = "local" +mount_path = "/" +mount_id = "123e4567-e89b-12d3-a456-426655440000" + +[grpc.auth_svc.auth_manager] +driver = "demo" + +[grpc.auth_svc.token_manager] +driver = "jwt" + +[grpc.auth_svc.user_manager] +driver = "demo" + +[grpc.storage_provider_svc.eos] +mgm = "root://nowhere.org" +root_uid = 0 +root_gid = 0 + +[grpc.storage_provider_svc.local] +root = "/var/tmp/owncloud/data" + +[grpc.storage_broker_svc] +driver = "static" + +[grpc.storage_broker_svc.static.rules] +"/" = "localhost:9999" +"123e4567-e89b-12d3-a456-426655440000" = "localhost:9999" + +[grpc.app_registry_svc] +driver = "static" + +[grpc.app_registry_svc.static.rules] +".txt" = "localhost:9999" +"txt/plain" = "localhost:9999" + +[grpc.app_provider_svc] +driver = "demo" + +[grpc.app_provider_svc.demo] +iframe_ui_provider = "http://localhost:9998/iframeuisvc" + +[http.prometheus_svc] +prefix = "metrics" + +[http.webui_svc] +prefix = "ui" + +[http.ocdav_svc] +prefix = "owncloud" +chunk_folder = "/var/tmp/owncloud/chunks" +storageprovidersvc = "localhost:9999" + +[http.iframe_ui_svc] +prefix = "iframeuisvc" diff --git a/pkg/app/app.go b/pkg/app/app.go new 
file mode 100644
index 0000000000..39667df29c
--- /dev/null
+++ b/pkg/app/app.go
@@ -0,0 +1,21 @@
+package app
+
+import "context"
+
+// Registry is the interface that application registries implement
+// for discovering application providers.
+type Registry interface {
+	FindProvider(ctx context.Context, ext, mimetype string) (*ProviderInfo, error)
+}
+
+// ProviderInfo contains the information
+// about an application provider.
+type ProviderInfo struct {
+	Location string
+}
+
+// Provider is the interface that application providers implement
+// for providing the iframe location to an iframe UI provider.
+type Provider interface {
+	GetIFrame(ctx context.Context, fn, mimetype, token string) (string, error)
+}
diff --git a/pkg/app/provider/demo/demo.go b/pkg/app/provider/demo/demo.go
new file mode 100644
index 0000000000..615230822e
--- /dev/null
+++ b/pkg/app/provider/demo/demo.go
@@ -0,0 +1,49 @@
+package demo
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/cernbox/reva/pkg/app"
+	"github.com/cernbox/reva/pkg/log"
+
+	"github.com/mitchellh/mapstructure"
+)
+
+var logger = log.New("demo")
+
+type provider struct {
+	iframeUIProvider string
+}
+
+func (p *provider) GetIFrame(ctx context.Context, filename, mimetype, token string) (string, error) {
+	msg := fmt.Sprintf("", p.iframeUIProvider, filename, token)
+	return msg, nil
+}
+
+type config struct {
+	IFrameUIProvider string `mapstructure:"iframe_ui_provider"`
+}
+
+func parseConfig(m map[string]interface{}) (*config, error) {
+	c := &config{}
+	if err := mapstructure.Decode(m, c); err != nil {
+		return nil, err
+	}
+	return c, nil
+}
+
+// New returns an app.Provider implementation that points to the
+// configured iframe UI provider.
+func New(m map[string]interface{}) (app.Provider, error) {
+	c, err := parseConfig(m)
+	if err != nil {
+		return nil, err
+	}
+	return &provider{iframeUIProvider: c.IFrameUIProvider}, nil
+}
+
+type notFoundError string
+
+func (e notFoundError) Error() string { return string(e) }
+func (e notFoundError) IsNotFound()   {}
diff --git a/pkg/app/registry/static/static.go b/pkg/app/registry/static/static.go
new file mode 100644
index 0000000000..5b2acdfb64
--- /dev/null
+++ b/pkg/app/registry/static/static.go
@@ -0,0 +1,72 @@
+package static
+
+import (
+	"context"
+	"strings"
+
+	"github.com/cernbox/reva/pkg/app"
+	"github.com/cernbox/reva/pkg/log"
+
+	"github.com/mitchellh/mapstructure"
+)
+
+var logger = log.New("static")
+
+type registry struct {
+	rules map[string]string
+}
+
+func (b *registry) FindProvider(ctx context.Context, ext, mimetype string) (*app.ProviderInfo, error) {
+	// find longest match
+	var match string
+	for prefix := range b.rules {
+		if strings.HasPrefix(ext, prefix) && len(prefix) > len(match) {
+			match = prefix
+		}
+	}
+
+	if match == "" {
+		// try with mimetype
+		for prefix := range b.rules {
+			if strings.HasPrefix(mimetype, prefix) && len(prefix) > len(match) {
+				match = prefix
+			}
+		}
+	}
+
+	if match == "" {
+		return nil, notFoundError("application provider not found for extension " + ext + " and mimetype " + mimetype)
+	}
+
+	p := &app.ProviderInfo{
+		Location: b.rules[match],
+	}
+	return p, nil
+}
+
+type config struct {
+	Rules map[string]string
+}
+
+func parseConfig(m map[string]interface{}) (*config, error) {
+	c := &config{}
+	if err := mapstructure.Decode(m, c); err != nil {
+		return nil, err
+	}
+	return c, nil
+}
+
+// New returns an app.Registry implementation that resolves application
+// providers from a static set of rules.
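+// Rules map a filename-extension or mimetype prefix to the address of an
+// application provider (for example, the sample revad.toml maps ".txt" to
+// "localhost:9999"); FindProvider picks the longest matching prefix, trying
+// the extension first and falling back to the mimetype.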
+func New(m map[string]interface{}) (app.Registry, error) { + c, err := parseConfig(m) + if err != nil { + return nil, err + } + return ®istry{rules: c.Rules}, nil +} + +type notFoundError string + +func (e notFoundError) Error() string { return string(e) } +func (e notFoundError) IsNotFound() {} diff --git a/pkg/auth/auth.go b/pkg/auth/auth.go new file mode 100644 index 0000000000..4ffdcbf436 --- /dev/null +++ b/pkg/auth/auth.go @@ -0,0 +1,10 @@ +package auth + +import ( + "context" +) + +// Manager is the interface to implement to authenticate users +type Manager interface { + Authenticate(ctx context.Context, clientID, clientSecret string) error +} diff --git a/pkg/auth/manager/demo/demo.go b/pkg/auth/manager/demo/demo.go new file mode 100644 index 0000000000..c49b1befc1 --- /dev/null +++ b/pkg/auth/manager/demo/demo.go @@ -0,0 +1,38 @@ +package demo + +import ( + "context" + + "github.com/cernbox/reva/pkg/auth" +) + +type manager struct { + credentials map[string]string +} + +func New(m map[string]interface{}) (auth.Manager, error) { + // m not used + creds := getCredentials() + return &manager{credentials: creds}, nil +} + +func (m *manager) Authenticate(ctx context.Context, clientID, clientSecret string) error { + if secret, ok := m.credentials[clientID]; ok { + if secret == clientSecret { + return nil + } + } + return invalidCredentialsError(clientID) +} + +func getCredentials() map[string]string { + return map[string]string{ + "einstein": "relativity", + "marie": "radioactivity", + "richard": "superfluidity", + } +} + +type invalidCredentialsError string + +func (e invalidCredentialsError) Error() string { return string(e) } diff --git a/pkg/auth/manager/impersonator/impersonator.go b/pkg/auth/manager/impersonator/impersonator.go new file mode 100644 index 0000000000..9e2811f32e --- /dev/null +++ b/pkg/auth/manager/impersonator/impersonator.go @@ -0,0 +1,18 @@ +package impersonator + +import ( + "context" + + "github.com/cernbox/reva/pkg/auth" +) + +type mgr struct{} + +// New returns an auth manager implementation that allows to authenticate with any credentials. +func New() auth.Manager { + return &mgr{} +} + +func (m *mgr) Authenticate(ctx context.Context, clientID, clientSecret string) error { + return nil +} diff --git a/pkg/auth/manager/impersonator/impersonator_test.go b/pkg/auth/manager/impersonator/impersonator_test.go new file mode 100644 index 0000000000..68766e548e --- /dev/null +++ b/pkg/auth/manager/impersonator/impersonator_test.go @@ -0,0 +1,12 @@ +package impersonator + +import ( + "context" + "testing" +) + +func TestImpersonator(t *testing.T) { + ctx := context.Background() + i := New() + i.Authenticate(ctx, "admin", "pwd") +} diff --git a/pkg/auth/manager/ldap/ldap.go b/pkg/auth/manager/ldap/ldap.go new file mode 100644 index 0000000000..7d5f4b13c7 --- /dev/null +++ b/pkg/auth/manager/ldap/ldap.go @@ -0,0 +1,83 @@ +package ldap + +import ( + "context" + "crypto/tls" + "fmt" + + "github.com/cernbox/reva/pkg/auth" + "gopkg.in/ldap.v2" +) + +type mgr struct { + hostname string + port int + baseDN string + filter string + bindUsername string + bindPassword string +} + +// New returns an auth manager implementation that connects to a LDAP server to validate the user. 
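+// Authenticate first binds with the read-only bind credentials, searches baseDN
+// for the clientID using filter (which is expected to contain a %s placeholder,
+// e.g. "(uid=%s)"), and then binds as the matched DN with clientSecret.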
+func New(hostname string, port int, baseDN, filter, bindUsername, bindPassword string) auth.Manager { + return &mgr{ + hostname: hostname, + port: port, + baseDN: baseDN, + filter: filter, + bindUsername: bindUsername, + bindPassword: bindPassword, + } +} + +func (am *mgr) Authenticate(ctx context.Context, clientID, clientSecret string) error { + l, err := ldap.DialTLS("tcp", fmt.Sprintf("%s:%d", am.hostname, am.port), &tls.Config{InsecureSkipVerify: true}) + if err != nil { + return err + } + defer l.Close() + + // First bind with a read only user + err = l.Bind(am.bindUsername, am.bindPassword) + if err != nil { + return err + } + + // Search for the given clientID + searchRequest := ldap.NewSearchRequest( + am.baseDN, + ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false, + fmt.Sprintf(am.filter, clientID), + []string{"dn"}, + nil, + ) + + sr, err := l.Search(searchRequest) + if err != nil { + return err + } + + if len(sr.Entries) != 1 { + return userNotFoundError(clientID) + } + + for _, e := range sr.Entries { + e.Print() + } + + userdn := sr.Entries[0].DN + + // Bind as the user to verify their password + err = l.Bind(userdn, clientSecret) + if err != nil { + return err + } + + return nil + +} + +type userNotFoundError string + +func (e userNotFoundError) Error() string { return string(e) } +func (e userNotFoundError) IsUserNotFound() {} diff --git a/pkg/config/config.go b/pkg/config/config.go new file mode 100644 index 0000000000..7c6bf69786 --- /dev/null +++ b/pkg/config/config.go @@ -0,0 +1,38 @@ +package config + +import ( + "encoding/json" + "io/ioutil" +) + +type Config struct { + Network string `json:"network"` + Address string `json:"address"` + Services []string `json:"services"` + + AuthSVC struct { + Driver string `json:"driver"` + Options interface{} `json:"options"` + } `json:"auth_svc"` + + StorageProviderSVC struct { + TemporaryFolder string `json:"temporary_folder"` + Driver string `json:"driver"` + Options interface{} `json:"options"` + } `json:"storage_provider_svc"` +} + +func LoadFromFile(fn string) (*Config, error) { + data, err := ioutil.ReadFile(fn) + if err != nil { + return nil, err + } + + cfg := &Config{} + + if err := json.Unmarshal(data, cfg); err != nil { + return nil, err + } + + return cfg, nil +} diff --git a/pkg/eosclient/eosclient.go b/pkg/eosclient/eosclient.go new file mode 100644 index 0000000000..71bccbd969 --- /dev/null +++ b/pkg/eosclient/eosclient.go @@ -0,0 +1,1028 @@ +package eosclient + +import ( + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + gouser "os/user" + "path" + "strconv" + "strings" + "syscall" + + "github.com/cernbox/reva/pkg/log" + + "github.com/gofrs/uuid" + "github.com/pkg/errors" +) + +const ( + rootUser = "root" + rootGroup = "root" + versionPrefix = ".sys.v#." +) + +/* +type ACLMode string + +// ACLType represents the type of of the acl (user, e-group, unix-group, ...) +type ACLType string + +const ( + // ACLModeInvalid specifies an invalid acl mode. + ACLModeInvalid = ACLMode("invalid") + // ACLModeRead specifies that only read and list operations will be allowed on the directory. + ACLModeRead = ACLMode("rx") + // ACLModeReadWrite specifies that the directory will be writable. + ACLModeReadWrite = ACLMode("rwx!d") + + ACLTypeUnknown = ACLType(iota) + // ACLTypeUser specifies that the acl will be set for an individual user. + ACLTypeUser + // ACLTypeGroup specifies that the acl will be set for a CERN e-group. 
+ ACLTypeGroup + // ACLTypeUnixGroup specifies that the acl will be set for a unix group. + ACLTypeUnixGroup + + rootUser = "root" + rootGroup = "root" + versionPrefix = ".sys.v#." +) +*/ + +var ( + errInvalidACL = errors.New("invalid acl") +) + +// ACL represents an EOS ACL. +type ACL struct { + Target string + Mode string + Type string +} + +// Options to configure the Client. +type Options struct { + // Location of the eos binary. + // Default is /usr/bin/eos. + EosBinary string + + // Location of the xrdcopy binary. + // Default is /usr/bin/xrdcopy. + XrdcopyBinary string + + // URL of the EOS MGM. + // Default is root://eos-test.org + URL string + + // Location on the local fs where to store reads. + // Defaults to os.TempDir() + CacheDirectory string + + // Writter to write logs to + LogOutput io.Writer + + // Key to get the trace Id from. + TraceKey interface{} +} + +func (opt *Options) init() { + if opt.EosBinary == "" { + opt.EosBinary = "/usr/bin/eos" + } + + if opt.XrdcopyBinary == "" { + opt.XrdcopyBinary = "/usr/bin/xrdcopy" + } + + if opt.URL == "" { + opt.URL = "root://eos-example.org" + } + + if opt.CacheDirectory == "" { + opt.CacheDirectory = os.TempDir() + } + + if opt.LogOutput == nil { + opt.LogOutput = ioutil.Discard + } + + if opt.TraceKey == nil { + opt.TraceKey = "traceid" + } +} + +// Client performs actions against a EOS management node (MGM). +// It requires the eos-client and xrootd-client packages installed to work. +type Client struct { + opt *Options + logger *log.Logger +} + +// New creates a new client with the given options. +func New(opt *Options) *Client { + opt.init() + c := new(Client) + c.opt = opt + c.logger = log.New("eosclient") + return c +} + +func getUnixUser(username string) (*gouser.User, error) { + return gouser.Lookup(username) +} + +// exec executes the command and returns the stdout, stderr and return code +func (c *Client) execute(ctx context.Context, cmd *exec.Cmd) (string, string, error) { + outBuf := &bytes.Buffer{} + errBuf := &bytes.Buffer{} + cmd.Stdout = outBuf + cmd.Stderr = errBuf + cmd.Env = []string{ + "EOS_MGM_URL=" + c.opt.URL, + } + + err := cmd.Run() + + var exitStatus int + if exiterr, ok := err.(*exec.ExitError); ok { + // The program has exited with an exit code != 0 + // This works on both Unix and Windows. Although package + // syscall is generally platform dependent, WaitStatus is + // defined for both Unix and Windows and in both cases has + // an ExitStatus() method with the same signature. + if status, ok := exiterr.Sys().(syscall.WaitStatus); ok { + + exitStatus = status.ExitStatus() + switch exitStatus { + case 0: + err = nil + case 2: + err = notFoundError(errBuf.String()) + case 22: + // eos reports back error code 22 when the user is not allowed to enter the instance + err = notFoundError(errBuf.String()) + } + } + } + + msg := fmt.Sprintf("cmd=%v env=%v exit=%d", cmd.Args, cmd.Env, exitStatus) + c.logger.Println(ctx, msg) + + if err != nil { + err = errors.Wrap(err, "eosclient: error while executing command") + } + + return outBuf.String(), errBuf.String(), err +} + +// AddACL adds an new acl to EOS with the given aclType. 
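// Editorial illustration (not part of the changeset): constructing the Client defined in
// this file and doing a metadata lookup. The MGM URL, username and path are hypothetical.
package main

import (
	"context"
	"fmt"

	"github.com/cernbox/reva/pkg/eosclient"
)

func main() {
	client := eosclient.New(&eosclient.Options{
		URL:            "root://eos-example.org",
		CacheDirectory: "/var/tmp",
	})

	// Under the hood this shells out to `eos -r <uid> <gid> file info <path> -m` and
	// parses the key=value output; exit codes 2 and 22 surface as a not-found error.
	md, err := client.GetFileInfoByPath(context.Background(), "einstein", "/eos/user/e/einstein")
	if err != nil {
		fmt.Println("stat failed:", err)
		return
	}
	fmt.Printf("inode=%d size=%d dir=%v\n", md.Inode, md.Size, md.IsDir)
}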
+func (c *Client) AddACL(ctx context.Context, username, path string, a *ACL) error { + aclManager, err := c.getACLForPath(ctx, username, path) + if err != nil { + return err + } + + aclManager.deleteEntry(ctx, a.Type, a.Target) + newEntry, err := newACLEntry(ctx, strings.Join([]string{a.Type, a.Target, a.Mode}, ":")) + if err != nil { + return err + } + aclManager.aclEntries = append(aclManager.aclEntries, newEntry) + sysACL := aclManager.serialize() + + // setting of the sys.acl is only possible from root user + unixUser, err := getUnixUser(rootUser) + if err != nil { + return err + } + + cmd := exec.CommandContext(ctx, "/usr/bin/eos", "-r", unixUser.Uid, unixUser.Gid, "attr", "-r", "set", fmt.Sprintf("sys.acl=%s", sysACL), path) + _, _, err = c.execute(ctx, cmd) + return err + +} + +// deleteEntry will be called with username but acl is stored with uid, we need to convert back uid +// to username. +func (m *aclManager) deleteEntry(ctx context.Context, aclType, target string) { + for i, e := range m.aclEntries { + username, err := getUsername(e.recipient) + if err != nil { + continue + } + if username == target && e.aclType == aclType { + m.aclEntries = append(m.aclEntries[:i], m.aclEntries[i+1:]...) + return + } + } +} + +// RemoveACL removes the acl from EOS. +func (c *Client) RemoveACL(ctx context.Context, username, path string, aclType string, recipient string) error { + aclManager, err := c.getACLForPath(ctx, username, path) + if err != nil { + return err + } + + aclManager.deleteEntry(ctx, aclType, recipient) + sysACL := aclManager.serialize() + + // setting of the sys.acl is only possible from root user + unixUser, err := getUnixUser(rootUser) + if err != nil { + return err + } + + cmd := exec.CommandContext(ctx, "/usr/bin/eos", "-r", unixUser.Uid, unixUser.Gid, "attr", "-r", "set", fmt.Sprintf("sys.acl=%s", sysACL), path) + _, _, err = c.execute(ctx, cmd) + return err + +} + +// UpdateACL updates the EOS acl. +func (c *Client) UpdateACL(ctx context.Context, username, path string, a *ACL) error { + return c.AddACL(ctx, username, path, a) +} + +func (c *Client) GetACL(ctx context.Context, username, path, aclType, target string) (*ACL, error) { + acls, err := c.ListACLs(ctx, username, path) + if err != nil { + return nil, err + } + for _, a := range acls { + if a.Type == aclType && a.Target == target { + return a, nil + } + } + return nil, notFoundError(fmt.Sprintf("%s:%s", aclType, target)) + +} + +func getUsername(uid string) (string, error) { + user, err := gouser.LookupId(uid) + if err != nil { + return "", err + } + return user.Username, nil +} + +// ListACLS returns the list of ACLs present under the given path. +// EOS returns uids/gid for Citrine version and usernames for older versions. +// For Citire we need to convert back the uid back to username. 
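// Editorial illustration (not part of the changeset): granting and revoking a user ACL
// with the methods above. The usernames, path and the eosexamples package name are
// hypothetical; mode "rx" and type "u" follow the conventions used by the EOS storage
// driver later in this change.
package eosexamples

import (
	"context"

	"github.com/cernbox/reva/pkg/eosclient"
)

func shareReadOnly(ctx context.Context, c *eosclient.Client) error {
	acl := &eosclient.ACL{
		Target: "marie", // recipient username
		Mode:   "rx",    // read and browse only
		Type:   "u",     // user ACL; "g" is used for group ACLs
	}
	// AddACL drops any existing entry for the same target and re-serializes the whole
	// sys.acl attribute as root.
	if err := c.AddACL(ctx, "einstein", "/eos/user/e/einstein/shared", acl); err != nil {
		return err
	}
	// Revoking takes the ACL type and the recipient instead of a full ACL value.
	return c.RemoveACL(ctx, "einstein", "/eos/user/e/einstein/shared", "u", "marie")
}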
+func (c *Client) ListACLs(ctx context.Context, username, path string) ([]*ACL, error) { + finfo, err := c.GetFileInfoByPath(ctx, username, path) + if err != nil { + return nil, err + } + + aclManager := c.newACLManager(ctx, finfo.SysACL) + acls := []*ACL{} + for _, a := range aclManager.getEntries() { + username, err := getUsername(a.recipient) + if err != nil { + c.logger.Error(ctx, err) + continue + } + acl := &ACL{ + Target: username, + Mode: a.mode, + Type: a.aclType, + } + acls = append(acls, acl) + } + return acls, nil +} + +func (c *Client) getACLForPath(ctx context.Context, username, path string) (*aclManager, error) { + finfo, err := c.GetFileInfoByPath(ctx, username, path) + if err != nil { + return nil, err + } + + aclManager := c.newACLManager(ctx, finfo.SysACL) + return aclManager, nil +} + +// GetFileInfoByInode returns the FileInfo by the given inode +func (c *Client) GetFileInfoByInode(ctx context.Context, username string, inode uint64) (*FileInfo, error) { + unixUser, err := getUnixUser(username) + if err != nil { + return nil, err + } + cmd := exec.CommandContext(ctx, "/usr/bin/eos", "-r", unixUser.Uid, unixUser.Gid, "file", "info", fmt.Sprintf("inode:%d", inode), "-m") + stdout, _, err := c.execute(ctx, cmd) + if err != nil { + return nil, err + } + return c.parseFileInfo(stdout) +} + +// GetFileInfoByPath returns the FilInfo at the given path +func (c *Client) GetFileInfoByPath(ctx context.Context, username, path string) (*FileInfo, error) { + unixUser, err := getUnixUser(username) + if err != nil { + return nil, err + } + cmd := exec.CommandContext(ctx, "/usr/bin/eos", "-r", unixUser.Uid, unixUser.Gid, "file", "info", path, "-m") + stdout, _, err := c.execute(ctx, cmd) + if err != nil { + return nil, err + } + return c.parseFileInfo(stdout) +} + +// GetQuota gets the quota of a user on the quota node defined by path +func (c *Client) GetQuota(ctx context.Context, username, path string) (int, int, error) { + // setting of the sys.acl is only possible from root user + unixUser, err := getUnixUser(rootUser) + if err != nil { + return 0, 0, err + } + cmd := exec.CommandContext(ctx, "/usr/bin/eos", "-r", unixUser.Uid, unixUser.Gid, "quota", "ls", "-u", username, "-m") + stdout, _, err := c.execute(ctx, cmd) + if err != nil { + return 0, 0, err + } + return c.parseQuota(path, stdout) +} + +// CreateDir creates a directory at the given path +func (c *Client) CreateDir(ctx context.Context, username, path string) error { + unixUser, err := getUnixUser(username) + if err != nil { + return err + } + + cmd := exec.CommandContext(ctx, "/usr/bin/eos", "-r", unixUser.Uid, unixUser.Gid, "mkdir", "-p", path) + _, _, err = c.execute(ctx, cmd) + return err +} + +// Remove removes the resource at the given path +func (c *Client) Remove(ctx context.Context, username, path string) error { + unixUser, err := getUnixUser(username) + if err != nil { + return err + } + cmd := exec.CommandContext(ctx, "/usr/bin/eos", "-r", unixUser.Uid, unixUser.Gid, "rm", "-r", path) + _, _, err = c.execute(ctx, cmd) + return err +} + +// Rename renames the resource referenced by oldPath to newPath +func (c *Client) Rename(ctx context.Context, username, oldPath, newPath string) error { + unixUser, err := getUnixUser(username) + if err != nil { + return err + } + cmd := exec.CommandContext(ctx, "/usr/bin/eos", "-r", unixUser.Uid, unixUser.Gid, "file", "rename", oldPath, newPath) + _, _, err = c.execute(ctx, cmd) + return err +} + +// List the contents of the directory given by path +func (c *Client) List(ctx 
context.Context, username, path string) ([]*FileInfo, error) { + unixUser, err := getUnixUser(username) + if err != nil { + return nil, err + } + cmd := exec.CommandContext(ctx, "/usr/bin/eos", "-r", unixUser.Uid, unixUser.Gid, "find", "--fileinfo", "--maxdepth", "1", path) + stdout, _, err := c.execute(ctx, cmd) + if err != nil { + return nil, errors.Wrapf(err, "eosclient: error listing fn=%s", path) + } + return c.parseFind(path, stdout) +} + +// Read reads a file from the mgm +func (c *Client) Read(ctx context.Context, username, path string) (io.ReadCloser, error) { + unixUser, err := getUnixUser(username) + if err != nil { + return nil, err + } + uuid := uuid.Must(uuid.NewV4()) + rand := "eosread-" + uuid.String() + localTarget := fmt.Sprintf("%s/%s", c.opt.CacheDirectory, rand) + xrdPath := fmt.Sprintf("%s//%s", c.opt.URL, path) + cmd := exec.CommandContext(ctx, "/usr/bin/xrdcopy", "--nopbar", "--silent", "-f", xrdPath, localTarget, fmt.Sprintf("-OSeos.ruid=%s&eos.rgid=%s", unixUser.Uid, unixUser.Gid)) + _, _, err = c.execute(ctx, cmd) + if err != nil { + return nil, err + } + return os.Open(localTarget) +} + +// Write writes a file to the mgm +func (c *Client) Write(ctx context.Context, username, path string, stream io.ReadCloser) error { + unixUser, err := getUnixUser(username) + if err != nil { + return err + } + fd, err := ioutil.TempFile(c.opt.CacheDirectory, "eoswrite-") + if err != nil { + return err + } + defer fd.Close() + defer os.RemoveAll(fd.Name()) + + // copy stream to local temp file + _, err = io.Copy(fd, stream) + if err != nil { + return err + } + xrdPath := fmt.Sprintf("%s//%s", c.opt.URL, path) + cmd := exec.CommandContext(ctx, "/usr/bin/xrdcopy", "--nopbar", "--silent", "-f", fd.Name(), xrdPath, fmt.Sprintf("-ODeos.ruid=%s&eos.rgid=%s", unixUser.Uid, unixUser.Gid)) + _, _, err = c.execute(ctx, cmd) + return err +} + +// ListDeletedEntries returns a list of the deleted entries. +func (c *Client) ListDeletedEntries(ctx context.Context, username string) ([]*DeletedEntry, error) { + unixUser, err := getUnixUser(username) + if err != nil { + return nil, err + } + // TODO(labkode): add protection if slave is configured and alive to count how many files are in the trashbin before + // triggering the recycle ls call that could break the instance because of unavailable memory. + cmd := exec.CommandContext(ctx, "/usr/bin/eos", "-r", unixUser.Uid, unixUser.Gid, "recycle", "ls", "-m") + stdout, _, err := c.execute(ctx, cmd) + if err != nil { + return nil, err + } + return parseRecycleList(stdout) +} + +// RestoreDeletedEntry restores a deleted entry. +func (c *Client) RestoreDeletedEntry(ctx context.Context, username, key string) error { + unixUser, err := getUnixUser(username) + if err != nil { + return err + } + cmd := exec.CommandContext(ctx, "/usr/bin/eos", "-r", unixUser.Uid, unixUser.Gid, "recycle", "restore", key) + _, _, err = c.execute(ctx, cmd) + return err +} + +// PurgeDeletedEntries purges all entries from the recycle bin. +func (c *Client) PurgeDeletedEntries(ctx context.Context, username string) error { + unixUser, err := getUnixUser(username) + if err != nil { + return err + } + cmd := exec.CommandContext(ctx, "/usr/bin/eos", "-r", unixUser.Uid, unixUser.Gid, "recycle", "purge") + _, _, err = c.execute(ctx, cmd) + return err +} + +func getVersionFolder(p string) string { + basename := path.Base(p) + versionFolder := path.Join(path.Dir(p), versionPrefix+basename) + return versionFolder +} + +// ListVersions list all the versions for a given file. 
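// Editorial illustration (not part of the changeset): an upload/download round trip with
// the Write and Read helpers above, which stage data through xrdcopy and the configured
// cache directory. Username, path and the eosexamples package name are hypothetical.
package eosexamples

import (
	"context"
	"fmt"
	"io/ioutil"
	"strings"

	"github.com/cernbox/reva/pkg/eosclient"
)

func roundTrip(ctx context.Context, c *eosclient.Client) error {
	// Write copies the stream to a local temp file and pushes it with xrdcopy.
	body := ioutil.NopCloser(strings.NewReader("hello"))
	if err := c.Write(ctx, "einstein", "/eos/user/e/einstein/hello.txt", body); err != nil {
		return err
	}
	// Read pulls the file into the cache directory and hands back the local file handle.
	r, err := c.Read(ctx, "einstein", "/eos/user/e/einstein/hello.txt")
	if err != nil {
		return err
	}
	defer r.Close()
	data, err := ioutil.ReadAll(r)
	if err != nil {
		return err
	}
	fmt.Println(string(data))
	return nil
}

// Note that file versions live next to the file in a ".sys.v#.<basename>" folder (see
// getVersionFolder above); that folder is what ListVersions below enumerates.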
+func (c *Client) ListVersions(ctx context.Context, username, p string) ([]*FileInfo, error) { + basename := path.Base(p) + versionFolder := path.Join(path.Dir(p), versionPrefix+basename) + finfos, err := c.List(ctx, username, versionFolder) + if err != nil { + // we send back an empty list + return []*FileInfo{}, nil + } + return finfos, nil +} + +// RollbackToVersion rollbacks a file to a previous version. +func (c *Client) RollbackToVersion(ctx context.Context, username, path, version string) error { + unixUser, err := getUnixUser(username) + if err != nil { + return err + } + cmd := exec.CommandContext(ctx, "/usr/bin/eos", "-r", unixUser.Uid, unixUser.Gid, "file", "versions", path, version) + _, _, err = c.execute(ctx, cmd) + return err +} + +// ReadVersion reads the version for the given file. +func (c *Client) ReadVersion(ctx context.Context, username, p, version string) (io.ReadCloser, error) { + basename := path.Base(p) + versionFile := path.Join(path.Dir(p), versionPrefix+basename, version) + return c.Read(ctx, username, versionFile) +} + +func parseRecycleList(raw string) ([]*DeletedEntry, error) { + entries := []*DeletedEntry{} + rawLines := strings.Split(raw, "\n") + for _, rl := range rawLines { + if rl == "" { + continue + } + entry, err := parseRecycleEntry(rl) + if err != nil { + return nil, err + } + entries = append(entries, entry) + } + return entries, nil +} + +// parse entries like these: +// recycle=ls recycle-bin=/eos/backup/proc/recycle/ uid=gonzalhu gid=it size=0 deletion-time=1510823151 type=recursive-dir keylength.restore-path=45 restore-path=/eos/scratch/user/g/gonzalhu/.sys.v#.app.ico/ restore-key=0000000000a35100 +// recycle=ls recycle-bin=/eos/backup/proc/recycle/ uid=gonzalhu gid=it size=381038 deletion-time=1510823151 type=file keylength.restore-path=36 restore-path=/eos/scratch/user/g/gonzalhu/app.ico restore-key=000000002544fdb3 +func parseRecycleEntry(raw string) (*DeletedEntry, error) { + partsBySpace := strings.Split(raw, " ") + restoreKeyPair, partsBySpace := partsBySpace[len(partsBySpace)-1], partsBySpace[:len(partsBySpace)-1] + restorePathPair := strings.Join(partsBySpace[9:], " ") + + partsBySpace = partsBySpace[:9] + partsBySpace = append(partsBySpace, restorePathPair) + partsBySpace = append(partsBySpace, restoreKeyPair) + + kv := getMap(partsBySpace) + size, err := strconv.ParseUint(kv["size"], 10, 64) + if err != nil { + return nil, err + } + isDir := false + if kv["type"] == "recursive-dir" { + isDir = true + } + deletionMTime, err := strconv.ParseUint(strings.Split(kv["deletion-time"], ".")[0], 10, 64) + if err != nil { + return nil, err + } + entry := &DeletedEntry{ + RestorePath: kv["restore-path"], + RestoreKey: kv["restore-key"], + Size: size, + DeletionMTime: deletionMTime, + IsDir: isDir, + } + return entry, nil +} + +func getMap(partsBySpace []string) map[string]string { + kv := map[string]string{} + for _, pair := range partsBySpace { + parts := strings.Split(pair, "=") + if len(parts) > 1 { + kv[parts[0]] = parts[1] + } + + } + return kv +} + +func (c *Client) parseFind(dirPath, raw string) ([]*FileInfo, error) { + finfos := []*FileInfo{} + rawLines := strings.Split(raw, "\n") + for _, rl := range rawLines { + if rl == "" { + continue + } + fi, err := c.parseFileInfo(rl) + if err != nil { + return nil, err + } + // dirs in eos end with a slash, like /eos/user/g/gonzalhu/ + // we skip the current directory as eos find will return the directory we + // ask to find + if fi.File == path.Clean(dirPath) { + continue + } + finfos = 
append(finfos, fi) + } + return finfos, nil +} + +func (c Client) parseQuotaLine(line string) map[string]string { + partsBySpace := strings.Split(line, " ") + m := getMap(partsBySpace) + return m +} +func (c *Client) parseQuota(path, raw string) (int, int, error) { + rawLines := strings.Split(raw, "\n") + for _, rl := range rawLines { + if rl == "" { + continue + } + + m := c.parseQuotaLine(rl) + // map[maxbytes:2000000000000 maxlogicalbytes:1000000000000 percentageusedbytes:0.49 quota:node uid:gonzalhu space:/eos/scratch/user/ usedbytes:9829986500 usedlogicalbytes:4914993250 statusfiles:ok usedfiles:334 maxfiles:1000000 statusbytes:ok] + + space := m["space"] + if strings.HasPrefix(path, space) { + maxBytesString, _ := m["maxlogicalbytes"] + usedBytesString, _ := m["usedlogicalbytes"] + maxBytes, _ := strconv.ParseInt(maxBytesString, 10, 64) + usedBytes, _ := strconv.ParseInt(usedBytesString, 10, 64) + return int(maxBytes), int(usedBytes), nil + } + } + return 0, 0, nil +} + +func (c *Client) parseFileInfo(raw string) (*FileInfo, error) { + + line := raw[15:] + index := strings.Index(line, " file=/") + lengthString := line[0:index] + length, err := strconv.ParseUint(lengthString, 10, 64) + if err != nil { + return nil, err + } + + line = line[index+6:] // skip ' file=' + name := line[0:length] + + kv := make(map[string]string) + // strip trailing slash + kv["file"] = strings.TrimSuffix(name, "/") + + line = line[length+1:] + partsBySpace := strings.Split(line, " ") // we have [size=45 container=3 ...} + var previousXAttr = "" + for _, p := range partsBySpace { + partsByEqual := strings.Split(p, "=") // we have kv pairs like [size 14] + if len(partsByEqual) == 2 { + // handle xattrn and xattrv special cases + if partsByEqual[0] == "xattrn" { + previousXAttr = partsByEqual[1] + } else if partsByEqual[0] == "xattrv" { + kv[previousXAttr] = partsByEqual[1] + previousXAttr = "" + } else { + kv[partsByEqual[0]] = partsByEqual[1] + } + } + } + + fi, err := c.mapToFileInfo(kv) + if err != nil { + return nil, err + } + return fi, nil +} + +// mapToFileInfo converts the dictionary to an usable structure. 
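// (Editorial illustration.) parseFileInfo above turns a raw monitor-format line such as
//
//	keylength.file=22 file=/eos/user/e/einstein/a ino=5 fid=5 size=45 container=3 mtime=1498571294.108614409 xattrn=sys.acl xattrv=u:marie:rx
//
// into that kv map, folding each xattrn/xattrv pair into a single entry, for example
// kv["file"]="/eos/user/e/einstein/a", kv["size"]="45" and kv["sys.acl"]="u:marie:rx".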
+// The kv has format: +// map[sys.forced.space:default files:0 mode:42555 ino:5 sys.forced.blocksize:4k sys.forced.layout:replica uid:0 fid:5 sys.forced.blockchecksum:crc32c sys.recycle:/eos/backup/proc/recycle/ fxid:00000005 pid:1 etag:5:0.000 keylength.file:4 file:/eos treesize:1931593933849913 container:3 gid:0 mtime:1498571294.108614409 ctime:1460121992.294326762 pxid:00000001 sys.forced.checksum:adler sys.forced.nstripes:2] +func (c *Client) mapToFileInfo(kv map[string]string) (*FileInfo, error) { + inode, err := strconv.ParseUint(kv["ino"], 10, 64) + if err != nil { + return nil, err + } + fid, err := strconv.ParseUint(kv["fid"], 10, 64) + if err != nil { + return nil, err + } + + var treeSize uint64 + // treeSize is only for containers, so we check + if val, ok := kv["treesize"]; ok { + treeSize, err = strconv.ParseUint(val, 10, 64) + if err != nil { + return nil, err + } + } + var fileCounter uint64 + // fileCounter is only for containers + if val, ok := kv["files"]; ok { + fileCounter, err = strconv.ParseUint(val, 10, 64) + if err != nil { + return nil, err + } + } + var dirCounter uint64 + // dirCounter is only for containers + if val, ok := kv["container"]; ok { + dirCounter, err = strconv.ParseUint(val, 10, 64) + if err != nil { + return nil, err + } + } + + // treeCount is the number of entries under the tree + treeCount := fileCounter + dirCounter + + var size uint64 + if val, ok := kv["size"]; ok { + size, err = strconv.ParseUint(val, 10, 64) + if err != nil { + return nil, err + } + } + + // mtime is split by a dot, we only take the first part, do we need subsec precision? + mtime, err := strconv.ParseUint(strings.Split(kv["mtime"], ".")[0], 10, 64) + if err != nil { + return nil, err + } + + isDir := false + if _, ok := kv["files"]; ok { + isDir = true + } + + fi := &FileInfo{ + File: kv["file"], + Inode: inode, + FID: fid, + ETag: kv["etag"], + Size: size, + TreeSize: treeSize, + MTime: mtime, + IsDir: isDir, + Instance: c.opt.URL, + SysACL: kv["sys.acl"], + TreeCount: treeCount, + } + return fi, nil +} + +// FileInfo represents the metadata information returned by querying the EOS namespace. +type FileInfo struct { + File string `json:"eos_file"` + Inode uint64 `json:"inode"` + FID uint64 `json:"fid"` + ETag string + TreeSize uint64 + MTime uint64 + Size uint64 + IsDir bool + Instance string + SysACL string + TreeCount uint64 +} + +// DeletedEntry represents an entry from the trashbin. 
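// Editorial illustration (not part of the changeset): listing the recycle bin with the
// client methods defined earlier in this file and restoring the last entry returned.
// The username and the eosexamples package name are hypothetical.
package eosexamples

import (
	"context"

	"github.com/cernbox/reva/pkg/eosclient"
)

func restoreLast(ctx context.Context, c *eosclient.Client) error {
	entries, err := c.ListDeletedEntries(ctx, "einstein")
	if err != nil {
		return err
	}
	if len(entries) == 0 {
		return nil
	}
	// Each entry carries the restore-key parsed from `eos recycle ls -m`.
	last := entries[len(entries)-1]
	return c.RestoreDeletedEntry(ctx, "einstein", last.RestoreKey)
}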
+type DeletedEntry struct { + RestorePath string + RestoreKey string + Size uint64 + DeletionMTime uint64 + IsDir bool +} + +type aclManager struct { + aclEntries []*aclEntry +} + +func (c *Client) newACLManager(ctx context.Context, sysACL string) *aclManager { + tokens := strings.Split(sysACL, ",") + aclEntries := []*aclEntry{} + for _, t := range tokens { + aclEntry, err := newACLEntry(ctx, t) + if err == nil { + aclEntries = append(aclEntries, aclEntry) + } + } + + return &aclManager{aclEntries: aclEntries} +} + +func (m *aclManager) getEntries() []*aclEntry { + return m.aclEntries +} + +/* +func (m *aclManager) getUsers() []*aclEntry { + entries := []*aclEntry{} + for _, e := range m.aclEntries { + if e.aclType == ACLTypeUser { + entries = append(entries, e) + } + } + return entries +} + +func (m *aclManager) getUsersWithReadPermission() []*aclEntry { + entries := []*aclEntry{} + for _, e := range m.aclEntries { + if e.aclType == ACLTypeUser && e.hasReadPermissions() { + entries = append(entries, e) + } + } + return entries +} + +func (m *aclManager) getUsersWithWritePermission() []*aclEntry { + entries := []*aclEntry{} + for _, e := range m.aclEntries { + if e.aclType == ACLTypeUser && e.hasWritePermissions() { + entries = append(entries, e) + } + } + return entries +} + +func (m *aclManager) getGroups() []*aclEntry { + entries := []*aclEntry{} + for _, e := range m.aclEntries { + if e.aclType == ACLTypeGroup { + entries = append(entries, e) + } + } + return entries +} + +func (m *aclManager) getGroupsWithReadPermission() []*aclEntry { + entries := []*aclEntry{} + for _, e := range m.aclEntries { + if e.aclType == ACLTypeGroup && e.hasReadPermissions() { + entries = append(entries, e) + } + } + return entries +} + +func (m *aclManager) getGroupsWithWritePermission() []*aclEntry { + entries := []*aclEntry{} + for _, e := range m.aclEntries { + if e.aclType == ACLTypeGroup && e.hasWritePermissions() { + entries = append(entries, e) + } + } + return entries +} + +func (m *aclManager) getUnixGroups() []*aclEntry { + entries := []*aclEntry{} + for _, e := range m.aclEntries { + if e.aclType == ACLTypeUnixGroup { + entries = append(entries, e) + } + } + return entries +} + +func (m *aclManager) getUnixGroupsWithReadPermission() []*aclEntry { + entries := []*aclEntry{} + for _, e := range m.aclEntries { + if e.aclType == ACLTypeUnixGroup && e.hasReadPermissions() { + entries = append(entries, e) + } + } + return entries +} + +func (m *aclManager) getUnixGroupsWithWritePermission() []*aclEntry { + entries := []*aclEntry{} + for _, e := range m.aclEntries { + if e.aclType == ACLTypeUnixGroup && e.hasWritePermissions() { + entries = append(entries, e) + } + } + return entries +} + +func (m *aclManager) getUser(username string) *aclEntry { + for _, u := range m.getUsers() { + if u.recipient == username { + return u + } + } + return nil +} + +func (m *aclManager) getGroup(group string) *aclEntry { + for _, e := range m.getGroups() { + if e.recipient == group { + return e + } + } + return nil +} + +func (m *aclManager) getUnixGroup(unixGroup string) *aclEntry { + for _, e := range m.getUnixGroups() { + if e.recipient == unixGroup { + return e + } + } + return nil +} + +func (m *aclManager) deleteUser(ctx context.Context, username string) { + for i, e := range m.aclEntries { + if e.recipient == username && e.aclType == ACLTypeUser { + m.aclEntries = append(m.aclEntries[:i], m.aclEntries[i+1:]...) 
+ return + } + } +} + +func (m *aclManager) addUser(ctx context.Context, username string, mode ACLMode) error { + m.deleteUser(ctx, username) + sysACL := strings.Join([]string{string(ACLTypeUser), username, string(mode)}, ":") + newEntry, err := newACLEntry(ctx, sysACL) + if err != nil { + return err + } + m.aclEntries = append(m.aclEntries, newEntry) + return nil +} + +func (m *aclManager) deleteGroup(ctx context.Context, group string) { + for i, e := range m.aclEntries { + if e.recipient == group && e.aclType == ACLTypeGroup { + m.aclEntries = append(m.aclEntries[:i], m.aclEntries[i+1:]...) + return + } + } +} + +func (m *aclManager) addGroup(ctx context.Context, group string, mode ACLMode) error { + m.deleteGroup(ctx, group) + sysACL := strings.Join([]string{string(ACLTypeGroup), group, string(mode)}, ":") + newEntry, err := newACLEntry(ctx, sysACL) + if err != nil { + return err + } + m.aclEntries = append(m.aclEntries, newEntry) + return nil +} + +func (m *aclManager) deleteUnixGroup(ctx context.Context, unixGroup string) { + for i, e := range m.aclEntries { + if e.recipient == unixGroup && e.aclType == ACLTypeUnixGroup { + m.aclEntries = append(m.aclEntries[:i], m.aclEntries[i+1:]...) + return + } + } +} + +func (m *aclManager) addUnixGroup(ctx context.Context, unixGroup string, mode ACLMode) error { + m.deleteUnixGroup(ctx, unixGroup) + sysACL := strings.Join([]string{string(ACLTypeUnixGroup), unixGroup, string(mode)}, ":") + newEntry, err := newACLEntry(ctx, sysACL) + if err != nil { + return err + } + m.aclEntries = append(m.aclEntries, newEntry) + return nil +} +*/ + +func (m *aclManager) readOnlyToEOSPermissions(readOnly bool) string { + if readOnly { + return "rx" + } + return "rwx+d" +} + +func (m *aclManager) serialize() string { + sysACL := []string{} + for _, e := range m.aclEntries { + sysACL = append(sysACL, e.serialize()) + } + return strings.Join(sysACL, ",") +} + +type aclEntry struct { + aclType string + recipient string + mode string +} + +// u:gonzalhu:rw +func newACLEntry(ctx context.Context, singleSysACL string) (*aclEntry, error) { + tokens := strings.Split(singleSysACL, ":") + if len(tokens) != 3 { + return nil, errInvalidACL + } + + aclType := tokens[0] + target := tokens[1] + mode := tokens[2] + + return &aclEntry{ + aclType: aclType, + recipient: target, + mode: mode, + }, nil +} + +/* +func (a *aclEntry) hasWritePermissions() bool { + return a.mode == ACLModeReadWrite +} + +func (a *aclEntry) hasReadPermissions() bool { + return a.mode == ACLModeRead || a.mode == ACLModeReadWrite +} +*/ + +func (a *aclEntry) serialize() string { + return strings.Join([]string{string(a.aclType), a.recipient, a.mode}, ":") +} + +type notFoundError string + +func (e notFoundError) IsNotFound() {} +func (e notFoundError) Error() string { return string(e) } diff --git a/pkg/err/err.go b/pkg/err/err.go new file mode 100644 index 0000000000..948aad4faf --- /dev/null +++ b/pkg/err/err.go @@ -0,0 +1,41 @@ +package err + +import ( + "github.com/pkg/errors" +) + +type Err struct { + prefix string +} + +func New(prefix string) *Err { + return &Err{prefix: prefix} +} + +func (e *Err) Wrap(err error, msg string) error { + msg = e.build(msg) + return errors.Wrap(err, msg) +} + +func (e *Err) build(msg string) string { + return e.prefix + ": " + msg +} + +func (e *Err) Wrapf(err error, format string, args ...interface{}) error { + format = e.build(format) + return errors.Wrapf(err, format, args...) 
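	// (Editorial illustration.) A package would typically create one prefixed helper and
	// reuse it for all of its errors, roughly:
	//
	//	var errs = err.New("storage_local")
	//	...
	//	return errs.Wrapf(ioErr, "error opening %s", fn)
	//
	// which yields messages of the form "storage_local: error opening /tmp/x: <cause>".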
+} + +func (e *Err) Cause(err error) error { + return errors.Cause(err) +} + +func (e *Err) New(msg string) error { + msg = e.build(msg) + return errors.New(msg) +} + +func (e *Err) Errorf(format string, args ...interface{}) error { + format = e.build(format) + return errors.Errorf(format, args...) +} diff --git a/pkg/log/log.go b/pkg/log/log.go new file mode 100644 index 0000000000..8fa195661f --- /dev/null +++ b/pkg/log/log.go @@ -0,0 +1,164 @@ +package log + +import ( + "context" + "fmt" + "io" + "os" + "runtime/debug" + + "github.com/rs/zerolog" +) + +func init() { + zerolog.CallerSkipFrameCount = 3 +} + +var pkgs = []string{} +var enabledLoggers = map[string]*zerolog.Logger{} + +// Out is the log output writer +var Out io.Writer = os.Stderr + +// Mode dev prints in console format and prod in json output +var Mode = "dev" + +// Logger is the main logging element +type Logger struct { + pkg string + pid int +} + +// ListRegisteredPackages returns the name of the packages a log has been registered.s +func ListRegisteredPackages() []string { + return pkgs +} + +// ListEnabledPackages returns a list with the name of log-enabled packages. +func ListEnabledPackages() []string { + pkgs := []string{} + for k := range enabledLoggers { + pkgs = append(pkgs, k) + } + return pkgs +} + +// EnableAll enables all registered loggers +func EnableAll() error { + for _, v := range pkgs { + if err := Enable(v); err != nil { + return err + } + } + return nil +} + +// Enable enables a specific logger with its package name +func Enable(pkg string) error { + l := create(pkg) + enabledLoggers[pkg] = l + return nil +} + +// Disable a specific logger by its package name +func Disable(prefix string) { + nop := zerolog.Nop() + enabledLoggers[prefix] = &nop +} + +func create(pkg string) *zerolog.Logger { + pid := os.Getpid() + zl := createLog(pkg, pid) + l := zl.With().Str("pkg", pkg).Int("pid", pid).Logger() + return &l +} + +// New returns a new Logger +func New(pkg string) *Logger { + pkgs = append(pkgs, pkg) + nop := zerolog.Nop() + enabledLoggers[pkg] = &nop + logger := &Logger{pkg: pkg} + return logger +} + +func find(pkg string) *zerolog.Logger { + l := enabledLoggers[pkg] + return l +} + +// Builder allows to contruct log step by step +type Builder struct { + event *zerolog.Event + l *Logger +} + +// Str add a string to the builder +func (b *Builder) Str(key, val string) *Builder { + b.event = b.event.Str(key, val) + return b +} + +// Int adds an int to the builder +func (b *Builder) Int(key string, val int) *Builder { + b.event = b.event.Int(key, val) + return b +} + +// Msg write the message with any fields stored +func (b *Builder) Msg(ctx context.Context, msg string) { + b.event.Str("trace", getTrace(ctx)).Msg(msg) +} + +// Build allocates a new Builder +func (l *Logger) Build() *Builder { + return &Builder{l: l, event: enabledLoggers[l.pkg].Info()} +} + +// BuildError allocates a new Builder with error level +func (l *Logger) BuildError() *Builder { + return &Builder{l: l, event: enabledLoggers[l.pkg].Error()} +} + +// Println prints in info level +func (l *Logger) Println(ctx context.Context, args ...interface{}) { + zl := find(l.pkg) + zl.Info().Str("trace", getTrace(ctx)).Msg(fmt.Sprint(args...)) +} + +// Printf prints in info level +func (l *Logger) Printf(ctx context.Context, format string, args ...interface{}) { + zl := find(l.pkg) + zl.Info().Str("trace", getTrace(ctx)).Msg(fmt.Sprintf(format, args...)) +} + +// Error prints in error level +func (l *Logger) Error(ctx context.Context, err error) 
{ + zl := find(l.pkg) + zl.Error().Str("trace", getTrace(ctx)).Msg(err.Error()) +} + +// Panic prints in error levzel a stack trace +func (l *Logger) Panic(ctx context.Context, reason string) { + zl := find(l.pkg) + stack := debug.Stack() + msg := reason + "\n" + string(stack) + zl.Error().Str("trace", getTrace(ctx)).Bool("panic", true).Msg(msg) +} + +func createLog(pkg string, pid int) *zerolog.Logger { + zlog := zerolog.New(os.Stderr).With().Str("pkg", pkg).Int("pid", pid).Timestamp().Caller().Logger() + if Mode == "" || Mode == "dev" { + zlog = zlog.Output(zerolog.ConsoleWriter{Out: Out}) + } else { + zlog = zlog.Output(Out) + } + return &zlog +} + +func getTrace(ctx context.Context) string { + if v, ok := ctx.Value("trace").(string); ok { + return v + } + return "" +} diff --git a/pkg/mime/mime.go b/pkg/mime/mime.go new file mode 100644 index 0000000000..5ad645d069 --- /dev/null +++ b/pkg/mime/mime.go @@ -0,0 +1,41 @@ +package mime + +import ( + gomime "mime" + "path" +) + +const defaultMimeDir = "httpd/unix-directory" + +var mimeMap map[string]string + +func init() { + mimeMap = map[string]string{} +} + +// RegisterMime is a package level function that registers +// a mimetype with the given extension. +func RegisterMime(ext, mime string) { + mimeMap[ext] = mime +} + +// Detect returns the mimetype associated with the given filename. +func Detect(isDir bool, fn string) string { + if isDir { + return defaultMimeDir + } + + ext := path.Ext(fn) + + mimeType := getCustomMime(ext) + + if mimeType == "" { + mimeType = gomime.TypeByExtension(ext) + } + + return mimeType +} + +func getCustomMime(ext string) string { + return mimeMap[ext] +} diff --git a/pkg/project/manager/projdb/projdb.go b/pkg/project/manager/projdb/projdb.go new file mode 100644 index 0000000000..b5082c9ac0 --- /dev/null +++ b/pkg/project/manager/projdb/projdb.go @@ -0,0 +1,112 @@ +package projdb + +import ( + "context" + "database/sql" + "fmt" + + "github.com/cernbox/reva/pkg/project" + _ "github.com/go-sql-driver/mysql" // import mysql driver + "github.com/pkg/errors" +) + +type manager struct { + db *sql.DB + dbUsername, dbPassword, dbHost, dbName string + dbPort int +} + +func (m *manager) getDB() (*sql.DB, error) { + if m.db != nil { + return m.db, nil + } + + db, err := sql.Open("mysql", fmt.Sprintf("%s:%s@tcp(%s:%d)/%s", m.dbUsername, m.dbPassword, m.dbHost, m.dbPort, m.dbName)) + if err != nil { + return nil, errors.Wrapf(err, "projdb: error creating connection to dbName=%s dbHost=%s dbPort=%d", m.dbName, m.dbHost, m.dbPort) + } + + m.db = db + return m.db, nil +} + +// New returns a new project manager that stores the project information in a mysql database. +func New(dbUsername, dbPassword, dbHost string, dbPort int, dbName string) project.Manager { + return &manager{dbUsername: dbUsername, dbPassword: dbPassword, dbHost: dbHost, dbName: dbName, dbPort: dbPort} +} + +func (m *manager) GetProject(ctx context.Context, projectName string) (*project.Project, error) { + var ( + owner string + path string + ) + + query := "select eos_relative_path, project_owner from cernbox_project_mapping where project_name=?" 
+ if err := m.db.QueryRow(query, projectName).Scan(&path, &owner); err != nil { + if err == sql.ErrNoRows { + err := projectNotFoundError(projectName) + return nil, errors.Wrapf(err, "projdb: projectName=%s not found", projectName) + } + return nil, errors.Wrapf(err, "projdb: error querying db for projectName=%s", projectName) + } + + adminGroup := getAdminGroup(projectName) + writersGroup := getWritersGroup(projectName) + readersGroup := getReadersGroup(projectName) + + project := &project.Project{Name: projectName, + Owner: owner, + Path: path, + AdminGroup: adminGroup, + ReadersGroup: readersGroup, + WritersGroup: writersGroup} + + return project, nil + +} + +func getAdminGroup(name string) string { return "cernbox-project-" + name + "-admins" } +func getReadersGroup(name string) string { return "cernbox-project-" + name + "-readers" } +func getWritersGroup(name string) string { return "cernbox-project-" + name + "-writers" } + +func (m *manager) GetAllProjects(ctx context.Context) ([]*project.Project, error) { + query := "select project_name, project_owner, eos_relative_path from cernbox_project_mapping" + rows, err := m.db.Query(query) + if err != nil { + return nil, err + } + defer rows.Close() + + var ( + name string + owner string + path string + ) + + projects := []*project.Project{} + for rows.Next() { + err := rows.Scan(&name, &owner, &path) + if err != nil { + return nil, err + } + + adminGroup := getAdminGroup(name) + writersGroup := getWritersGroup(name) + readersGroup := getReadersGroup(name) + + project := &project.Project{Owner: owner, Path: path, Name: name, AdminGroup: adminGroup, ReadersGroup: readersGroup, WritersGroup: writersGroup} + projects = append(projects, project) + + } + + err = rows.Err() + if err != nil { + return nil, err + } + + return projects, nil +} + +type projectNotFoundError string + +func (e projectNotFoundError) Error() string { return string(e) } diff --git a/pkg/project/project.go b/pkg/project/project.go new file mode 100644 index 0000000000..f1c5a96696 --- /dev/null +++ b/pkg/project/project.go @@ -0,0 +1,25 @@ +package project + +import ( + "context" +) + +type ( + // Project represents a collaborative shared space owned by an account + // with three groups for its management. + Project struct { + Name string + Path string + Owner string + AdminGroup string + ReadersGroup string + WritersGroup string + } + + // Manager manipulates the registered projects. + // TODO(labkode): add CRUD + Manager interface { + GetAllProjects(ctx context.Context) ([]*Project, error) + GetProject(ctx context.Context, name string) (*Project, error) + } +) diff --git a/pkg/publicshare/publicshare.go b/pkg/publicshare/publicshare.go new file mode 100644 index 0000000000..3126083493 --- /dev/null +++ b/pkg/publicshare/publicshare.go @@ -0,0 +1,73 @@ +package publicshare + +import ( + "context" + + "github.com/cernbox/reva/pkg/storage" + "github.com/cernbox/reva/pkg/user" +) + +const ( + // ACLModeReadOnly specifies that the share is read-only. + ACLModeReadOnly ACLMode = "read-only" + + // ACLModeReadWrite specifies that the share is read-writable. + ACLModeReadWrite ACLMode = "read-write" + + // ACLTypeDirectory specifies that the share points to a directory. + ACLTypeDirectory ACLType = "directory" + + // ACLTypeFile specifies that the share points to a file. + ACLTypeFile ACLType = "file" +) + +type ( + // Manager manipulates public shares. 
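	// (Editorial illustration for the project packages above.) A caller would construct
	// the MySQL-backed manager and resolve a project roughly as follows; the credentials,
	// host and project name are hypothetical:
	//
	//	mgr := projdb.New("reva", dbPassword, "dbhost.example.org", 3306, "cernbox")
	//	p, err := mgr.GetProject(ctx, "myproject")
	//	// p.AdminGroup == "cernbox-project-myproject-admins"; p.Path is the EOS-relative path
	//
	// Note that GetProject reads m.db directly, so the manager's connection has to be
	// established (see getDB) before the first query.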
+ Manager interface { + CreatePublicShare(ctx context.Context, u *user.User, md *storage.MD, a *ACL) (*PublicShare, error) + UpdatePublicShare(ctx context.Context, u *user.User, id string, up *UpdatePolicy, a *ACL) (*PublicShare, error) + GetPublicShare(ctx context.Context, u *user.User, id string) (*PublicShare, error) + ListPublicShares(ctx context.Context, u *user.User, md *storage.MD) ([]*PublicShare, error) + RevokePublicShare(ctx context.Context, u *user.User, id string) error + + GetPublicShareByToken(ctx context.Context, token string) (*PublicShare, error) + } + + // PublicShare represents a public share. + PublicShare struct { + ID string + Token string + Filename string + Modified uint64 + Owner string + DisplayName string + ACL *ACL + } + + // ACL is the the acl to use when creating or updating public shares. + ACL struct { + Password string + Expiration uint64 + SetMode bool + Mode ACLMode + Type ACLType + } + + // UpdatePolicy specifies which attributes to update when calling UpdateACL. + UpdatePolicy struct { + SetPassword bool + SetExpiration bool + SetMode bool + } + + // ACLMode represents the mode for the share (read-only, read-write, ...) + ACLMode string + + // ACLType represents the type of file the share points to (file, directory, ...) + ACLType string +) + +/* +AuthenticatePublicShare(ctx context.Context, token, password string) (*PublicShare, error) + IsPublicShareProtected(ctx context.Context, token string) (bool, error) +*/ diff --git a/pkg/share/share.go b/pkg/share/share.go new file mode 100644 index 0000000000..acfa95c243 --- /dev/null +++ b/pkg/share/share.go @@ -0,0 +1,97 @@ +package share + +import ( + "context" + + "github.com/cernbox/reva/pkg/storage" + "github.com/cernbox/reva/pkg/user" +) + +const ( + // StateAccepted means the share has been accepted and it can be accessed. + StateAccepted State = "accepted" + // StatePending means the share needs to be accepted or rejected. + StatePending State = "pending" + // StateRejected means the share has been rejected and is not accessible. + StateRejected State = "rejected" + + // ACLModeReadOnly means the receiver will only be able to browse and download contents. + ACLModeReadOnly ACLMode = "read-only" + // ACLModeReadWrite means the receiver will be able to manipulate the contents (write, delete, rename...) + ACLModeReadWrite ACLMode = "read-write" + + // ACLTypeUser means the receiver of the share is an individual user. + ACLTypeUser ACLType = "user" + // ACLTypeGroup means the receiver of the share is a group of people. + ACLTypeGroup ACLType = "group" +) + +type ( + // ACLMode is the permission for the share. + ACLMode string + + // ACLType is the type of the share. + ACLType string + + // State represents the state of the share. + State string + + // Manager is the interface that manipulates shares. + Manager interface { + // Create a new share in fn with the given acl. + Share(ctx context.Context, u *user.User, md *storage.MD, a *ACL) (*Share, error) + + // GetShare gets the information for a share by the given id. + GetShare(ctx context.Context, u *user.User, id string) (*Share, error) + + // Unshare deletes the share pointed by id. + Unshare(ctx context.Context, u *user.User, id string) error + + // UpdateShare updates the mode of the given share. + UpdateShare(ctx context.Context, u *user.User, id string, mode ACLMode) (*Share, error) + + // ListShares returns the shares created by the user. If forPath is not empty, + // it returns only shares attached to the given path. 
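		// (Editorial illustration.) With the types below, a read-only group share would be
		// created roughly as:
		//
		//	a := &share.ACL{Target: "physics-team", Mode: share.ACLModeReadOnly, Type: share.ACLTypeGroup}
		//	s, err := mgr.Share(ctx, u, md, a)
		//
		// where u is the authenticated *user.User and md the *storage.MD of the shared resource.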
+ ListShares(ctx context.Context, u *user.User, md *storage.MD) ([]*Share, error) + + // ListReceivedShares returns the list of shares the user has access. + ListReceivedShares(ctx context.Context, u *user.User) ([]*Share, error) + + // GetReceivedShare returns the information for the share received with + // the given id. + GetReceivedShare(ctx context.Context, u *user.User, id string) (*Share, error) + + // RejectReceivedShare rejects the share by the given id. + RejectReceivedShare(ctx context.Context, u *user.User, id string) error + } + + // ACL represents the information about the nature of the share. + ACL struct { + // Target is the recipient of the share. + Target string + + // Mode is the mode for the share. + Mode ACLMode + + // Type is the type of the share. + Type ACLType + } + + // Share represents the information stored in a share. + Share struct { + // ID represents the ID of the share. + ID string + // Filename points to the source of the share. + Filename string + // Owner is the account name owning the share. + Owner string + // ACL represents the information about the target of the share. + ACL *ACL + // Created represents the creation time in seconds from unix epoch. + Created uint64 + // Modified represents the modification time in seconds from unix epoch. + Modified uint64 + // State represents the state of the share. + State State + } +) diff --git a/pkg/storage/broker/static/static.go b/pkg/storage/broker/static/static.go new file mode 100644 index 0000000000..af0652567e --- /dev/null +++ b/pkg/storage/broker/static/static.go @@ -0,0 +1,63 @@ +package static + +import ( + "context" + "strings" + + "github.com/cernbox/reva/pkg/log" + + "github.com/cernbox/reva/pkg/storage" + "github.com/mitchellh/mapstructure" +) + +var logger = log.New("static") + +type broker struct { + rules map[string]string +} + +func (b *broker) FindProvider(ctx context.Context, fn string) (*storage.ProviderInfo, error) { + // find longest match + var match string + for prefix := range b.rules { + if strings.HasPrefix(fn, prefix) && len(prefix) > len(match) { + match = prefix + } + } + + if match == "" { + return nil, notFoundError("storage provider not found for path " + fn) + } + + p := &storage.ProviderInfo{ + Location: b.rules[match], + } + return p, nil +} + +type config struct { + Rules map[string]string +} + +func parseConfig(m map[string]interface{}) (*config, error) { + c := &config{} + if err := mapstructure.Decode(m, c); err != nil { + return nil, err + } + return c, nil +} + +// New returns an implementation to of the storage.FS interface that talk to +// a local filesystem. 
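// Editorial illustration (not part of the changeset): the rules map consumed by the static
// broker below and how FindProvider resolves a path by longest matching prefix. The
// prefixes and endpoints are hypothetical.
package main

import (
	"context"
	"fmt"

	"github.com/cernbox/reva/pkg/storage/broker/static"
)

func main() {
	b, err := static.New(map[string]interface{}{
		"rules": map[string]string{
			"/home":            "localhost:17000",
			"/eos/user":        "localhost:18000",
			"/eos/user/p/proj": "localhost:19000",
		},
	})
	if err != nil {
		panic(err)
	}
	// The longest matching prefix wins, so this resolves to localhost:19000.
	p, err := b.FindProvider(context.Background(), "/eos/user/p/proj/readme.md")
	if err != nil {
		panic(err)
	}
	fmt.Println(p.Location)
}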
+func New(m map[string]interface{}) (storage.Broker, error) { + c, err := parseConfig(m) + if err != nil { + return nil, err + } + return &broker{rules: c.Rules}, nil +} + +type notFoundError string + +func (e notFoundError) Error() string { return string(e) } +func (e notFoundError) IsNotFound() {} diff --git a/pkg/storage/eos/eos.go b/pkg/storage/eos/eos.go new file mode 100644 index 0000000000..52e7746856 --- /dev/null +++ b/pkg/storage/eos/eos.go @@ -0,0 +1,549 @@ +package eos + +import ( + "context" + "fmt" + "io" + "os" + "path" + "regexp" + "strconv" + "strings" + + "github.com/cernbox/reva/pkg/eosclient" + "github.com/cernbox/reva/pkg/log" + "github.com/cernbox/reva/pkg/mime" + "github.com/cernbox/reva/pkg/storage" + "github.com/cernbox/reva/pkg/user" + "github.com/pkg/errors" +) + +var hiddenReg = regexp.MustCompile(`\.sys\..#.`) + +var logger = log.New("eos") + +type contextUserRequiredErr string + +func (err contextUserRequiredErr) Error() string { return string(err) } +func (err contextUserRequiredErr) IsUserRequired() {} + +type eosStorage struct { + c *eosclient.Client + mountpoint string + showHiddenSys bool +} + +// Options are the configuration options to pass to the New function. +type Options struct { + // Namespace for fn operations + Namespace string `json:"namespace"` + + // Where to write the logs + LogOut io.Writer + + // LogKey key to use for storing log traces + LogKey interface{} + + // Location of the eos binary. + // Default is /usr/bin/eos. + EosBinary string `json:"eos_binary"` + + // Location of the xrdcopy binary. + // Default is /usr/bin/xrdcopy. + XrdcopyBinary string `json:"xrdcopy_binary"` + + // URL of the Master EOS MGM. + // Default is root://eos-test.org + MasterURL string `json:"master_url"` + + // URL of the Slave EOS MGM. + // Default is root://eos-test.org + SlaveURL string `json:"slave_url"` + + // Location on the local fs where to store reads. + // Defaults to os.TempDir() + CacheDirectory string `json:"cache_directory"` + + // Enables logging of the commands executed + // Defaults to false + EnableLogging bool `json:"enable_logging"` + + // ShowHiddenSysFiles shows internal EOS files like + // .sys.v# and .sys.a# files. + ShowHiddenSysFiles bool `json:"show_hidden_sys_files"` +} + +func getUser(ctx context.Context) (*user.User, error) { + u, ok := user.ContextGetUser(ctx) + if !ok { + err := errors.Wrap(contextUserRequiredErr("userrequired"), "storage_eos: error getting user from ctx") + return nil, err + } + return u, nil +} + +func (opt *Options) init() { + opt.Namespace = path.Clean(opt.Namespace) + if !strings.HasPrefix(opt.Namespace, "/") { + opt.Namespace = "/" + } + + if opt.EosBinary == "" { + opt.EosBinary = "/usr/bin/eos" + } + + if opt.XrdcopyBinary == "" { + opt.XrdcopyBinary = "/usr/bin/xrdcopy" + } + + if opt.MasterURL == "" { + opt.MasterURL = "root://eos-example.org" + } + + if opt.SlaveURL == "" { + opt.SlaveURL = opt.MasterURL + } + + if opt.CacheDirectory == "" { + opt.CacheDirectory = os.TempDir() + } +} + +// New returns a new implementation of the storage.FS interface that connects to EOS. 
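// Editorial illustration (not part of the changeset): wiring the EOS-backed storage.FS
// with the Options defined above. The namespace, MGM URL and cache directory are
// hypothetical.
package main

import (
	"github.com/cernbox/reva/pkg/storage"
	"github.com/cernbox/reva/pkg/storage/eos"
)

func newEosFS() storage.FS {
	return eos.New(&eos.Options{
		Namespace:          "/eos/user",              // external path /a/b maps to /eos/user/a/b
		MasterURL:          "root://eos-example.org", // slave defaults to the master when unset
		CacheDirectory:     "/var/tmp/revad",
		ShowHiddenSysFiles: false, // keep .sys.v# and .sys.a# entries out of listings
	})
}

func main() {
	fs := newEosFS()
	_ = fs
}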
+func New(opt *Options) storage.FS { + opt.init() + + eosClientOpts := &eosclient.Options{ + XrdcopyBinary: opt.XrdcopyBinary, + URL: opt.MasterURL, + EosBinary: opt.EosBinary, + CacheDirectory: opt.CacheDirectory, + LogOutput: opt.LogOut, + TraceKey: opt.LogKey, + } + + eosClient := eosclient.New(eosClientOpts) + + eosStorage := &eosStorage{ + c: eosClient, + mountpoint: opt.Namespace, + showHiddenSys: opt.ShowHiddenSysFiles, + } + + return eosStorage +} + +func (fs *eosStorage) getInternalPath(ctx context.Context, fn string) string { + internalPath := path.Join(fs.mountpoint, fn) + msg := fmt.Sprintf("func=getInternalPath outter=%s inner=%s", fn, internalPath) + logger.Println(ctx, msg) + return internalPath +} + +func (fs *eosStorage) removeNamespace(ctx context.Context, np string) string { + p := strings.TrimPrefix(np, fs.mountpoint) + if p == "" { + p = "/" + } + + msg := fmt.Sprintf("func=removeNamespace inner=%s outter=%s", np, p) + logger.Println(ctx, msg) + return p +} + +func (fs *eosStorage) GetPathByID(ctx context.Context, id string) (string, error) { + u, err := getUser(ctx) + if err != nil { + return "", errors.Wrap(err, "storage_eos: no user in ctx") + } + + // parts[0] = 868317, parts[1] = photos, ... + parts := strings.Split(id, "/") + fileID, err := strconv.ParseUint(parts[0], 10, 64) + if err != nil { + return "", errors.Wrap(err, "storage_eos: error parsing fileid string") + } + + eosFileInfo, err := fs.c.GetFileInfoByInode(ctx, u.Username, fileID) + if err != nil { + return "", errors.Wrap(err, "storage_eos: error getting file info by inode") + } + + fi := fs.convertToMD(ctx, eosFileInfo) + return fi.Path, nil +} + +func (fs *eosStorage) SetACL(ctx context.Context, fn string, a *storage.ACL) error { + u, err := getUser(ctx) + if err != nil { + return errors.Wrap(err, "storage_eos: no user in ctx") + } + + fn = fs.getInternalPath(ctx, fn) + + eosACL := fs.getEosACL(a) + + err = fs.c.AddACL(ctx, u.Username, fn, eosACL) + if err != nil { + return errors.Wrap(err, "storage_eos: error adding acl") + } + + return nil +} + +func getEosACLType(aclType storage.ACLType) string { + switch aclType { + case storage.ACLTypeUser: + return "u" + case storage.ACLTypeGroup: + return "g" + default: + panic(aclType) + } +} + +func getEosACLPerm(mode storage.ACLMode) string { + switch mode { + case storage.ACLModeReadOnly: + return "rx" + case storage.ACLModeReadWrite: + return "rwx!d" + default: + panic(mode) + } +} + +func (fs *eosStorage) getEosACL(a *storage.ACL) *eosclient.ACL { + eosACL := &eosclient.ACL{Target: a.Target} + eosACL.Mode = getEosACLPerm(a.Mode) + eosACL.Type = getEosACLType(a.Type) + return eosACL +} + +func (fs *eosStorage) UnsetACL(ctx context.Context, fn string, a *storage.ACL) error { + u, err := getUser(ctx) + if err != nil { + return errors.Wrap(err, "storage_eos: no user in ctx") + } + + eosACLType := getEosACLType(a.Type) + + fn = fs.getInternalPath(ctx, fn) + + err = fs.c.RemoveACL(ctx, u.Username, fn, eosACLType, a.Target) + if err != nil { + return errors.Wrap(err, "storage_eos: error removing acl") + } + return nil +} + +func (fs *eosStorage) UpdateACL(ctx context.Context, fn string, a *storage.ACL) error { + u, err := getUser(ctx) + if err != nil { + return errors.Wrap(err, "storage_eos: no user in ctx") + } + + eosACL := fs.getEosACL(a) + + fn = fs.getInternalPath(ctx, fn) + err = fs.c.AddACL(ctx, u.Username, fn, eosACL) + if err != nil { + return errors.Wrap(err, "storage_eos: error updating acl") + } + return nil +} + +func (fs *eosStorage) GetACL(ctx 
context.Context, fn string, aclType storage.ACLType, target string) (*storage.ACL, error) { + u, err := getUser(ctx) + if err != nil { + return nil, err + } + + fn = fs.getInternalPath(ctx, fn) + eosACL, err := fs.c.GetACL(ctx, u.Username, fn, getEosACLType(aclType), target) + if err != nil { + return nil, err + } + + acl := &storage.ACL{ + Target: eosACL.Target, + Mode: fs.getACLMode(eosACL.Mode), + Type: fs.getACLType(eosACL.Type), + } + return acl, nil +} + +func (fs *eosStorage) ListACLs(ctx context.Context, fn string) ([]*storage.ACL, error) { + u, err := getUser(ctx) + if err != nil { + return nil, err + } + + fn = fs.getInternalPath(ctx, fn) + eosACLs, err := fs.c.ListACLs(ctx, u.Username, fn) + if err != nil { + return nil, err + } + + acls := []*storage.ACL{} + for _, a := range eosACLs { + acl := &storage.ACL{ + Target: a.Target, + Mode: fs.getACLMode(a.Mode), + Type: fs.getACLType(a.Type), + } + acls = append(acls, acl) + } + + return acls, nil +} + +func (fs *eosStorage) getACLType(aclType string) storage.ACLType { + switch aclType { + case "u": + return storage.ACLTypeUser + case "g": + return storage.ACLTypeGroup + default: + return storage.ACLTypeInvalid + } +} +func (fs *eosStorage) getACLMode(mode string) storage.ACLMode { + switch mode { + case "rx": + return storage.ACLModeReadOnly + case "rwx!d": + return storage.ACLModeReadWrite + default: + return storage.ACLModeInvalid + } +} + +func (fs *eosStorage) GetMD(ctx context.Context, fn string) (*storage.MD, error) { + u, err := getUser(ctx) + if err != nil { + return nil, err + } + + fn = fs.getInternalPath(ctx, fn) + eosFileInfo, err := fs.c.GetFileInfoByPath(ctx, u.Username, fn) + if err != nil { + return nil, err + } + fi := fs.convertToMD(ctx, eosFileInfo) + return fi, nil +} + +func (fs *eosStorage) ListFolder(ctx context.Context, fn string) ([]*storage.MD, error) { + u, err := getUser(ctx) + if err != nil { + return nil, errors.Wrap(err, "storage_eos: no user in ctx") + } + + fn = fs.getInternalPath(ctx, fn) + eosFileInfos, err := fs.c.List(ctx, u.Username, fn) + if err != nil { + return nil, errors.Wrap(err, "storage_eos: errong listing") + } + + finfos := []*storage.MD{} + for _, eosFileInfo := range eosFileInfos { + // filter out sys files + if !fs.showHiddenSys { + base := path.Base(eosFileInfo.File) + if hiddenReg.MatchString(base) { + continue + } + + } + finfos = append(finfos, fs.convertToMD(ctx, eosFileInfo)) + } + return finfos, nil +} + +func (fs *eosStorage) GetQuota(ctx context.Context, fn string) (int, int, error) { + u, err := getUser(ctx) + if err != nil { + return 0, 0, errors.Wrap(err, "storage_eos: no user in ctx") + } + fn = fs.getInternalPath(ctx, fn) + return fs.c.GetQuota(ctx, u.Username, fn) +} + +func (fs *eosStorage) CreateDir(ctx context.Context, fn string) error { + u, err := getUser(ctx) + if err != nil { + return errors.Wrap(err, "storage_eos: no user in ctx") + } + fn = fs.getInternalPath(ctx, fn) + return fs.c.CreateDir(ctx, u.Username, fn) +} + +func (fs *eosStorage) Delete(ctx context.Context, fn string) error { + u, err := getUser(ctx) + if err != nil { + return errors.Wrap(err, "storage_eos: no user in ctx") + } + fn = fs.getInternalPath(ctx, fn) + return fs.c.Remove(ctx, u.Username, fn) +} + +func (fs *eosStorage) Move(ctx context.Context, oldPath, newPath string) error { + u, err := getUser(ctx) + if err != nil { + return errors.Wrap(err, "storage_eos: no user in ctx") + } + oldPath = fs.getInternalPath(ctx, oldPath) + newPath = fs.getInternalPath(ctx, newPath) + return 
fs.c.Rename(ctx, u.Username, oldPath, newPath) +} + +func (fs *eosStorage) Download(ctx context.Context, fn string) (io.ReadCloser, error) { + u, err := getUser(ctx) + if err != nil { + return nil, errors.Wrap(err, "storage_eos: no user in ctx") + } + fn = fs.getInternalPath(ctx, fn) + return fs.c.Read(ctx, u.Username, fn) +} + +func (fs *eosStorage) Upload(ctx context.Context, fn string, r io.ReadCloser) error { + u, err := getUser(ctx) + if err != nil { + return errors.Wrap(err, "storage_eos: no user in ctx") + } + fn = fs.getInternalPath(ctx, fn) + return fs.c.Write(ctx, u.Username, fn, r) +} + +func (fs *eosStorage) ListRevisions(ctx context.Context, fn string) ([]*storage.Revision, error) { + u, err := getUser(ctx) + if err != nil { + return nil, errors.Wrap(err, "storage_eos: no user in ctx") + } + fn = fs.getInternalPath(ctx, fn) + eosRevisions, err := fs.c.ListVersions(ctx, u.Username, fn) + if err != nil { + return nil, errors.Wrap(err, "storage_eos: error listing versions") + } + revisions := []*storage.Revision{} + for _, eosRev := range eosRevisions { + rev := fs.convertToRevision(ctx, eosRev) + revisions = append(revisions, rev) + } + return revisions, nil +} + +func (fs *eosStorage) DownloadRevision(ctx context.Context, fn, revisionKey string) (io.ReadCloser, error) { + u, err := getUser(ctx) + if err != nil { + return nil, errors.Wrap(err, "storage_eos: no user in ctx") + } + fn = fs.getInternalPath(ctx, fn) + return fs.c.ReadVersion(ctx, u.Username, fn, revisionKey) +} + +func (fs *eosStorage) RestoreRevision(ctx context.Context, fn, revisionKey string) error { + u, err := getUser(ctx) + if err != nil { + return errors.Wrap(err, "storage_eos: no user in ctx") + } + fn = fs.getInternalPath(ctx, fn) + return fs.c.RollbackToVersion(ctx, u.Username, fn, revisionKey) +} + +func (fs *eosStorage) EmptyRecycle(ctx context.Context, fn string) error { + u, err := getUser(ctx) + if err != nil { + return errors.Wrap(err, "storage_eos: no user in ctx") + } + return fs.c.PurgeDeletedEntries(ctx, u.Username) +} + +func (fs *eosStorage) ListRecycle(ctx context.Context, fn string) ([]*storage.RecycleItem, error) { + u, err := getUser(ctx) + if err != nil { + return nil, errors.Wrap(err, "storage_eos: no user in ctx") + } + eosDeletedEntries, err := fs.c.ListDeletedEntries(ctx, u.Username) + if err != nil { + return nil, errors.Wrap(err, "storage_eos: error listing deleted entries") + } + recycleEntries := []*storage.RecycleItem{} + for _, entry := range eosDeletedEntries { + if !fs.showHiddenSys { + base := path.Base(entry.RestorePath) + if hiddenReg.MatchString(base) { + continue + } + + } + recycleItem := fs.convertToRecycleItem(ctx, entry) + recycleEntries = append(recycleEntries, recycleItem) + } + return recycleEntries, nil +} + +func (fs *eosStorage) RestoreRecycleItem(ctx context.Context, fn, key string) error { + u, err := getUser(ctx) + if err != nil { + return errors.Wrap(err, "storage_eos: no user in ctx") + } + return fs.c.RestoreDeletedEntry(ctx, u.Username, key) +} + +func (fs *eosStorage) convertToRecycleItem(ctx context.Context, eosDeletedItem *eosclient.DeletedEntry) *storage.RecycleItem { + recycleItem := &storage.RecycleItem{ + RestorePath: fs.removeNamespace(ctx, eosDeletedItem.RestorePath), + RestoreKey: eosDeletedItem.RestoreKey, + Size: eosDeletedItem.Size, + DelMtime: eosDeletedItem.DeletionMTime, + IsDir: eosDeletedItem.IsDir, + } + return recycleItem +} + +func (fs *eosStorage) convertToRevision(ctx context.Context, eosFileInfo *eosclient.FileInfo) 
*storage.Revision { + md := fs.convertToMD(ctx, eosFileInfo) + revision := &storage.Revision{ + RevKey: path.Base(md.Path), + Size: md.Size, + Mtime: md.Mtime, + IsDir: md.IsDir, + } + return revision +} +func (fs *eosStorage) convertToMD(ctx context.Context, eosFileInfo *eosclient.FileInfo) *storage.MD { + finfo := new(storage.MD) + finfo.ID = fmt.Sprintf("%d", eosFileInfo.Inode) + finfo.Path = fs.removeNamespace(ctx, eosFileInfo.File) + finfo.Mtime = eosFileInfo.MTime + finfo.IsDir = eosFileInfo.IsDir + finfo.Etag = eosFileInfo.ETag + finfo.Mime = mime.Detect(finfo.IsDir, finfo.Path) + finfo.Sys = fs.getEosMetadata(eosFileInfo) + finfo.Permissions = &storage.Permissions{Read: true, Write: true, Share: true} + finfo.Size = eosFileInfo.Size + return finfo +} + +type eosSysMetadata struct { + TreeSize uint64 + TreeCount uint64 + File string + Instance string +} + +func (fs *eosStorage) getEosMetadata(finfo *eosclient.FileInfo) map[string]interface{} { + sys := &eosSysMetadata{ + File: finfo.File, + Instance: finfo.Instance, + } + + if finfo.IsDir { + sys.TreeCount = finfo.TreeCount + sys.TreeSize = finfo.TreeSize + } + + return map[string]interface{}{"eos": sys} +} diff --git a/pkg/storage/fstable/fstable.go b/pkg/storage/fstable/fstable.go new file mode 100644 index 0000000000..4c8d863737 --- /dev/null +++ b/pkg/storage/fstable/fstable.go @@ -0,0 +1,46 @@ +package fstable + +import ( + "strings" + + "github.com/cernbox/reva/pkg/storage" +) + +type fsTable struct { + mounts map[string]storage.Mount +} + +func New() storage.FSTable { + return &fsTable{} +} + +func (fs *fsTable) AddMount(m storage.Mount) error { + fs.mounts[m.GetDir()] = m + return nil +} + +func (fs *fsTable) RemoveMount(m storage.Mount) error { + delete(fs.mounts, m.GetDir()) + return nil +} + +func (fs *fsTable) ListMounts() ([]storage.Mount, error) { + mounts := []storage.Mount{} + for _, v := range mounts { + mounts = append(mounts, v) + } + return mounts, nil +} + +func (fs *fsTable) GetMount(dir string) (storage.Mount, error) { + for k, v := range fs.mounts { + if strings.HasPrefix(dir, k) { + return v, nil + } + } + return nil, notFoundError(dir) +} + +type notFoundError string + +func (e notFoundError) Error() string { return string(e) } diff --git a/pkg/storage/local/local.go b/pkg/storage/local/local.go new file mode 100644 index 0000000000..367c34daaa --- /dev/null +++ b/pkg/storage/local/local.go @@ -0,0 +1,231 @@ +package local + +import ( + "context" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "strings" + + "github.com/cernbox/reva/pkg/storage" + "github.com/mitchellh/mapstructure" + "github.com/pkg/errors" +) + +type config struct { + Root string +} + +func parseConfig(m map[string]interface{}) (*config, error) { + c := &config{} + if err := mapstructure.Decode(m, c); err != nil { + return nil, err + } + return c, nil +} + +// New returns an implementation to of the storage.FS interface that talk to +// a local filesystem. 
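A usage sketch for the constructor that follows (the root path is only an example; the map is decoded with mapstructure, so the "root" key is all that matters here):

package main

import (
	"context"
	"fmt"

	"github.com/cernbox/reva/pkg/storage/local"
)

func main() {
	// "root" is decoded into config.Root; New creates the directory if it is missing.
	fs, err := local.New(map[string]interface{}{"root": "/var/tmp/reva-local"})
	if err != nil {
		panic(err)
	}

	ctx := context.Background()
	if err := fs.CreateDir(ctx, "/docs"); err != nil {
		fmt.Println("create dir:", err)
	}

	mds, err := fs.ListFolder(ctx, "/")
	if err != nil {
		panic(err)
	}
	for _, md := range mds {
		fmt.Println(md.Path, md.IsDir, md.Size)
	}
}
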
+func New(m map[string]interface{}) (storage.FS, error) { + c, err := parseConfig(m) + if err != nil { + return nil, err + } + + // create root if it does not exist + os.MkdirAll(c.Root, 0700) + + return &localFS{root: c.Root}, nil +} + +func (fs *localFS) addRoot(p string) string { + np := path.Join(fs.root, p) + return np +} + +func (fs *localFS) removeRoot(np string) string { + p := strings.TrimPrefix(np, fs.root) + if p == "" { + p = "/" + } + return p +} + +type localFS struct{ root string } + +func (fs *localFS) normalize(fi os.FileInfo, fn string) *storage.MD { + fn = fs.removeRoot(path.Join("/", fn)) + md := &storage.MD{ + IsDir: fi.IsDir(), + Path: fn, + Size: uint64(fi.Size()), + ID: "fileid-" + strings.TrimPrefix(fn, "/"), + Etag: fmt.Sprintf("%d", fi.ModTime().Unix()), + Permissions: &storage.Permissions{Read: true, Write: true, Share: true}, + Mtime: uint64(fi.ModTime().Unix()), + } + return md +} + +// GetPathByID returns the path pointed by the file id +// In this implementation the file id is that path of the file without the first slash +// thus the file id always points to the filename +func (fs *localFS) GetPathByID(ctx context.Context, id string) (string, error) { + return path.Join("/", strings.TrimPrefix(id, "fileid-")), nil +} + +func (fs *localFS) SetACL(ctx context.Context, path string, a *storage.ACL) error { + return notSupportedError("op not supported") +} + +func (fs *localFS) GetACL(ctx context.Context, path string, aclType storage.ACLType, target string) (*storage.ACL, error) { + return nil, notSupportedError("op not supported") +} + +func (fs *localFS) ListACLs(ctx context.Context, path string) ([]*storage.ACL, error) { + return nil, notSupportedError("op not supported") +} + +func (fs *localFS) UnsetACL(ctx context.Context, path string, a *storage.ACL) error { + return notSupportedError("op not supported") +} +func (fs *localFS) UpdateACL(ctx context.Context, path string, a *storage.ACL) error { + return notSupportedError("op not supported") +} + +func (fs *localFS) GetQuota(ctx context.Context, fn string) (int, int, error) { + return 0, 0, nil +} + +func (fs *localFS) CreateDir(ctx context.Context, fn string) error { + fn = fs.addRoot(fn) + err := os.Mkdir(fn, 0700) + if err != nil { + if os.IsNotExist(err) { + return notFoundError(fn) + } + return errors.Wrap(err, "localfs: error creating dir "+fn) + } + return nil +} + +func (fs *localFS) Delete(ctx context.Context, fn string) error { + fn = fs.addRoot(fn) + err := os.Remove(fn) + if err != nil { + if os.IsNotExist(err) { + return notFoundError(fn) + } + return errors.Wrap(err, "localfs: error deleting "+fn) + } + return nil +} + +func (fs *localFS) Move(ctx context.Context, oldName, newName string) error { + oldName = fs.addRoot(oldName) + newName = fs.addRoot(newName) + if err := os.Rename(oldName, newName); err != nil { + return errors.Wrap(err, "localfs: error moving "+oldName+" to "+newName) + } + return nil +} + +func (fs *localFS) GetMD(ctx context.Context, fn string) (*storage.MD, error) { + fn = fs.addRoot(fn) + md, err := os.Stat(fn) + if err != nil { + if os.IsNotExist(err) { + return nil, notFoundError(fn) + } + return nil, errors.Wrap(err, "localfs: error stating "+fn) + } + + return fs.normalize(md, fn), nil +} + +func (fs *localFS) ListFolder(ctx context.Context, fn string) ([]*storage.MD, error) { + fn = fs.addRoot(fn) + mds, err := ioutil.ReadDir(fn) + if err != nil { + if os.IsNotExist(err) { + return nil, notFoundError(fn) + } + return nil, errors.Wrap(err, "localfs: error listing 
"+fn) + } + + finfos := []*storage.MD{} + for _, md := range mds { + finfos = append(finfos, fs.normalize(md, path.Join(fn, md.Name()))) + } + return finfos, nil +} + +func (fs *localFS) Upload(ctx context.Context, fn string, r io.ReadCloser) error { + fn = fs.addRoot(fn) + + // we cannot rely on /tmp as it can live in another partition and we can + // hit invalid cross-device link errors, so we create the tmp file in the same directory + // the file is supposed to be written. + tmp, err := ioutil.TempFile(path.Dir(fn), "._reva_atomic_upload") + if err != nil { + return errors.Wrap(err, "localfs: error creating tmp fn at "+path.Dir(fn)) + } + + _, err = io.Copy(tmp, r) + if err != nil { + return errors.Wrap(err, "localfs: eror writing to tmp file "+tmp.Name()) + } + + // TODO(labkode): make sure rename is atomic, missing fsync ... + if err := os.Rename(tmp.Name(), fn); err != nil { + return errors.Wrap(err, "localfs: error renaming from "+tmp.Name()+" to "+fn) + } + + return nil +} + +func (fs *localFS) Download(ctx context.Context, fn string) (io.ReadCloser, error) { + fn = fs.addRoot(fn) + r, err := os.Open(fn) + if err != nil { + if os.IsNotExist(err) { + return nil, notFoundError(fn) + } + return nil, errors.Wrap(err, "localfs: error reading "+fn) + } + return r, nil +} + +func (fs *localFS) ListRevisions(ctx context.Context, path string) ([]*storage.Revision, error) { + return nil, notSupportedError("list revisions") +} + +func (fs *localFS) DownloadRevision(ctx context.Context, path, revisionKey string) (io.ReadCloser, error) { + return nil, notSupportedError("download revision") +} + +func (fs *localFS) RestoreRevision(ctx context.Context, path, revisionKey string) error { + return notSupportedError("restore revision") +} + +func (fs *localFS) EmptyRecycle(ctx context.Context, path string) error { + return notSupportedError("empty recycle") +} + +func (fs *localFS) ListRecycle(ctx context.Context, path string) ([]*storage.RecycleItem, error) { + return nil, notSupportedError("list recycle") +} + +func (fs *localFS) RestoreRecycleItem(ctx context.Context, fn, restoreKey string) error { + return notSupportedError("restore recycle") +} + +type notSupportedError string +type notFoundError string + +func (e notSupportedError) Error() string { return string(e) } +func (e notSupportedError) IsNotSupported() {} +func (e notFoundError) Error() string { return string(e) } +func (e notFoundError) IsNotFound() {} diff --git a/pkg/storage/mount/mount.go b/pkg/storage/mount/mount.go new file mode 100644 index 0000000000..c12c7eff1c --- /dev/null +++ b/pkg/storage/mount/mount.go @@ -0,0 +1,25 @@ +package mount + +import ( + "github.com/cernbox/reva/pkg/storage" +) + +type mount struct { + name, dir string + fs storage.FS + opts *storage.MountOptions +} + +func New(name, dir string, fs storage.FS) storage.Mount { + return &mount{ + name: name, + dir: dir, + fs: fs, + opts: nil, + } +} + +func (m *mount) GetName() string { return m.name } +func (m *mount) GetDir() string { return m.dir } +func (m *mount) GetFS() storage.FS { return m.fs } +func (m *mount) GetOptions() *storage.MountOptions { return m.opts } diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go new file mode 100644 index 0000000000..51291949d5 --- /dev/null +++ b/pkg/storage/storage.go @@ -0,0 +1,128 @@ +package storage + +import ( + "context" + "io" +) + +// ACLMode represents the mode for the ACL (read, write, ...). +type ACLMode uint32 + +// ACLType represents the type of the ACL (user, group, ...). 
+type ACLType uint32 + +const ( + // ACLModeInvalid specifies an invalid permission. + ACLModeInvalid = ACLMode(0) // default is invalid. + // ACLModeReadOnly specifies read permissions. + ACLModeReadOnly = ACLMode(1) // 1 + // ACLModeWrite specifies write-permissions. + ACLModeReadWrite = ACLMode(2) // 2 + + // ACLTypeInvalid specifies that the acl is invalid + ACLTypeInvalid ACLType = ACLType(0) + // ACLTypeUser specifies that the acl is set for an individual user. + ACLTypeUser ACLType = ACLType(1) + // ACLTypeGroup specifies that the acl is set for a group. + ACLTypeGroup ACLType = ACLType(2) +) + +// FS is the interface to implement access to the storage. +type FS interface { + CreateDir(ctx context.Context, fn string) error + Delete(ctx context.Context, fn string) error + Move(ctx context.Context, old, new string) error + GetMD(ctx context.Context, fn string) (*MD, error) + ListFolder(ctx context.Context, fn string) ([]*MD, error) + Upload(ctx context.Context, fn string, r io.ReadCloser) error + Download(ctx context.Context, fn string) (io.ReadCloser, error) + ListRevisions(ctx context.Context, fn string) ([]*Revision, error) + DownloadRevision(ctx context.Context, fn, key string) (io.ReadCloser, error) + RestoreRevision(ctx context.Context, fn, key string) error + ListRecycle(ctx context.Context, fn string) ([]*RecycleItem, error) + RestoreRecycleItem(ctx context.Context, fn, key string) error + EmptyRecycle(ctx context.Context, fn string) error + GetPathByID(ctx context.Context, id string) (string, error) + SetACL(ctx context.Context, fn string, a *ACL) error + UnsetACL(ctx context.Context, fn string, a *ACL) error + UpdateACL(ctx context.Context, fn string, a *ACL) error + ListACLs(ctx context.Context, fn string) ([]*ACL, error) + GetACL(ctx context.Context, fn string, aclType ACLType, target string) (*ACL, error) + GetQuota(ctx context.Context, fn string) (int, int, error) +} + +// MD represents the metadata about a file/directory. +type MD struct { + ID string + Path string + Size uint64 + Mtime uint64 + IsDir bool + Etag string + Checksum string + Mime string + Permissions *Permissions + Sys map[string]interface{} +} + +type Permissions struct { + Read, Write, Share bool +} + +// ACL represents an ACL to persist on the storage. +type ACL struct { + Target string + Type ACLType + Mode ACLMode +} + +// RecycleItem represents an entry in the recycle bin of the user. +type RecycleItem struct { + RestorePath string + RestoreKey string + Size uint64 + DelMtime uint64 + IsDir bool +} + +// Revision represents a version of the file in the past. +type Revision struct { + RevKey string + Size uint64 + Mtime uint64 + IsDir bool +} + +// Broker is the interface that storage brokers implement +// for discovering storage providers +type Broker interface { + FindProvider(ctx context.Context, fn string) (*ProviderInfo, error) +} + +// ProviderInfo contains the information +// about a StorageProvider +type ProviderInfo struct { + Location string +} + +// FSTable contains descriptive information about the various file systems. +// It follows the same logic as unix fstab. +type FSTable interface { + AddMount(m Mount) error + ListMounts() ([]Mount, error) + RemoveMount(m Mount) error + GetMount(dir string) (Mount, error) +} + +// Mount contains the information on how to mount a filesystem. +type Mount interface { + GetName() string + GetDir() string + GetFS() FS + GetOptions() *MountOptions +} + +// MountOptions are the options for the mount. 
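The mount, fstable and vfs packages in this patch compose the interfaces above into a single namespace. A wiring sketch follows; the mount point, root path and ACL target are invented, and note that fstable.New as written leaves its mounts map nil, so AddMount would need that map initialized before this could run:

package main

import (
	"context"
	"fmt"

	"github.com/cernbox/reva/pkg/storage"
	"github.com/cernbox/reva/pkg/storage/fstable"
	"github.com/cernbox/reva/pkg/storage/local"
	"github.com/cernbox/reva/pkg/storage/mount"
	"github.com/cernbox/reva/pkg/storage/vfs"
	"github.com/pkg/errors"
)

func main() {
	home, err := local.New(map[string]interface{}{"root": "/var/tmp/reva-home"})
	if err != nil {
		panic(err)
	}

	table := fstable.New()
	// GetMount matches by prefix, so paths under /home resolve to this mount.
	if err := table.AddMount(mount.New("home", "/home", home)); err != nil {
		panic(err)
	}

	root := vfs.New(table)
	ctx := context.Background()
	if err := root.CreateDir(ctx, "/home/docs"); err != nil {
		fmt.Println("create dir:", err)
	}

	// The local driver does not implement ACLs; it reports this through its
	// IsNotSupported marker method, reachable after unwrapping the vfs error.
	err = root.SetACL(ctx, "/home/docs", &storage.ACL{
		Target: "physics-team",
		Type:   storage.ACLTypeGroup,
		Mode:   storage.ACLModeReadOnly,
	})
	if _, ok := errors.Cause(err).(interface{ IsNotSupported() }); ok {
		fmt.Println("ACLs are not supported on this mount")
	}
}
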
+type MountOptions struct { + ForceReadOnly bool +} diff --git a/pkg/storage/vfs/vfs.go b/pkg/storage/vfs/vfs.go new file mode 100644 index 0000000000..8a886f82d9 --- /dev/null +++ b/pkg/storage/vfs/vfs.go @@ -0,0 +1,304 @@ +package vfs + +import ( + "context" + "io" + "path" + "strings" + + "github.com/cernbox/reva/pkg/storage" + "github.com/pkg/errors" +) + +type vfs struct { + fsTable storage.FSTable +} + +func New(fsTable storage.FSTable) storage.FS { + fs := &vfs{fsTable: fsTable} + return fs +} + +func validate(fn string) error { + if !strings.HasPrefix(fn, "/") { + return invalidFilenameError(fn) + } + return nil +} + +// findFS finds the mount for the given filename. +// it assumes the filename starts with slash and the path is +// already cleaned. The cases to handle are the following: +// - / +// - /docs +// - /docs/ +// - /docs/one +func (fs *vfs) findFS(fn string) (storage.FS, string, error) { + if err := validate(fn); err != nil { + return nil, "", errors.Wrap(err, "vfs: invalid fn") + } + + mount, err := fs.fsTable.GetMount(fn) + if err != nil { + // if mount is not found and fn is /, return list of mounts + return nil, "", errors.Wrap(err, "vfs: error finding mount") + } + + thefs := mount.GetFS() + fsfn := path.Join("/", strings.TrimPrefix(fn, mount.GetDir())) + return thefs, fsfn, nil +} + +func (vfs *vfs) CreateDir(ctx context.Context, fn string) error { + fs, fsfn, err := vfs.findFS(fn) + if err != nil { + return errors.Wrap(err, "vfs: fs not found") + } + + if err := fs.CreateDir(ctx, fsfn); err != nil { + return errors.Wrap(err, "vfs: error creating dir") + } + return nil +} + +func (vfs *vfs) Delete(ctx context.Context, fn string) error { + fs, fsfn, err := vfs.findFS(fn) + if err != nil { + return errors.Wrap(err, "vfs: fs not found") + } + + if err := fs.Delete(ctx, fsfn); err != nil { + return errors.Wrap(err, "vfs: error deleting file") + } + return nil +} + +func (vfs *vfs) Download(ctx context.Context, fn string) (io.ReadCloser, error) { + fs, fsfn, err := vfs.findFS(fn) + if err != nil { + return nil, errors.Wrap(err, "vfs: fs not found") + } + + rc, err := fs.Download(ctx, fsfn) + if err != nil { + return nil, errors.Wrap(err, "vfs: error downloading file") + } + return rc, nil +} + +func (vfs *vfs) GetPathByID(ctx context.Context, id string) (string, error) { + return "", errors.New("todo") +} + +func (vfs *vfs) SetACL(ctx context.Context, fn string, a *storage.ACL) error { + fs, fsfn, err := vfs.findFS(fn) + if err != nil { + return errors.Wrap(err, "vfs: fs not found") + } + + if err = fs.SetACL(ctx, fsfn, a); err != nil { + return errors.Wrap(err, "vfs: error setting acl") + } + return nil +} + +func (vfs *vfs) UnsetACL(ctx context.Context, fn string, a *storage.ACL) error { + fs, fsfn, err := vfs.findFS(fn) + if err != nil { + return errors.Wrap(err, "vfs: fs not found") + } + if err = fs.UnsetACL(ctx, fsfn, a); err != nil { + return errors.Wrap(err, "vfs: error unsetting acl") + } + return nil +} + +func (vfs *vfs) UpdateACL(ctx context.Context, fn string, a *storage.ACL) error { + fs, fsfn, err := vfs.findFS(fn) + if err != nil { + return errors.Wrap(err, "vfs: fs not found") + } + if err = fs.UnsetACL(ctx, fsfn, a); err != nil { + return errors.Wrap(err, "vfs: error updating acl") + } + return nil +} + +func (vfs *vfs) GetACL(ctx context.Context, fn string, aclType storage.ACLType, target string) (*storage.ACL, error) { + fs, fsfn, err := vfs.findFS(fn) + if err != nil { + return nil, errors.Wrap(err, "vfs: fs not found") + } + acl, err := 
fs.GetACL(ctx, fsfn, aclType, target) + if err != nil { + return nil, errors.Wrap(err, "vfs: error getting acl") + } + return acl, nil +} + +func (vfs *vfs) ListACLs(ctx context.Context, fn string) ([]*storage.ACL, error) { + fs, fsfn, err := vfs.findFS(fn) + if err != nil { + return nil, errors.Wrap(err, "vfs: fs not found") + } + acls, err := fs.ListACLs(ctx, fsfn) + if err != nil { + return nil, errors.Wrap(err, "vfs: error listing acls") + } + return acls, nil +} + +func (vfs *vfs) GetMD(ctx context.Context, fn string) (*storage.MD, error) { + fs, fsfn, err := vfs.findFS(fn) + if err != nil { + return nil, errors.Wrap(err, "vfs: fs not found") + } + + md, err := fs.GetMD(ctx, fsfn) + if err != nil { + return nil, errors.Wrap(err, "vfs: error getting md") + } + return md, nil +} + +func (vfs *vfs) ListFolder(ctx context.Context, fn string) ([]*storage.MD, error) { + fs, fsfn, err := vfs.findFS(fn) + if err != nil { + return nil, errors.Wrap(err, "vfs: fs not found") + } + + mds, err := fs.ListFolder(ctx, fsfn) + if err != nil { + return nil, errors.Wrap(err, "vfs: error listing folder") + } + return mds, nil +} + +func (vfs *vfs) GetQuota(ctx context.Context, fn string) (int, int, error) { + fs, fsfn, err := vfs.findFS(fn) + if err != nil { + return 0, 0, errors.Wrap(err, "vfs: fs not found") + } + a, b, err := fs.GetQuota(ctx, fsfn) + if err != nil { + return 0, 0, errors.Wrap(err, "vfs: error getting quota") + } + return a, b, nil + +} + +func (vfs *vfs) Move(ctx context.Context, oldFn, newFn string) error { + oldFS, oldFn, err := vfs.findFS(oldFn) + if err != nil { + return errors.Wrap(err, "vfs: fs not found") + } + + newFS, newFn, err := vfs.findFS(newFn) + if err != nil { + return errors.Wrap(err, "vfs: fs not found") + } + + if oldFS != newFS { + return errors.New("cross storage move not supported") + } + + if err = oldFS.Move(ctx, oldFn, newFn); err != nil { + return errors.Wrap(err, "vfs: error moving file") + } + return nil +} + +func (vfs *vfs) Upload(ctx context.Context, fn string, r io.ReadCloser) error { + fs, fsfn, err := vfs.findFS(fn) + if err != nil { + return errors.Wrap(err, "vfs: fs not found") + } + + if err = fs.Upload(ctx, fsfn, r); err != nil { + return errors.Wrap(err, "vfs: error uploading file") + } + return nil +} + +func (vfs *vfs) ListRevisions(ctx context.Context, fn string) ([]*storage.Revision, error) { + fs, fsfn, err := vfs.findFS(fn) + if err != nil { + return nil, errors.Wrap(err, "vfs: fs not found") + } + + revs, err := fs.ListRevisions(ctx, fsfn) + if err != nil { + return nil, errors.Wrap(err, "vfs: error listing revs") + } + return revs, nil +} + +func (vfs *vfs) DownloadRevision(ctx context.Context, fn, revisionKey string) (io.ReadCloser, error) { + fs, fsfn, err := vfs.findFS(fn) + if err != nil { + return nil, errors.Wrap(err, "vfs: fs not found") + } + + rc, err := fs.Download(ctx, fsfn) + if err != nil { + return nil, errors.Wrap(err, "vfs: error downloading rev") + } + return rc, nil +} + +func (vfs *vfs) RestoreRevision(ctx context.Context, fn, revisionKey string) error { + fs, fsfn, err := vfs.findFS(fn) + if err != nil { + return errors.Wrap(err, "vfs: fs not found") + } + + if err = fs.RestoreRevision(ctx, fsfn, revisionKey); err != nil { + return errors.Wrap(err, "vfs: error restoring rev") + } + return nil +} + +func (vfs *vfs) EmptyRecycle(ctx context.Context, fn string) error { + fs, fsfn, err := vfs.findFS(fn) + if err != nil { + return errors.Wrap(err, "vfs: fs not found") + } + + if err = fs.EmptyRecycle(ctx, fsfn); err != nil 
{ + return errors.Wrap(err, "vfs: error emptying recycle") + } + return nil +} + +func (vfs *vfs) ListRecycle(ctx context.Context, fn string) ([]*storage.RecycleItem, error) { + fs, fsfn, err := vfs.findFS(fn) + if err != nil { + return nil, errors.Wrap(err, "vfs: fs not found") + } + + items, err := fs.ListRecycle(ctx, fsfn) + if err != nil { + return nil, errors.Wrap(err, "vfs: error listing recycle") + } + return items, nil +} + +func (vfs *vfs) RestoreRecycleItem(ctx context.Context, fsfn, key string) error { + fs, fsfn, err := vfs.findFS(key) + if err != nil { + return errors.Wrap(err, "vfs: fs not found") + } + + if err = fs.RestoreRecycleItem(ctx, fsfn, key); err != nil { + return errors.Wrap(err, "vfs: error restoring recycle item") + } + return nil +} + +type invalidFilenameError string +type mountNotFoundError string + +func (e invalidFilenameError) Error() string { return string(e) } +func (e invalidFilenameError) IsNotFound() {} +func (e mountNotFoundError) Error() string { return string(e) } +func (e mountNotFoundError) IsNotFound() {} diff --git a/pkg/token/manager/demo/demo.go b/pkg/token/manager/demo/demo.go new file mode 100644 index 0000000000..c7d1c00323 --- /dev/null +++ b/pkg/token/manager/demo/demo.go @@ -0,0 +1,69 @@ +package demo + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/gob" + + "github.com/cernbox/reva/pkg/token" +) + +type manager struct { + vault map[string]token.Claims +} + +func New() token.Manager { + v := getVault() + return &manager{vault: v} +} + +func (m *manager) ForgeToken(ctx context.Context, claims token.Claims) (string, error) { + encoded, err := encode(claims) + if err != nil { + return "", err + } + return encoded, nil +} + +func (m *manager) DismantleToken(ctx context.Context, token string) (token.Claims, error) { + decoded, err := decode(token) + if err != nil { + return nil, err + } + return decoded, nil +} + +func getVault() map[string]token.Claims { + return nil +} + +// from https://stackoverflow.com/questions/28020070/golang-serialize-and-deserialize-back +// go binary encoder +func encode(m token.Claims) (string, error) { + b := bytes.Buffer{} + e := gob.NewEncoder(&b) + err := e.Encode(m) + if err != nil { + return "", err + } + return base64.StdEncoding.EncodeToString(b.Bytes()), nil +} + +// from https://stackoverflow.com/questions/28020070/golang-serialize-and-deserialize-back +// go binary decoder +func decode(str string) (token.Claims, error) { + m := token.Claims{} + by, err := base64.StdEncoding.DecodeString(str) + if err != nil { + return nil, err + } + b := bytes.Buffer{} + b.Write(by) + d := gob.NewDecoder(&b) + err = d.Decode(&m) + if err != nil { + return nil, err + } + return m, nil +} diff --git a/pkg/token/manager/demo/demo_test.go b/pkg/token/manager/demo/demo_test.go new file mode 100644 index 0000000000..8832e33771 --- /dev/null +++ b/pkg/token/manager/demo/demo_test.go @@ -0,0 +1,50 @@ +package demo + +import ( + "context" + //"fmt" + "testing" + + "github.com/cernbox/reva/pkg/token" +) + +func TestEncodeDecode(t *testing.T) { + ctx := context.Background() + m := New() + groups := []string{"radium-lovers"} + claims := token.Claims{ + "username": "marie", + "groups": groups, + "display_name": "Marie Curie", + "mail": "marie@example.org", + } + + encoded, err := m.ForgeToken(ctx, claims) + if err != nil { + t.Fatal(err) + } + + decodedClaims, err := m.DismantleToken(ctx, encoded) + if err != nil { + t.Fatal(err) + } + + if claims["username"] != decodedClaims["username"] { + t.Fatalf("username claims 
differ: expected=%s got=%s", claims["username"], decodedClaims["username"]) + } + if claims["display_name"] != decodedClaims["display_name"] { + t.Fatalf("display_name claims differ: expected=%s got=%s", claims["display_name"], decodedClaims["display_name"]) + } + if claims["mail"] != decodedClaims["mail"] { + t.Fatalf("mail claims differ: expected=%s got=%s", claims["mail"], decodedClaims["mail"]) + } + + decodedGroups, ok := decodedClaims["groups"].([]string) + if !ok { + t.Fatal("groups key in decoded claims is not []string") + } + + if len(groups) != len(groups) { + t.Fatalf("groups claims differ in length: expected=%d got=%d", len(groups), len(decodedGroups)) + } +} diff --git a/pkg/token/manager/jwt/jwt.go b/pkg/token/manager/jwt/jwt.go new file mode 100644 index 0000000000..032f99ee24 --- /dev/null +++ b/pkg/token/manager/jwt/jwt.go @@ -0,0 +1,225 @@ +package jwt + +import ( + "context" + + "github.com/cernbox/reva/pkg/token" + + "github.com/dgrijalva/jwt-go" + "github.com/mitchellh/mapstructure" + "github.com/pkg/errors" +) + +type config struct { + Secret string `mapstructure:"secret"` +} + +func parseConfig(m map[string]interface{}) (*config, error) { + c := &config{} + if err := mapstructure.Decode(m, c); err != nil { + return nil, err + } + return c, nil +} + +// New returns an implementation of the token manager that uses JWT as tokens. +func New(m map[string]interface{}) (token.Manager, error) { + c, err := parseConfig(m) + if err != nil { + return nil, err + } + return &manager{secret: c.Secret}, nil +} + +type manager struct { + secret string +} + +func (tm *manager) ForgeToken(ctx context.Context, claims token.Claims) (string, error) { + jwtClaims := jwt.MapClaims(claims) + token := jwt.NewWithClaims(jwt.GetSigningMethod("HS256"), jwtClaims) + signedToken, err := token.SignedString([]byte(tm.secret)) + if err != nil { + return "", errors.Wrapf(err, "jwt: error signing token with claims=%+v", jwtClaims) + } + return signedToken, nil +} + +func (tm *manager) DismantleToken(ctx context.Context, t string) (token.Claims, error) { + jwtToken, err := jwt.Parse(t, func(token *jwt.Token) (interface{}, error) { + return []byte(tm.secret), nil + }) + + if err != nil { + return nil, errors.Wrap(err, "jwt: error parsing token") + } + if !jwtToken.Valid { + return nil, errors.Wrap(err, "jwt: token is invalid") + + } + + jwtClaims := jwtToken.Claims.(jwt.MapClaims) + claims := token.Claims(jwtClaims) + return claims, nil + +} + +/* +func (tm *manager) ForgeUserToken(ctx context.Context, user *api.User) (string, error) { + token := jwt.New(jwt.GetSigningMethod("HS256")) + claims := token.Claims.(jwt.MapClaims) + claims["account_id"] = user.AccountId + claims["display_name"] = user.DisplayName + claims["groups"] = user.Groups + claims["exp"] = time.Now().Add(time.Second * time.Duration(3600)) + tokenString, err := token.SignedString([]byte(tm.secret)) + if err != nil { + l.Error("", zap.Error(err)) + return "", err + } + return tokenString, nil +} + +func (tm *manager) DismantleUserToken(ctx context.Context, token string) (*api.User, error) { + l := ctx_zap.Extract(ctx) + rawToken, err := jwt.Parse(token, func(token *jwt.Token) (interface{}, error) { + return []byte(tm.secret), nil + }) + if err != nil { + l.Error("invalid token", zap.Error(err), zap.String("token", token)) + return nil, err + } + if !rawToken.Valid { + l.Error("invalid token", zap.Error(err), zap.String("token", token)) + return nil, err + + } + + claims := rawToken.Claims.(jwt.MapClaims) + accountID, ok := 
claims["account_id"].(string) + if !ok { + return nil, errors.New("account_id claim is not a string") + } + + displayName, _ := claims["display_name"].(string) // no displayname is not an error + + rawGroups, ok := claims["groups"].([]interface{}) + if !ok { + return nil, errors.New("groups claim is not a []interface{}") + } + groups := []string{} + for _, g := range rawGroups { + group, ok := g.(string) + if !ok { + err := errors.New(fmt.Sprintf("group %+v can not be casted to string", g)) + l.Error("", zap.Error(err)) + return nil, err + } + groups = append(groups, group) + } + + user := &api.User{ + AccountId: accountID, + Groups: groups, + DisplayName: displayName, + } + return user, nil +} + +func (tm *manager) ForgePublicLinkToken(ctx context.Context, pl *api.PublicLink) (string, error) { + l := ctx_zap.Extract(ctx) + token := jwt.New(jwt.GetSigningMethod("HS256")) + claims := token.Claims.(jwt.MapClaims) + claims["token"] = pl.Token + claims["owner"] = pl.OwnerId + claims["id"] = pl.Id + claims["path"] = pl.Path + claims["protected"] = pl.Protected + claims["expires"] = pl.Expires + claims["read_only"] = pl.ReadOnly + claims["mtime"] = pl.Mtime + claims["item_type"] = pl.ItemType + claims["share_name"] = pl.Name + claims["exp"] = time.Now().Add(time.Second * time.Duration(3600)) + tokenString, err := token.SignedString([]byte(tm.secret)) + if err != nil { + l.Error("", zap.Error(err)) + return "", err + } + return tokenString, nil +} + +func (tm *manager) DismantlePublicLinkToken(ctx context.Context, token string) (*api.PublicLink, error) { + l := ctx_zap.Extract(ctx) + rawToken, err := jwt.Parse(token, func(token *jwt.Token) (interface{}, error) { + return []byte(tm.secret), nil + }) + if err != nil { + l.Error("invalid token", zap.Error(err), zap.String("token", token)) + return nil, err + } + if !rawToken.Valid { + l.Error("invalid token", zap.Error(err), zap.String("token", token)) + return nil, err + + } + + // + //"exp": "2018-07-24T10:11:11.827901148+02:00", + // "expires": 0, + // "id": "103", + // "item_type": 0, + // "mtime": 1532362779, + // "owner": "gonzalhu", + // "path": "oldhome:22510091102060544", + // "protected": false, + // "read_only": true, + // "token": "fgDsc2WD8F2qNfH" + // + claims := rawToken.Claims.(jwt.MapClaims) + token, ok := claims["token"].(string) + if !ok { + return nil, errors.New("token claim is not a string") + } + owner, ok := claims["owner"].(string) + if !ok { + return nil, errors.New("owner claim is not a string") + } + readOnly, ok := claims["read_only"].(bool) + if !ok { + return nil, errors.New("read_only claim is not a bool") + } + path, ok := claims["path"].(string) + if !ok { + return nil, errors.New("path claim is not a string") + } + protected, ok := claims["protected"].(bool) + if !ok { + return nil, errors.New("protected claim is not a bool") + } + mtime, ok := claims["mtime"].(float64) + if !ok { + return nil, errors.New("mtime claim is not a float64") + } + itemType, ok := claims["item_type"].(float64) + if !ok { + return nil, errors.New("item_type claim is not a float64") + } + shareName, ok := claims["share_name"].(string) + if !ok { + return nil, errors.New("share_name claim is not a string") + } + + pl := &api.PublicLink{ + Token: token, + OwnerId: owner, + ReadOnly: readOnly, + Path: path, + Protected: protected, + Mtime: uint64(mtime), + ItemType: api.PublicLink_ItemType(itemType), + Name: shareName, + } + return pl, nil +} +*/ diff --git a/pkg/token/token.go b/pkg/token/token.go new file mode 100644 index 
0000000000..8d5e66b845 --- /dev/null +++ b/pkg/token/token.go @@ -0,0 +1,22 @@ +package token + +import ( + "context" +) + +// Claims is the map of attributes to encode into a token +type Claims map[string]interface{} + +// Manager is the interface to implement to sign and verify tokens +type Manager interface { + ForgeToken(ctx context.Context, claims Claims) (string, error) + DismantleToken(ctx context.Context, token string) (Claims, error) +} + +/* + ForgeUserToken(ctx context.Context, user *User) (string, error) + DismantleUserToken(ctx context.Context, token string) (*User, error) + + ForgePublicLinkToken(ctx context.Context, pl *PublicLink) (string, error) + DismantlePublicLinkToken(ctx context.Context, token string) (*PublicLink, error) +*/ diff --git a/pkg/user/manager/demo/demo.go b/pkg/user/manager/demo/demo.go new file mode 100644 index 0000000000..80b32047e1 --- /dev/null +++ b/pkg/user/manager/demo/demo.go @@ -0,0 +1,72 @@ +package demo + +import ( + "context" + + "github.com/cernbox/reva/pkg/user" +) + +type manager struct { + catalog map[string]*user.User +} + +func New(m map[string]interface{}) (user.Manager, error) { + cat := getUsers() + return &manager{catalog: cat}, nil +} + +func (m *manager) GetUser(ctx context.Context, username string) (*user.User, error) { + if user, ok := m.catalog[username]; ok { + return user, nil + } + return nil, userNotFoundError(username) +} + +func (m *manager) GetUserGroups(ctx context.Context, username string) ([]string, error) { + user, err := m.GetUser(ctx, username) + if err != nil { + return nil, err + } + return user.Groups, nil +} + +func (m *manager) IsInGroup(ctx context.Context, username, group string) (bool, error) { + user, err := m.GetUser(ctx, username) + if err != nil { + return false, err + } + + for _, g := range user.Groups { + if group == g { + return true, nil + } + } + return false, nil +} + +type userNotFoundError string + +func (e userNotFoundError) Error() string { return string(e) } + +func getUsers() map[string]*user.User { + return map[string]*user.User{ + "einstein": &user.User{ + Username: "einstein", + Groups: []string{"sailing-lovers", "violin-haters"}, + Mail: "einstein@example.org", + DisplayName: "Albert Einstein", + }, + "marie": &user.User{ + Username: "marie", + Groups: []string{"radium-lovers", "polonium-lovers"}, + Mail: "marie@example.org", + DisplayName: "Marie Curie", + }, + "richard": &user.User{ + Username: "richard", + Groups: []string{"quantum-lovers", "philosophy-haters"}, + Mail: "richard@example.org", + DisplayName: "Richard Feynman", + }, + } +} diff --git a/pkg/user/user.go b/pkg/user/user.go new file mode 100644 index 0000000000..22d6f6639c --- /dev/null +++ b/pkg/user/user.go @@ -0,0 +1,44 @@ +package user + +import ( + "context" +) + +type key int + +const userKey key = iota + +// User represents a userof the system. +type User struct { + Username string + Groups []string + Mail string + DisplayName string +} + +// ContextGetUser returns the user if set in the given context. +func ContextGetUser(ctx context.Context) (*User, bool) { + u, ok := ctx.Value(userKey).(*User) + return u, ok +} + +// ContextMustGetUser panics if user it not in context. +func ContextMustGetUser(ctx context.Context) *User { + u, ok := ContextGetUser(ctx) + if !ok { + panic("user not found in context") + } + return u +} + +// ContextSetUser stores the user in the context. 
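Together with the demo user manager above, these context helpers (including ContextSetUser, declared just below) let request handlers carry the authenticated user around; a small sketch using one of the hard-coded demo users:

package main

import (
	"context"
	"fmt"

	"github.com/cernbox/reva/pkg/user"
	userdemo "github.com/cernbox/reva/pkg/user/manager/demo"
)

func main() {
	mgr, err := userdemo.New(nil)
	if err != nil {
		panic(err)
	}

	ctx := context.Background()
	u, err := mgr.GetUser(ctx, "einstein")
	if err != nil {
		panic(err)
	}

	// Stash the user in the context; downstream code reads it back with
	// ContextGetUser, or ContextMustGetUser when it is required to be there.
	ctx = user.ContextSetUser(ctx, u)
	if current, ok := user.ContextGetUser(ctx); ok {
		fmt.Println(current.Username, current.DisplayName, current.Groups)
	}
}
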
+func ContextSetUser(ctx context.Context, u *User) context.Context { + return context.WithValue(ctx, userKey, u) +} + +// Manager is the interface to implement to manipulate users. +type Manager interface { + GetUser(ctx context.Context, username string) (*User, error) + GetUserGroups(ctx context.Context, username string) ([]string, error) + IsInGroup(ctx context.Context, username, group string) (bool, error) +} diff --git a/services/grpcsvc/appprovidersvc/appprovidersvc.go b/services/grpcsvc/appprovidersvc/appprovidersvc.go new file mode 100644 index 0000000000..d6bcfe634e --- /dev/null +++ b/services/grpcsvc/appprovidersvc/appprovidersvc.go @@ -0,0 +1,85 @@ +package appprovidersvc + +import ( + "context" + "fmt" + + rpcpb "github.com/cernbox/go-cs3apis/cs3/rpc" + + appproviderv0alphapb "github.com/cernbox/go-cs3apis/cs3/appprovider/v0alpha" + "github.com/cernbox/reva/pkg/app" + "github.com/cernbox/reva/pkg/app/provider/demo" + "github.com/cernbox/reva/pkg/err" + "github.com/cernbox/reva/pkg/log" + "github.com/mitchellh/mapstructure" +) + +var logger = log.New("appregistry") +var errors = err.New("appregistry") + +type service struct { + provider app.Provider +} +type config struct { + Driver string `mapstructure:"driver"` + Demo map[string]interface{} `mapstructure:"demo"` +} + +// New creates a new StorageRegistryService +func New(m map[string]interface{}) (appproviderv0alphapb.AppProviderServiceServer, error) { + + c, err := parseConfig(m) + if err != nil { + return nil, errors.Wrap(err, "unable to parse config") + } + + provider, err := getProvider(c) + if err != nil { + return nil, errors.Wrap(err, "unable to init registry") + } + + service := &service{ + provider: provider, + } + + return service, nil +} + +func parseConfig(m map[string]interface{}) (*config, error) { + c := &config{} + if err := mapstructure.Decode(m, c); err != nil { + return nil, err + } + return c, nil +} + +func getProvider(c *config) (app.Provider, error) { + switch c.Driver { + case "demo": + return demo.New(c.Demo) + default: + return nil, fmt.Errorf("driver not found: %s", c.Driver) + } +} +func (s *service) GetIFrame(ctx context.Context, req *appproviderv0alphapb.GetIFrameRequest) (*appproviderv0alphapb.GetIFrameResponse, error) { + + fn := req.Filename + mime := req.Miemtype + token := req.AccessToken + + s.provider.GetIFrame(ctx, fn, mime, token) + iframeLocation, err := s.provider.GetIFrame(ctx, fn, mime, token) + if err != nil { + logger.Error(ctx, err) + res := &appproviderv0alphapb.GetIFrameResponse{ + Status: &rpcpb.Status{Code: rpcpb.Code_CODE_INTERNAL}, + } + return res, nil + } + + res := &appproviderv0alphapb.GetIFrameResponse{ + Status: &rpcpb.Status{Code: rpcpb.Code_CODE_OK}, + IframeLocation: iframeLocation, + } + return res, nil +} diff --git a/services/grpcsvc/appregistrysvc/appregistrysvc.go b/services/grpcsvc/appregistrysvc/appregistrysvc.go new file mode 100644 index 0000000000..7c27a2a715 --- /dev/null +++ b/services/grpcsvc/appregistrysvc/appregistrysvc.go @@ -0,0 +1,88 @@ +package appregistrysvc + +import ( + "context" + "fmt" + + rpcpb "github.com/cernbox/go-cs3apis/cs3/rpc" + + appregistryv0alphapb "github.com/cernbox/go-cs3apis/cs3/appregistry/v0alpha" + "github.com/cernbox/reva/pkg/app" + "github.com/cernbox/reva/pkg/app/registry/static" + "github.com/cernbox/reva/pkg/err" + "github.com/cernbox/reva/pkg/log" + "github.com/mitchellh/mapstructure" +) + +var logger = log.New("appregistry") +var errors = err.New("appregistry") + +type service struct { + registry app.Registry +} +type 
config struct { + Driver string `mapstructure:"driver"` + Static map[string]interface{} `mapstructure:"static"` +} + +// New creates a new StorageRegistryService +func New(m map[string]interface{}) (appregistryv0alphapb.AppRegistryServiceServer, error) { + + c, err := parseConfig(m) + if err != nil { + return nil, errors.Wrap(err, "unable to parse config") + } + + registry, err := getRegistry(c) + if err != nil { + return nil, errors.Wrap(err, "unable to init registry") + } + + service := &service{ + registry: registry, + } + + return service, nil +} + +func parseConfig(m map[string]interface{}) (*config, error) { + c := &config{} + if err := mapstructure.Decode(m, c); err != nil { + return nil, err + } + return c, nil +} + +func getRegistry(c *config) (app.Registry, error) { + switch c.Driver { + case "static": + return static.New(c.Static) + default: + return nil, fmt.Errorf("driver not found: %s", c.Driver) + } +} +func (s *service) Find(ctx context.Context, req *appregistryv0alphapb.FindRequest) (*appregistryv0alphapb.FindResponse, error) { + ext := req.FilenameExtension + mime := req.FilenameMimetype + p, err := s.registry.FindProvider(ctx, ext, mime) + if err != nil { + logger.Error(ctx, err) + res := &appregistryv0alphapb.FindResponse{ + Status: &rpcpb.Status{Code: rpcpb.Code_CODE_INTERNAL}, + } + return res, nil + } + + provider := format(p) + res := &appregistryv0alphapb.FindResponse{ + Status: &rpcpb.Status{Code: rpcpb.Code_CODE_OK}, + AppProviderInfo: provider, + } + return res, nil +} + +func format(p *app.ProviderInfo) *appregistryv0alphapb.AppProviderInfo { + return &appregistryv0alphapb.AppProviderInfo{ + Location: p.Location, + } +} diff --git a/services/grpcsvc/authsvc/authsvc.go b/services/grpcsvc/authsvc/authsvc.go new file mode 100644 index 0000000000..8b147921b1 --- /dev/null +++ b/services/grpcsvc/authsvc/authsvc.go @@ -0,0 +1,303 @@ +package authsvc + +import ( + "context" + + "github.com/cernbox/reva/pkg/auth" + "github.com/cernbox/reva/pkg/auth/manager/demo" + "github.com/cernbox/reva/pkg/err" + "github.com/cernbox/reva/pkg/log" + "github.com/cernbox/reva/pkg/token" + "github.com/cernbox/reva/pkg/token/manager/jwt" + "github.com/cernbox/reva/pkg/user" + usrmgrdemo "github.com/cernbox/reva/pkg/user/manager/demo" + + authv0alphapb "github.com/cernbox/go-cs3apis/cs3/auth/v0alpha" + rpcpb "github.com/cernbox/go-cs3apis/cs3/rpc" + + "github.com/mitchellh/mapstructure" +) + +var logger = log.New("authsvc") +var errors = err.New("authsvc") +var ctx = context.Background() + +type config struct { + AuthManager map[string]interface{} `mapstructure:"auth_manager"` + TokenManager map[string]interface{} `mapstructure:"token_manager"` + UserManager map[string]interface{} `mapstructure:"user_manager"` +} + +type authManagerConfig struct { + Driver string `mapstructure:"driver"` + Demo map[string]interface{} `mapstructure:"demo"` + LDAP map[string]interface{} `mapstructure:"ldap"` +} + +type tokenManagerConfig struct { + Driver string `mapstructure:"driver"` + JWT map[string]interface{} `mapstructure:"jwt"` +} + +type userManagerConfig struct { + Driver string `mapstructure:"driver"` + Demo map[string]interface{} `mapstructure:"demo"` +} + +func parseConfig(m map[string]interface{}) (*config, error) { + c := &config{} + if err := mapstructure.Decode(m, c); err != nil { + return nil, err + } + return c, nil + +} + +func getUserManager(m map[string]interface{}) (user.Manager, error) { + c := &userManagerConfig{} + if err := mapstructure.Decode(m, c); err != nil { + return nil, err + 
} + + switch c.Driver { + case "demo": + mgr, err := usrmgrdemo.New(c.Demo) + if err != nil { + return nil, errors.Wrap(err, "unable to create demo user manager") + } + return mgr, nil + case "": + return nil, errors.Errorf("driver for user manager is empty") + + default: + return nil, errors.Errorf("driver %s not found for user manager", c.Driver) + } +} + +func getAuthManager(m map[string]interface{}) (auth.Manager, error) { + c := &authManagerConfig{} + if err := mapstructure.Decode(m, c); err != nil { + return nil, err + } + + switch c.Driver { + case "demo": + mgr, err := demo.New(c.Demo) + if err != nil { + return nil, errors.Wrap(err, "unable to create demo auth manager") + } + return mgr, nil + case "": + return nil, errors.Errorf("driver for auth manager is empty") + + default: + return nil, errors.Errorf("driver %s not found for auth manager", c.Driver) + } +} + +func getTokenManager(m map[string]interface{}) (token.Manager, error) { + c := &tokenManagerConfig{} + if err := mapstructure.Decode(m, c); err != nil { + return nil, err + } + + switch c.Driver { + case "jwt": + mgr, err := jwt.New(c.JWT) + if err != nil { + return nil, errors.Wrap(err, "unable to create jwt token manager") + } + return mgr, nil + case "": + return nil, errors.Errorf("driver for token manager is empty") + + default: + return nil, errors.Errorf("driver %s not found for token manager", c.Driver) + } +} + +func New(m map[string]interface{}) (authv0alphapb.AuthServiceServer, error) { + c, err := parseConfig(m) + if err != nil { + return nil, err + } + + authManager, err := getAuthManager(c.AuthManager) + if err != nil { + return nil, err + } + + tokenManager, err := getTokenManager(c.TokenManager) + if err != nil { + return nil, err + } + + userManager, err := getUserManager(c.UserManager) + if err != nil { + return nil, err + } + + svc := &service{authmgr: authManager, tokenmgr: tokenManager, usermgr: userManager} + return svc, nil + +} + +type service struct { + authmgr auth.Manager + tokenmgr token.Manager + usermgr user.Manager +} + +func (s *service) GenerateAccessToken(ctx context.Context, req *authv0alphapb.GenerateAccessTokenRequest) (*authv0alphapb.GenerateAccessTokenResponse, error) { + username := req.GetUsername() + password := req.GetPassword() + + err := s.authmgr.Authenticate(ctx, username, password) + if err != nil { + err = errors.Wrap(err, "error authenticating user") + logger.Error(ctx, err) + status := &rpcpb.Status{Code: rpcpb.Code_CODE_UNAUTHENTICATED} + res := &authv0alphapb.GenerateAccessTokenResponse{Status: status} + return res, nil + } + + user, err := s.usermgr.GetUser(ctx, username) + if err != nil { + err = errors.Wrap(err, "error getting user information") + logger.Error(ctx, err) + status := &rpcpb.Status{Code: rpcpb.Code_CODE_UNAUTHENTICATED} + res := &authv0alphapb.GenerateAccessTokenResponse{Status: status} + return res, nil + } + + claims := token.Claims{ + "username": user.Username, + "groups": user.Groups, + "mail": user.Mail, + "display_name": user.DisplayName, + } + + accessToken, err := s.tokenmgr.ForgeToken(ctx, claims) + if err != nil { + err = errors.Wrap(err, "error creating access token") + logger.Error(ctx, err) + status := &rpcpb.Status{Code: rpcpb.Code_CODE_UNAUTHENTICATED} + res := &authv0alphapb.GenerateAccessTokenResponse{Status: status} + return res, nil + } + + logger.Printf(ctx, "user %s authenticated", user.Username) + status := &rpcpb.Status{Code: rpcpb.Code_CODE_OK} + res := &authv0alphapb.GenerateAccessTokenResponse{Status: status, AccessToken: 
accessToken} + return res, nil + +} + +func (s *service) WhoAmI(ctx context.Context, req *authv0alphapb.WhoAmIRequest) (*authv0alphapb.WhoAmIResponse, error) { + token := req.AccessToken + claims, err := s.tokenmgr.DismantleToken(ctx, token) + if err != nil { + err = errors.Wrap(err, "error dismantling access token") + logger.Error(ctx, err) + status := &rpcpb.Status{Code: rpcpb.Code_CODE_UNAUTHENTICATED} + res := &authv0alphapb.WhoAmIResponse{Status: status} + return res, nil + } + + up := &struct { + Username string `mapstructure:"username"` + DisplayName string `mapstructure:"display_name"` + Mail string `mapstructure:"mail"` + Groups []string `mapstructure:"groups"` + }{} + + if err := mapstructure.Decode(claims, up); err != nil { + err = errors.Wrap(err, "error parsing token claims") + logger.Error(ctx, err) + status := &rpcpb.Status{Code: rpcpb.Code_CODE_UNAUTHENTICATED} + res := &authv0alphapb.WhoAmIResponse{Status: status} + return res, nil + } + + user := &authv0alphapb.User{ + Username: up.Username, + DisplayName: up.DisplayName, + Mail: up.Mail, + Groups: up.Groups, + } + + status := &rpcpb.Status{Code: rpcpb.Code_CODE_OK} + res := &authv0alphapb.WhoAmIResponse{Status: status, User: user} + return res, nil +} + +/* +func (s *service) ForgeUserToken(ctx context.Context, req *api.ForgeUserTokenReq) (*api.TokenResponse, error) { + l := ctx_zap.Extract(ctx) + user, err := s.authmgr.Authenticate(ctx, req.ClientId, req.ClientSecret) + if err != nil { + l.Error("", zap.Error(err)) + return nil, err + } + + token, err := s.tokenmgr.ForgeUserToken(ctx, user) + if err != nil { + l.Error("", zap.Error(err)) + return nil, err + } + tokenResponse := &api.TokenResponse{Token: token} + return tokenResponse, nil +} + +func (s *service) DismantleUserToken(ctx context.Context, req *api.TokenReq) (*api.UserResponse, error) { + l := ctx_zap.Extract(ctx) + token := req.Token + u, err := s.tokenmgr.DismantleUserToken(ctx, token) + if err != nil { + l.Warn("token invalid", zap.Error(err)) + res := &api.UserResponse{Status: api.StatusCode_TOKEN_INVALID} + return res, nil + //return nil, api.NewError(api.TokenInvalidErrorCode).WithMessage(err.Error()) + } + userRes := &api.UserResponse{User: u} + return userRes, nil +} + +func (s *service) ForgePublicLinkToken(ctx context.Context, req *api.ForgePublicLinkTokenReq) (*api.TokenResponse, error) { + l := ctx_zap.Extract(ctx) + pl, err := s.lm.AuthenticatePublicLink(ctx, req.Token, req.Password) + if err != nil { + if api.IsErrorCode(err, api.PublicLinkInvalidPasswordErrorCode) { + return &api.TokenResponse{Status: api.StatusCode_PUBLIC_LINK_INVALID_PASSWORD}, nil + } + l.Error("", zap.Error(err)) + return nil, err + } + + token, err := s.tokenmgr.ForgePublicLinkToken(ctx, pl) + if err != nil { + l.Warn("", zap.Error(err)) + return nil, err + } + tokenResponse := &api.TokenResponse{Token: token} + return tokenResponse, nil +} + +func (s *service) DismantlePublicLinkToken(ctx context.Context, req *api.TokenReq) (*api.PublicLinkResponse, error) { + l := ctx_zap.Extract(ctx) + token := req.Token + u, err := s.tokenmgr.DismantlePublicLinkToken(ctx, token) + if err != nil { + l.Error("token invalid", zap.Error(err)) + return nil, api.NewError(api.TokenInvalidErrorCode).WithMessage(err.Error()) + } + userRes := &api.PublicLinkResponse{PublicLink: u} + return userRes, nil +} + +// Override the Auth function to avoid checking the bearer token for this service +// https://github.com/grpc-ecosystem/go-grpc-middleware/tree/master/auth#type-serviceauthfuncoverride +func 
(s *service) AuthFuncOverride(ctx context.Context, fullMethodNauthmgre string) (context.Context, error) { + return ctx, nil +} +*/ diff --git a/services/grpcsvc/interceptors/interceptors.go b/services/grpcsvc/interceptors/interceptors.go new file mode 100644 index 0000000000..6b0bb4a4ea --- /dev/null +++ b/services/grpcsvc/interceptors/interceptors.go @@ -0,0 +1,69 @@ +package interceptors + +import ( + "context" + + "github.com/cernbox/reva/pkg/log" + "github.com/gofrs/uuid" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" +) + +var logger = log.New("grpc-interceptor") + +func LogUnaryServerInterceptor() grpc.UnaryServerInterceptor { + return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + logger.Println(ctx, info.FullMethod, req) + return handler(ctx, req) + } +} + +func TraceUnaryServerInterceptor() grpc.UnaryServerInterceptor { + return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + var trace string + md, ok := metadata.FromIncomingContext(ctx) + if ok && md != nil { + if val, ok := md["x-trace"]; ok { + if len(val) > 0 { + trace = val[0] + } + } + } else { + trace = uuid.Must(uuid.NewV4()).String() + } + ctx = context.WithValue(ctx, "trace", trace) + return handler(ctx, req) + } +} + +func TraceStreamServerInterceptor() grpc.StreamServerInterceptor { + return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + var trace string + md, ok := metadata.FromIncomingContext(ss.Context()) + if ok && md != nil { + if val, ok := md["x-trace"]; ok { + if len(val) > 0 { + trace = val[0] + } + } + } else { + trace = uuid.Must(uuid.NewV4()).String() + } + ctx := context.WithValue(ss.Context(), "trace", trace) + wrapped := newWrappedServerStream(ss, ctx) + return handler(srv, wrapped) + } +} + +func newWrappedServerStream(ss grpc.ServerStream, ctx context.Context) *wrappedServerStream { + return &wrappedServerStream{ServerStream: ss, newCtx: ctx} +} + +type wrappedServerStream struct { + grpc.ServerStream + newCtx context.Context +} + +func (ss *wrappedServerStream) Context() context.Context { + return ss.newCtx +} diff --git a/services/grpcsvc/storagebrokersvc/storagebrokersvc.go b/services/grpcsvc/storagebrokersvc/storagebrokersvc.go new file mode 100644 index 0000000000..a48c01e550 --- /dev/null +++ b/services/grpcsvc/storagebrokersvc/storagebrokersvc.go @@ -0,0 +1,87 @@ +package storagebrokersvc + +import ( + "context" + "fmt" + + rpcpb "github.com/cernbox/go-cs3apis/cs3/rpc" + + storagebrokerv0alphapb "github.com/cernbox/go-cs3apis/cs3/storagebroker/v0alpha" + "github.com/cernbox/reva/pkg/err" + "github.com/cernbox/reva/pkg/log" + "github.com/cernbox/reva/pkg/storage" + "github.com/cernbox/reva/pkg/storage/broker/static" + "github.com/mitchellh/mapstructure" +) + +var logger = log.New("storagebrokersvc") +var errors = err.New("storagebrokersvc") + +type service struct { + broker storage.Broker +} +type config struct { + Driver string `mapstructure:"driver"` + Static map[string]interface{} `mapstructure:"static"` +} + +// New creates a new StorageBrokerService +func New(m map[string]interface{}) (storagebrokerv0alphapb.StorageBrokerServiceServer, error) { + + c, err := parseConfig(m) + if err != nil { + return nil, errors.Wrap(err, "unable to parse config") + } + + broker, err := getBroker(c) + if err != nil { + return nil, errors.Wrap(err, "unable to init broker") + } + + 
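+ // The broker is resolved once at start-up from the configured driver (only
+ // "static" is wired up in getBroker below) and is then shared by every Find call.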
service := &service{ + broker: broker, + } + + return service, nil +} + +func parseConfig(m map[string]interface{}) (*config, error) { + c := &config{} + if err := mapstructure.Decode(m, c); err != nil { + return nil, err + } + return c, nil +} + +func getBroker(c *config) (storage.Broker, error) { + switch c.Driver { + case "static": + return static.New(c.Static) + default: + return nil, fmt.Errorf("driver not found: %s", c.Driver) + } +} +func (s *service) Find(ctx context.Context, req *storagebrokerv0alphapb.FindRequest) (*storagebrokerv0alphapb.FindResponse, error) { + fn := req.Filename + p, err := s.broker.FindProvider(ctx, fn) + if err != nil { + logger.Error(ctx, err) + res := &storagebrokerv0alphapb.FindResponse{ + Status: &rpcpb.Status{Code: rpcpb.Code_CODE_INTERNAL}, + } + return res, nil + } + + provider := format(p) + res := &storagebrokerv0alphapb.FindResponse{ + Status: &rpcpb.Status{Code: rpcpb.Code_CODE_OK}, + ProviderInfo: provider, + } + return res, nil +} + +func format(p *storage.ProviderInfo) *storagebrokerv0alphapb.ProviderInfo { + return &storagebrokerv0alphapb.ProviderInfo{ + Location: p.Location, + } +} diff --git a/services/grpcsvc/storageprovidersvc/storageprovidersvc.go b/services/grpcsvc/storageprovidersvc/storageprovidersvc.go new file mode 100644 index 0000000000..c6ca3bcc5f --- /dev/null +++ b/services/grpcsvc/storageprovidersvc/storageprovidersvc.go @@ -0,0 +1,1031 @@ +package storageprovidersvc + +import ( + "bytes" + "crypto/md5" + "fmt" + "io" + "os" + "path" + "path/filepath" + "sort" + "strconv" + "strings" + + "github.com/cernbox/reva/pkg/err" + "github.com/cernbox/reva/pkg/log" + "github.com/cernbox/reva/pkg/storage" + "github.com/cernbox/reva/pkg/storage/local" + + rpcpb "github.com/cernbox/go-cs3apis/cs3/rpc" + storageproviderv0alphapb "github.com/cernbox/go-cs3apis/cs3/storageprovider/v0alpha" + + "github.com/gofrs/uuid" + "github.com/mitchellh/mapstructure" + "golang.org/x/net/context" +) + +var logger = log.New("storageprovidersvc") +var errors = err.New("storageprovidersvc") + +type config struct { + Driver string `mapstructure:"driver"` + MountPath string `mapstructure:"mount_path"` + MountID string `mapstructure:"mount_id"` + TmpFolder string `mapstructure:"tmp_folder"` + EOS map[string]interface{} `mapstructure:"eos"` + S3 map[string]interface{} `mapstructure:"s3"` + Local map[string]interface{} `mapstructure:"local"` +} + +type service struct { + storage storage.FS + mountPath, mountID string + tmpFolder string +} + +// New creates a new storage provider svc +func New(m map[string]interface{}) (storageproviderv0alphapb.StorageProviderServiceServer, error) { + + c, err := parseConfig(m) + if err != nil { + return nil, errors.Wrap(err, "unable to parse config") + } + + // use os temporary folder if empty + tmpFolder := c.TmpFolder + if tmpFolder == "" { + tmpFolder = os.TempDir() + } + + mountPath := c.MountPath + mountID := c.MountID + + fs, err := getFS(c) + if err != nil { + return nil, errors.Wrap(err, "unable to obtain a filesystem") + } + + service := &service{ + storage: fs, + tmpFolder: tmpFolder, + mountPath: mountPath, + mountID: mountID, + } + + return service, nil +} + +func (s *service) Deref(ctx context.Context, req *storageproviderv0alphapb.DerefRequest) (*storageproviderv0alphapb.DerefResponse, error) { + return nil, nil +} + +func (s *service) CreateDirectory(ctx context.Context, req *storageproviderv0alphapb.CreateDirectoryRequest) (*storageproviderv0alphapb.CreateDirectoryResponse, error) { + fn := req.Filename + fsfn, _, 
err := s.unwrap(ctx, fn) + if err != nil { + status := &rpcpb.Status{Code: rpcpb.Code_CODE_INVALID} + res := &storageproviderv0alphapb.CreateDirectoryResponse{Status: status} + return res, nil + } + + if err := s.storage.CreateDir(ctx, fsfn); err != nil { + if _, ok := err.(notFoundError); ok { + status := &rpcpb.Status{Code: rpcpb.Code_CODE_NOT_FOUND} + res := &storageproviderv0alphapb.CreateDirectoryResponse{Status: status} + return res, nil + } + err = errors.Wrap(err, "error creating folder "+fn) + logger.Error(ctx, err) + status := &rpcpb.Status{Code: rpcpb.Code_CODE_INTERNAL} + res := &storageproviderv0alphapb.CreateDirectoryResponse{Status: status} + return res, nil + } + + status := &rpcpb.Status{Code: rpcpb.Code_CODE_OK} + res := &storageproviderv0alphapb.CreateDirectoryResponse{Status: status} + return res, nil +} + +func (s *service) Delete(ctx context.Context, req *storageproviderv0alphapb.DeleteRequest) (*storageproviderv0alphapb.DeleteResponse, error) { + fn := req.GetFilename() + + fsfn, _, err := s.unwrap(ctx, fn) + if err != nil { + status := &rpcpb.Status{Code: rpcpb.Code_CODE_INTERNAL} + res := &storageproviderv0alphapb.DeleteResponse{Status: status} + return res, nil + } + + if err := s.storage.Delete(ctx, fsfn); err != nil { + err := errors.Wrap(err, "error deleting file") + logger.Error(ctx, err) + status := &rpcpb.Status{Code: rpcpb.Code_CODE_INTERNAL} + res := &storageproviderv0alphapb.DeleteResponse{Status: status} + return res, nil + } + + status := &rpcpb.Status{Code: rpcpb.Code_CODE_OK} + res := &storageproviderv0alphapb.DeleteResponse{Status: status} + return res, nil +} + +func (s *service) Move(ctx context.Context, req *storageproviderv0alphapb.MoveRequest) (*storageproviderv0alphapb.MoveResponse, error) { + source := req.GetSourceFilename() + target := req.GetTargetFilename() + + fss, _, err := s.unwrap(ctx, source) + if err != nil { + logger.Error(ctx, err) + status := &rpcpb.Status{Code: rpcpb.Code_CODE_INTERNAL} + res := &storageproviderv0alphapb.MoveResponse{Status: status} + return res, nil + } + fst, _, err := s.unwrap(ctx, target) + if err != nil { + logger.Error(ctx, err) + status := &rpcpb.Status{Code: rpcpb.Code_CODE_INTERNAL} + res := &storageproviderv0alphapb.MoveResponse{Status: status} + return res, nil + } + + if err := s.storage.Move(ctx, fss, fst); err != nil { + err := errors.Wrap(err, "storageprovidersvc: error moving file") + logger.Error(ctx, err) + status := &rpcpb.Status{Code: rpcpb.Code_CODE_INTERNAL} + res := &storageproviderv0alphapb.MoveResponse{Status: status} + return res, nil + } + + status := &rpcpb.Status{Code: rpcpb.Code_CODE_OK} + res := &storageproviderv0alphapb.MoveResponse{Status: status} + return res, nil +} + +func (s *service) Stat(ctx context.Context, req *storageproviderv0alphapb.StatRequest) (*storageproviderv0alphapb.StatResponse, error) { + fn := req.GetFilename() + + fsfn, fctx, err := s.unwrap(ctx, fn) + if err != nil { + status := &rpcpb.Status{Code: rpcpb.Code_CODE_INVALID} + res := &storageproviderv0alphapb.StatResponse{Status: status} + return res, nil + } + + md, err := s.storage.GetMD(ctx, fsfn) + if err != nil { + if _, ok := err.(notFoundError); ok { + status := &rpcpb.Status{Code: rpcpb.Code_CODE_NOT_FOUND} + res := &storageproviderv0alphapb.StatResponse{Status: status} + return res, nil + } + err := errors.Wrap(err, "error stating file") + logger.Error(ctx, err) + status := &rpcpb.Status{Code: rpcpb.Code_CODE_INTERNAL} + res := &storageproviderv0alphapb.StatResponse{Status: status} + return res, nil + 
} + md.Path = s.wrap(ctx, md.Path, fctx) + + meta := s.toMeta(md) + status := &rpcpb.Status{Code: rpcpb.Code_CODE_OK} + res := &storageproviderv0alphapb.StatResponse{Status: status, Metadata: meta} + return res, nil +} + +func (s *service) List(req *storageproviderv0alphapb.ListRequest, stream storageproviderv0alphapb.StorageProviderService_ListServer) error { + ctx := stream.Context() + fn := req.GetFilename() + + fsfn, fctx, err := s.unwrap(ctx, fn) + if err != nil { + logger.Println(ctx, err) + status := &rpcpb.Status{Code: rpcpb.Code_CODE_INTERNAL} + res := &storageproviderv0alphapb.ListResponse{Status: status} + if err = stream.Send(res); err != nil { + return errors.Wrap(err, "error unwrapping") + } + return nil + } + + mds, err := s.storage.ListFolder(ctx, fsfn) + if err != nil { + err := errors.Wrap(err, "error listing folder") + logger.Error(ctx, err) + status := &rpcpb.Status{Code: rpcpb.Code_CODE_INTERNAL} + res := &storageproviderv0alphapb.ListResponse{Status: status} + if err = stream.Send(res); err != nil { + return errors.Wrap(err, "storageprovidersvc: error streaming list response") + } + return nil + } + + for _, md := range mds { + status := &rpcpb.Status{Code: rpcpb.Code_CODE_OK} + md.Path = s.wrap(ctx, md.Path, fctx) + meta := s.toMeta(md) + res := &storageproviderv0alphapb.ListResponse{ + Status: status, + Metadata: meta, + } + + if err := stream.Send(res); err != nil { + return errors.Wrap(err, "error streaming list response") + } + } + + return nil +} + +func (s *service) getSessionFolder(sessionID string) string { + return filepath.Join(s.tmpFolder, sessionID) +} + +func (s *service) StartWriteSession(ctx context.Context, req *storageproviderv0alphapb.StartWriteSessionRequest) (*storageproviderv0alphapb.StartWriteSessionResponse, error) { + sessionID := uuid.Must(uuid.NewV4()).String() + + // create temporary folder with sesion id to store + // future writes. 
+ sessionFolder := s.getSessionFolder(sessionID) + if err := os.Mkdir(sessionFolder, 0755); err != nil { + err = errors.Wrap(err, "storageprovidersvc: error creating session folder") + logger.Error(ctx, err) + + status := &rpcpb.Status{Code: rpcpb.Code_CODE_INTERNAL} + res := &storageproviderv0alphapb.StartWriteSessionResponse{Status: status} + return res, nil + } + + status := &rpcpb.Status{Code: rpcpb.Code_CODE_OK} + res := &storageproviderv0alphapb.StartWriteSessionResponse{Status: status, SessionId: sessionID} + return res, nil +} + +func (s *service) Write(stream storageproviderv0alphapb.StorageProviderService_WriteServer) error { + ctx := stream.Context() + var numChunks int + var writtenBytes int64 + + for { + req, err := stream.Recv() + + if err == io.EOF { + logger.Println(ctx, "no more chunks to receive") + break + } + + if err != nil { + err = errors.Wrap(err, "error receiving write request") + logger.Error(ctx, err) + + status := &rpcpb.Status{Code: rpcpb.Code_CODE_INTERNAL} + res := &storageproviderv0alphapb.WriteResponse{Status: status} + if err = stream.SendAndClose(res); err != nil { + err = errors.Wrap(err, "error closing stream for write") + return err + } + return nil + } + + if req.SessionId == "" { + err = errors.New("session id cannot be empty") + logger.Error(ctx, err) + + status := &rpcpb.Status{Code: rpcpb.Code_CODE_INTERNAL} + res := &storageproviderv0alphapb.WriteResponse{Status: status} + if err = stream.SendAndClose(res); err != nil { + err = errors.Wrap(err, "error closing stream for write") + logger.Error(ctx, err) + return err + } + return nil + } + + sessionFolder := s.getSessionFolder(req.SessionId) + chunkFile := filepath.Join(sessionFolder, fmt.Sprintf("%d-%d", req.Offset, req.Length)) + // the chunk file is closed explicitly on every path below; deferring the + // close inside the loop would keep chunk files open until the stream ends. + fd, err := os.OpenFile(chunkFile, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0600) + if err != nil { + err = errors.Wrap(err, "error creating chunk file") + logger.Error(ctx, err) + + status := &rpcpb.Status{Code: rpcpb.Code_CODE_INTERNAL} + res := &storageproviderv0alphapb.WriteResponse{Status: status} + if err = stream.SendAndClose(res); err != nil { + err = errors.Wrap(err, "error closing stream for write") + return err + } + return nil + } + + reader := bytes.NewReader(req.Data) + n, err := io.CopyN(fd, reader, int64(req.Length)) + if err != nil { + fd.Close() + err = errors.Wrap(err, "error writing chunk file") + logger.Error(ctx, err) + + status := &rpcpb.Status{Code: rpcpb.Code_CODE_INTERNAL} + res := &storageproviderv0alphapb.WriteResponse{Status: status} + if err = stream.SendAndClose(res); err != nil { + err = errors.Wrap(err, "error closing stream for write") + return err + } + return nil + } + + numChunks++ + writtenBytes += n + fd.Close() + } + + status := &rpcpb.Status{Code: rpcpb.Code_CODE_OK} + res := &storageproviderv0alphapb.WriteResponse{Status: status, WrittenBytes: uint64(writtenBytes), NumberChunks: uint64(numChunks)} + if err := stream.SendAndClose(res); err != nil { + err = errors.Wrap(err, "error closing stream for write") + return err + } + return nil +} + +func (s *service) FinishWriteSession(ctx context.Context, req *storageproviderv0alphapb.FinishWriteSessionRequest) (*storageproviderv0alphapb.FinishWriteSessionResponse, error) { + sessionFolder := s.getSessionFolder(req.SessionId) + + fd, err := os.Open(sessionFolder) + defer fd.Close() + if err != nil { + err = errors.Wrap(err, "error opening session folder") + logger.Error(ctx, err) + status := &rpcpb.Status{Code: rpcpb.Code_CODE_INTERNAL} + res := 
&storageproviderv0alphapb.FinishWriteSessionResponse{Status: status} + return res, nil + } + + defer os.RemoveAll(sessionFolder) // remove txFolder once assembled file is returned + + // list all the chunk files in the directory + names, err := fd.Readdirnames(0) + if err != nil { + err = errors.Wrap(err, "storageprovidersvc: error listing session folder") + logger.Error(ctx, err) + status := &rpcpb.Status{Code: rpcpb.Code_CODE_INTERNAL} + res := &storageproviderv0alphapb.FinishWriteSessionResponse{Status: status} + return res, nil + } + + // sort the chunks by offset so they are in order when they need to be assembled. + names = s.getSortedChunkSlice(names) + + //l.Debug("chunk sorted names", zap.String("names", fmt.Sprintf("%+v", names))) + //l.Info("number of chunks", zap.String("nchunks", fmt.Sprintf("%d", len(names)))) + + rand := uuid.Must(uuid.NewV4()).String() + assembledFilename := filepath.Join(sessionFolder, fmt.Sprintf("assembled-%s", rand)) + //l.Info("", zap.String("assembledfn", assembledFilename)) + + assembledFile, err := os.OpenFile(assembledFilename, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0600) + if err != nil { + err = errors.Wrap(err, "storageprovidersvc: error opening assembly file") + logger.Error(ctx, err) + status := &rpcpb.Status{Code: rpcpb.Code_CODE_INTERNAL} + res := &storageproviderv0alphapb.FinishWriteSessionResponse{Status: status} + return res, nil + } + + xs := md5.New() + for _, n := range names { + //l.Debug("processing chunk", zap.String("name", n), zap.Int("int", i)) + chunkFilename := filepath.Join(sessionFolder, n) + //l.Info(fmt.Sprintf("processing chunk %d", i), zap.String("chunk", chunkFilename)) + + chunkInfo, err := parseChunkFilename(filepath.Base(chunkFilename)) + if err != nil { + err = errors.Wrap(err, "storageprovidersvc: error parsing chunk fn") + logger.Error(ctx, err) + status := &rpcpb.Status{Code: rpcpb.Code_CODE_INTERNAL} + res := &storageproviderv0alphapb.FinishWriteSessionResponse{Status: status} + return res, nil + } + + chunk, err := os.Open(chunkFilename) + defer chunk.Close() + if err != nil { + err = errors.Wrap(err, "storageprovidersvc: error opening chunk file") + logger.Error(ctx, err) + status := &rpcpb.Status{Code: rpcpb.Code_CODE_INTERNAL} + res := &storageproviderv0alphapb.FinishWriteSessionResponse{Status: status} + return res, nil + } + + mw := io.MultiWriter(assembledFile, xs) + n, err := io.CopyN(mw, chunk, int64(chunkInfo.ClientLength)) + if err != nil && err != io.EOF { + err = errors.Wrap(err, "error copying data to chunkfile") + logger.Error(ctx, err) + status := &rpcpb.Status{Code: rpcpb.Code_CODE_INTERNAL} + res := &storageproviderv0alphapb.FinishWriteSessionResponse{Status: status} + return res, nil + } + if n != int64(chunkInfo.ClientLength) { + err := fmt.Errorf("chunk size in disk is different from chunk size sent from client. 
Read: %d Sent: %d", n, chunkInfo.ClientLength) + err = errors.Wrap(err, "chunk size is invalid") + logger.Error(ctx, err) + status := &rpcpb.Status{Code: rpcpb.Code_CODE_INTERNAL} + res := &storageproviderv0alphapb.FinishWriteSessionResponse{Status: status} + return res, nil + } + chunk.Close() + } + assembledFile.Close() + + fd, err = os.Open(assembledFilename) + if err != nil { + err = errors.Wrap(err, "error opening assembled file") + logger.Error(ctx, err) + status := &rpcpb.Status{Code: rpcpb.Code_CODE_INTERNAL} + res := &storageproviderv0alphapb.FinishWriteSessionResponse{Status: status} + return res, nil + } + + xsComputed := fmt.Sprintf("%x", xs.Sum(nil)) + logger.Printf(ctx, "computed checksum: %s", xsComputed) + if req.Checksum != "" && "md5:"+xsComputed != req.Checksum { + err := fmt.Errorf("checksum mismatch between sent=%s and computed=%s", req.Checksum, xsComputed) + err = errors.Wrap(err, "file got corrupted during transfer") + logger.Error(ctx, err) + status := &rpcpb.Status{Code: rpcpb.Code_CODE_INTERNAL} + res := &storageproviderv0alphapb.FinishWriteSessionResponse{Status: status} + return res, nil + } + + fsfn, _, err := s.unwrap(ctx, req.Filename) + if err != nil { + logger.Error(ctx, err) + status := &rpcpb.Status{Code: rpcpb.Code_CODE_INTERNAL} + res := &storageproviderv0alphapb.FinishWriteSessionResponse{Status: status} + return res, nil + } + + if err := s.storage.Upload(ctx, fsfn, fd); err != nil { + err = errors.Wrap(err, "error uploading assembled file") + logger.Error(ctx, err) + status := &rpcpb.Status{Code: rpcpb.Code_CODE_INTERNAL} + res := &storageproviderv0alphapb.FinishWriteSessionResponse{Status: status} + return res, nil + } + + status := &rpcpb.Status{Code: rpcpb.Code_CODE_OK} + res := &storageproviderv0alphapb.FinishWriteSessionResponse{Status: status} + return res, nil +} + +func (s *service) getSortedChunkSlice(names []string) []string { + // sort names numerically by chunk offset + sort.Slice(names, func(i, j int) bool { + previous := names[i] + next := names[j] + + previousOffset, err := strconv.ParseInt(strings.Split(previous, "-")[0], 10, 64) + if err != nil { + panic("chunk name cannot be parsed as int: " + previous) + } + nextOffset, err := strconv.ParseInt(strings.Split(next, "-")[0], 10, 64) + if err != nil { + panic("chunk name cannot be parsed as int: " + next) + } + return previousOffset < nextOffset + }) + return names +} + +type chunkInfo struct { + Offset uint64 + ClientLength uint64 +} + +func parseChunkFilename(fn string) (*chunkInfo, error) { + parts := strings.Split(fn, "-") + if len(parts) < 2 { + return nil, fmt.Errorf("invalid chunk filename: %s", fn) + } + + offset, err := strconv.ParseUint(parts[0], 10, 64) + if err != nil { + return nil, err + } + clientLength, err := strconv.ParseUint(parts[1], 10, 64) + if err != nil { + return nil, err + } + return &chunkInfo{Offset: offset, ClientLength: clientLength}, nil +} + +func (s *service) Read(req *storageproviderv0alphapb.ReadRequest, stream storageproviderv0alphapb.StorageProviderService_ReadServer) error { + ctx := stream.Context() + fsfn, _, err := s.unwrap(ctx, req.Filename) + if err != nil { + logger.Error(ctx, err) + status := &rpcpb.Status{Code: rpcpb.Code_CODE_INTERNAL} + res := &storageproviderv0alphapb.ReadResponse{Status: status} + if err = stream.Send(res); err != nil { + return errors.Wrap(err, "error streaming read response") + } + return nil + } + + fd, err := s.storage.Download(ctx, fsfn) + if err != nil { + err = errors.Wrap(err, "storageprovidersvc: error downloading file") + 
logger.Error(ctx, err) + status := &rpcpb.Status{Code: rpcpb.Code_CODE_INTERNAL} + res := &storageproviderv0alphapb.ReadResponse{Status: status} + if err = stream.Send(res); err != nil { + return errors.Wrap(err, "storageprovidersvc: error streaming read response") + } + return nil + } + + // close fd when finished reading + // continue on close failure + defer func() { + if err := fd.Close(); err != nil { + err = errors.Wrap(err, "storageprovidersvc: error closing fd after reading - leak") + logger.Error(ctx, err) + } + }() + + // send data chunks of maximum 3 MiB + buffer := make([]byte, 1024*1024*3) + for { + n, err := fd.Read(buffer) + if n > 0 { + dc := &storageproviderv0alphapb.DataChunk{Data: buffer[:n], Length: uint64(n)} + status := &rpcpb.Status{Code: rpcpb.Code_CODE_OK} + res := &storageproviderv0alphapb.ReadResponse{Status: status, DataChunk: dc} + if err = stream.Send(res); err != nil { + return errors.Wrap(err, "storageprovidersvc: error streaming read response") + } + } + + // nothing more to send + if err == io.EOF { + break + } + + if err != nil { + err = errors.Wrap(err, "storageprovidersvc: error reading from fd") + logger.Error(ctx, err) + status := &rpcpb.Status{Code: rpcpb.Code_CODE_INTERNAL} + res := &storageproviderv0alphapb.ReadResponse{Status: status} + if err = stream.Send(res); err != nil { + return errors.Wrap(err, "storageprovidersvc: error streaming read response") + } + return nil + } + } + + return nil +} + +func (s *service) ListVersions(req *storageproviderv0alphapb.ListVersionsRequest, stream storageproviderv0alphapb.StorageProviderService_ListVersionsServer) error { + ctx := stream.Context() + revs, err := s.storage.ListRevisions(ctx, req.Filename) + if err != nil { + err = errors.Wrap(err, "storageprovidersvc: error listing revisions") + logger.Error(ctx, err) + status := &rpcpb.Status{Code: rpcpb.Code_CODE_INTERNAL} + res := &storageproviderv0alphapb.ListVersionsResponse{Status: status} + if err = stream.Send(res); err != nil { + return errors.Wrap(err, "storageprovidersvc: error streaming list versions response") + } + return nil + } + + for _, rev := range revs { + status := &rpcpb.Status{Code: rpcpb.Code_CODE_OK} + version := &storageproviderv0alphapb.Version{ + Key: rev.RevKey, + IsDir: rev.IsDir, + Mtime: rev.Mtime, + Size: rev.Size, + } + res := &storageproviderv0alphapb.ListVersionsResponse{Status: status, Version: version} + if err := stream.Send(res); err != nil { + return errors.Wrap(err, "storageprovidersvc: error streaming list versions response") + } + } + return nil +} + +func (s *service) ReadVersion(req *storageproviderv0alphapb.ReadVersionRequest, stream storageproviderv0alphapb.StorageProviderService_ReadVersionServer) error { + ctx := stream.Context() + fd, err := s.storage.DownloadRevision(ctx, req.Filename, req.VersionKey) + if err != nil { + err = errors.Wrap(err, "storageprovidersvc: error downloading revision") + logger.Error(ctx, err) + status := &rpcpb.Status{Code: rpcpb.Code_CODE_INTERNAL} + res := &storageproviderv0alphapb.ReadVersionResponse{Status: status} + if err = stream.Send(res); err != nil { + return errors.Wrap(err, "storageprovidersvc: error streaming read version response") + } + return nil + } + + // close fd when finished reading; the close is only deferred after a + // successful download so a nil reader is never closed + defer func() { + if err := fd.Close(); err != nil { + err = errors.Wrap(err, "storageprovidersvc: error closing fd for version file - leak") + logger.Error(ctx, err) + // continue + } + }() + + // send data chunks of maximum 3 MiB + buffer := make([]byte, 1024*1024*3) + for { + n, err := fd.Read(buffer) + if n > 0 { + dc := 
&storageproviderv0alphapb.DataChunk{Data: buffer[:n], Length: uint64(n)} + status := &rpcpb.Status{Code: rpcpb.Code_CODE_OK} + res := &storageproviderv0alphapb.ReadVersionResponse{Status: status, DataChunk: dc} + if err = stream.Send(res); err != nil { + return errors.Wrap(err, "storageprovidersvc: error streaming read version response") + } + } + + // nothing more to send + if err == io.EOF { + break + } + + if err != nil { + err = errors.Wrap(err, "storageprovidersvc: error reading from fd") + logger.Error(ctx, err) + status := &rpcpb.Status{Code: rpcpb.Code_CODE_INTERNAL} + res := &storageproviderv0alphapb.ReadVersionResponse{Status: status} + if err = stream.Send(res); err != nil { + return errors.Wrap(err, "storageprovidersvc: error streaming read version response") + } + return nil + } + } + + return nil +} + +func (s *service) RestoreVersion(ctx context.Context, req *storageproviderv0alphapb.RestoreVersionRequest) (*storageproviderv0alphapb.RestoreVersionResponse, error) { + if err := s.storage.RestoreRevision(ctx, req.Filename, req.VersionKey); err != nil { + err = errors.Wrap(err, "storageprovidersvc: error restoring version") + logger.Error(ctx, err) + status := &rpcpb.Status{Code: rpcpb.Code_CODE_INTERNAL} + res := &storageproviderv0alphapb.RestoreVersionResponse{Status: status} + return res, nil + } + // the restore succeeded, so report OK + status := &rpcpb.Status{Code: rpcpb.Code_CODE_OK} + res := &storageproviderv0alphapb.RestoreVersionResponse{Status: status} + return res, nil +} + +func (s *service) ListRecycle(req *storageproviderv0alphapb.ListRecycleRequest, stream storageproviderv0alphapb.StorageProviderService_ListRecycleServer) error { + ctx := stream.Context() + fn := req.GetFilename() + + items, err := s.storage.ListRecycle(ctx, fn) + if err != nil { + err := errors.Wrap(err, "storageprovidersvc: error listing recycle") + logger.Error(ctx, err) + status := &rpcpb.Status{Code: rpcpb.Code_CODE_INTERNAL} + res := &storageproviderv0alphapb.ListRecycleResponse{Status: status} + if err = stream.Send(res); err != nil { + return errors.Wrap(err, "storageprovidersvc: error streaming list recycle response") + } + return nil + } + + for _, item := range items { + recycleItem := &storageproviderv0alphapb.RecycleItem{ + Filename: item.RestorePath, + Key: item.RestoreKey, + Size: item.Size, + Deltime: item.DelMtime, + IsDir: item.IsDir, + } + status := &rpcpb.Status{Code: rpcpb.Code_CODE_OK} + res := &storageproviderv0alphapb.ListRecycleResponse{ + Status: status, + RecycleItem: recycleItem, + } + + if err := stream.Send(res); err != nil { + return errors.Wrap(err, "storageprovidersvc: error streaming list recycle response") + } + } + + return nil +} + +func (s *service) RestoreRecycleItem(ctx context.Context, req *storageproviderv0alphapb.RestoreRecycleItemRequest) (*storageproviderv0alphapb.RestoreRecycleItemResponse, error) { + if err := s.storage.RestoreRecycleItem(ctx, req.Filename, req.RestoreKey); err != nil { + err = errors.Wrap(err, "storageprovidersvc: error restoring recycle item") + logger.Error(ctx, err) + status := &rpcpb.Status{Code: rpcpb.Code_CODE_INTERNAL} + res := &storageproviderv0alphapb.RestoreRecycleItemResponse{Status: status} + return res, nil + } + status := &rpcpb.Status{Code: rpcpb.Code_CODE_OK} + res := &storageproviderv0alphapb.RestoreRecycleItemResponse{Status: status} + return res, nil +} + +func (s *service) PurgeRecycle(ctx context.Context, req *storageproviderv0alphapb.PurgeRecycleRequest) (*storageproviderv0alphapb.PurgeRecycleResponse, error) { + if err := s.storage.EmptyRecycle(ctx, 
req.Filename); err != nil { + err = errors.Wrap(err, "storageprovidersvc: error purging recycle") + logger.Error(ctx, err) + status := &rpcpb.Status{Code: rpcpb.Code_CODE_INTERNAL} + res := &storageproviderv0alphapb.PurgeRecycleResponse{Status: status} + return res, nil + } + status := &rpcpb.Status{Code: rpcpb.Code_CODE_OK} + res := &storageproviderv0alphapb.PurgeRecycleResponse{Status: status} + return res, nil +} + +func (s *service) SetACL(ctx context.Context, req *storageproviderv0alphapb.SetACLRequest) (*storageproviderv0alphapb.SetACLResponse, error) { + fn := req.Filename + aclTarget := req.Acl.Target + aclMode := s.getPermissions(req.Acl.Mode) + aclType := s.getTargetType(req.Acl.Type) + + // check mode is valid + if aclMode == storage.ACLModeInvalid { + logger.Println(ctx, "acl mode is invalid") + status := &rpcpb.Status{Code: rpcpb.Code_CODE_INVALID_ARGUMENT, Message: "acl mode is invalid"} + res := &storageproviderv0alphapb.SetACLResponse{Status: status} + return res, nil + } + + // check targetType is valid + if aclType == storage.ACLTypeInvalid { + logger.Println(ctx, "acl type is invalid") + status := &rpcpb.Status{Code: rpcpb.Code_CODE_INVALID_ARGUMENT, Message: "acl type is invalid"} + res := &storageproviderv0alphapb.SetACLResponse{Status: status} + return res, nil + } + + acl := &storage.ACL{ + Target: aclTarget, + Mode: aclMode, + Type: aclType, + } + + err := s.storage.SetACL(ctx, fn, acl) + if err != nil { + err = errors.Wrap(err, "storageprovidersvc: error setting acl") + logger.Error(ctx, err) + status := &rpcpb.Status{Code: rpcpb.Code_CODE_INTERNAL} + res := &storageproviderv0alphapb.SetACLResponse{Status: status} + return res, nil + } + + status := &rpcpb.Status{Code: rpcpb.Code_CODE_OK} + res := &storageproviderv0alphapb.SetACLResponse{Status: status} + return res, nil +} + +func (s *service) getTargetType(t storageproviderv0alphapb.ACLType) storage.ACLType { + switch t { + case storageproviderv0alphapb.ACLType_ACL_TYPE_USER: + return storage.ACLTypeUser + case storageproviderv0alphapb.ACLType_ACL_TYPE_GROUP: + return storage.ACLTypeGroup + default: + return storage.ACLTypeInvalid + } +} + +func (s *service) getPermissions(mode storageproviderv0alphapb.ACLMode) storage.ACLMode { + switch mode { + case storageproviderv0alphapb.ACLMode_ACL_MODE_READONLY: + return storage.ACLModeReadOnly + case storageproviderv0alphapb.ACLMode_ACL_MODE_READWRITE: + return storage.ACLModeReadWrite + default: + return storage.ACLModeInvalid + } +} + +func (s *service) UpdateACL(ctx context.Context, req *storageproviderv0alphapb.UpdateACLRequest) (*storageproviderv0alphapb.UpdateACLResponse, error) { + fn := req.Filename + target := req.Acl.Target + mode := s.getPermissions(req.Acl.Mode) + targetType := s.getTargetType(req.Acl.Type) + + // check mode is valid + if mode == storage.ACLModeInvalid { + logger.Println(ctx, "acl mode is invalid") + status := &rpcpb.Status{Code: rpcpb.Code_CODE_INVALID_ARGUMENT, Message: "acl mode is invalid"} + res := &storageproviderv0alphapb.UpdateACLResponse{Status: status} + return res, nil + } + + // check targetType is valid + if targetType == storage.ACLTypeInvalid { + logger.Println(ctx, "acl type is invalid") + status := &rpcpb.Status{Code: rpcpb.Code_CODE_INVALID_ARGUMENT, Message: "acl type is invalid"} + res := &storageproviderv0alphapb.UpdateACLResponse{Status: status} + return res, nil + } + + acl := &storage.ACL{ + Target: target, + Mode: mode, + Type: targetType, + } + + if err := s.storage.UpdateACL(ctx, fn, acl); err != nil { + err = 
errors.Wrap(err, "storageprovidersvc: error updating acl") + logger.Error(ctx, err) + status := &rpcpb.Status{Code: rpcpb.Code_CODE_INTERNAL} + res := &storageproviderv0alphapb.UpdateACLResponse{Status: status} + return res, nil + } + status := &rpcpb.Status{Code: rpcpb.Code_CODE_OK} + res := &storageproviderv0alphapb.UpdateACLResponse{Status: status} + return res, nil +} + +func (s *service) UnsetACL(ctx context.Context, req *storageproviderv0alphapb.UnsetACLRequest) (*storageproviderv0alphapb.UnsetACLResponse, error) { + fn := req.Filename + aclTarget := req.Acl.Target + aclType := s.getTargetType(req.Acl.Type) + + // check targetType is valid + if aclType == storage.ACLTypeInvalid { + logger.Println(ctx, "acl type is invalid") + status := &rpcpb.Status{Code: rpcpb.Code_CODE_INVALID_ARGUMENT, Message: "acl type is invalid"} + res := &storageproviderv0alphapb.UnsetACLResponse{Status: status} + return res, nil + } + + acl := &storage.ACL{ + Target: aclTarget, + Type: aclType, + } + + if err := s.storage.UnsetACL(ctx, fn, acl); err != nil { + err = errors.Wrap(err, "storageprovidersvc: error unsetting acl") + logger.Error(ctx, err) + status := &rpcpb.Status{Code: rpcpb.Code_CODE_INTERNAL} + res := &storageproviderv0alphapb.UnsetACLResponse{Status: status} + return res, nil + } + + status := &rpcpb.Status{Code: rpcpb.Code_CODE_OK} + res := &storageproviderv0alphapb.UnsetACLResponse{Status: status} + return res, nil +} + +func (s *service) GetQuota(ctx context.Context, req *storageproviderv0alphapb.GetQuotaRequest) (*storageproviderv0alphapb.GetQuotaResponse, error) { + total, used, err := s.storage.GetQuota(ctx, req.Filename) + if err != nil { + err = errors.Wrap(err, "storageprovidersvc: error getting quota") + logger.Error(ctx, err) + status := &rpcpb.Status{Code: rpcpb.Code_CODE_INTERNAL} + res := &storageproviderv0alphapb.GetQuotaResponse{Status: status} + return res, nil + } + status := &rpcpb.Status{Code: rpcpb.Code_CODE_OK} + res := &storageproviderv0alphapb.GetQuotaResponse{Status: status, TotalBytes: uint64(total), UsedBytes: uint64(used)} + return res, nil +} + +func (s *service) splitFn(fsfn string) (string, string, error) { + tokens := strings.Split(fsfn, "/") + l := len(tokens) + if l == 0 { + return "", "", errors.New("fsfn is not id-based") + } + + fid := tokens[0] + if l > 1 { + return fid, path.Join(tokens[1:]...), nil + } + return fid, "", nil +} + +type fnCtx struct { + mountPrefix string + *derefCtx +} + +type derefCtx struct { + derefPath string + fid string + rootFidFn string +} + +func (s *service) deref(ctx context.Context, fsfn string) (*derefCtx, error) { + if strings.HasPrefix(fsfn, "/") { + return &derefCtx{derefPath: fsfn}, nil + } + + fid, right, err := s.splitFn(fsfn) + if err != nil { + return nil, err + } + // resolve fid to path in the fs + fnPointByID, err := s.storage.GetPathByID(ctx, fid) + if err != nil { + return nil, err + } + + derefPath := path.Join(fnPointByID, right) + return &derefCtx{derefPath: derefPath, fid: fid, rootFidFn: fnPointByID}, nil +} + +func (s *service) unwrap(ctx context.Context, fn string) (string, *fnCtx, error) { + mp, fsfn, err := s.trimMounPrefix(fn) + if err != nil { + return "", nil, err + } + + derefCtx, err := s.deref(ctx, fsfn) + if err != nil { + return "", nil, err + } + + fctx := &fnCtx{ + derefCtx: derefCtx, + mountPrefix: mp, + } + return fsfn, fctx, nil +} + +func (s *service) wrap(ctx context.Context, fsfn string, fctx *fnCtx) string { + if !strings.HasPrefix(fsfn, "/") { + fsfn = strings.TrimPrefix(fsfn, 
fctx.rootFidFn) + fsfn = path.Join(fctx.fid, fsfn) + fsfn = fctx.mountPrefix + ":" + fsfn + } else { + fsfn = path.Join(fctx.mountPrefix, fsfn) + } + + return fsfn +} + +func (s *service) trimMounPrefix(fn string) (string, string, error) { + mountID := s.mountID + ":" + if strings.HasPrefix(fn, s.mountPath) { + return s.mountPath, path.Join("/", strings.TrimPrefix(fn, s.mountPath)), nil + } + if strings.HasPrefix(fn, mountID) { + return mountID, strings.TrimPrefix(fn, mountID), nil + } + return "", "", errors.New("fn does not belong to this storage provider: " + fn) +} + +func parseConfig(m map[string]interface{}) (*config, error) { + c := &config{} + if err := mapstructure.Decode(m, c); err != nil { + return nil, err + } + return c, nil +} + +func getFS(c *config) (storage.FS, error) { + switch c.Driver { + case "local": + return local.New(c.Local) + case "": + return nil, fmt.Errorf("driver is empty") + default: + return nil, fmt.Errorf("driver not found: %s", c.Driver) + } +} + +type notFoundError interface { + IsNotFound() +} + +func toPerm(p *storage.Permissions) *storageproviderv0alphapb.Permissions { + return &storageproviderv0alphapb.Permissions{ + Read: p.Read, + Write: p.Write, + Share: p.Share, + } +} + +func (s *service) toMeta(md *storage.MD) *storageproviderv0alphapb.Metadata { + perm := toPerm(md.Permissions) + meta := &storageproviderv0alphapb.Metadata{ + Filename: md.Path, + Checksum: md.Checksum, + Etag: md.Etag, + Id: s.mountID + ":" + md.ID, + IsDir: md.IsDir, + Mime: md.Mime, + Mtime: md.Mtime, + Size: md.Size, + Permissions: perm, + } + + return meta +} diff --git a/services/httpsvc/handlers/handlers.go b/services/httpsvc/handlers/handlers.go new file mode 100644 index 0000000000..5ac8282f4b --- /dev/null +++ b/services/httpsvc/handlers/handlers.go @@ -0,0 +1 @@ +package handlers diff --git a/services/httpsvc/handlers/log.go b/services/httpsvc/handlers/log.go new file mode 100644 index 0000000000..451fc1c17b --- /dev/null +++ b/services/httpsvc/handlers/log.go @@ -0,0 +1,165 @@ +package handlers + +import ( + "bufio" + "fmt" + "net" + "net/http" + "net/url" + "time" + + "github.com/cernbox/reva/pkg/log" +) + +// LogHandler is a logging middleware +func LogHandler(l *log.Logger, h http.Handler) http.Handler { + return newLoggingHandler(l, h) +} + +func newLoggingHandler(l *log.Logger, h http.Handler) http.Handler { + return loggingHandler{l, h} +} + +type loggingHandler struct { + l *log.Logger + handler http.Handler +} + +func (h loggingHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + t := time.Now() + logger := makeLogger(w) + url := *req.URL + h.handler.ServeHTTP(logger, req) + writeLog(h.l, req, url, t, logger.Status(), logger.Size()) +} + +func makeLogger(w http.ResponseWriter) loggingResponseWriter { + var logger loggingResponseWriter = &responseLogger{w: w, status: http.StatusOK} + if _, ok := w.(http.Hijacker); ok { + logger = &hijackLogger{responseLogger{w: w, status: http.StatusOK}} + } + h, ok1 := logger.(http.Hijacker) + c, ok2 := w.(http.CloseNotifier) + if ok1 && ok2 { + return hijackCloseNotifier{logger, h, c} + } + if ok2 { + return &closeNotifyWriter{logger, c} + } + return logger +} + +func writeLog(l *log.Logger, req *http.Request, url url.URL, ts time.Time, status, size int) { + end := time.Now() + host, _, err := net.SplitHostPort(req.RemoteAddr) + + if err != nil { + host = req.RemoteAddr + } + + uri := req.RequestURI + + if req.ProtoMajor == 2 && req.Method == "CONNECT" { + uri = req.Host + } + if uri == "" { + uri = 
url.RequestURI() + } + + diff := end.Sub(ts).Nanoseconds() + + var b *log.Builder + if status >= 400 { + b = l.BuildError() + } else { + b = l.Build() + } + b.Str("host", host).Str("method", req.Method) + b = b.Str("uri", uri).Str("proto", req.Proto).Int("status", status) + b = b.Int("size", size) + b = b.Str("start", ts.Format("02/Jan/2006:15:04:05 -0700")) + b = b.Str("end", end.Format("02/Jan/2006:15:04:05 -0700")).Int("time_ns", int(diff)) + b.Msg(req.Context(), "HTTP request finished") +} + +type loggingResponseWriter interface { + commonLoggingResponseWriter + http.Pusher +} + +func (l *responseLogger) Push(target string, opts *http.PushOptions) error { + p, ok := l.w.(http.Pusher) + if !ok { + return fmt.Errorf("responseLogger does not implement http.Pusher") + } + return p.Push(target, opts) +} + +type commonLoggingResponseWriter interface { + http.ResponseWriter + http.Flusher + Status() int + Size() int +} + +// responseLogger is wrapper of http.ResponseWriter that keeps track of its HTTP +// status code and body size +type responseLogger struct { + w http.ResponseWriter + status int + size int +} + +func (l *responseLogger) Header() http.Header { + return l.w.Header() +} + +func (l *responseLogger) Write(b []byte) (int, error) { + size, err := l.w.Write(b) + l.size += size + return size, err +} + +func (l *responseLogger) WriteHeader(s int) { + l.w.WriteHeader(s) + l.status = s +} + +func (l *responseLogger) Status() int { + return l.status +} + +func (l *responseLogger) Size() int { + return l.size +} + +func (l *responseLogger) Flush() { + f, ok := l.w.(http.Flusher) + if ok { + f.Flush() + } +} + +type hijackLogger struct { + responseLogger +} + +func (l *hijackLogger) Hijack() (net.Conn, *bufio.ReadWriter, error) { + h := l.responseLogger.w.(http.Hijacker) + conn, rw, err := h.Hijack() + if err == nil && l.responseLogger.status == 0 { + l.responseLogger.status = http.StatusSwitchingProtocols + } + return conn, rw, err +} + +type closeNotifyWriter struct { + loggingResponseWriter + http.CloseNotifier +} + +type hijackCloseNotifier struct { + loggingResponseWriter + http.Hijacker + http.CloseNotifier +} diff --git a/services/httpsvc/handlers/trace.go b/services/httpsvc/handlers/trace.go new file mode 100644 index 0000000000..53bafb3a71 --- /dev/null +++ b/services/httpsvc/handlers/trace.go @@ -0,0 +1,39 @@ +package handlers + +import ( + "context" + "net/http" + + "github.com/gofrs/uuid" + "google.golang.org/grpc/metadata" +) + +// TraceHandler is a middlware that checks if there is a trace provided +// as X-Trace header or generates one on the fly +// then the trace is stored in the context +func TraceHandler(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + var trace string + val, ok := ctx.Value("trace").(string) + if ok && val != "" { + trace = val + } else { + // try to get it from header + trace = r.Header.Get("x-trace") + if trace == "" { + trace = genTrace() + } + } + + ctx = context.WithValue(ctx, "trace", trace) + header := metadata.New(map[string]string{"x-trace": trace}) + ctx = metadata.NewOutgoingContext(ctx, header) + r = r.WithContext(ctx) + h.ServeHTTP(w, r) + }) +} + +func genTrace() string { + return uuid.Must(uuid.NewV4()).String() +} diff --git a/services/httpsvc/httpsvc.go b/services/httpsvc/httpsvc.go new file mode 100644 index 0000000000..030f8dc351 --- /dev/null +++ b/services/httpsvc/httpsvc.go @@ -0,0 +1,25 @@ +package httpsvc + +import ( + "net/http" + "path" + "strings" +) 
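Editor's note: the two middlewares above compose with plain net/http. A minimal sketch of the intended wiring follows; only the handlers.LogHandler and handlers.TraceHandler signatures come from the code above, while the log.New constructor, the route, and the port are illustrative assumptions, not part of this change.

package main

import (
	"net/http"

	"github.com/cernbox/reva/pkg/log"
	"github.com/cernbox/reva/services/httpsvc/handlers"
)

func main() {
	// hypothetical logger constructor; adjust to the actual pkg/log API
	logger := log.New("httpsvc")

	mux := http.NewServeMux()
	mux.HandleFunc("/hello", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello"))
	})

	// TraceHandler assigns or propagates an x-trace id for the request;
	// LogHandler records method, status, size and timing once the
	// wrapped handler returns.
	var h http.Handler = mux
	h = handlers.LogHandler(logger, h)
	h = handlers.TraceHandler(h)

	http.ListenAndServe(":9998", h) // port chosen only for the example
}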
+ +// Service represents a HTTP service. +type Service interface { + Handler() http.Handler + Prefix() string +} + +// ShiftPath splits off the first component of p, which will be cleaned of +// relative components before processing. head will never contain a slash and +// tail will always be a rooted path without trailing slash. +func ShiftPath(p string) (head, tail string) { + p = path.Clean("/" + p) + i := strings.Index(p[1:], "/") + 1 + if i <= 0 { + return p[1:], "/" + } + return p[1:i], p[i:] +} diff --git a/services/httpsvc/iframeuisvc/iframeuisvc.go b/services/httpsvc/iframeuisvc/iframeuisvc.go new file mode 100644 index 0000000000..1a1292028e --- /dev/null +++ b/services/httpsvc/iframeuisvc/iframeuisvc.go @@ -0,0 +1,64 @@ +package iframeuisvc + +import ( + "net/http" + + "github.com/cernbox/reva/services/httpsvc" + "github.com/mitchellh/mapstructure" +) + +type config struct { + Prefix string `mapstructure:"prefix"` +} + +type svc struct { + prefix string + handler http.Handler +} + +// New returns a new webuisvc +func New(m map[string]interface{}) (httpsvc.Service, error) { + conf := &config{} + if err := mapstructure.Decode(m, conf); err != nil { + return nil, err + } + + return &svc{prefix: conf.Prefix, handler: getHandler()}, nil +} + +func (s *svc) Prefix() string { + return s.prefix +} + +func (s *svc) Handler() http.Handler { + return s.handler +} + +func getHandler() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var head string + head, r.URL.Path = httpsvc.ShiftPath(r.URL.Path) + if head == "open" { + doOpen(w, r) + return + } + }) +} + +func doOpen(w http.ResponseWriter, r *http.Request) { + html := ` + + +
+ +