Skip to content

Commit

Permalink
initdata: Merge branch 'main' into e2e
Browse files Browse the repository at this point in the history
- Merge branch 'main' into e2e
  • Loading branch information
Qi Feng Huo committed Aug 20, 2024
2 parents 67d7c3f + 3c2c82e commit 1b3dc78
Show file tree
Hide file tree
Showing 5 changed files with 92 additions and 44 deletions.
17 changes: 6 additions & 11 deletions src/cloud-api-adaptor/docs/SecureComms.md
Original file line number Diff line number Diff line change
Expand Up @@ -32,8 +32,8 @@ See [Secure Comms Architecture Slides](./SecureComms.pdf) for more details.
### Deploy CAA
Use any of the options for installing CAA depending on the cloud driver used.

### Deploy KBS-Operator
Deploy KBS-Operator by following instructions at [KBS Operator Getting Started](https://github.com/confidential-containers/kbs-operator?tab=readme-ov-file#getting-started).
### Deploy Trustee-Operator
Deploy Trustee-Operator by following instructions at [Trustee Operator Getting Started](https://github.com/confidential-containers/trustee-operator?tab=readme-ov-file#getting-started).

Make sure to uncomment the secret generation as recommended for both public and private keys (the `kbs-auth-public-key` and `kbs-client` secrets).

Expand All @@ -46,15 +46,10 @@ kubectl get secret kbs-client -n kbs-operator-system -o json|jq --arg ns "confid
For a testing environment, you may need to change the policy of the KBS and AS using the KBS Client to allow all or fit your own policy. One way to do that is:

```sh
kubectl -n kbs-operator-system exec deployment/kbs-deployment --container as -it -- /bin/bash
apt update
apt install vim
vim /opt/confidential-containers/attestation-service/opa/default.rego // replace to `default allow = true`

kubectl -n kbs-operator-system exec deployment/kbs-deployment --container kbs -it -- /bin/bash
apt update
apt install vim
vim /opa/confidential-containers/kbs/policy.rego // replace to `default allow = true`
kubectl -n kbs-operator-system exec deployment/trustee-deployment --container as -it -- /bin/bash
sed -i.bak 's/^default allow = false/default allow = true/' /opt/confidential-containers/attestation-service/opa/default.rego

kubectl -n kbs-operator-system get cm resource-policy -o yaml | sed "s/default allow = false/default allow = true/"|kubectl apply -f -
```

### Build a podvm that enforces Secure-Comms
Expand Down
24 changes: 11 additions & 13 deletions src/cloud-api-adaptor/pkg/adaptor/cloud/cloud.go
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,6 @@ const (
var logger = log.New(log.Writer(), "[adaptor/cloud] ", log.LstdFlags|log.Lmsgprefix)

func (s *cloudService) addSandbox(sid sandboxID, sandbox *sandbox) error {

s.mutex.Lock()
defer s.mutex.Unlock()

Expand All @@ -57,7 +56,6 @@ func (s *cloudService) addSandbox(sid sandboxID, sandbox *sandbox) error {
}

func (s *cloudService) getSandbox(sid sandboxID) (*sandbox, error) {

if sid == "" {
return nil, errors.New("empty sandbox id")
}
Expand Down Expand Up @@ -122,7 +120,6 @@ func (s *cloudService) ConfigVerifier() error {
}

func (s *cloudService) setInstance(sid sandboxID, instanceID, instanceName string) error {

s.mutex.Lock()
defer s.mutex.Unlock()

Expand All @@ -140,7 +137,6 @@ func (s *cloudService) setInstance(sid sandboxID, instanceID, instanceName strin
}

func (s *cloudService) GetInstanceID(ctx context.Context, podNamespace, podName string, wait bool) (string, error) {

s.mutex.Lock()
defer s.mutex.Unlock()

Expand Down Expand Up @@ -170,7 +166,6 @@ func (s *cloudService) Version(ctx context.Context, req *pb.VersionRequest) (*pb
}

func (s *cloudService) CreateVM(ctx context.Context, req *pb.CreateVMRequest) (res *pb.CreateVMResponse, err error) {

defer func() {
if err != nil {
logger.Print(err)
Expand Down Expand Up @@ -267,7 +262,7 @@ func (s *cloudService) CreateVM(ctx context.Context, req *pb.CreateVMRequest) (r

// Store daemon.json in worker node for debugging
daemonJSONPath := filepath.Join(podDir, "daemon.json")
if err := os.WriteFile(daemonJSONPath, daemonJSON, 0666); err != nil {
if err := os.WriteFile(daemonJSONPath, daemonJSON, 0o666); err != nil {
return nil, fmt.Errorf("storing %s: %w", daemonJSONPath, err)
}
logger.Printf("stored %s", daemonJSONPath)
Expand Down Expand Up @@ -332,10 +327,9 @@ func (s *cloudService) CreateVM(ctx context.Context, req *pb.CreateVMRequest) (r
}

func (s *cloudService) StartVM(ctx context.Context, req *pb.StartVMRequest) (res *pb.StartVMResponse, err error) {

defer func() {
if err != nil {
logger.Print(err)
logger.Printf("error starting instance: %v", err)
}
}()

Expand All @@ -353,7 +347,7 @@ func (s *cloudService) StartVM(ctx context.Context, req *pb.StartVMRequest) (res

if s.ppService != nil {
if err := s.ppService.OwnPeerPod(sandbox.podName, sandbox.podNamespace, instance.ID); err != nil {
logger.Printf("failed to create PeerPod: %s", err.Error())
logger.Printf("failed to create PeerPod: %v", err)
}
}

Expand All @@ -373,7 +367,7 @@ func (s *cloudService) StartVM(ctx context.Context, req *pb.StartVMRequest) (res
}

if err := ci.Start(); err != nil {
return nil, fmt.Errorf("failed SshClientInstance.Start: %s", err)
return nil, fmt.Errorf("failed SshClientInstance.Start: %w", err)
}

// Set agentProxy
Expand Down Expand Up @@ -406,19 +400,23 @@ func (s *cloudService) StartVM(ctx context.Context, req *pb.StartVMRequest) (res

select {
case <-ctx.Done():
_ = sandbox.agentProxy.Shutdown()
// Start VM operation interrupted (calling context canceled)
logger.Printf("Error: start instance interrupted (%v). Cleaning up...", ctx.Err())
if err := sandbox.agentProxy.Shutdown(); err != nil {
logger.Printf("stopping agent proxy: %v", err)
}
return nil, ctx.Err()
case err := <-errCh:
return nil, err
case <-sandbox.agentProxy.Ready():
}

logger.Printf("agent proxy is ready")
logger.Print("agent proxy is ready")

return &pb.StartVMResponse{}, nil
}

func (s *cloudService) StopVM(ctx context.Context, req *pb.StopVMRequest) (*pb.StopVMResponse, error) {

sid := sandboxID(req.Id)

sandbox, err := s.getSandbox(sid)
Expand Down
13 changes: 5 additions & 8 deletions src/cloud-api-adaptor/pkg/adaptor/proxy/proxy.go
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,6 @@ type agentProxy struct {
}

func NewAgentProxy(serverName, socketPath, pauseImage string, tlsConfig *tlsutil.TLSConfig, caService tlsutil.CAService, proxyTimeout time.Duration) AgentProxy {

return &agentProxy{
serverName: serverName,
socketPath: socketPath,
Expand All @@ -68,7 +67,6 @@ func NewAgentProxy(serverName, socketPath, pauseImage string, tlsConfig *tlsutil
}

func (p *agentProxy) dial(ctx context.Context, address string) (net.Conn, error) {

var conn net.Conn

var dialer interface {
Expand Down Expand Up @@ -109,14 +107,15 @@ func (p *agentProxy) dial(ctx context.Context, address string) (net.Conn, error)
err := retry.Do(
func() error {
var err error
conn, err = dialer.DialContext(ctx, "tcp", address)
if conn, err = dialer.DialContext(ctx, "tcp", address); err != nil {
logger.Printf("Retrying agent proxy connection to %s...", address)
}
return err
},
retry.Attempts(0),
retry.Context(ctx),
retry.MaxDelay(5*time.Second),
)

if err != nil {
err = fmt.Errorf("failed to establish agent proxy connection to %s: %w", address, err)
logger.Print(err)
Expand All @@ -128,15 +127,14 @@ func (p *agentProxy) dial(ctx context.Context, address string) (net.Conn, error)
}

func (p *agentProxy) Start(ctx context.Context, serverURL *url.URL) error {

if err := os.MkdirAll(filepath.Dir(p.socketPath), os.ModePerm); err != nil {
return fmt.Errorf("failed to create parent directories for socket: %s", p.socketPath)
}
if err := os.Remove(p.socketPath); err != nil && !errors.Is(err, os.ErrNotExist) {
return fmt.Errorf("failed to remove %s: %w", p.socketPath, err)
}

logger.Printf("Listening on %s\n", p.socketPath)
logger.Printf("Listening on %s", p.socketPath)

listener, err := net.Listen("unix", p.socketPath)
if err != nil {
Expand Down Expand Up @@ -203,7 +201,7 @@ func (p *agentProxy) Ready() chan struct{} {
}

func (p *agentProxy) Shutdown() error {
logger.Printf("shutting down socket forwarder")
logger.Print("shutting down socket forwarder")
p.stopOnce.Do(func() {
close(p.stopCh)
})
Expand All @@ -215,7 +213,6 @@ func (p *agentProxy) CAService() tlsutil.CAService {
}

func (p *agentProxy) ClientCA() (certPEM []byte) {

if p.tlsConfig == nil {
return nil
}
Expand Down
33 changes: 24 additions & 9 deletions src/cloud-providers/ibmcloud/provider.go
Original file line number Diff line number Diff line change
Expand Up @@ -90,6 +90,9 @@ func NewProvider(config *Config) (provider.Provider, error) {

// If this label exists assume we are in an IKS cluster
primarySubnetID, iks := nodeLabels["ibm-provider.kubernetes.io/subnet-id"]
if !iks {
primarySubnetID, iks = nodeLabels["ibm-cloud.kubernetes.io/subnet-id"]
}
if iks {
if config.ZoneName == "" {
config.ZoneName = nodeLabels["topology.kubernetes.io/zone"]
Expand Down Expand Up @@ -249,7 +252,7 @@ func (p *ibmcloudVPCProvider) CreateInstance(ctx context.Context, podName, sandb
return nil, err
}

imageID, err := p.selectImage(ctx, spec)
imageID, err := p.selectImage(ctx, spec, instanceProfile)
if err != nil {
return nil, err
}
Expand Down Expand Up @@ -321,11 +324,11 @@ func (p *ibmcloudVPCProvider) updateInstanceProfileSpecList() error {

// Iterate over the instance types and populate the instanceProfileSpecList
for _, profileType := range instanceProfiles {
vcpus, memory, err := p.getProfileNameInformation(profileType)
vcpus, memory, arch, err := p.getProfileNameInformation(profileType)
if err != nil {
return err
}
instanceProfileSpecList = append(instanceProfileSpecList, provider.InstanceTypeSpec{InstanceType: profileType, VCPUs: vcpus, Memory: memory})
instanceProfileSpecList = append(instanceProfileSpecList, provider.InstanceTypeSpec{InstanceType: profileType, VCPUs: vcpus, Memory: memory, Arch: arch})
}

// Sort the instanceProfileSpecList by Memory and update the serviceConfig
Expand All @@ -334,8 +337,8 @@ func (p *ibmcloudVPCProvider) updateInstanceProfileSpecList() error {
return nil
}

// Add a method to retrieve cpu, memory, and storage from the profile name
func (p *ibmcloudVPCProvider) getProfileNameInformation(profileName string) (vcpu int64, memory int64, err error) {
// Add a method to retrieve cpu, memory, and arch from the profile name
func (p *ibmcloudVPCProvider) getProfileNameInformation(profileName string) (vcpu int64, memory int64, arch string, err error) {

// Get the profile information from the instance type using IBMCloud API
result, details, err := p.vpc.GetInstanceProfileWithContext(context.Background(),
Expand All @@ -345,19 +348,31 @@ func (p *ibmcloudVPCProvider) getProfileNameInformation(profileName string) (vcp
)

if err != nil {
return 0, 0, fmt.Errorf("instance profile name %s not found, due to %w\nFurther Details:\n%v", profileName, err, details)
return 0, 0, "", fmt.Errorf("instance profile name %s not found, due to %w\nFurther Details:\n%v", profileName, err, details)
}

vcpu = int64(*result.VcpuCount.(*vpcv1.InstanceProfileVcpu).Value)
// Value returned is in GiB, convert to MiB
memory = int64(*result.Memory.(*vpcv1.InstanceProfileMemory).Value) * 1024
return vcpu, memory, nil
arch = string(*result.VcpuArchitecture.Value)
return vcpu, memory, arch, nil
}

// Select Image from list, invalid image IDs should have already been removed
func (p *ibmcloudVPCProvider) selectImage(ctx context.Context, spec provider.InstanceTypeSpec) (string, error) {
func (p *ibmcloudVPCProvider) selectImage(ctx context.Context, spec provider.InstanceTypeSpec, selectedInstanceProfile string) (string, error) {

specArch := spec.Arch
if specArch == "" {
for _, instanceProfileSpec := range p.serviceConfig.InstanceProfileSpecList {
if instanceProfileSpec.InstanceType == selectedInstanceProfile {
specArch = instanceProfileSpec.Arch
break
}
}
}

for _, image := range p.serviceConfig.Images {
if spec.Arch != "" && image.Arch != spec.Arch {
if specArch != "" && image.Arch != specArch {
continue
}
logger.Printf("selected image with ID <%s> out of %d images", image.ID, len(p.serviceConfig.Images))
Expand Down
Loading

0 comments on commit 1b3dc78

Please sign in to comment.