Use github.com/samber/lo.ToPtr everywhere
There are a lot of places where we have ad-hoc ways to construct pointers
to things - either via temporary variables, by referencing the first
element of a slice literal, or with a local function that produces pointers.

We can replace those with lo.ToPtr (like what the control plane already
uses) to help with readability and consistency.

Originally discussed here:
github.com//pull/895#discussion_r1576977602
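
For a sense of the change, here is a minimal sketch (illustrative only; `val` and the local `ptr` helper are stand-ins, not code from this diff) of the three ad-hoc patterns next to their lo.ToPtr equivalent:

    import "github.com/samber/lo"

    func pointerPatterns() {
        val := 42

        // 1. a temporary variable, just so we can take its address:
        tmp := val
        p1 := &tmp

        // 2. referencing the first element of a one-element slice literal:
        p2 := &[]int{val}[0]

        // 3. a local helper function that returns a pointer:
        ptr := func(x int) *int { return &x }
        p3 := ptr(val)

        // all three collapse to a single call:
        p4 := lo.ToPtr(val)

        _, _, _, _ = p1, p2, p3, p4
    }

Like each pattern above, lo.ToPtr (signature: func ToPtr[T any](x T) *T) takes its argument by value, so every call returns a pointer to a fresh copy; the call sites in this diff keep their original semantics.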
sharnoff committed May 24, 2024
1 parent a82110b commit ba05c42
Showing 8 changed files with 58 additions and 54 deletions.
1 change: 1 addition & 0 deletions go.mod
@@ -61,6 +61,7 @@ require (
     github.com/onsi/gomega v1.24.2
     github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
     github.com/prometheus/client_golang v1.14.0
+    github.com/samber/lo v1.39.0
     github.com/stretchr/testify v1.8.1
     github.com/tychoish/fun v0.8.5
     github.com/vishvananda/netlink v1.1.1-0.20220125195016-0639e7e787ba

2 changes: 2 additions & 0 deletions go.sum
@@ -484,6 +484,8 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR
 github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
 github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/samber/lo v1.39.0 h1:4gTz1wUhNYLhFSKl6O+8peW0v2F4BCY034GRpU9WnuA=
+github.com/samber/lo v1.39.0/go.mod h1:+m/ZKRl6ClXCE2Lgf3MsQlWfh4bn1bz6CXEOxnEXnEA=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
 github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=

4 changes: 3 additions & 1 deletion neonvm/apis/neonvm/v1/virtualmachine_types.go
@@ -21,6 +21,8 @@ import (
     "errors"
     "fmt"

+    "github.com/samber/lo"
+
     corev1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -242,7 +244,7 @@ func (s *GuestSettings) GetSwapInfo() (*SwapInfo, error) {
             SkipSwapon: nil,
         }, nil
     } else if s.SwapInfo != nil {
-        return &[]SwapInfo{*s.SwapInfo}[0], nil
+        return lo.ToPtr(*s.SwapInfo), nil
     }

     return nil, nil

23 changes: 12 additions & 11 deletions neonvm/controllers/vm_controller.go
@@ -33,6 +33,7 @@ import (
     "time"

     nadapiv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1"
+    "github.com/samber/lo"
     "golang.org/x/crypto/ssh"
     ctrl "sigs.k8s.io/controller-runtime"
     "sigs.k8s.io/controller-runtime/pkg/client"

@@ -1124,7 +1125,7 @@ func sshSecretSpec(vm *vmv1.VirtualMachine) (*corev1.Secret, error) {
             Name: vm.Status.SSHSecretName,
             Namespace: vm.Namespace,
         },
-        Immutable: &[]bool{true}[0],
+        Immutable: lo.ToPtr(true),
         Type: corev1.SecretTypeSSHAuth,
         Data: map[string][]byte{
             "ssh-publickey": publicKey,

@@ -1312,7 +1313,7 @@ func podSpec(vm *vmv1.VirtualMachine, sshSecret *corev1.Secret, config *Reconcil
         },
         Spec: corev1.PodSpec{
             EnableServiceLinks: vm.Spec.ServiceLinks,
-            AutomountServiceAccountToken: &[]bool{false}[0],
+            AutomountServiceAccountToken: lo.ToPtr(false),
             RestartPolicy: corev1.RestartPolicyNever,
             TerminationGracePeriodSeconds: vm.Spec.TerminationGracePeriodSeconds,
             NodeSelector: vm.Spec.NodeSelector,

@@ -1338,7 +1339,7 @@ func podSpec(vm *vmv1.VirtualMachine, sshSecret *corev1.Secret, config *Reconcil
                     "sysctl -w net.ipv4.ip_forward=1",
                 },
                 SecurityContext: &corev1.SecurityContext{
-                    Privileged: &[]bool{true}[0],
+                    Privileged: lo.ToPtr(true),
                 },
             },
         },

@@ -1351,7 +1352,7 @@ func podSpec(vm *vmv1.VirtualMachine, sshSecret *corev1.Secret, config *Reconcil
                 // Ensure restrictive context for the container
                 // More info: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted
                 SecurityContext: &corev1.SecurityContext{
-                    Privileged: &[]bool{false}[0],
+                    Privileged: lo.ToPtr(false),
                     Capabilities: &corev1.Capabilities{
                         Add: []corev1.Capability{
                             "NET_ADMIN",

@@ -1403,7 +1404,7 @@ func podSpec(vm *vmv1.VirtualMachine, sshSecret *corev1.Secret, config *Reconcil
         // mounted inside the container won't be propagated to the host or other
         // containers.
         // Note that this mode corresponds to "private" in Linux terminology.
-        MountPropagation: &[]corev1.MountPropagationMode{corev1.MountPropagationNone}[0],
+        MountPropagation: lo.ToPtr(corev1.MountPropagationNone),
     }

     if config.UseContainerMgr {

@@ -1485,7 +1486,7 @@ func podSpec(vm *vmv1.VirtualMachine, sshSecret *corev1.Secret, config *Reconcil
             VolumeSource: corev1.VolumeSource{
                 HostPath: &corev1.HostPathVolumeSource{
                     Path: "/sys/fs/cgroup",
-                    Type: &[]corev1.HostPathType{corev1.HostPathDirectory}[0],
+                    Type: lo.ToPtr(corev1.HostPathDirectory),
                 },
             },
         }

@@ -1494,7 +1495,7 @@ func podSpec(vm *vmv1.VirtualMachine, sshSecret *corev1.Secret, config *Reconcil
             VolumeSource: corev1.VolumeSource{
                 HostPath: &corev1.HostPathVolumeSource{
                     Path: config.criEndpointSocketPath(),
-                    Type: &[]corev1.HostPathType{corev1.HostPathSocket}[0],
+                    Type: lo.ToPtr(corev1.HostPathSocket),
                 },
             },
         }

@@ -1529,7 +1530,7 @@ func podSpec(vm *vmv1.VirtualMachine, sshSecret *corev1.Secret, config *Reconcil
                     {
                         Key: "ssh-privatekey",
                         Path: "id_ed25519",
-                        Mode: &[]int32{0600}[0],
+                        Mode: lo.ToPtr[int32](0600),
                     },
                 },
             },

@@ -1544,7 +1545,7 @@ func podSpec(vm *vmv1.VirtualMachine, sshSecret *corev1.Secret, config *Reconcil
                     {
                         Key: "ssh-publickey",
                         Path: "authorized_keys",
-                        Mode: &[]int32{0644}[0],
+                        Mode: lo.ToPtr[int32](0644),
                     },
                 },
             },

@@ -1575,8 +1576,8 @@ func podSpec(vm *vmv1.VirtualMachine, sshSecret *corev1.Secret, config *Reconcil
             }},
             SecurityContext: &corev1.SecurityContext{
                 // uid=36(qemu) gid=34(kvm) groups=34(kvm)
-                RunAsUser: &[]int64{36}[0],
-                RunAsGroup: &[]int64{34}[0],
+                RunAsUser: lo.ToPtr[int64](36),
+                RunAsGroup: lo.ToPtr[int64](34),
             },
         })
     }

4 changes: 2 additions & 2 deletions pkg/agent/core/state.go
@@ -27,6 +27,7 @@ import (
     "strings"
     "time"

+    "github.com/samber/lo"
     "go.uber.org/zap"

     "github.com/neondatabase/autoscaling/pkg/api"

@@ -403,8 +404,7 @@ func (s *state) calculatePluginAction(
         // convert maybe-nil '*Metrics' to maybe-nil '*core.Metrics'
         Metrics: func() *api.Metrics {
             if s.Metrics != nil {
-                m := s.Metrics.ToAPI()
-                return &m
+                return lo.ToPtr(s.Metrics.ToAPI())
             } else {
                 return nil
             }

69 changes: 33 additions & 36 deletions pkg/agent/core/state_test.go
@@ -5,6 +5,7 @@ import (
     "testing"
     "time"

+    "github.com/samber/lo"
     "go.uber.org/zap"
     "golang.org/x/exp/slices"

@@ -231,10 +232,6 @@ func duration(s string) time.Duration {
     return d
 }

-func ptr[T any](t T) *T {
-    return &t
-}
-
 // Thorough checks of a relatively simple flow - scaling from 1 CU to 2 CU and back down.
 func TestBasicScaleUpAndDownFlow(t *testing.T) {
     a := helpers.NewAssert(t)

@@ -273,9 +270,9 @@ func TestBasicScaleUpAndDownFlow(t *testing.T) {
     // scale-up would be a good idea, we should be contacting the scheduler to get approval.
     a.Call(nextActions).Equals(core.ActionSet{
         PluginRequest: &core.ActionPluginRequest{
-            LastPermit: ptr(resForCU(1)),
+            LastPermit: lo.ToPtr(resForCU(1)),
             Target: resForCU(2),
-            Metrics: ptr(lastMetrics.ToAPI()),
+            Metrics: lo.ToPtr(lastMetrics.ToAPI()),
         },
     })
     // start the request:

@@ -380,9 +377,9 @@ func TestBasicScaleUpAndDownFlow(t *testing.T) {
     // Request to NeonVM completed, it's time to inform the scheduler plugin:
     a.Call(nextActions).Equals(core.ActionSet{
         PluginRequest: &core.ActionPluginRequest{
-            LastPermit: ptr(resForCU(2)),
+            LastPermit: lo.ToPtr(resForCU(2)),
             Target: resForCU(1),
-            Metrics: ptr(lastMetrics.ToAPI()),
+            Metrics: lo.ToPtr(lastMetrics.ToAPI()),
         },
         // shouldn't have anything to say to the other components
     })

@@ -429,7 +426,7 @@ func TestPeriodicPluginRequest(t *testing.T) {
     reqEvery := DefaultInitialStateConfig.Core.PluginRequestTick
     endTime := duration("20s")

-    doInitialPluginRequest(a, state, clock, clockTick, ptr(metrics.ToAPI()), resources)
+    doInitialPluginRequest(a, state, clock, clockTick, lo.ToPtr(metrics.ToAPI()), resources)

     for clock.Elapsed().Duration < endTime {
         timeSinceScheduledRequest := (clock.Elapsed().Duration - base) % reqEvery

@@ -445,7 +442,7 @@ func TestPeriodicPluginRequest(t *testing.T) {
             PluginRequest: &core.ActionPluginRequest{
                 LastPermit: &resources,
                 Target: resources,
-                Metrics: ptr(metrics.ToAPI()),
+                Metrics: lo.ToPtr(metrics.ToAPI()),
             },
         })
         a.Do(state.Plugin().StartingRequest, clock.Now(), resources)

@@ -597,9 +594,9 @@ func TestDeniedDownscalingIncreaseAndRetry(t *testing.T) {
     a.Call(nextActions).Equals(core.ActionSet{
         Wait: &core.ActionWait{Duration: duration("3.9s")},
         PluginRequest: &core.ActionPluginRequest{
-            LastPermit: ptr(resForCU(6)),
+            LastPermit: lo.ToPtr(resForCU(6)),
             Target: resForCU(3),
-            Metrics: ptr(metrics.ToAPI()),
+            Metrics: lo.ToPtr(metrics.ToAPI()),
         },
     })
     a.Do(state.Plugin().StartingRequest, clock.Now(), resForCU(3))

@@ -640,9 +637,9 @@ func TestDeniedDownscalingIncreaseAndRetry(t *testing.T) {
     a.Call(nextActions).Equals(core.ActionSet{
         Wait: &core.ActionWait{Duration: duration("1s")}, // still want to retry vm-monitor downscaling
         PluginRequest: &core.ActionPluginRequest{
-            LastPermit: ptr(resForCU(3)),
+            LastPermit: lo.ToPtr(resForCU(3)),
             Target: resForCU(3),
-            Metrics: ptr(metrics.ToAPI()),
+            Metrics: lo.ToPtr(metrics.ToAPI()),
         },
     })
     a.Do(state.Plugin().StartingRequest, clock.Now(), resForCU(3))

@@ -706,9 +703,9 @@ func TestDeniedDownscalingIncreaseAndRetry(t *testing.T) {
     // Successfully downscaled, so now we should inform the plugin. Not waiting on any retries.
     a.Call(nextActions).Equals(core.ActionSet{
         PluginRequest: &core.ActionPluginRequest{
-            LastPermit: ptr(resForCU(3)),
+            LastPermit: lo.ToPtr(resForCU(3)),
             Target: resForCU(1),
-            Metrics: ptr(metrics.ToAPI()),
+            Metrics: lo.ToPtr(metrics.ToAPI()),
         },
     })
     a.Do(state.Plugin().StartingRequest, clock.Now(), resForCU(1))

@@ -771,9 +768,9 @@ func TestRequestedUpscale(t *testing.T) {
     a.Call(nextActions).Equals(core.ActionSet{
         Wait: &core.ActionWait{Duration: duration("6s")}, // if nothing else happens, requested upscale expires.
         PluginRequest: &core.ActionPluginRequest{
-            LastPermit: ptr(resForCU(1)),
+            LastPermit: lo.ToPtr(resForCU(1)),
             Target: resForCU(2),
-            Metrics: ptr(lastMetrics.ToAPI()),
+            Metrics: lo.ToPtr(lastMetrics.ToAPI()),
         },
     })
     a.Do(state.Plugin().StartingRequest, clock.Now(), resForCU(2))

@@ -822,9 +819,9 @@ func TestRequestedUpscale(t *testing.T) {
     a.Call(nextActions).Equals(core.ActionSet{
         Wait: &core.ActionWait{Duration: duration("1s")},
         PluginRequest: &core.ActionPluginRequest{
-            LastPermit: ptr(resForCU(2)),
+            LastPermit: lo.ToPtr(resForCU(2)),
             Target: resForCU(2),
-            Metrics: ptr(lastMetrics.ToAPI()),
+            Metrics: lo.ToPtr(lastMetrics.ToAPI()),
         },
     })
     a.Do(state.Plugin().StartingRequest, clock.Now(), resForCU(2))

@@ -963,9 +960,9 @@ func TestDownscalePivotBack(t *testing.T) {
             t.Log(" > start plugin downscale")
             a.Call(nextActions).Equals(core.ActionSet{
                 PluginRequest: &core.ActionPluginRequest{
-                    LastPermit: ptr(resForCU(2)),
+                    LastPermit: lo.ToPtr(resForCU(2)),
                     Target: resForCU(1),
-                    Metrics: ptr(initialMetrics.ToAPI()),
+                    Metrics: lo.ToPtr(initialMetrics.ToAPI()),
                 },
             })
             a.Do(state.Plugin().StartingRequest, clock.Now(), resForCU(1))

@@ -983,9 +980,9 @@ func TestDownscalePivotBack(t *testing.T) {
             t.Log(" > start plugin upscale")
             a.Call(nextActions).Equals(core.ActionSet{
                 PluginRequest: &core.ActionPluginRequest{
-                    LastPermit: ptr(resForCU(1)),
+                    LastPermit: lo.ToPtr(resForCU(1)),
                     Target: resForCU(2),
-                    Metrics: ptr(newMetrics.ToAPI()),
+                    Metrics: lo.ToPtr(newMetrics.ToAPI()),
                 },
             })
             a.Do(state.Plugin().StartingRequest, clock.Now(), resForCU(2))

@@ -1122,9 +1119,9 @@ func TestBoundsChangeRequiresDownsale(t *testing.T) {
     // Do plugin request for that downscaling:
     a.Call(nextActions).Equals(core.ActionSet{
         PluginRequest: &core.ActionPluginRequest{
-            LastPermit: ptr(resForCU(2)),
+            LastPermit: lo.ToPtr(resForCU(2)),
             Target: resForCU(1),
-            Metrics: ptr(metrics.ToAPI()),
+            Metrics: lo.ToPtr(metrics.ToAPI()),
         },
     })
     a.Do(state.Plugin().StartingRequest, clock.Now(), resForCU(1))

@@ -1192,9 +1189,9 @@ func TestBoundsChangeRequiresUpscale(t *testing.T) {
     // We should be making a plugin request to get upscaling:
     a.Call(nextActions).Equals(core.ActionSet{
         PluginRequest: &core.ActionPluginRequest{
-            LastPermit: ptr(resForCU(2)),
+            LastPermit: lo.ToPtr(resForCU(2)),
             Target: resForCU(3),
-            Metrics: ptr(metrics.ToAPI()),
+            Metrics: lo.ToPtr(metrics.ToAPI()),
         },
     })
     a.Do(state.Plugin().StartingRequest, clock.Now(), resForCU(3))

@@ -1271,9 +1268,9 @@ func TestFailedRequestRetry(t *testing.T) {
    // We should be asking the scheduler for upscaling
     a.Call(nextActions).Equals(core.ActionSet{
         PluginRequest: &core.ActionPluginRequest{
-            LastPermit: ptr(resForCU(1)),
+            LastPermit: lo.ToPtr(resForCU(1)),
             Target: resForCU(2),
-            Metrics: ptr(metrics.ToAPI()),
+            Metrics: lo.ToPtr(metrics.ToAPI()),
         },
     })
     a.Do(state.Plugin().StartingRequest, clock.Now(), resForCU(2))

@@ -1290,9 +1287,9 @@ func TestFailedRequestRetry(t *testing.T) {
     // ... and then retry:
     a.Call(nextActions).Equals(core.ActionSet{
         PluginRequest: &core.ActionPluginRequest{
-            LastPermit: ptr(resForCU(1)),
+            LastPermit: lo.ToPtr(resForCU(1)),
             Target: resForCU(2),
-            Metrics: ptr(metrics.ToAPI()),
+            Metrics: lo.ToPtr(metrics.ToAPI()),
         },
     })
     a.Do(state.Plugin().StartingRequest, clock.Now(), resForCU(2))

@@ -1460,9 +1457,9 @@ func TestMetricsConcurrentUpdatedDuringDownscale(t *testing.T) {
         // incorrectly for 1 CU, rather than 2 CU. So, the rest of this test case is mostly just
         // rounding out the rest of the scale-down routine.
         PluginRequest: &core.ActionPluginRequest{
-            LastPermit: ptr(resForCU(3)),
+            LastPermit: lo.ToPtr(resForCU(3)),
             Target: resForCU(2),
-            Metrics: ptr(metrics.ToAPI()),
+            Metrics: lo.ToPtr(metrics.ToAPI()),
         },
         NeonVMRequest: &core.ActionNeonVMRequest{
             Current: resForCU(2),

@@ -1490,9 +1487,9 @@ func TestMetricsConcurrentUpdatedDuringDownscale(t *testing.T) {
     a.Do(state.NeonVM().RequestSuccessful, clock.Now())
     a.Call(nextActions).Equals(core.ActionSet{
         PluginRequest: &core.ActionPluginRequest{
-            LastPermit: ptr(resForCU(2)),
+            LastPermit: lo.ToPtr(resForCU(2)),
             Target: resForCU(1),
-            Metrics: ptr(metrics.ToAPI()),
+            Metrics: lo.ToPtr(metrics.ToAPI()),
         },
     })
     a.Do(state.Plugin().StartingRequest, clock.Now(), resForCU(1))

3 changes: 2 additions & 1 deletion pkg/agent/core/testhelpers/assert.go
@@ -6,6 +6,7 @@ import (
     "reflect"
     "testing"

+    "github.com/samber/lo"
     "github.com/stretchr/testify/assert"
 )

@@ -26,7 +27,7 @@ func NewAssert(t *testing.T) Assert {
     return Assert{
         t: t,
         storedWarnings: &[]string{},
-        waitingOnPreparedCall: &[]bool{false}[0], // take address of false
+        waitingOnPreparedCall: lo.ToPtr(false),
         tinfo: transactionInfo{
             expectedWarnings: []string{},
         },