Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

migrate to github actions #197

Merged
merged 1 commit into from
Dec 15, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
35 changes: 35 additions & 0 deletions .github/workflows/build.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
# CI workflow: builds, tests, vets, and docker-builds the project on every
# push to master and on every pull request targeting master.
name: Build

on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]

jobs:

  build:
    name: Build
    runs-on: ubuntu-latest
    steps:

      # Install the Go toolchain (any 1.14.x or newer minor via the caret range).
      - name: Set up Go
        uses: actions/setup-go@v2
        with:
          go-version: ^1.14
        id: go

      - name: Check out code into the Go module directory
        uses: actions/checkout@v2

      - name: Build
        run: make build

      - name: Test
        run: make test

      # `make test-vet` runs `go vet` static analysis.
      - name: Test vet
        run: make test-vet

      # Ensure the Docker image builds; publishing is handled by docker-publish.yml.
      - name: Docker
        run: make docker
53 changes: 53 additions & 0 deletions .github/workflows/docker-publish.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,53 @@
# Publishes the Docker image to GitHub Container Registry (ghcr.io).
name: Docker

on:
  push:
    # Publish `master` as a Docker image tagged with its short commit SHA.
    branches:
      - master

    # Publish `v1.2.3` tags as releases.
    tags:
      - v*

env:
  IMAGE_NAME: escalator

jobs:
  push:
    runs-on: ubuntu-latest
    if: github.event_name == 'push'

    steps:
      - uses: actions/checkout@v2

      - name: Set short sha variable
        id: vars
        # The `::set-output` workflow command is deprecated; append to the
        # $GITHUB_OUTPUT file instead (same `steps.vars.outputs.sha_short` access).
        run: echo "sha_short=$(git rev-parse --short HEAD)" >> "$GITHUB_OUTPUT"

      - name: Build image
        run: docker build . --file Dockerfile --tag $IMAGE_NAME

      - name: Log into GitHub Container Registry
        # a PAT with `read:packages` and `write:packages` scopes is an Actions secret `CR_PAT`.
        # Doesn't support Org or Repo level PATs and no bot accounts
        run: echo "${{ secrets.CR_PAT }}" | docker login https://ghcr.io -u ${{ secrets.CR_PAT_USER }} --password-stdin

      - name: Push image to GitHub Container Registry
        run: |
          IMAGE_ID=ghcr.io/${{ github.repository_owner }}/$IMAGE_NAME

          # Change all uppercase to lowercase (registry repository names must be lowercase)
          IMAGE_ID=$(echo $IMAGE_ID | tr '[A-Z]' '[a-z]')

          # Strip git ref prefix from version (refs/heads/master -> master, refs/tags/v1.2.3 -> v1.2.3)
          VERSION=$(echo "${{ github.ref }}" | sed -e 's,.*/\(.*\),\1,')

          # Use Docker `short_sha` tag convention for master builds
          [ "$VERSION" == "master" ] && VERSION="${{ steps.vars.outputs.sha_short }}"

          echo IMAGE_ID=$IMAGE_ID
          echo VERSION=$VERSION

          docker tag $IMAGE_NAME $IMAGE_ID:$VERSION
          docker push $IMAGE_ID:$VERSION
19 changes: 19 additions & 0 deletions .github/workflows/golangci-lint.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
# Lint workflow: runs golangci-lint on pushes to master and on pull requests
# targeting master.
name: golangci-lint

on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]

jobs:
  golangci:
    name: lint
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: golangci-lint
        uses: golangci/golangci-lint-action@v2
        with:
          # Pin the linter version so CI results are reproducible.
          version: v1.33
          # Default timeout is 1m; large module graphs need longer on cold caches.
          args: --timeout=5m
25 changes: 0 additions & 25 deletions .travis.yml

This file was deleted.

12 changes: 2 additions & 10 deletions Makefile
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
.PHONY: build test test-vet docker clean fmt lint
.PHONY: build test test-vet docker clean lint

TARGET=escalator
# E.g. set this to -v (I.e. GOCMDOPTS=-v via shell) to get the go command to be verbose
Expand All @@ -23,13 +23,5 @@ docker: Dockerfile
clean:
rm -f $(TARGET)

# goreturns runs both gofmt and goimports.
# This is used to pickup more comphrehnsive formatting/codestyle changes
# https://github.com/sqs/goreturns
fmt:
goreturns -w pkg/ cmd/

# the linting also uses goreturns.
# the lint.sh script reports formatting changes/errors
lint:
./lint.sh
golangci-lint run
11 changes: 0 additions & 11 deletions lint.sh

This file was deleted.

5 changes: 4 additions & 1 deletion pkg/cloudprovider/aws/aws.go
Original file line number Diff line number Diff line change
Expand Up @@ -462,7 +462,7 @@ func (n *NodeGroup) allInstancesReady(ids []*string) bool {
IncludeAllInstances: awsapi.Bool(true),
}

n.provider.ec2Service.DescribeInstanceStatusPages(input, func(r *ec2.DescribeInstanceStatusOutput, lastPage bool) bool {
err := n.provider.ec2Service.DescribeInstanceStatusPages(input, func(r *ec2.DescribeInstanceStatusOutput, lastPage bool) bool {
for _, i := range r.InstanceStatuses {
if *i.InstanceState.Name != "running" {
return false
Expand All @@ -477,6 +477,9 @@ func (n *NodeGroup) allInstancesReady(ids []*string) bool {

return true
})
if err != nil {
return false
}

return ready
}
Expand Down
1 change: 1 addition & 0 deletions pkg/cloudprovider/aws/node_group_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -297,6 +297,7 @@ func TestNodeGroup_IncreaseSize_CreateFleet(t *testing.T) {
AllInstancesReady: true,
},
)
require.NoError(t, err)

for _, nodeGroup := range awsCloudProvider.NodeGroups() {
err = nodeGroup.IncreaseSize(tt.increaseSize)
Expand Down
36 changes: 25 additions & 11 deletions pkg/controller/controller_scale_node_group_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -33,16 +33,23 @@ func buildTestPods(amount int, CPU int64, Mem int64) []*v1.Pod {
})
}

func buildTestClient(nodes []*v1.Node, pods []*v1.Pod, nodeGroups []NodeGroupOptions, listerOptions ListerOptions) (*Client, Opts) {
func buildTestClient(nodes []*v1.Node, pods []*v1.Pod, nodeGroups []NodeGroupOptions, listerOptions ListerOptions) (*Client, Opts, error) {
fakeClient, _ := test.BuildFakeClient(nodes, pods)
opts := Opts{
K8SClient: fakeClient,
NodeGroups: nodeGroups,
ScanInterval: 1 * duration.Minute,
DryMode: false,
}
allPodLister := test.NewTestPodWatcher(pods, listerOptions.podListerOptions)
allNodeLister := test.NewTestNodeWatcher(nodes, listerOptions.nodeListerOptions)
allPodLister, err := test.NewTestPodWatcher(pods, listerOptions.podListerOptions)
if err != nil {
return nil, opts, err
}

allNodeLister, err := test.NewTestNodeWatcher(nodes, listerOptions.nodeListerOptions)
if err != nil {
return nil, opts, err
}

nodeGroupListerMap := make(map[string]*NodeGroupLister)
for _, ng := range nodeGroups {
Expand All @@ -60,7 +67,7 @@ func buildTestClient(nodes []*v1.Node, pods []*v1.Pod, nodeGroups []NodeGroupOpt
allNodeLister,
}

return client, opts
return client, opts, nil
}

// Test the edge case where the min nodes gets changed to above the current number of untainted nodes
Expand All @@ -84,7 +91,8 @@ func TestUntaintNodeGroupMinNodes(t *testing.T) {
Tainted: true,
})

client, opts := buildTestClient(nodes, buildTestPods(10, 1000, 1000), nodeGroups, ListerOptions{})
client, opts, err := buildTestClient(nodes, buildTestPods(10, 1000, 1000), nodeGroups, ListerOptions{})
require.NoError(t, err)

// For these test cases we only use 1 node group/cloud provider node group
nodeGroupSize := 1
Expand Down Expand Up @@ -114,7 +122,7 @@ func TestUntaintNodeGroupMinNodes(t *testing.T) {
cloudProvider: testCloudProvider,
}

_, err := controller.scaleNodeGroup(nodeGroup.Name, nodeGroupsState[nodeGroup.Name])
_, err = controller.scaleNodeGroup(nodeGroup.Name, nodeGroupsState[nodeGroup.Name])
assert.NoError(t, err)

untainted, tainted, _ := controller.filterNodes(nodeGroupsState[nodeGroup.Name], nodes)
Expand Down Expand Up @@ -150,7 +158,8 @@ func TestUntaintNodeGroupMaxNodes(t *testing.T) {
Mem: 1000,
})...)

client, opts := buildTestClient(nodes, buildTestPods(10, 1000, 1000), nodeGroups, ListerOptions{})
client, opts, err := buildTestClient(nodes, buildTestPods(10, 1000, 1000), nodeGroups, ListerOptions{})
require.NoError(t, err)

// For these test cases we only use 1 node group/cloud provider node group
nodeGroupSize := 1
Expand Down Expand Up @@ -180,7 +189,8 @@ func TestUntaintNodeGroupMaxNodes(t *testing.T) {
cloudProvider: testCloudProvider,
}

controller.scaleNodeGroup(nodeGroup.Name, nodeGroupsState[nodeGroup.Name])
_, err = controller.scaleNodeGroup(nodeGroup.Name, nodeGroupsState[nodeGroup.Name])
require.NoError(t, err)

untainted, tainted, _ := controller.filterNodes(nodeGroupsState[nodeGroup.Name], nodes)
// Ensure that the tainted nodes where untainted
Expand Down Expand Up @@ -457,7 +467,8 @@ func TestScaleNodeGroup(t *testing.T) {
nodeGroups := []NodeGroupOptions{tt.args.nodeGroupOptions}
ngName := tt.args.nodeGroupOptions.Name
nodes := buildTestNodes(tt.args.nodeArgs.initialAmount, tt.args.nodeArgs.cpu, tt.args.nodeArgs.mem)
client, opts := buildTestClient(nodes, tt.args.pods, nodeGroups, tt.args.listerOptions)
client, opts, err := buildTestClient(nodes, tt.args.pods, nodeGroups, tt.args.listerOptions)
require.NoError(t, err)

// For these test cases we only use 1 node group/cloud provider node group
nodeGroupSize := 1
Expand Down Expand Up @@ -506,7 +517,9 @@ func TestScaleNodeGroup(t *testing.T) {
// Create the nodes to simulate the cloud provider bringing up the new nodes
newNodes := append(nodes, buildTestNodes(nodesDelta, tt.args.nodeArgs.cpu, tt.args.nodeArgs.mem)...)
// Create a new client with the new nodes and update everything that uses the client
client, opts = buildTestClient(newNodes, tt.args.pods, nodeGroups, tt.args.listerOptions)
client, opts, err = buildTestClient(newNodes, tt.args.pods, nodeGroups, tt.args.listerOptions)
require.NoError(t, err)

controller.Client = client
controller.Opts = opts
nodeGroupsState[ngName].NodeGroupLister = client.Listers[ngName]
Expand Down Expand Up @@ -678,7 +691,8 @@ func TestScaleNodeGroup_MultipleRuns(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
nodeGroups := []NodeGroupOptions{tt.args.nodeGroupOptions}
client, opts := buildTestClient(tt.args.nodes, tt.args.pods, nodeGroups, tt.args.listerOptions)
client, opts, err := buildTestClient(tt.args.nodes, tt.args.pods, nodeGroups, tt.args.listerOptions)
require.NoError(t, err)

// For these test cases we only use 1 node group/cloud provider node group
nodeGroupSize := 1
Expand Down
10 changes: 7 additions & 3 deletions pkg/controller/scale_down_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ package controller

import (
"fmt"
"github.com/stretchr/testify/require"
"testing"
"time"

Expand Down Expand Up @@ -176,7 +177,8 @@ func TestControllerScaleDownTaint(t *testing.T) {
// untaint all
for _, node := range nodes {
if _, tainted := k8s.GetToBeRemovedTaint(node); tainted {
k8s.DeleteToBeRemovedTaint(node, client)
_, err := k8s.DeleteToBeRemovedTaint(node, client)
require.NoError(t, err)
<-updateChan
}
}
Expand Down Expand Up @@ -353,7 +355,8 @@ func TestControllerTaintOldestN(t *testing.T) {
// untaint all
for _, node := range nodes {
if _, tainted := k8s.GetToBeRemovedTaint(node); tainted {
k8s.DeleteToBeRemovedTaint(node, client)
_, err := k8s.DeleteToBeRemovedTaint(node, client)
require.NoError(t, err)
<-updateChan
}
}
Expand Down Expand Up @@ -386,7 +389,8 @@ func TestController_TryRemoveTaintedNodes(t *testing.T) {
})

pods := buildTestPods(10, 1000, 1000)
client, opts := buildTestClient(nodes, pods, nodeGroups, ListerOptions{})
client, opts, err := buildTestClient(nodes, pods, nodeGroups, ListerOptions{})
require.NoError(t, err)

// For these test cases we only use 1 node group/cloud provider node group
nodeGroupSize := 1
Expand Down
6 changes: 3 additions & 3 deletions pkg/controller/scale_lock.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ type scaleLock struct {

// locked returns whether the scale lock is locked
func (l *scaleLock) locked() bool {
if time.Now().Sub(l.lockTime) < l.minimumLockDuration {
if time.Since(l.lockTime) < l.minimumLockDuration {
metrics.NodeGroupScaleLockCheckWasLocked.WithLabelValues(l.nodegroup).Add(1.0)
return true
}
Expand All @@ -46,7 +46,7 @@ func (l *scaleLock) unlock() {
// Only if it's already locked, otherwise noop; handles frequent forced unlocking from the locked() call to avoid spurious metrics submission
if l.isLocked {
// Recording the lock duration in seconds, if $cloud provider could do scaling in nanosecond resolution; good problem to have.
lockDuration := time.Now().Sub(l.lockTime).Seconds()
lockDuration := time.Since(l.lockTime).Seconds()
log.Debug(fmt.Sprintf("Unlocking scale lock. Lock Duration: %0.0f s Node Group: %s", lockDuration, l.nodegroup))
l.isLocked = false
l.requestedNodes = 0
Expand All @@ -57,7 +57,7 @@ func (l *scaleLock) unlock() {

// timeUntilMinimumUnlock returns the the time until the minimum unlock
func (l *scaleLock) timeUntilMinimumUnlock() time.Duration {
return l.lockTime.Add(l.minimumLockDuration).Sub(time.Now())
return time.Until(l.lockTime.Add(l.minimumLockDuration))
}

func (l scaleLock) String() string {
Expand Down
4 changes: 3 additions & 1 deletion pkg/controller/scale_up_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ package controller

import (
"fmt"
"github.com/stretchr/testify/require"
"testing"
"time"

Expand Down Expand Up @@ -163,7 +164,8 @@ func TestControllerUntaintNewestN(t *testing.T) {
var tc int
for _, node := range nodes {
if _, tainted := k8s.GetToBeRemovedTaint(node); !tainted {
k8s.AddToBeRemovedTaint(node, client, "NoSchedule")
_, err := k8s.AddToBeRemovedTaint(node, client, "NoSchedule")
require.NoError(t, err)
nodeGroupsState["example"].taintTracker = append(nodeGroupsState["example"].taintTracker, node.Name)
<-updateChan
tc++
Expand Down
2 changes: 1 addition & 1 deletion pkg/k8s/taint_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -135,7 +135,7 @@ func TestGetToBeRemovedTime(t *testing.T) {
// Get the taint to be removed time
val, err = GetToBeRemovedTime(updated)
assert.NoError(t, err)
assert.True(t, time.Now().Sub(*val) < 10*time.Second)
assert.True(t, time.Since(*val) < 10*time.Second)
}

func TestGetToBeRemovedTime_InvalidValue(t *testing.T) {
Expand Down
Loading