Skip to content

Commit

Permalink
Fix bug related to hashicorp/nomad-autoscaler#572
Browse files Browse the repository at this point in the history
  • Loading branch information
jorgemarey committed Apr 5, 2022
1 parent a813294 commit cca30ca
Show file tree
Hide file tree
Showing 3 changed files with 18 additions and 7 deletions.
9 changes: 9 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,5 +1,14 @@
## 0.2.4 (Apr 5, 2022)

BUG FIXES:
* Fix a bug that deleted all servers in the pool when scaling in.

## 0.2.3 (Apr 3, 2022)

NOTES:
* This version contains a bug related to https://github.com/hashicorp/nomad-autoscaler/issues/572
  that will destroy all nodes in the pool when scaling in. You should skip this version and upgrade to 0.2.4.

FEATURES:
* Allow configuring a timeout for creation and deletion of servers
* Allow setting a `value_separator` to use when splitting value strings
Expand Down
10 changes: 6 additions & 4 deletions plugin/openstack.go
Original file line number Diff line number Diff line change
Expand Up @@ -227,8 +227,8 @@ func (t *TargetPlugin) scaleOut(ctx context.Context, count int64, azDist map[str

// scaleIn updates the Auto Scaling Group desired count to match what the
// Autoscaler has deemed required.
func (t *TargetPlugin) scaleIn(ctx context.Context, count int64, config map[string]string) error {
ids, err := t.clusterUtils.RunPreScaleInTasks(ctx, config, int(count))
func (t *TargetPlugin) scaleIn(ctx context.Context, count int64, remoteIDs []string, config map[string]string) error {
ids, err := t.clusterUtils.RunPreScaleInTasksWithRemoteCheck(ctx, config, remoteIDs, int(count))
if err != nil {
return fmt.Errorf("failed to perform pre-scale Nomad scale in tasks: %v", err)
}
Expand Down Expand Up @@ -415,10 +415,11 @@ type customServer struct {
Tags *[]string `json:"tags"`
}

func (t *TargetPlugin) countServers(ctx context.Context, pool string) (int64, int64, map[string]int, error) {
func (t *TargetPlugin) countServers(ctx context.Context, pool string) (int64, int64, map[string]int, []string, error) {
var total int64
var ready int64
azDist := make(map[string]int)
remoteIDs := make([]string, 0)

pager := servers.List(t.computeClient, servers.ListOpts{Tags: fmt.Sprintf(poolTag, pool)})
err := pager.EachPage(func(page pagination.Page) (bool, error) {
Expand All @@ -437,11 +438,12 @@ func (t *TargetPlugin) countServers(ctx context.Context, pool string) (int64, in
ready += 1
}
azDist[v.AZ] = azDist[v.AZ] + 1
remoteIDs = append(remoteIDs, v.ID)
total += 1
}
return true, nil
})
return total, ready, azDist, err
return total, ready, azDist, remoteIDs, err
}

type customCreateData struct {
Expand Down
6 changes: 3 additions & 3 deletions plugin/plugin.go
Original file line number Diff line number Diff line change
Expand Up @@ -139,15 +139,15 @@ func (t *TargetPlugin) Scale(action sdk.ScalingAction, config map[string]string)
}

ctx := context.Background()
total, _, azDist, err := t.countServers(ctx, pool)
total, _, azDist, remoteIDs, err := t.countServers(ctx, pool)
if err != nil {
return fmt.Errorf("failed to count Nova servers: %v", err)
}

diff, direction := t.calculateDirection(total, action.Count)
switch direction {
case "in":
err = t.scaleIn(ctx, diff, config)
err = t.scaleIn(ctx, diff, remoteIDs, config)
case "out":
err = t.scaleOut(ctx, diff, azDist, config)
default:
Expand Down Expand Up @@ -183,7 +183,7 @@ func (t *TargetPlugin) Status(config map[string]string) (*sdk.TargetStatus, erro
}

ctx := context.Background()
total, active, _, err := t.countServers(ctx, pool)
total, active, _, _, err := t.countServers(ctx, pool)
if err != nil {
return nil, fmt.Errorf("failed to count Nova servers: %v", err)
}
Expand Down

0 comments on commit cca30ca

Please sign in to comment.