Node removal latency metrics added #8485
base: master
@@ -27,6 +27,7 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/core/scaledown"
 	"k8s.io/autoscaler/cluster-autoscaler/core/scaledown/budgets"
 	"k8s.io/autoscaler/cluster-autoscaler/core/scaledown/deletiontracker"
+	"k8s.io/autoscaler/cluster-autoscaler/core/scaledown/latencytracker"
 	"k8s.io/autoscaler/cluster-autoscaler/core/scaledown/pdb"
 	"k8s.io/autoscaler/cluster-autoscaler/core/scaledown/status"
 	"k8s.io/autoscaler/cluster-autoscaler/core/utils"
@@ -58,6 +59,7 @@ const (
 type Actuator struct {
 	ctx                   *context.AutoscalingContext
 	nodeDeletionTracker   *deletiontracker.NodeDeletionTracker
+	nodeLatencyTracker    *latencytracker.NodeLatencyTracker
 	nodeDeletionScheduler *GroupDeletionScheduler
 	deleteOptions         options.NodeDeleteOptions
 	drainabilityRules     rules.Rules
@@ -78,7 +80,7 @@ type actuatorNodeGroupConfigGetter interface {
 }

 // NewActuator returns a new instance of Actuator.
-func NewActuator(ctx *context.AutoscalingContext, scaleStateNotifier nodegroupchange.NodeGroupChangeObserver, ndt *deletiontracker.NodeDeletionTracker, deleteOptions options.NodeDeleteOptions, drainabilityRules rules.Rules, configGetter actuatorNodeGroupConfigGetter) *Actuator {
+func NewActuator(ctx *context.AutoscalingContext, scaleStateNotifier nodegroupchange.NodeGroupChangeObserver, ndt *deletiontracker.NodeDeletionTracker, nlt *latencytracker.NodeLatencyTracker, deleteOptions options.NodeDeleteOptions, drainabilityRules rules.Rules, configGetter actuatorNodeGroupConfigGetter) *Actuator {
 	ndb := NewNodeDeletionBatcher(ctx, scaleStateNotifier, ndt, ctx.NodeDeletionBatcherInterval)
 	legacyFlagDrainConfig := SingleRuleDrainConfig(ctx.MaxGracefulTerminationSec)
 	var evictor Evictor
@@ -90,6 +92,7 @@ func NewActuator(ctx *context.AutoscalingContext, scaleStateNotifier nodegroupch
 	return &Actuator{
 		ctx:                   ctx,
 		nodeDeletionTracker:   ndt,
+		nodeLatencyTracker:    nlt,
 		nodeDeletionScheduler: NewGroupDeletionScheduler(ctx, ndt, ndb, evictor),
 		budgetProcessor:       budgets.NewScaleDownBudgetProcessor(ctx),
 		deleteOptions:         deleteOptions,
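For illustration only, a hedged sketch of how a call site could wire in the new tracker; the helper below is hypothetical and all other parameters are assumed to be prepared exactly as before the change.

// Hypothetical in-package helper showing the new NewActuator signature in use.
func newActuatorWithLatencyTracking(
	ctx *context.AutoscalingContext,
	scaleStateNotifier nodegroupchange.NodeGroupChangeObserver,
	ndt *deletiontracker.NodeDeletionTracker,
	deleteOptions options.NodeDeleteOptions,
	drainabilityRules rules.Rules,
	configGetter actuatorNodeGroupConfigGetter,
) *Actuator {
	nlt := latencytracker.NewNodeLatencyTracker()
	return NewActuator(ctx, scaleStateNotifier, ndt, nlt, deleteOptions, drainabilityRules, configGetter)
}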
@@ -324,6 +327,9 @@ func (a *Actuator) deleteNodesAsync(nodes []*apiv1.Node, nodeGroup cloudprovider
 	}

 	for _, node := range nodes {
+		if a.nodeLatencyTracker != nil {
+			a.nodeLatencyTracker.ObserveDeletion(node.Name, time.Now())
+		}
 		nodeInfo, err := clusterSnapshot.GetNodeInfo(node.Name)
 		if err != nil {
 			nodeDeleteResult := status.NodeDeleteResult{ResultType: status.NodeDeleteErrorInternal, Err: errors.NewAutoscalerErrorf(errors.InternalError, "nodeInfos.Get for %q returned error: %v", node.Name, err)}

Review comment (on the ObserveDeletion call): It doesn't seem like the best placement. Deletion still might fail (look at the checks below). I think we would want to report only successful deletions.

Review comment: We could consider covering this logic with a test.
Review comment: Names of the test file and the implementation file do not match.

Review comment: I don't see any test checking the reported value.
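A minimal sketch of the placement the first review comment above suggests (not the PR's code): report the latency only once the per-node deletion result is known to be successful. The helper name is made up, and it assumes status.NodeDeleteOk is the success result type.

// Hypothetical helper in the actuation package; call it where the
// NodeDeleteResult for a node is produced, after deletion succeeded.
func observeDeletionIfSuccessful(nlt *latencytracker.NodeLatencyTracker, nodeName string, result status.NodeDeleteResult) {
	if nlt == nil {
		return
	}
	// Only successful deletions are reported, per the review suggestion.
	if result.ResultType == status.NodeDeleteOk {
		nlt.ObserveDeletion(nodeName, time.Now())
	}
}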
@@ -0,0 +1,134 @@
/*
Copyright 2024 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package latencytracker

import (
	"sync"
	"testing"
	"time"
)

func TestUpdateStateWithUnneededList_AddsNewNodes(t *testing.T) {
	tracker := NewNodeLatencyTracker()
	now := time.Now()
	node := NodeInfo{Name: "node1", UnneededSince: now, Threshold: 5 * time.Minute}

	tracker.UpdateStateWithUnneededList([]NodeInfo{node}, now)

	tracker.Lock()
	defer tracker.Unlock()
	if _, ok := tracker.nodes["node1"]; !ok {
		t.Errorf("expected node1 to be tracked, but was not")
	}
}

func TestUpdateStateWithUnneededList_DoesNotDuplicate(t *testing.T) {
	tracker := NewNodeLatencyTracker()
	now := time.Now()
	node := NodeInfo{Name: "node1", UnneededSince: now, Threshold: 5 * time.Minute}

	tracker.UpdateStateWithUnneededList([]NodeInfo{node}, now)
	tracker.UpdateStateWithUnneededList([]NodeInfo{node}, now.Add(time.Minute))

	tracker.Lock()
	defer tracker.Unlock()
	if len(tracker.nodes) != 1 {
		t.Errorf("expected 1 tracked node, got %d", len(tracker.nodes))
	}
}

func TestObserveDeletion_RemovesNode(t *testing.T) {
	tracker := NewNodeLatencyTracker()
	now := time.Now()
	node := NodeInfo{
		Name:          "node1",
		UnneededSince: now.Add(-10 * time.Minute),
		Threshold:     5 * time.Minute,
	}
	tracker.UpdateStateWithUnneededList([]NodeInfo{node}, now)

	tracker.ObserveDeletion("node1", now)

	tracker.Lock()
	defer tracker.Unlock()
	if _, ok := tracker.nodes["node1"]; ok {
		t.Errorf("expected node1 removed after ObserveDeletion")
	}
}

func TestObserveDeletion_NoOpIfNodeNotTracked(t *testing.T) {
	tracker := NewNodeLatencyTracker()
	now := time.Now()

	tracker.ObserveDeletion("node1", now)

	tracker.Lock()
	defer tracker.Unlock()
	if len(tracker.nodes) != 0 {
		t.Errorf("expected no nodes tracked, got %d", len(tracker.nodes))
	}
}

func TestConcurrentUpdatesAndDeletions(t *testing.T) {
	tracker := NewNodeLatencyTracker()
	now := time.Now()

	node := NodeInfo{
		Name:          "node1",
		UnneededSince: now,
		Threshold:     2 * time.Minute,
	}

	var wg sync.WaitGroup
	stop := make(chan struct{})

	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			select {
			case <-stop:
				return
			default:
				tracker.UpdateStateWithUnneededList([]NodeInfo{node}, time.Now())
			}
		}
	}()

	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			select {
			case <-stop:
				return
			default:
				tracker.ObserveDeletion("node1", time.Now())
			}
		}
	}()

	time.Sleep(50 * time.Millisecond)
	close(stop)
	wg.Wait()

	tracker.Lock()
	defer tracker.Unlock()
	if len(tracker.nodes) > 1 {
		t.Errorf("expected at most 1 tracked node, got %d", len(tracker.nodes))
	}
}
Review comment: File name is not aligned with the convention; it uses an underscore between words.
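Responding to the earlier review comment about no test checking the reported value, a hedged sketch of what such a test could look like. It assumes a hypothetical constructor NewNodeLatencyTrackerWithRecorder that injects a recording callback in place of the direct metrics call; nothing like it exists in the PR.

// Hypothetical test; NewNodeLatencyTrackerWithRecorder does not exist in the
// PR and would require refactoring the tracker to accept an injectable
// recorder instead of calling the metrics package directly.
func TestObserveDeletion_ReportsDurationAboveThreshold(t *testing.T) {
	var recorded time.Duration
	tracker := NewNodeLatencyTrackerWithRecorder(func(d time.Duration) { recorded = d })

	unneededSince := time.Now().Add(-10 * time.Minute)
	tracker.UpdateStateWithUnneededList([]NodeInfo{
		{Name: "node1", UnneededSince: unneededSince, Threshold: 5 * time.Minute},
	}, time.Now())

	tracker.ObserveDeletion("node1", unneededSince.Add(10*time.Minute))

	// The current implementation reports duration minus threshold, i.e. 10m - 5m = 5m.
	if want := 5 * time.Minute; recorded != want {
		t.Errorf("expected reported value %v, got %v", want, recorded)
	}
}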
@@ -0,0 +1,66 @@
package latencytracker

import (
	"sync"
	"time"

	"k8s.io/autoscaler/cluster-autoscaler/metrics"

	"k8s.io/klog/v2"
)

type NodeInfo struct {
	Name          string
	UnneededSince time.Time
	Threshold     time.Duration
}

Review comment (on the Name field): Looks like we have duplicated information. Name is already in the key.

type NodeLatencyTracker struct {
	sync.Mutex
	nodes map[string]NodeInfo
}

// NewNodeLatencyTracker creates a new tracker.
func NewNodeLatencyTracker() *NodeLatencyTracker {
	return &NodeLatencyTracker{
		nodes: make(map[string]NodeInfo),
	}
}

func (t *NodeLatencyTracker) UpdateStateWithUnneededList(list []NodeInfo, timestamp time.Time) {
	t.Lock()
	defer t.Unlock()

	currentSet := make(map[string]struct{}, len(list))
	for _, info := range list {
		currentSet[info.Name] = struct{}{}
		_, exists := t.nodes[info.Name]
		if !exists {
			t.nodes[info.Name] = NodeInfo{
				Name:          info.Name,
				UnneededSince: info.UnneededSince,
				Threshold:     info.Threshold,
			}
			klog.V(2).Infof("Started tracking unneeded node %s at %v with ScaleDownUnneededTime=%v",
				info.Name, info.UnneededSince, info.Threshold)
		}
	}
}

// ObserveDeletion is called by the actuator just before node deletion.
func (t *NodeLatencyTracker) ObserveDeletion(nodeName string, timestamp time.Time) {
	t.Lock()
	defer t.Unlock()

	if info, exists := t.nodes[nodeName]; exists {
		duration := timestamp.Sub(info.UnneededSince)

		klog.V(2).Infof(
			"Observing deletion for node %s, unneeded for %s (threshold was %s).",
			nodeName, duration, info.Threshold,
		)

		metrics.UpdateScaleDownNodeDeletionDuration("true", duration-info.Threshold)

		delete(t.nodes, nodeName)
	}
}

Review comment (on lines 58 to 63): Logging and updating the metric don't need to be, and shouldn't be, done under the lock.
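A minimal sketch of that suggestion, using only the fields already defined above: take the lock just for the map lookup and delete, then log and update the metric after releasing it.

// Sketch: ObserveDeletion restructured so only the map access is under the
// mutex; logging and the metric update happen outside the critical section.
func (t *NodeLatencyTracker) ObserveDeletion(nodeName string, timestamp time.Time) {
	t.Lock()
	info, exists := t.nodes[nodeName]
	if exists {
		delete(t.nodes, nodeName)
	}
	t.Unlock()

	if !exists {
		return
	}
	duration := timestamp.Sub(info.UnneededSince)
	klog.V(2).Infof("Observing deletion for node %s, unneeded for %s (threshold was %s).",
		nodeName, duration, info.Threshold)
	metrics.UpdateScaleDownNodeDeletionDuration("true", duration-info.Threshold)
}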
Review comment: The name of the flag is unclear. It is missing the information that this is the latency of node removal.