Create a new monitor for node-level write load #131560

Merged
@@ -0,0 +1,88 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the "Elastic License
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
* Public License v 1"; you may not use this file except in compliance with, at
* your election, the "Elastic License 2.0", the "GNU Affero General Public
* License v3.0 only", or the "Server Side Public License, v 1".
*/

package org.elasticsearch.cluster.routing.allocation;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.ClusterInfo;
import org.elasticsearch.cluster.ClusterInfoService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.routing.RerouteService;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.gateway.GatewayService;

import java.util.function.LongSupplier;
import java.util.function.Supplier;

/**
* Monitors the node-level write thread pool usage across the cluster and initiates (coming soon) a rebalancing round (via
* {@link RerouteService#reroute}) whenever a node crosses the node-level write load thresholds.
*
* TODO (ES-11992): implement
*/
public class WriteLoadConstraintMonitor {
Review comment from @mhl-b (Contributor), Jul 21, 2025:

We could follow the disk allocator naming here too: WriteLoadThresholdSettings and WriteLoadThresholdMonitor. That said, I don't see a reason for either the Constraint or the Threshold wording; it's implicit in the name of a Monitor. If we monitor something, that means there is a value and a threshold for that value.

Reply from the PR author:

I was hoping to convey the notion of resource-constrained shard allocation decisions.

Monitor by itself doesn't imply thresholds or constraints; it only says we're watching something. In the case of a WriteLoadMonitor, we're monitoring write load, but we aren't saying for what.

A significant part of this is simply consistency in naming. For example, you can find all the DiskThreshold* files by their shared prefix, and similarly here with WriteLoadConstraint*. If we settle on something other than the WriteLoadConstraint* prefix, I'm inclined to do that in a separate PR so the other file gets renamed as well. The new heap decider logic also follows a prefix pattern; in fact, they did a follow-up PR there just for the renaming.

The PR author added:

Went ahead with pushing this so as not to block ES-11992.

    private static final Logger logger = LogManager.getLogger(WriteLoadConstraintMonitor.class);
    private final WriteLoadConstraintSettings writeLoadConstraintSettings;
    private final Supplier<ClusterState> clusterStateSupplier;
    private final LongSupplier currentTimeMillisSupplier;
    private final RerouteService rerouteService;

    public WriteLoadConstraintMonitor(
        ClusterSettings clusterSettings,
        LongSupplier currentTimeMillisSupplier,
        Supplier<ClusterState> clusterStateSupplier,
        RerouteService rerouteService
    ) {
        this.writeLoadConstraintSettings = new WriteLoadConstraintSettings(clusterSettings);
        this.clusterStateSupplier = clusterStateSupplier;
        this.currentTimeMillisSupplier = currentTimeMillisSupplier;
        this.rerouteService = rerouteService;
    }

    /**
     * Receives a copy of the latest {@link ClusterInfo} whenever the {@link ClusterInfoService} collects it. Processes the new
     * {@link org.elasticsearch.cluster.NodeUsageStatsForThreadPools} and initiates rebalancing, via reroute, if a node in the cluster
     * exceeds thread pool usage thresholds.
     */
    public void onNewInfo(ClusterInfo clusterInfo) {
        final ClusterState state = clusterStateSupplier.get();
        if (state.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) {
            logger.debug("skipping monitor as the cluster state is not recovered yet");
            return;
        }

        if (writeLoadConstraintSettings.getWriteLoadConstraintEnabled() == WriteLoadConstraintSettings.WriteLoadDeciderStatus.DISABLED) {
            logger.trace("skipping monitor because the write load decider is disabled");
            return;
        }

        logger.trace("processing new cluster info");

        boolean reroute = false;
        String explanation = "";
        final long currentTimeMillis = currentTimeMillisSupplier.getAsLong();

        // TODO (ES-11992): implement

        if (reroute) {
            logger.debug("rerouting shards: [{}]", explanation);
            rerouteService.reroute("write load constraint monitor", Priority.NORMAL, ActionListener.wrap(ignored -> {
                final var reroutedClusterState = clusterStateSupplier.get();

                // TODO (ES-11992): implement

            }, e -> logger.debug("reroute failed", e)));
        } else {
            logger.trace("no reroute required");
        }
    }
}
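
The threshold check itself is left as a TODO for ES-11992. Purely as an illustration of the kind of per-node check the monitor is expected to perform, here is a self-contained sketch: the NodeWritePoolStats record, the findHotNode helper, and the 0.9 utilization threshold are all invented for this example and are not part of the ClusterInfo API or of this change. In the real monitor, the threshold would come from WriteLoadConstraintSettings and a hot node would trigger rerouteService.reroute rather than a log line.

import java.util.Map;

/** Illustrative stand-in for per-node write thread pool statistics; not a real Elasticsearch type. */
record NodeWritePoolStats(float averageWriteThreadPoolUtilization) {}

class WriteLoadCheckSketch {
    // Hypothetical threshold; in the real monitor this would be read from WriteLoadConstraintSettings.
    private static final float UTILIZATION_THRESHOLD = 0.9f;

    /** Returns the id of the first node whose write pool utilization exceeds the threshold, or null if none does. */
    static String findHotNode(Map<String, NodeWritePoolStats> statsByNode) {
        for (Map.Entry<String, NodeWritePoolStats> entry : statsByNode.entrySet()) {
            if (entry.getValue().averageWriteThreadPoolUtilization() > UTILIZATION_THRESHOLD) {
                return entry.getKey();
            }
        }
        return null;
    }

    public static void main(String[] args) {
        Map<String, NodeWritePoolStats> statsByNode = Map.of(
            "node-1", new NodeWritePoolStats(0.40f),
            "node-2", new NodeWritePoolStats(0.95f)
        );
        String hotNode = findHotNode(statsByNode);
        if (hotNode != null) {
            // This is where the real monitor would call rerouteService.reroute(...) to start a rebalancing round.
            System.out.println("would reroute: node [" + hotNode + "] exceeds the write pool utilization threshold");
        }
    }
}
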
@@ -12,6 +12,7 @@
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.routing.RerouteService;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.unit.RatioValue;
import org.elasticsearch.core.TimeValue;
@@ -98,4 +99,28 @@ public enum WriteLoadDeciderStatus {
        Setting.Property.Dynamic,
        Setting.Property.NodeScope
    );

    WriteLoadDeciderStatus writeLoadDeciderStatus;
    TimeValue writeLoadDeciderRerouteIntervalSetting;

    WriteLoadConstraintSettings(ClusterSettings clusterSettings) {
        clusterSettings.initializeAndWatch(WRITE_LOAD_DECIDER_ENABLED_SETTING, this::setWriteLoadConstraintEnabled);
        clusterSettings.initializeAndWatch(WRITE_LOAD_DECIDER_REROUTE_INTERVAL_SETTING, this::setWriteLoadDeciderRerouteIntervalSetting);
    }

    private void setWriteLoadConstraintEnabled(WriteLoadDeciderStatus status) {
        this.writeLoadDeciderStatus = status;
    }

    public WriteLoadDeciderStatus getWriteLoadConstraintEnabled() {
        return this.writeLoadDeciderStatus;
    }

    public TimeValue getWriteLoadDeciderRerouteIntervalSetting() {
        return this.writeLoadDeciderRerouteIntervalSetting;
    }

    private void setWriteLoadDeciderRerouteIntervalSetting(TimeValue timeValue) {
        this.writeLoadDeciderRerouteIntervalSetting = timeValue;
    }
}
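
The initializeAndWatch wiring above is the standard dynamic-setting pattern: it pushes the setting's current value into the consumer immediately and then re-invokes the consumer on every dynamic update. Below is a minimal, self-contained illustration of that pattern; the setting key "example.write_load.enabled" and the class name are made up for this sketch and are not the real write-load decider settings.

import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;

import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;

public class InitializeAndWatchSketch {
    // Toy dynamic setting; the key is invented purely for this example.
    private static final Setting<Boolean> EXAMPLE_ENABLED = Setting.boolSetting(
        "example.write_load.enabled",
        false,
        Setting.Property.Dynamic,
        Setting.Property.NodeScope
    );

    public static void main(String[] args) {
        ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, Set.<Setting<?>>of(EXAMPLE_ENABLED));
        AtomicBoolean enabled = new AtomicBoolean();

        // Applies the current value right away, then keeps the consumer up to date on dynamic changes.
        clusterSettings.initializeAndWatch(EXAMPLE_ENABLED, enabled::set);

        // Simulate the dynamic update that a cluster settings API call would trigger.
        clusterSettings.applySettings(Settings.builder().put("example.write_load.enabled", true).build());
        System.out.println("enabled = " + enabled.get()); // prints: enabled = true
    }
}
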
@@ -60,6 +60,7 @@
import org.elasticsearch.cluster.routing.RerouteService;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.DiskThresholdMonitor;
import org.elasticsearch.cluster.routing.allocation.WriteLoadConstraintMonitor;
import org.elasticsearch.cluster.routing.allocation.WriteLoadForecaster;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.cluster.version.CompatibilityVersions;
@@ -785,6 +786,15 @@ private void construct(
)::onNewInfo
);

clusterInfoService.addListener(
new WriteLoadConstraintMonitor(
clusterService.getClusterSettings(),
threadPool.relativeTimeInMillisSupplier(),
clusterService::state,
rerouteService
)::onNewInfo
);

IndicesModule indicesModule = new IndicesModule(pluginsService.filterPlugins(MapperPlugin.class).toList());
modules.add(indicesModule);

@@ -21,11 +21,14 @@
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodeUtils;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.RerouteService;
import org.elasticsearch.cluster.routing.allocation.WriteLoadConstraintMonitor;
import org.elasticsearch.cluster.routing.allocation.WriteLoadConstraintSettings;
import org.elasticsearch.cluster.service.ClusterApplierService;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.cluster.service.FakeThreadPoolMasterService;
import org.elasticsearch.cluster.service.MasterService;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.DeterministicTaskQueue;
@@ -95,6 +98,18 @@ protected PrioritizedEsThreadPoolExecutor createThreadPoolExecutor() {
mockEstimatedHeapUsageCollector,
mockNodeUsageStatsForThreadPoolsCollector
);
final WriteLoadConstraintMonitor usageMonitor = spy(
new WriteLoadConstraintMonitor(
clusterService.getClusterSettings(),
threadPool.relativeTimeInMillisSupplier(),
clusterService::state,
new RerouteService() {
@Override
public void reroute(String reason, Priority priority, ActionListener<Void> listener) {}
}
)
);
clusterInfoService.addListener(usageMonitor::onNewInfo);
clusterService.addListener(clusterInfoService);
clusterInfoService.addListener(ignored -> {});

@@ -132,13 +147,15 @@ protected PrioritizedEsThreadPoolExecutor createThreadPoolExecutor() {
for (int i = 0; i < 3; i++) {
Mockito.clearInvocations(mockEstimatedHeapUsageCollector);
Mockito.clearInvocations(mockNodeUsageStatsForThreadPoolsCollector);
Mockito.clearInvocations(usageMonitor);
final int initialRequestCount = client.requestCount;
final long duration = INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING.get(settings).millis();
runFor(deterministicTaskQueue, duration);
deterministicTaskQueue.runAllRunnableTasks();
assertThat(client.requestCount, equalTo(initialRequestCount + 2)); // should have run two client requests per interval
verify(mockEstimatedHeapUsageCollector).collectClusterHeapUsage(any()); // Should poll for heap usage once per interval
verify(mockNodeUsageStatsForThreadPoolsCollector).collectUsageStats(any());
verify(usageMonitor).onNewInfo(any());
}

final AtomicBoolean failMaster2 = new AtomicBoolean();
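
The verification loop in the test above relies on Mockito's spy / clearInvocations / verify cycle to assert that the monitor's onNewInfo runs exactly once per ClusterInfo polling interval. The following stripped-down sketch shows that same verification pattern in isolation; the InfoListener class and the simulated publication loop are invented for this example and are not the real test fixtures.

import static org.mockito.Mockito.clearInvocations;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.verify;

import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;

public class SpyVerifySketch {
    /** Toy listener standing in for WriteLoadConstraintMonitor::onNewInfo. */
    public static class InfoListener {
        public void onNewInfo(String info) {
            // no-op; the spy only records that the call happened
        }
    }

    public static void main(String[] args) {
        InfoListener listener = spy(new InfoListener());
        List<Consumer<String>> listeners = new ArrayList<>();
        listeners.add(listener::onNewInfo);

        for (int i = 0; i < 3; i++) {
            clearInvocations(listener); // reset the recorded calls for this simulated interval
            String info = "cluster-info-" + i;
            listeners.forEach(l -> l.accept(info)); // simulate one ClusterInfo publication to all listeners
            verify(listener).onNewInfo(info); // exactly one delivery expected per interval
        }
    }
}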