[Profiling] Query in parallel only if beneficial (elastic#103061)
With this commit we check index allocation before doing key-value lookups. To
reduce latency, key-value lookups are done in parallel across multiple slices of
data. However, on nodes with spinning disks, parallel access is harmful.
Therefore, we check whether any involved index is allocated to the warm or
cold tier (which usually run on spinning disks) and, if so, disable parallel
key-value lookups. In our experiments this improved latency on the warm tier
by about 10%.
danielmitterdorfer committed Dec 7, 2023
1 parent 80efdd1 commit eea5f6e
Showing 5 changed files with 211 additions and 5 deletions.
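
The core decision described in the commit message above can be sketched as follows. This is an illustrative sketch only: SliceCountHelper and its sliceCount method are hypothetical names, while IndexAllocation.isAnyOnWarmOrColdTier and the desired slice count come from the diff below.

package org.elasticsearch.xpack.profiling;

import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.index.Index;

import java.util.List;

// Hypothetical illustration of the decision introduced by this commit:
// fall back to a single slice whenever any target index may live on
// spinning disks (warm or cold tier); otherwise keep the desired parallelism.
final class SliceCountHelper {
    private SliceCountHelper() {}

    static int sliceCount(ClusterState state, List<Index> indices, int desiredSlices) {
        return IndexAllocation.isAnyOnWarmOrColdTier(state, indices) ? 1 : desiredSlices;
    }
}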
5 changes: 5 additions & 0 deletions docs/changelog/103061.yaml
@@ -0,0 +1,5 @@
pr: 103061
summary: "[Profiling] Query in parallel only if beneficial"
area: Application
type: bug
issues: []
@@ -0,0 +1,60 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/

package org.elasticsearch.xpack.profiling;

import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.DataTier;
import org.elasticsearch.index.Index;

import java.util.List;
import java.util.function.Predicate;

final class IndexAllocation {
    private IndexAllocation() {
        // no instances intended
    }

    static boolean isAnyAssignedToNode(ClusterState state, List<Index> indices, Predicate<DiscoveryNode> nodePredicate) {
        for (Index index : indices) {
            IndexMetadata metadata = state.getMetadata().index(index);
            if (metadata == null) {
                continue;
            }
            IndexRoutingTable routingTable = state.routingTable().index(index);
            if (routingTable == null) {
                continue;
            }
            for (ShardRouting shardRouting : routingTable.randomAllActiveShardsIt()) {
                if (shardRouting.assignedToNode() == false) {
                    continue;
                }
                DiscoveryNode assignedNode = state.nodes().get(shardRouting.currentNodeId());
                if (nodePredicate.test(assignedNode)) {
                    return true;
                }
            }
        }
        return false;
    }

    /**
     * Determines whether any of the provided indices is allocated to the warm or cold tier. Machines on these
     * tiers usually use spinning disks.
     *
     * @param state Current cluster state.
     * @param indices A list of indices to check.
     * @return <code>true</code> iff at least one index is allocated to either a warm or cold data node.
     */
    static boolean isAnyOnWarmOrColdTier(ClusterState state, List<Index> indices) {
        return isAnyAssignedToNode(state, indices, n -> DataTier.isWarmNode(n) || DataTier.isColdNode(n));
    }
}
@@ -408,9 +408,12 @@ private void retrieveStackTraces(
             return;
         }
         List<String> eventIds = new ArrayList<>(responseBuilder.getStackTraceEvents().keySet());
-        List<List<String>> slicedEventIds = sliced(eventIds, desiredSlices);
         ClusterState clusterState = clusterService.state();
         List<Index> indices = resolver.resolve(clusterState, "profiling-stacktraces", responseBuilder.getStart(), responseBuilder.getEnd());
+        // Avoid parallelism if there is potential we are on spinning disks (frozen tier uses searchable snapshots)
+        int sliceCount = IndexAllocation.isAnyOnWarmOrColdTier(clusterState, indices) ? 1 : desiredSlices;
+        log.trace("Using [{}] slice(s) to lookup stacktraces.", sliceCount);
+        List<List<String>> slicedEventIds = sliced(eventIds, sliceCount);
 
         // Build a set of unique host IDs.
         Set<String> uniqueHostIDs = new HashSet<>(responseBuilder.hostEventCounts.size());
@@ -464,7 +467,7 @@ private void retrieveStackTraces(
 
     // package private for testing
     static <T> List<List<T>> sliced(List<T> c, int slices) {
-        if (c.size() <= slices) {
+        if (c.size() <= slices || slices == 1) {
             return List.of(c);
         }
         List<List<T>> slicedList = new ArrayList<>();
@@ -628,9 +631,6 @@ private void retrieveStackTraceDetails(
         if (mayNotifyOfCancellation(submitTask, submitListener)) {
             return;
         }
-
-        List<List<String>> slicedStackFrameIds = sliced(stackFrameIds, desiredDetailSlices);
-        List<List<String>> slicedExecutableIds = sliced(executableIds, desiredDetailSlices);
         List<Index> stackFrameIndices = resolver.resolve(
             clusterState,
             "profiling-stackframes",
@@ -643,6 +643,18 @@
             responseBuilder.getStart(),
             responseBuilder.getEnd()
         );
+        // Avoid parallelism if there is potential we are on spinning disks (frozen tier uses searchable snapshots)
+        int stackFrameSliceCount = IndexAllocation.isAnyOnWarmOrColdTier(clusterState, stackFrameIndices) ? 1 : desiredDetailSlices;
+        int executableSliceCount = IndexAllocation.isAnyOnWarmOrColdTier(clusterState, executableIndices) ? 1 : desiredDetailSlices;
+        log.trace(
+            "Using [{}] slice(s) to lookup stack frames and [{}] slice(s) to lookup executables.",
+            stackFrameSliceCount,
+            executableSliceCount
+        );
+
+        List<List<String>> slicedStackFrameIds = sliced(stackFrameIds, stackFrameSliceCount);
+        List<List<String>> slicedExecutableIds = sliced(executableIds, executableSliceCount);
+
         DetailsHandler handler = new DetailsHandler(
             responseBuilder,
             submitListener,
@@ -0,0 +1,122 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/

package org.elasticsearch.xpack.profiling;

import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodeRole;
import org.elasticsearch.cluster.node.DiscoveryNodeUtils;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexVersion;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.test.ESTestCase;

import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.UUID;

public class IndexAllocationTests extends ESTestCase {
    private final Index hot = idx("hot");
    private final Index warm = idx("warm");
    private final Index cold = idx("cold");
    private final Index frozen = idx("frozen");

    public void testEmptyIndicesNotOnWarmColdTier() {
        assertFalse(IndexAllocation.isAnyOnWarmOrColdTier(clusterState(), Collections.emptyList()));
    }

    public void testOtherIndicesNotOnWarmColdTier() {
        assertFalse(IndexAllocation.isAnyOnWarmOrColdTier(clusterState(), List.of(hot, frozen)));
    }

    public void testIndicesOnWarmColdTier() {
        assertTrue(IndexAllocation.isAnyOnWarmOrColdTier(clusterState(), List.of(warm)));
        assertTrue(IndexAllocation.isAnyOnWarmOrColdTier(clusterState(), List.of(cold)));
    }

    public void testMixedIndicesOnWarmColdTier() {
        assertTrue(IndexAllocation.isAnyOnWarmOrColdTier(clusterState(), List.of(hot, warm)));
        assertTrue(IndexAllocation.isAnyOnWarmOrColdTier(clusterState(), List.of(frozen, cold)));
    }

    /**
     * Creates a cluster state that represents several indices:
     *
     * <ul>
     *     <li><code>hot</code> assigned to a hot-tier node named <code>n-hot</code></li>
     *     <li><code>warm</code> assigned to a warm-tier node named <code>n-warm</code></li>
     *     <li><code>cold</code> assigned to a cold-tier node named <code>n-cold</code></li>
     *     <li><code>frozen</code> assigned to a frozen-tier node named <code>n-frozen</code></li>
     * </ul>
     */
    private ClusterState clusterState() {
        DiscoveryNode node = DiscoveryNodeUtils.create("node");
        DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder().localNodeId("node").masterNodeId("node").add(node);

        nodesBuilder.add(DiscoveryNodeUtils.builder("n-" + hot.getName()).roles(Set.of(DiscoveryNodeRole.DATA_HOT_NODE_ROLE)).build());
        nodesBuilder.add(DiscoveryNodeUtils.builder("n-" + warm.getName()).roles(Set.of(DiscoveryNodeRole.DATA_WARM_NODE_ROLE)).build());
        nodesBuilder.add(DiscoveryNodeUtils.builder("n-" + cold.getName()).roles(Set.of(DiscoveryNodeRole.DATA_COLD_NODE_ROLE)).build());
        nodesBuilder.add(
            DiscoveryNodeUtils.builder("n-" + frozen.getName()).roles(Set.of(DiscoveryNodeRole.DATA_FROZEN_NODE_ROLE)).build()
        );

        RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
        Map<String, IndexMetadata> indices = new HashMap<>();
        for (Index index : List.of(hot, warm, cold, frozen)) {
            indices.put(index.getName(), metadata(index));
            ShardRouting shardRouting = ShardRouting.newUnassigned(
                new ShardId(index, 0),
                true,
                RecoverySource.ExistingStoreRecoverySource.INSTANCE,
                new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, ""),
                ShardRouting.Role.DEFAULT
            );

            shardRouting = shardRouting.initialize("n-" + index.getName(), null, 0).moveToStarted(0);
            routingTableBuilder.add(
                IndexRoutingTable.builder(index)
                    .addIndexShard(IndexShardRoutingTable.builder(shardRouting.shardId()).addShard(shardRouting))
            );
        }

        return ClusterState.builder(new ClusterName("test"))
            .metadata(Metadata.builder().indices(indices).build())
            .blocks(new ClusterBlocks.Builder().build())
            .nodes(nodesBuilder)
            .routingTable(routingTableBuilder)
            .build();
    }

    private IndexMetadata metadata(Index index) {
        final Settings settings = Settings.builder()
            .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
            .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID())
            .build();
        return IndexMetadata.builder(index.getName()).settings(settings).numberOfShards(1).numberOfReplicas(0).build();
    }

    private Index idx(String name) {
        return new Index(name, UUID.randomUUID().toString());
    }

}
@@ -17,6 +17,13 @@ public void testSliceEmptyList() {
         assertEquals(List.of(List.of()), TransportGetStackTracesAction.sliced(Collections.emptyList(), 4));
     }
 
+    public void testSingleSlice() {
+        List<String> input = randomList(2, 5, () -> randomAlphaOfLength(3));
+        List<List<String>> sliced = TransportGetStackTracesAction.sliced(input, 1);
+        assertEquals(1, sliced.size());
+        assertEquals(input, sliced.get(0));
+    }
+
     public void testSliceListSmallerOrEqualToSliceCount() {
         int slices = 7;
         List<String> input = randomList(0, slices, () -> randomAlphaOfLength(3));
