Upgrading Jettison due to CVE-2022-45685 (#5777)
* Upgrading Jettison due to CVE

Signed-off-by: Sarat Vemulapalli <vemulapallisarat@gmail.com>

* Updated Changelog

Signed-off-by: Sarat Vemulapalli <vemulapallisarat@gmail.com>

Signed-off-by: Sarat Vemulapalli <vemulapallisarat@gmail.com>
saratvemulapalli authored and Rishav Sagar committed Jan 13, 2023
1 parent 5989d01 commit a8787d6
Showing 7 changed files with 186 additions and 27 deletions.
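For context: CVE-2022-45685 is a stack-overflow denial-of-service in Jettison's JSON parser when fed deeply nested documents, patched in the later 1.5.x releases, which is why the pin moves from 1.5.1 to 1.5.3 below. A minimal sketch of the failure mode (illustrative only; the class name and nesting depth are assumptions, not part of this commit):

    import org.codehaus.jettison.json.JSONObject;

    public class JettisonNestingSketch {
        public static void main(String[] args) throws Exception {
            // Build a pathologically nested document: {"a":{"a":{"a": ... }}}
            int depth = 100_000; // assumed depth, large enough to exhaust a default stack
            StringBuilder json = new StringBuilder();
            for (int i = 0; i < depth; i++) {
                json.append("{\"a\":");
            }
            json.append("0");
            for (int i = 0; i < depth; i++) {
                json.append('}');
            }
            // On vulnerable versions the recursive parse can die with a
            // StackOverflowError; patched versions bound or reject the nesting.
            new JSONObject(json.toString());
        }
    }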
CHANGELOG.md (3 additions & 2 deletions)
@@ -36,7 +36,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
### Dependencies
- Bumps `log4j-core` from 2.18.0 to 2.19.0
- Bumps `reactor-netty-http` from 1.0.18 to 1.0.23
-- Bumps `jettison` from 1.5.0 to 1.5.1
+- Bumps `jettison` from 1.5.0 to 1.5.3
- Bumps `forbiddenapis` from 3.3 to 3.4
- Bumps `gson` from 2.9.0 to 2.10
- Bumps `avro` from 1.11.0 to 1.11.1
@@ -106,6 +106,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),

## [Unreleased 2.x]
### Added
- Adding index create block when all nodes have breached high disk watermark ([#5852](https://github.com/opensearch-project/OpenSearch/pull/5852))

### Dependencies

@@ -120,4 +121,4 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
### Security

[Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.4...HEAD
-[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.4...2.x
\ No newline at end of file
+[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.4...2.x
buildSrc/version.properties (1 addition & 1 deletion)
@@ -17,7 +17,7 @@ supercsv = 2.4.0
log4j = 2.17.1
slf4j = 1.7.36
asm = 9.4
-jettison = 1.5.1
+jettison = 1.5.3
woodstox = 6.4.0
kotlin = 1.7.10
antlr4 = 4.11.1

This file was deleted. (By context, the checksum file for the old jettison jar; the new jar's checksum is added below.)

@@ -0,0 +1 @@
964d35bbdecbbc33cf2f2044e8a1648d9f6f1474
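The hash above is the checksum committed for the new jettison jar. As a rough sketch of the kind of verification the build performs against these .sha1 files (paths and file names here are assumptions, not taken from this commit):

    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.security.MessageDigest;

    public class JarChecksumSketch {
        public static void main(String[] args) throws Exception {
            // Hypothetical locations; the real Gradle check wires these up itself.
            Path jar = Path.of("server/licenses/jettison-1.5.3.jar");
            Path sha = Path.of("server/licenses/jettison-1.5.3.jar.sha1");

            byte[] digest = MessageDigest.getInstance("SHA-1").digest(Files.readAllBytes(jar));
            StringBuilder hex = new StringBuilder();
            for (byte b : digest) {
                hex.append(String.format("%02x", b));
            }

            String expected = Files.readString(sha).trim();
            if (!hex.toString().equals(expected)) {
                throw new IllegalStateException("checksum mismatch for " + jar + ": " + hex + " != " + expected);
            }
        }
    }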
@@ -39,14 +39,17 @@

import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
import org.opensearch.action.admin.indices.delete.DeleteIndexRequest;
import org.opensearch.action.admin.indices.stats.ShardStats;
import org.opensearch.action.index.IndexRequestBuilder;
import org.opensearch.cluster.ClusterInfoService;
import org.opensearch.cluster.ClusterState;
import org.opensearch.cluster.InternalClusterInfoService;
import org.opensearch.cluster.metadata.IndexMetadata;
import org.opensearch.cluster.routing.IndexShardRoutingTable;
import org.opensearch.cluster.routing.ShardRouting;
import org.opensearch.cluster.routing.ShardRoutingState;
import org.opensearch.cluster.routing.allocation.DiskThresholdMonitor;
import org.opensearch.cluster.routing.allocation.DiskThresholdSettings;
import org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider.Rebalance;
import org.opensearch.cluster.service.ClusterService;
@@ -82,12 +85,15 @@
import java.nio.file.NotDirectoryException;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.List;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
@@ -126,7 +132,8 @@ public void removeFilesystemProvider() {
defaultFileSystem = null;
}

-private static final long WATERMARK_BYTES = new ByteSizeValue(10, ByteSizeUnit.KB).getBytes();
+// Increase the watermark headroom to avoid flaky test failures.
+private static final long WATERMARK_BYTES = new ByteSizeValue(1, ByteSizeUnit.MB).getBytes();

@Override
protected Settings nodeSettings(int nodeOrdinal) {
@@ -167,16 +174,7 @@ public void testHighWatermarkNotExceeded() throws Exception {
final Path dataNode0Path = internalCluster().getInstance(Environment.class, dataNodeName).dataFiles()[0];

final String indexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
-createIndex(
-indexName,
-Settings.builder()
-.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
-.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 6)
-.put(INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING.getKey(), "0ms")
-.put(IndexSettings.INDEX_MERGE_ON_FLUSH_ENABLED.getKey(), false)
-.build()
-);
-final long minShardSize = createReasonableSizedShards(indexName);
+final long minShardSize = createAndPopulateIndex(indexName);

// reduce disk size of node 0 so that no shards fit below the high watermark, forcing all shards onto the other data node
// (subtract the translog size since the disk threshold decider ignores this and may therefore move the shard back again)
@@ -188,6 +186,89 @@ public void testHighWatermarkNotExceeded() throws Exception {
assertBusyWithDiskUsageRefresh(dataNode0Id, indexName, hasSize(1));
}

public void testIndexCreateBlockWhenAllNodesExceededHighWatermark() throws Exception {
internalCluster().startClusterManagerOnlyNode();
final List<String> dataNodeNames = internalCluster().startDataOnlyNodes(2);
ensureStableCluster(3);

final InternalClusterInfoService clusterInfoService = (InternalClusterInfoService) internalCluster()
.getCurrentClusterManagerNodeInstance(ClusterInfoService.class);
internalCluster().getCurrentClusterManagerNodeInstance(ClusterService.class).addListener(event -> clusterInfoService.refresh());

// Reduce the disk space of every node until all of them breach the high disk watermark.
for (final String dataNodeName : dataNodeNames) {
populateNode(dataNodeName);
}

// Validate that the index-create cluster block is applied
assertBusy(() -> {
refreshDiskUsage();
ClusterState state = client().admin().cluster().prepareState().setLocal(true).get().getState();
assertTrue(state.blocks().hasGlobalBlockWithId(DiskThresholdMonitor.INDEX_CREATE_BLOCK_ID));
}, 60L, TimeUnit.SECONDS);
}
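// Illustration only, not part of this change: with the create-index block in
// place, a create call is expected to fail fast, along the lines of
//
//     expectThrows(ClusterBlockException.class,
//         () -> client().admin().indices().prepareCreate("any-new-index").get());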

public void testIndexCreateBlockIsRemovedWhenAnyNodesNotExceedHighWatermark() throws Exception {
internalCluster().startClusterManagerOnlyNode();
final List<String> dataNodeNames = internalCluster().startDataOnlyNodes(2);
final List<String> indexNames = new ArrayList<>();
ensureStableCluster(3);

final InternalClusterInfoService clusterInfoService = (InternalClusterInfoService) internalCluster()
.getCurrentClusterManagerNodeInstance(ClusterInfoService.class);
internalCluster().getCurrentClusterManagerNodeInstance(ClusterService.class).addListener(event -> clusterInfoService.refresh());

// Reduce the disk space of every node until all of them breach the high disk watermark.
for (final String dataNodeName : dataNodeNames) {
final String indexName = populateNode(dataNodeName);
indexNames.add(indexName);
}

// Validate that the index create block is applied on the cluster
assertBusy(() -> {
refreshDiskUsage();
ClusterState state = client().admin().cluster().prepareState().setLocal(true).get().getState();
assertTrue(state.blocks().hasGlobalBlockWithId(DiskThresholdMonitor.INDEX_CREATE_BLOCK_ID));
}, 60L, TimeUnit.SECONDS);

deleteIndices(indexNames);
// Validate that the index create block is removed from the cluster
assertBusy(() -> {
refreshDiskUsage();
ClusterState state = client().admin().cluster().prepareState().setLocal(true).get().getState();
assertFalse(state.blocks().hasGlobalBlockWithId(DiskThresholdMonitor.INDEX_CREATE_BLOCK_ID));
}, 60L, TimeUnit.SECONDS);
}

public void testIndexCreateBlockWithAReadOnlyBlock() throws Exception {
internalCluster().startClusterManagerOnlyNode();
final List<String> dataNodeNames = internalCluster().startDataOnlyNodes(2);
ensureStableCluster(3);
final InternalClusterInfoService clusterInfoService = (InternalClusterInfoService) internalCluster()
.getCurrentClusterManagerNodeInstance(ClusterInfoService.class);
internalCluster().getCurrentClusterManagerNodeInstance(ClusterService.class).addListener(event -> clusterInfoService.refresh());

final String indexName = populateNode(dataNodeNames.get(0));
// Apply a read_only_allow_delete block on one of the indices
// (can happen if the corresponding node has breached the flood-stage watermark).
final Settings readOnlySettings = Settings.builder()
.put(IndexMetadata.SETTING_READ_ONLY_ALLOW_DELETE, Boolean.TRUE.toString())
.build();
client().admin().indices().prepareUpdateSettings(indexName).setSettings(readOnlySettings).get();

// Reduce the disk space of all other nodes until each of them breaches the high disk watermark.
for (int i = 1; i < dataNodeNames.size(); i++) {
populateNode(dataNodeNames.get(i));
}

// Validate that the index-create cluster block is applied
assertBusy(() -> {
refreshDiskUsage();
ClusterState state = client().admin().cluster().prepareState().setLocal(true).get().getState();
assertTrue(state.blocks().hasGlobalBlockWithId(DiskThresholdMonitor.INDEX_CREATE_BLOCK_ID));
}, 60L, TimeUnit.SECONDS);
}

public void testRestoreSnapshotAllocationDoesNotExceedWatermark() throws Exception {
internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNode();
Expand All @@ -210,16 +291,7 @@ public void testRestoreSnapshotAllocationDoesNotExceedWatermark() throws Excepti
final Path dataNode0Path = internalCluster().getInstance(Environment.class, dataNodeName).dataFiles()[0];

final String indexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
-createIndex(
-indexName,
-Settings.builder()
-.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
-.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 6)
-.put(INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING.getKey(), "0ms")
-.put(IndexSettings.INDEX_MERGE_ON_FLUSH_ENABLED.getKey(), false)
-.build()
-);
-final long minShardSize = createReasonableSizedShards(indexName);
+final long minShardSize = createAndPopulateIndex(indexName);

final CreateSnapshotResponse createSnapshotResponse = client().admin()
.cluster()
@@ -274,6 +346,35 @@ public void testRestoreSnapshotAllocationDoesNotExceedWatermark() throws Exception {
assertBusyWithDiskUsageRefresh(dataNode0Id, indexName, hasSize(1));
}

private void deleteIndices(final List<String> indexNames) throws ExecutionException, InterruptedException {
for (String indexName : indexNames) {
assertAcked(client().admin().indices().delete(new DeleteIndexRequest(indexName)).get());
assertFalse("index [" + indexName + "] should have been deleted", indexExists(indexName));
}
}

private String populateNode(final String dataNodeName) throws Exception {
final Path dataNodePath = internalCluster().getInstance(Environment.class, dataNodeName).dataFiles()[0];
final String indexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
long minShardSize = createAndPopulateIndex(indexName);
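// Shrink the node's filesystem so that, once these shards are on disk, free
// space sits one byte below the high-watermark headroom, guaranteeing the
// node breaches the high watermark.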
fileSystemProvider.getTestFileStore(dataNodePath).setTotalSpace(minShardSize + WATERMARK_BYTES - 1L);
return indexName;
}

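// The settings below (no replicas, six shards, 0ms store-stats refresh, merge
// on flush disabled) keep shard sizes small, stable, and promptly reported,
// which the watermark arithmetic in populateNode() relies on.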
private long createAndPopulateIndex(final String indexName) throws Exception {
createIndex(
indexName,
Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 6)
.put(INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING.getKey(), "0ms")
.put(IndexSettings.INDEX_MERGE_ON_FLUSH_ENABLED.getKey(), false)
.build()
);

return createReasonableSizedShards(indexName);
}

private Set<ShardRouting> getShardRoutings(final String nodeId, final String indexName) {
final Set<ShardRouting> shardRoutings = new HashSet<>();
for (IndexShardRoutingTable indexShardRoutingTable : client().admin()
@@ -45,6 +45,7 @@
import org.opensearch.cluster.DiskUsage;
import org.opensearch.cluster.block.ClusterBlockLevel;
import org.opensearch.cluster.metadata.IndexMetadata;
import org.opensearch.cluster.metadata.Metadata;
import org.opensearch.cluster.routing.RerouteService;
import org.opensearch.cluster.routing.RoutingNode;
import org.opensearch.cluster.routing.RoutingNodes;
@@ -78,7 +79,7 @@
public class DiskThresholdMonitor {

private static final Logger logger = LogManager.getLogger(DiskThresholdMonitor.class);

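// Note (not in the original change): block id 10 appears to mirror
// Metadata.CLUSTER_CREATE_INDEX_BLOCK, the block toggled via the
// cluster.blocks.create_index setting used below.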
public static final int INDEX_CREATE_BLOCK_ID = 10;
private final DiskThresholdSettings diskThresholdSettings;
private final Client client;
private final Supplier<ClusterState> clusterStateSupplier;
@@ -286,7 +287,7 @@ public void onNewInfo(ClusterInfo info) {
}
}

-final ActionListener<Void> listener = new GroupedActionListener<>(ActionListener.wrap(this::checkFinished), 3);
+final ActionListener<Void> listener = new GroupedActionListener<>(ActionListener.wrap(this::checkFinished), 4);
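// Four grouped responses are now expected: the reroute, the read-only
// mark/release updates, and the index-create block update added below.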

if (reroute) {
logger.debug("rerouting shards: [{}]", explanation);
@@ -373,6 +374,15 @@ public void onNewInfo(ClusterInfo info) {
} else {
listener.onResponse(null);
}

// If all nodes are breaching the high disk watermark, apply an index-create block to avoid a red cluster.
if (nodesOverHighThreshold.size() == nodes.size()) {
applyIndexCreateBlock(listener, true);
} else if (state.getBlocks().hasGlobalBlockWithId(INDEX_CREATE_BLOCK_ID)) {
applyIndexCreateBlock(listener, false);
} else {
listener.onResponse(null);
}
}

// exposed for tests to override
@@ -406,6 +416,27 @@ private void setLastRunTimeMillis() {
lastRunTimeMillis.getAndUpdate(l -> Math.max(l, currentTimeMillisSupplier.getAsLong()));
}

protected void applyIndexCreateBlock(final ActionListener<Void> listener, boolean indexCreateBlock) {
final ActionListener<Void> wrappedListener = ActionListener.wrap(r -> {
setLastRunTimeMillis();
listener.onResponse(r);
}, e -> {
logger.debug("setting index create block failed", e);
setLastRunTimeMillis();
listener.onFailure(e);
});

final Settings indexCreateBlockSetting = indexCreateBlock
? Settings.builder().put(Metadata.SETTING_CREATE_INDEX_BLOCK_SETTING.getKey(), Boolean.TRUE.toString()).build()
: Settings.builder().putNull(Metadata.SETTING_CREATE_INDEX_BLOCK_SETTING.getKey()).build();

client.admin()
.cluster()
.prepareUpdateSettings()
.setPersistentSettings(indexCreateBlockSetting)
.execute(ActionListener.map(wrappedListener, r -> null));
}
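// Illustration only, not part of this change: because the block is persisted
// via the cluster.blocks.create_index setting, an operator could clear it by
// hand once disk pressure is resolved, roughly:
//
//     client.admin().cluster().prepareUpdateSettings()
//         .setPersistentSettings(
//             Settings.builder().putNull(Metadata.SETTING_CREATE_INDEX_BLOCK_SETTING.getKey())
//         )
//         .get();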

protected void updateIndicesReadOnly(Set<String> indicesToUpdate, ActionListener<Void> listener, boolean readOnly) {
// set read-only block but don't block on the response
ActionListener<Void> wrappedListener = ActionListener.wrap(r -> {
@@ -129,6 +129,11 @@ protected void updateIndicesReadOnly(Set<String> indicesToMarkReadOnly, ActionListener<Void> listener, boolean readOnly) {
assertTrue(readOnly);
listener.onResponse(null);
}

@Override
protected void applyIndexCreateBlock(ActionListener<Void> listener, boolean indexCreateBlock) {
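// No-op, like the other overrides in these tests: the unit tests assert the
// monitor's decision to toggle the block, not the settings call itself.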
listener.onResponse(null);
}
};

ImmutableOpenMap.Builder<String, DiskUsage> builder = ImmutableOpenMap.builder();
@@ -185,6 +190,11 @@ protected void updateIndicesReadOnly(Set<String> indicesToMarkReadOnly, ActionListener<Void> listener, boolean readOnly) {
assertTrue(readOnly);
listener.onResponse(null);
}

@Override
protected void applyIndexCreateBlock(ActionListener<Void> listener, boolean indexCreateBlock) {
listener.onResponse(null);
}
};

indices.set(null);
@@ -372,6 +382,12 @@ protected void updateIndicesReadOnly(Set<String> indicesToUpdate, ActionListener<Void> listener, boolean readOnly) {
}
listener.onResponse(null);
}

@Override
protected void applyIndexCreateBlock(ActionListener<Void> listener, boolean indexCreateBlock) {
listener.onResponse(null);
}

};
indicesToMarkReadOnly.set(null);
indicesToRelease.set(null);
@@ -428,6 +444,11 @@ protected void updateIndicesReadOnly(Set<String> indicesToUpdate, ActionListener<Void> listener, boolean readOnly) {
}
listener.onResponse(null);
}

@Override
protected void applyIndexCreateBlock(ActionListener<Void> listener, boolean indexCreateBlock) {
listener.onResponse(null);
}
};
// When free disk on any of node1 or node2 goes below the 5% flood watermark, apply the index block to indices that don't already have it
indicesToMarkReadOnly.set(null);
@@ -536,6 +557,11 @@ protected void updateIndicesReadOnly(Set<String> indicesToMarkReadOnly, ActionListener<Void> listener, boolean readOnly) {
long sizeOfRelocatingShards(RoutingNode routingNode, DiskUsage diskUsage, ClusterInfo info, ClusterState reroutedClusterState) {
return relocatingShardSizeRef.get();
}

@Override
protected void applyIndexCreateBlock(ActionListener<Void> listener, boolean indexCreateBlock) {
listener.onResponse(null);
}
};

final ImmutableOpenMap.Builder<String, DiskUsage> allDisksOkBuilder;
