Merge branch 'main' into fix_unsinged_long
nik9000 committed Sep 12, 2023
2 parents ee05f8f + 2072be9 commit 6694446
Showing 33 changed files with 387 additions and 289 deletions.
18 changes: 18 additions & 0 deletions .buildkite/pull-requests.json
@@ -0,0 +1,18 @@
{
"jobs": [
{
"enabled": true,
"pipeline_slug": "elasticsearch-pull-request",
"allow_org_users": true,
"allowed_repo_permissions": [
"admin",
"write"
],
"set_commit_status": false,
"build_on_commit": true,
"build_on_comment": true,
"trigger_comment_regex": "buildkite\\W+elasticsearch-ci.+",
"labels": ["buildkite-opt-in"]
}
]
}
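
As a quick illustration of the `trigger_comment_regex` above, a PR comment that starts with `buildkite` followed by `elasticsearch-ci` would trigger a build. A minimal standalone sketch; the sample comment strings are invented for illustration:

[source,java]
----
import java.util.regex.Pattern;

public class TriggerCommentCheck {
    public static void main(String[] args) {
        // Same pattern as trigger_comment_regex in .buildkite/pull-requests.json above
        Pattern trigger = Pattern.compile("buildkite\\W+elasticsearch-ci.+");

        System.out.println(trigger.matcher("buildkite elasticsearch-ci test this").find()); // true
        System.out.println(trigger.matcher("LGTM, thanks!").find());                        // false
    }
}
----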
22 changes: 11 additions & 11 deletions .github/CODEOWNERS
@@ -3,17 +3,17 @@
# For more info, see https://help.github.com/articles/about-codeowners/

# Stack Monitoring index templates
x-pack/plugin/core/src/main/resources/monitoring-alerts-7.json @elastic/infra-monitoring-ui
x-pack/plugin/core/src/main/resources/monitoring-beats-mb.json @elastic/infra-monitoring-ui
x-pack/plugin/core/src/main/resources/monitoring-beats.json @elastic/infra-monitoring-ui
x-pack/plugin/core/src/main/resources/monitoring-ent-search-mb.json @elastic/infra-monitoring-ui
x-pack/plugin/core/src/main/resources/monitoring-es-mb.json @elastic/infra-monitoring-ui
x-pack/plugin/core/src/main/resources/monitoring-es.json @elastic/infra-monitoring-ui
x-pack/plugin/core/src/main/resources/monitoring-kibana-mb.json @elastic/infra-monitoring-ui
x-pack/plugin/core/src/main/resources/monitoring-kibana.json @elastic/infra-monitoring-ui
x-pack/plugin/core/src/main/resources/monitoring-logstash-mb.json @elastic/infra-monitoring-ui
x-pack/plugin/core/src/main/resources/monitoring-logstash.json @elastic/infra-monitoring-ui
x-pack/plugin/core/src/main/resources/monitoring-mb-ilm-policy.json @elastic/infra-monitoring-ui
x-pack/plugin/core/template-resources/src/main/resources/monitoring-alerts-7.json @elastic/infra-monitoring-ui
x-pack/plugin/core/template-resources/src/main/resources/monitoring-beats-mb.json @elastic/infra-monitoring-ui
x-pack/plugin/core/template-resources/src/main/resources/monitoring-beats.json @elastic/infra-monitoring-ui
x-pack/plugin/core/template-resources/src/main/resources/monitoring-ent-search-mb.json @elastic/infra-monitoring-ui
x-pack/plugin/core/template-resources/src/main/resources/monitoring-es-mb.json @elastic/infra-monitoring-ui
x-pack/plugin/core/template-resources/src/main/resources/monitoring-es.json @elastic/infra-monitoring-ui
x-pack/plugin/core/template-resources/src/main/resources/monitoring-kibana-mb.json @elastic/infra-monitoring-ui
x-pack/plugin/core/template-resources/src/main/resources/monitoring-kibana.json @elastic/infra-monitoring-ui
x-pack/plugin/core/template-resources/src/main/resources/monitoring-logstash-mb.json @elastic/infra-monitoring-ui
x-pack/plugin/core/template-resources/src/main/resources/monitoring-logstash.json @elastic/infra-monitoring-ui
x-pack/plugin/core/template-resources/src/main/resources/monitoring-mb-ilm-policy.json @elastic/infra-monitoring-ui
x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java @elastic/infra-monitoring-ui

# Fleet
5 changes: 5 additions & 0 deletions docs/changelog/99117.yaml
@@ -0,0 +1,5 @@
pr: 99117
summary: Do not report failure after connections are made
area: Network
type: bug
issues: []
6 changes: 6 additions & 0 deletions docs/changelog/99382.yaml
@@ -0,0 +1,6 @@
pr: 99382
summary: "ESQL: create a Vector when needed for IN"
area: ES|QL
type: bug
issues:
- 99347
6 changes: 6 additions & 0 deletions docs/changelog/99417.yaml
@@ -0,0 +1,6 @@
pr: 99417
summary: Disable `FilterByFilterAggregator` through `ClusterSettings`
area: Aggregations
type: enhancement
issues:
- 99335
5 changes: 0 additions & 5 deletions docs/changelog/99434.yaml

This file was deleted.

2 changes: 1 addition & 1 deletion docs/reference/esql/source-commands/from.asciidoc
@@ -25,7 +25,7 @@ or aliases:

[source,esql]
----
FROM employees-00001,employees-*
FROM employees-00001,other-employees-*
----

Use the `METADATA` directive to enable <<esql-metadata-fields,metadata fields>>:
2 changes: 1 addition & 1 deletion docs/reference/indices/data-stream-stats.asciidoc
@@ -120,7 +120,7 @@ Total number of selected data streams.
(integer)
Total number of backing indices for the selected data streams.

`total_store_sizes`::
`total_store_size`::
(<<byte-units,byte value>>)
Total size of all shards for the selected data streams.
This property is included only if the `human` query parameter is `true`.
@@ -1,7 +1,6 @@
[[remote-clusters-api-key]]
=== Add remote clusters using API key authentication

coming::[8.10]
beta::[]

API key authentication enables a local cluster to authenticate itself with a
@@ -23,7 +23,6 @@
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PartETag;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.model.S3ObjectSummary;
import com.amazonaws.services.s3.model.UploadPartRequest;
import com.amazonaws.services.s3.model.UploadPartResult;

@@ -60,7 +59,6 @@
import java.io.OutputStream;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Date;
import java.util.Iterator;
import java.util.List;
@@ -272,14 +270,13 @@ public void writeBlobAtomic(String blobName, BytesReference bytes, boolean failI
}

@Override
@SuppressWarnings("unchecked")
public DeleteResult delete() throws IOException {
final AtomicLong deletedBlobs = new AtomicLong();
final AtomicLong deletedBytes = new AtomicLong();
try (AmazonS3Reference clientReference = blobStore.clientReference()) {
ObjectListing prevListing = null;
while (true) {
ObjectListing list;
final ObjectListing list;
if (prevListing != null) {
final var listNextBatchOfObjectsRequest = new ListNextBatchOfObjectsRequest(prevListing);
listNextBatchOfObjectsRequest.setRequestMetricCollector(blobStore.listMetricCollector);
@@ -291,26 +288,16 @@ public DeleteResult delete() throws IOException {
listObjectsRequest.setRequestMetricCollector(blobStore.listMetricCollector);
list = SocketAccess.doPrivileged(() -> clientReference.client().listObjects(listObjectsRequest));
}
final Iterator<S3ObjectSummary> objectSummaryIterator = list.getObjectSummaries().iterator();
final Iterator<String> blobNameIterator = new Iterator<>() {
@Override
public boolean hasNext() {
return objectSummaryIterator.hasNext();
}

@Override
public String next() {
final S3ObjectSummary summary = objectSummaryIterator.next();
deletedBlobs.incrementAndGet();
deletedBytes.addAndGet(summary.getSize());
return summary.getKey();
}
};
final Iterator<String> blobNameIterator = Iterators.map(list.getObjectSummaries().iterator(), summary -> {
deletedBlobs.incrementAndGet();
deletedBytes.addAndGet(summary.getSize());
return summary.getKey();
});
if (list.isTruncated()) {
doDeleteBlobs(blobNameIterator, false);
blobStore.deleteBlobsIgnoringIfNotExists(blobNameIterator);
prevListing = list;
} else {
doDeleteBlobs(Iterators.concat(blobNameIterator, Collections.singletonList(keyPath).iterator()), false);
blobStore.deleteBlobsIgnoringIfNotExists(Iterators.concat(blobNameIterator, Iterators.single(keyPath)));
break;
}
}
@@ -322,31 +309,7 @@ public String next() {

@Override
public void deleteBlobsIgnoringIfNotExists(Iterator<String> blobNames) throws IOException {
doDeleteBlobs(blobNames, true);
}

private void doDeleteBlobs(Iterator<String> blobNames, boolean relative) throws IOException {
if (blobNames.hasNext() == false) {
return;
}
final Iterator<String> outstanding;
if (relative) {
outstanding = new Iterator<>() {
@Override
public boolean hasNext() {
return blobNames.hasNext();
}

@Override
public String next() {
return buildKey(blobNames.next());
}
};
} else {
outstanding = blobNames;
}

blobStore.deleteBlobsIgnoringIfNotExists(outstanding);
blobStore.deleteBlobsIgnoringIfNotExists(Iterators.map(blobNames, this::buildKey));
}

@Override
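The hunks above replace a hand-rolled anonymous `Iterator` with `Iterators.map`, which applies a function to each object summary lazily as the delete loop consumes it, so the deleted-blob and deleted-byte counters are still updated on the fly. A generic sketch of that lazy-mapping shape, not the Elasticsearch `Iterators` utility itself:

[source,java]
----
import java.util.Iterator;
import java.util.List;
import java.util.function.Function;

public class LazyMapExample {
    // Wraps a source iterator and applies fn to each element only when it is consumed.
    static <T, R> Iterator<R> map(Iterator<T> source, Function<? super T, ? extends R> fn) {
        return new Iterator<>() {
            @Override
            public boolean hasNext() {
                return source.hasNext();
            }

            @Override
            public R next() {
                return fn.apply(source.next());
            }
        };
    }

    public static void main(String[] args) {
        Iterator<String> keys = map(List.of("a/1", "a/2").iterator(), s -> s.toUpperCase());
        keys.forEachRemaining(System.out::println); // A/1, A/2
    }
}
----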
@@ -215,6 +215,10 @@ public BlobContainer blobContainer(BlobPath path) {

@Override
public void deleteBlobsIgnoringIfNotExists(Iterator<String> blobNames) throws IOException {
if (blobNames.hasNext() == false) {
return;
}

final List<String> partition = new ArrayList<>();
try (AmazonS3Reference clientReference = clientReference()) {
// S3 API only allows 1k blobs per delete so we split up the given blobs into requests of max. 1k deletes
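The added guard above returns early when there is nothing to delete, before any client is acquired. The comment in the hunk also notes the S3 bulk-delete limit of 1,000 keys per request; a rough sketch of how an iterator of blob names could be split into batches of that size (the helper below is illustrative, not the actual implementation):

[source,java]
----
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

public class DeleteBatching {
    private static final int MAX_BULK_DELETES = 1_000; // S3 bulk-delete limit noted in the comment above

    // Illustrative only: drain blobNames into batches of at most MAX_BULK_DELETES entries,
    // mirroring how the delete requests are partitioned.
    static List<List<String>> partition(Iterator<String> blobNames) {
        List<List<String>> batches = new ArrayList<>();
        List<String> current = new ArrayList<>(MAX_BULK_DELETES);
        while (blobNames.hasNext()) {
            current.add(blobNames.next());
            if (current.size() == MAX_BULK_DELETES) {
                batches.add(current);
                current = new ArrayList<>(MAX_BULK_DELETES);
            }
        }
        if (current.isEmpty() == false) {
            batches.add(current);
        }
        return batches;
    }
}
----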
@@ -26,6 +26,7 @@
import org.apache.lucene.search.KnnFloatVectorQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.VectorUtil;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.index.IndexVersion;
import org.elasticsearch.index.fielddata.FieldDataContext;
@@ -859,10 +860,7 @@ public Query createKnnQuery(byte[] queryVector, int numCands, Query filter, Floa
}

if (similarity == VectorSimilarity.DOT_PRODUCT || similarity == VectorSimilarity.COSINE) {
float squaredMagnitude = 0.0f;
for (byte b : queryVector) {
squaredMagnitude += b * b;
}
float squaredMagnitude = VectorUtil.dotProduct(queryVector, queryVector);
elementType.checkVectorMagnitude(similarity, elementType.errorByteElementsAppender(queryVector), squaredMagnitude);
}
Query knnQuery = new KnnByteVectorQuery(name(), queryVector, numCands, filter);
@@ -891,10 +889,7 @@ public Query createKnnQuery(float[] queryVector, int numCands, Query filter, Flo
elementType.checkVectorBounds(queryVector);

if (similarity == VectorSimilarity.DOT_PRODUCT || similarity == VectorSimilarity.COSINE) {
float squaredMagnitude = 0.0f;
for (float e : queryVector) {
squaredMagnitude += e * e;
}
float squaredMagnitude = VectorUtil.dotProduct(queryVector, queryVector);
elementType.checkVectorMagnitude(similarity, elementType.errorFloatElementsAppender(queryVector), squaredMagnitude);
}
Query knnQuery = switch (elementType) {
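Both hunks above swap a manual accumulation loop for Lucene's `VectorUtil.dotProduct`, relying on the identity that a vector's squared magnitude equals its dot product with itself. A small standalone check of that equivalence:

[source,java]
----
import org.apache.lucene.util.VectorUtil;

public class SquaredMagnitudeCheck {
    public static void main(String[] args) {
        float[] queryVector = { 3.0f, 4.0f };

        // The removed loop: accumulate e * e over all elements.
        float manual = 0.0f;
        for (float e : queryVector) {
            manual += e * e;
        }

        // The replacement: dot product of the vector with itself.
        float viaDotProduct = VectorUtil.dotProduct(queryVector, queryVector);

        System.out.println(manual);        // 25.0
        System.out.println(viaDotProduct); // 25.0
    }
}
----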
@@ -9,6 +9,7 @@
package org.elasticsearch.index.mapper.vectors;

import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.VectorUtil;
import org.elasticsearch.index.IndexVersion;

import java.nio.ByteBuffer;
@@ -46,12 +47,7 @@ public static float decodeMagnitude(IndexVersion indexVersion, BytesRef vectorBR
* Calculates vector magnitude
*/
private static float calculateMagnitude(float[] decodedVector) {
double magnitude = 0.0f;
for (int i = 0; i < decodedVector.length; i++) {
magnitude += decodedVector[i] * decodedVector[i];
}
magnitude = Math.sqrt(magnitude);
return (float) magnitude;
return (float) Math.sqrt(VectorUtil.dotProduct(decodedVector, decodedVector));
}

public static float getMagnitude(IndexVersion indexVersion, BytesRef vectorBR, float[] decodedVector) {
@@ -8,6 +8,8 @@

package org.elasticsearch.script.field.vectors;

import org.apache.lucene.util.VectorUtil;

import java.util.List;

/**
@@ -151,11 +153,7 @@ default double cosineSimilarity(Object queryVector) {
int size();

static float getMagnitude(byte[] vector) {
int mag = 0;
for (int elem : vector) {
mag += elem * elem;
}
return (float) Math.sqrt(mag);
return (float) Math.sqrt(VectorUtil.dotProduct(vector, vector));
}

static float getMagnitude(byte[] vector, int dims) {
@@ -170,11 +168,7 @@ static float getMagnitude(byte[] vector, int dims) {
}

static float getMagnitude(float[] vector) {
double mag = 0.0f;
for (float elem : vector) {
mag += elem * elem;
}
return (float) Math.sqrt(mag);
return (float) Math.sqrt(VectorUtil.dotProduct(vector, vector));
}

static float getMagnitude(List<Number> vector) {
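The `getMagnitude` helpers above apply the same dot-product identity to byte vectors. My understanding is that Lucene's `VectorUtil.dotProduct(byte[], byte[])` accumulates into an `int`, which `Math.sqrt` widens before the cast back to `float`; a brief sketch under that assumption:

[source,java]
----
import org.apache.lucene.util.VectorUtil;

public class ByteMagnitudeCheck {
    public static void main(String[] args) {
        byte[] vector = { 3, 4 };
        // Assumed: the byte[] overload returns an int squared magnitude (here 25).
        float magnitude = (float) Math.sqrt(VectorUtil.dotProduct(vector, vector));
        System.out.println(magnitude); // 5.0
    }
}
----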
@@ -134,7 +134,7 @@ final void add(QueryToFilterAdapter filter) throws IOException {
* Build the adapter or {@code null} if this isn't a valid rewrite.
*/
public final T build() throws IOException {
if (false == valid) {
if (false == valid || aggCtx.enableRewriteToFilterByFilter() == false) {
return null;
}
class AdapterBuild implements CheckedFunction<AggregatorFactories, FilterByFilterAggregator, IOException> {
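The extra condition above lets the aggregation context veto the filter-by-filter rewrite, which, per the changelog entry for PR 99417, is now controllable through `ClusterSettings`. A hedged sketch of how such a dynamic boolean setting is typically declared and watched in Elasticsearch; the setting name and the wrapper class here are assumptions for illustration only:

[source,java]
----
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;

public class FilterByFilterToggle {
    // Hypothetical setting name; only the changelog confirms that the rewrite
    // can be disabled through ClusterSettings.
    static final Setting<Boolean> REWRITE_TO_FILTER_BY_FILTER = Setting.boolSetting(
        "search.aggs.rewrite_to_filter_by_filter",
        true,
        Setting.Property.Dynamic,
        Setting.Property.NodeScope
    );

    private volatile boolean enableRewriteToFilterByFilter = true;

    FilterByFilterToggle(ClusterSettings clusterSettings) {
        // Keep the cached flag in sync when the setting is updated dynamically on the cluster.
        clusterSettings.addSettingsUpdateConsumer(REWRITE_TO_FILTER_BY_FILTER, v -> enableRewriteToFilterByFilter = v);
    }
}
----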
@@ -32,7 +32,6 @@
import java.util.Collections;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
@@ -315,19 +314,13 @@ public void onFailure(Exception e) {
}));
}
} else {
int openConnections = connectionManager.size();
if (openConnections == 0) {
assert false : "should not happen since onFailure should catch it and report with underlying cause";
finished.onFailure(getNoSeedNodeLeftException(Set.of()));
} else {
logger.debug(
"unable to open maximum number of connections [remote cluster: {}, opened: {}, maximum: {}]",
clusterAlias,
openConnections,
maxNumConnections
);
finished.onResponse(null);
}
logger.debug(
"unable to open maximum number of connections [remote cluster: {}, opened: {}, maximum: {}]",
clusterAlias,
connectionManager.size(),
maxNumConnections
);
finished.onResponse(null);
}
}

