diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentJoinAggregator.java b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentJoinAggregator.java
index af38246fd1536..48d9e8305e09d 100644
--- a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentJoinAggregator.java
+++ b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentJoinAggregator.java
@@ -122,8 +122,7 @@ protected void prepareSubAggs(long[] ordsToCollect) throws IOException {
continue;
}
DocIdSetIterator childDocsIter = childDocsScorer.iterator();
-
- final LeafBucketCollector sub = collectableSubAggregators.getLeafCollector(ctx);
+ final LeafBucketCollector sub = collectableSubAggregators.getLeafCollector(new AggregationExecutionContext(ctx, null, null));
final SortedSetDocValues globalOrdinals = valuesSource.globalOrdinalsValues(ctx);
// Set the scorer, since we now replay only the child docIds
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/MultiBucketCollector.java b/server/src/main/java/org/elasticsearch/search/aggregations/MultiBucketCollector.java
index aa811b15d79b2..657633f774c74 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/MultiBucketCollector.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/MultiBucketCollector.java
@@ -9,7 +9,6 @@
package org.elasticsearch.search.aggregations;
import org.apache.lucene.search.CollectionTerminatedException;
-import org.apache.lucene.search.Collector;
import org.apache.lucene.search.LeafCollector;
import org.apache.lucene.search.MultiCollector;
import org.apache.lucene.search.Scorable;
@@ -127,7 +126,7 @@ private MultiBucketCollector(boolean terminateIfNoop, BucketCollector... collect
this.terminateIfNoop = terminateIfNoop;
this.collectors = collectors;
int numNeedsScores = 0;
- for (Collector collector : collectors) {
+ for (BucketCollector collector : collectors) {
if (collector.scoreMode().needsScores()) {
numNeedsScores += 1;
}
@@ -138,7 +137,7 @@ private MultiBucketCollector(boolean terminateIfNoop, BucketCollector... collect
@Override
public ScoreMode scoreMode() {
ScoreMode scoreMode = null;
- for (Collector collector : collectors) {
+ for (BucketCollector collector : collectors) {
if (scoreMode == null) {
scoreMode = collector.scoreMode();
} else if (scoreMode != collector.scoreMode()) {
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java
index 679bdcf5c0f7a..0c92cf6d38287 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java
@@ -8,7 +8,6 @@
package org.elasticsearch.search.aggregations.bucket;
-import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.CollectionTerminatedException;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
@@ -42,12 +41,12 @@
*/
public class BestBucketsDeferringCollector extends DeferringBucketCollector {
static class Entry {
- final LeafReaderContext context;
+ final AggregationExecutionContext aggCtx;
final PackedLongValues docDeltas;
final PackedLongValues buckets;
- Entry(LeafReaderContext context, PackedLongValues docDeltas, PackedLongValues buckets) {
- this.context = Objects.requireNonNull(context);
+ Entry(AggregationExecutionContext aggCtx, PackedLongValues docDeltas, PackedLongValues buckets) {
+ this.aggCtx = Objects.requireNonNull(aggCtx);
this.docDeltas = Objects.requireNonNull(docDeltas);
this.buckets = Objects.requireNonNull(buckets);
}
@@ -59,7 +58,7 @@ static class Entry {
private List<Entry> entries = new ArrayList<>();
private BucketCollector collector;
- private LeafReaderContext context;
+ private AggregationExecutionContext aggCtx;
private PackedLongValues.Builder docDeltasBuilder;
private PackedLongValues.Builder bucketsBuilder;
private LongHash selectedBuckets;
@@ -93,10 +92,10 @@ public void setDeferredCollector(Iterable<BucketCollector> deferredCollectors) {
* Button up the builders for the current leaf.
*/
private void finishLeaf() {
- if (context != null) {
+ if (aggCtx != null) {
assert docDeltasBuilder != null && bucketsBuilder != null;
assert docDeltasBuilder.size() > 0;
- entries.add(new Entry(context, docDeltasBuilder.build(), bucketsBuilder.build()));
+ entries.add(new Entry(aggCtx, docDeltasBuilder.build(), bucketsBuilder.build()));
clearLeaf();
}
}
@@ -105,22 +104,22 @@ private void finishLeaf() {
* Clear the status for the current leaf.
*/
private void clearLeaf() {
- context = null;
+ aggCtx = null;
docDeltasBuilder = null;
bucketsBuilder = null;
}
@Override
- public LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCtx) throws IOException {
+ public LeafBucketCollector getLeafCollector(AggregationExecutionContext context) throws IOException {
finishLeaf();
return new LeafBucketCollector() {
int lastDoc = 0;
@Override
- public void collect(int doc, long bucket) throws IOException {
- if (context == null) {
- context = aggCtx.getLeafReaderContext();
+ public void collect(int doc, long bucket) {
+ if (aggCtx == null) {
+ aggCtx = context;
docDeltasBuilder = PackedLongValues.packedBuilder(PackedInts.DEFAULT);
bucketsBuilder = PackedLongValues.packedBuilder(PackedInts.DEFAULT);
}
@@ -169,10 +168,10 @@ public void prepareSelectedBuckets(long... selectedBuckets) throws IOException {
for (Entry entry : entries) {
assert entry.docDeltas.size() > 0 : "segment should have at least one document to replay, got 0";
try {
- final LeafBucketCollector leafCollector = collector.getLeafCollector(entry.context);
+ final LeafBucketCollector leafCollector = collector.getLeafCollector(entry.aggCtx);
DocIdSetIterator scoreIt = null;
if (needsScores) {
- Scorer scorer = weight.scorer(entry.context);
+ Scorer scorer = weight.scorer(entry.aggCtx.getLeafReaderContext());
// We don't need to check if the scorer is null
// since we are sure that there are documents to replay (entry.docDeltas it not empty).
scoreIt = scorer.iterator();
@@ -266,7 +265,7 @@ public void rewriteBuckets(LongUnaryOperator howToRewrite) {
// Only create an entry if this segment has buckets after merging
if (newBuckets.size() > 0) {
assert newDocDeltas.size() > 0 : "docDeltas was empty but we had buckets";
- newEntries.add(new Entry(sourceEntry.context, newDocDeltas.build(), newBuckets.build()));
+ newEntries.add(new Entry(sourceEntry.aggCtx, newDocDeltas.build(), newBuckets.build()));
}
}
entries = newEntries;
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java
index 1861cbe7c9551..3eac58db2d0df 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java
@@ -46,13 +46,13 @@ public abstract class BucketsAggregator extends AggregatorBase {
public BucketsAggregator(
String name,
AggregatorFactories factories,
- AggregationContext context,
+ AggregationContext aggCtx,
Aggregator parent,
CardinalityUpperBound bucketCardinality,
Map<String, Object> metadata
) throws IOException {
- super(name, factories, context, parent, bucketCardinality, metadata);
- multiBucketConsumer = context.multiBucketConsumer();
+ super(name, factories, aggCtx, parent, bucketCardinality, metadata);
+ multiBucketConsumer = aggCtx.multiBucketConsumer();
docCounts = bigArrays().newLongArray(1, true);
docCountProvider = new DocCountProvider();
}
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java
index 82202363e56d4..dd91224f21819 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java
@@ -78,7 +78,7 @@ public final class CompositeAggregator extends BucketsAggregator implements Size
private final DateHistogramValuesSource[] innerSizedBucketAggregators;
private final List<Entry> entries = new ArrayList<>();
- private LeafReaderContext currentLeaf;
+ private AggregationExecutionContext currentAggCtx;
private RoaringDocIdSet.Builder docIdSetBuilder;
private BucketCollector deferredCollectors;
@@ -87,14 +87,14 @@ public final class CompositeAggregator extends BucketsAggregator implements Size
CompositeAggregator(
String name,
AggregatorFactories factories,
- AggregationContext context,
+ AggregationContext aggCtx,
Aggregator parent,
Map<String, Object> metadata,
int size,
CompositeValuesSourceConfig[] sourceConfigs,
CompositeKey rawAfterKey
) throws IOException {
- super(name, factories, context, parent, CardinalityUpperBound.MANY, metadata);
+ super(name, factories, aggCtx, parent, CardinalityUpperBound.MANY, metadata);
this.size = size;
this.sourceNames = Arrays.stream(sourceConfigs).map(CompositeValuesSourceConfig::name).toList();
this.reverseMuls = Arrays.stream(sourceConfigs).mapToInt(CompositeValuesSourceConfig::reverseMul).toArray();
@@ -102,7 +102,7 @@ public final class CompositeAggregator extends BucketsAggregator implements Size
this.formats = Arrays.stream(sourceConfigs).map(CompositeValuesSourceConfig::format).toList();
this.sources = new SingleDimensionValuesSource<?>[sourceConfigs.length];
// check that the provided size is not greater than the search.max_buckets setting
- int bucketLimit = context.multiBucketConsumer().getLimit();
+ int bucketLimit = aggCtx.multiBucketConsumer().getLimit();
if (size > bucketLimit) {
throw new MultiBucketConsumerService.TooManyBucketsException(
"Trying to create too many buckets. Must be less than or equal"
@@ -120,8 +120,8 @@ public final class CompositeAggregator extends BucketsAggregator implements Size
List<DateHistogramValuesSource> dateHistogramValuesSources = new ArrayList<>();
for (int i = 0; i < sourceConfigs.length; i++) {
this.sources[i] = sourceConfigs[i].createValuesSource(
- context.bigArrays(),
- context.searcher().getIndexReader(),
+ aggCtx.bigArrays(),
+ aggCtx.searcher().getIndexReader(),
size,
this::addRequestCircuitBreakerBytes
);
@@ -130,7 +130,7 @@ public final class CompositeAggregator extends BucketsAggregator implements Size
}
}
this.innerSizedBucketAggregators = dateHistogramValuesSources.toArray(new DateHistogramValuesSource[0]);
- this.queue = new CompositeValuesCollectorQueue(context.bigArrays(), sources, size);
+ this.queue = new CompositeValuesCollectorQueue(aggCtx.bigArrays(), sources, size);
if (rawAfterKey != null) {
try {
this.queue.setAfterKey(rawAfterKey);
@@ -230,10 +230,10 @@ public InternalAggregation buildEmptyAggregation() {
}
private void finishLeaf() {
- if (currentLeaf != null) {
+ if (currentAggCtx != null) {
DocIdSet docIdSet = docIdSetBuilder.build();
- entries.add(new Entry(currentLeaf, docIdSet));
- currentLeaf = null;
+ entries.add(new Entry(currentAggCtx, docIdSet));
+ currentAggCtx = null;
docIdSetBuilder = null;
}
}
@@ -454,7 +454,7 @@ protected LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCt
// in the queue.
DocIdSet docIdSet = sortedDocsProducer.processLeaf(topLevelQuery(), queue, aggCtx.getLeafReaderContext(), fillDocIdSet);
if (fillDocIdSet) {
- entries.add(new Entry(aggCtx.getLeafReaderContext(), docIdSet));
+ entries.add(new Entry(aggCtx, docIdSet));
}
// We can bypass search entirely for this segment, the processing is done in the previous call.
// Throwing this exception will terminate the execution of the search for this root aggregation,
@@ -463,7 +463,7 @@ protected LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCt
return LeafBucketCollector.NO_OP_COLLECTOR;
} else {
if (fillDocIdSet) {
- currentLeaf = aggCtx.getLeafReaderContext();
+ currentAggCtx = aggCtx;
docIdSetBuilder = new RoaringDocIdSet.Builder(aggCtx.getLeafReaderContext().reader().maxDoc());
}
if (rawAfterKey != null && sortPrefixLen > 0) {
@@ -538,11 +538,14 @@ private void runDeferredCollections() throws IOException {
if (docIdSetIterator == null) {
continue;
}
- final LeafBucketCollector subCollector = deferredCollectors.getLeafCollector(entry.context);
- final LeafBucketCollector collector = queue.getLeafCollector(entry.context, getSecondPassCollector(subCollector));
+ final LeafBucketCollector subCollector = deferredCollectors.getLeafCollector(entry.aggCtx);
+ final LeafBucketCollector collector = queue.getLeafCollector(
+ entry.aggCtx.getLeafReaderContext(),
+ getSecondPassCollector(subCollector)
+ );
DocIdSetIterator scorerIt = null;
if (needsScores) {
- Scorer scorer = weight.scorer(entry.context);
+ Scorer scorer = weight.scorer(entry.aggCtx.getLeafReaderContext());
if (scorer != null) {
scorerIt = scorer.iterator();
subCollector.setScorer(scorer);
@@ -605,11 +608,11 @@ public double bucketSize(Rounding.DateTimeUnit unit) {
}
private static class Entry {
- final LeafReaderContext context;
+ final AggregationExecutionContext aggCtx;
final DocIdSet docIdSet;
- Entry(LeafReaderContext context, DocIdSet docIdSet) {
- this.context = context;
+ Entry(AggregationExecutionContext aggCtx, DocIdSet docIdSet) {
+ this.aggCtx = aggCtx;
this.docIdSet = docIdSet;
}
}
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterByFilterAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterByFilterAggregator.java
index 9bcf8de963031..eaa35d5fe9c1b 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterByFilterAggregator.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterByFilterAggregator.java
@@ -47,7 +47,7 @@ public abstract static class AdapterBuilder<T> {
private final String name;
private final List<QueryToFilterAdapter> filters = new ArrayList<>();
private final boolean keyed;
- private final AggregationContext context;
+ private final AggregationContext aggCtx;
private final Aggregator parent;
private final CardinalityUpperBound cardinality;
private final Map<String, Object> metadata;
@@ -58,18 +58,18 @@ public AdapterBuilder(
String name,
boolean keyed,
String otherBucketKey,
- AggregationContext context,
+ AggregationContext aggCtx,
Aggregator parent,
CardinalityUpperBound cardinality,
Map<String, Object> metadata
) throws IOException {
this.name = name;
this.keyed = keyed;
- this.context = context;
+ this.aggCtx = aggCtx;
this.parent = parent;
this.cardinality = cardinality;
this.metadata = metadata;
- this.rewrittenTopLevelQuery = context.searcher().rewrite(context.query());
+ this.rewrittenTopLevelQuery = aggCtx.searcher().rewrite(aggCtx.query());
this.valid = parent == null && otherBucketKey == null;
}
@@ -93,7 +93,7 @@ public final void add(String key, Query query) throws IOException {
valid = false;
return;
}
- add(QueryToFilterAdapter.build(context.searcher(), key, query));
+ add(QueryToFilterAdapter.build(aggCtx.searcher(), key, query));
}
final void add(QueryToFilterAdapter filter) throws IOException {
@@ -120,7 +120,7 @@ final void add(QueryToFilterAdapter filter) throws IOException {
* fields are expensive to decode and the overhead of iterating per
* filter causes us to decode doc counts over and over again.
*/
- if (context.hasDocCountField()) {
+ if (aggCtx.hasDocCountField()) {
valid = false;
return;
}
@@ -140,7 +140,7 @@ class AdapterBuild implements CheckedFunction<AggregatorFactories, FilterByFilte
List<QueryToFilterAdapter> filters,
boolean keyed,
- AggregationContext context,
+ AggregationContext aggCtx,
Aggregator parent,
CardinalityUpperBound cardinality,
Map<String, Object> metadata
) throws IOException {
- super(name, factories, filters, keyed, null, context, parent, cardinality, metadata);
+ super(name, factories, filters, keyed, null, aggCtx, parent, cardinality, metadata);
}
/**
@@ -237,7 +237,7 @@ protected LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCt
collectCount(aggCtx.getLeafReaderContext(), live);
} else {
segmentsCollected++;
- collectSubs(aggCtx.getLeafReaderContext(), live, sub);
+ collectSubs(aggCtx, live, sub);
}
return LeafBucketCollector.NO_OP_COLLECTOR;
}
@@ -273,7 +273,7 @@ private void collectCount(LeafReaderContext ctx, Bits live) throws IOException {
* less memory because there isn't a need to buffer a block of matches.
* And its a hell of a lot less code.
*/
- private void collectSubs(LeafReaderContext ctx, Bits live, LeafBucketCollector sub) throws IOException {
+ private void collectSubs(AggregationExecutionContext aggCtx, Bits live, LeafBucketCollector sub) throws IOException {
class MatchCollector implements LeafCollector {
LeafBucketCollector subCollector = sub;
int filterOrd;
@@ -287,11 +287,11 @@ public void collect(int docId) throws IOException {
public void setScorer(Scorable scorer) throws IOException {}
}
MatchCollector collector = new MatchCollector();
- filters().get(0).collect(ctx, collector, live);
+ filters().get(0).collect(aggCtx.getLeafReaderContext(), collector, live);
for (int filterOrd = 1; filterOrd < filters().size(); filterOrd++) {
- collector.subCollector = collectableSubAggregators.getLeafCollector(ctx);
+ collector.subCollector = collectableSubAggregators.getLeafCollector(aggCtx);
collector.filterOrd = filterOrd;
- filters().get(filterOrd).collect(ctx, collector, live);
+ filters().get(filterOrd).collect(aggCtx.getLeafReaderContext(), collector, live);
}
}
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregator.java
index 10755ca2acd9f..00dbb470b4978 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregator.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregator.java
@@ -170,12 +170,12 @@ protected FilterByFilterAggregator adapt(
List<QueryToFilterAdapter> filters,
boolean keyed,
String otherBucketKey,
- AggregationContext context,
+ AggregationContext aggCtx,
Aggregator parent,
CardinalityUpperBound cardinality,
Map<String, Object> metadata
) throws IOException {
- super(name, factories, context, parent, cardinality.multiply(filters.size() + (otherBucketKey == null ? 0 : 1)), metadata);
+ super(name, factories, aggCtx, parent, cardinality.multiply(filters.size() + (otherBucketKey == null ? 0 : 1)), metadata);
this.filters = List.copyOf(filters);
this.keyed = keyed;
this.otherBucketKey = otherBucketKey;
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/BestDocsDeferringCollector.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/BestDocsDeferringCollector.java
index a50db167bf06c..5bca7718c9e2a 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/BestDocsDeferringCollector.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/BestDocsDeferringCollector.java
@@ -7,7 +7,6 @@
*/
package org.elasticsearch.search.aggregations.bucket.sampler;
-import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.CollectionTerminatedException;
import org.apache.lucene.search.LeafCollector;
import org.apache.lucene.search.Scorable;
@@ -79,7 +78,7 @@ public void setDeferredCollector(Iterable<BucketCollector> deferredCollectors) {
@Override
public LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCtx) throws IOException {
- perSegCollector = new PerSegmentCollects(aggCtx.getLeafReaderContext());
+ perSegCollector = new PerSegmentCollects(aggCtx);
entries.add(perSegCollector);
// Deferring collector
@@ -164,7 +163,7 @@ class PerParentBucketSamples {
private long parentBucket;
private int matchedDocs;
- PerParentBucketSamples(long parentBucket, Scorable scorer, LeafReaderContext readerContext) {
+ PerParentBucketSamples(long parentBucket, Scorable scorer, AggregationExecutionContext aggCtx) {
try {
this.parentBucket = parentBucket;
@@ -172,7 +171,7 @@ class PerParentBucketSamples {
circuitBreakerConsumer.accept((long) shardSize * getPriorityQueueSlotSize());
tdc = createTopDocsCollector(shardSize);
- currentLeafCollector = tdc.getLeafCollector(readerContext);
+ currentLeafCollector = tdc.getLeafCollector(aggCtx.getLeafReaderContext());
setScorer(scorer);
} catch (IOException e) {
throw new ElasticsearchException("IO error creating collector", e);
@@ -201,8 +200,8 @@ public void setScorer(Scorable scorer) throws IOException {
currentLeafCollector.setScorer(scorer);
}
- public void changeSegment(LeafReaderContext readerContext) throws IOException {
- currentLeafCollector = tdc.getLeafCollector(readerContext);
+ public void changeSegment(AggregationExecutionContext aggCtx) throws IOException {
+ currentLeafCollector = tdc.getLeafCollector(aggCtx.getLeafReaderContext());
}
public int getDocCount() {
@@ -211,24 +210,24 @@ public int getDocCount() {
}
class PerSegmentCollects extends Scorable {
- private LeafReaderContext readerContext;
+ private AggregationExecutionContext aggCtx;
int maxDocId = Integer.MIN_VALUE;
private float currentScore;
private int currentDocId = -1;
private Scorable currentScorer;
- PerSegmentCollects(LeafReaderContext readerContext) throws IOException {
+ PerSegmentCollects(AggregationExecutionContext aggCtx) throws IOException {
// The publisher behaviour for Reader/Scorer listeners triggers a
// call to this constructor with a null scorer so we can't call
// scorer.getWeight() and pass the Weight to our base class.
// However, passing null seems to have no adverse effects here...
- this.readerContext = readerContext;
+ this.aggCtx = aggCtx;
for (int i = 0; i < perBucketSamples.size(); i++) {
PerParentBucketSamples perBucketSample = perBucketSamples.get(i);
if (perBucketSample == null) {
continue;
}
- perBucketSample.changeSegment(readerContext);
+ perBucketSample.changeSegment(aggCtx);
}
}
@@ -245,7 +244,7 @@ public void setScorer(Scorable scorer) throws IOException {
public void replayRelatedMatches(List<ScoreDoc> sd) throws IOException {
try {
- final LeafBucketCollector leafCollector = deferred.getLeafCollector(readerContext);
+ final LeafBucketCollector leafCollector = deferred.getLeafCollector(aggCtx);
leafCollector.setScorer(this);
currentScore = 0;
@@ -256,7 +255,7 @@ public void replayRelatedMatches(List<ScoreDoc> sd) throws IOException {
for (ScoreDoc scoreDoc : sd) {
// Doc ids from TopDocCollector are root-level Reader so
// need rebasing
- int rebased = scoreDoc.doc - readerContext.docBase;
+ int rebased = scoreDoc.doc - aggCtx.getLeafReaderContext().docBase;
if ((rebased >= 0) && (rebased <= maxDocId)) {
currentScore = scoreDoc.score;
currentDocId = rebased;
@@ -285,7 +284,7 @@ public void collect(int docId, long parentBucket) throws IOException {
perBucketSamples = bigArrays.grow(perBucketSamples, parentBucket + 1);
PerParentBucketSamples sampler = perBucketSamples.get((int) parentBucket);
if (sampler == null) {
- sampler = new PerParentBucketSamples(parentBucket, currentScorer, readerContext);
+ sampler = new PerParentBucketSamples(parentBucket, currentScorer, aggCtx);
perBucketSamples.set((int) parentBucket, sampler);
}
sampler.collect(docId);
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/package-info.java b/server/src/main/java/org/elasticsearch/search/aggregations/package-info.java
index 071e1dc9bfdee..b64843e62ee53 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/package-info.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/package-info.java
@@ -33,7 +33,7 @@
* of the appropriate type, which in turn builds the
* {@link org.elasticsearch.search.aggregations.Aggregator} for that node. This
* collects the data from that shard, via
- * {@link org.elasticsearch.search.aggregations.Aggregator#getLeafCollector(org.apache.lucene.index.LeafReaderContext)}
+ * {@link org.elasticsearch.search.aggregations.Aggregator#getLeafCollector(AggregationExecutionContext)}
* more or less. These values are shipped back to the coordinating node, which
* performs the reduction on them (partial reductions in place on the data nodes
* are also possible).
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationContext.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationContext.java
index cfd48598132d8..9170afc1af03a 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationContext.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationContext.java
@@ -34,7 +34,9 @@
import org.elasticsearch.index.query.support.NestedScope;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptContext;
+import org.elasticsearch.search.aggregations.AggregationExecutionContext;
import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.aggregations.BucketCollector;
import org.elasticsearch.search.aggregations.MultiBucketConsumerService.MultiBucketConsumer;
import org.elasticsearch.search.aggregations.bucket.filter.FilterByFilterAggregator;
import org.elasticsearch.search.internal.SubSearchContext;
@@ -289,7 +291,7 @@ public final AggregationUsageService getUsageService() {
* Return true if any of the aggregations in this context is a time-series aggregation that requires an in-sort order execution.
*
* A side-effect of such execution is that all leaves are walked simultaneously and therefore we can no longer rely on
- * {@link org.elasticsearch.search.aggregations.BucketCollector#getLeafCollector(LeafReaderContext)} to be called only after the
+ * {@link BucketCollector#getLeafCollector(AggregationExecutionContext)} to be called only after the
* previous leaf was fully collected.
*/
public abstract boolean isInSortOrderExecutionRequired();
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/MultiBucketCollectorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/MultiBucketCollectorTests.java
index 8472e56bc568f..773466716dff0 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/MultiBucketCollectorTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/MultiBucketCollectorTests.java
@@ -213,7 +213,7 @@ public void testNotTerminated() throws IOException {
for (Map.Entry<TotalHitCountBucketCollector, Integer> expectedCount : expectedCounts.entrySet()) {
shouldNoop &= expectedCount.getValue().intValue() <= expectedCount.getKey().getTotalHits();
}
- LeafBucketCollector collector = wrapped.getLeafCollector(ctx);
+ LeafBucketCollector collector = wrapped.getLeafCollector(new AggregationExecutionContext(ctx, null, null));
assertThat(collector.isNoop(), equalTo(shouldNoop));
if (false == collector.isNoop()) {
for (int docId = 0; docId < ctx.reader().numDocs(); docId++) {
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregatorTests.java
index ba00ca545f376..86ac87363463b 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregatorTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregatorTests.java
@@ -20,6 +20,7 @@
import org.elasticsearch.index.query.MatchAllQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.aggregations.AggregationExecutionContext;
import org.elasticsearch.search.aggregations.Aggregator.BucketComparator;
import org.elasticsearch.search.aggregations.AggregatorTestCase;
import org.elasticsearch.search.aggregations.LeafBucketCollector;
@@ -109,7 +110,9 @@ public void testBucketComparator() throws IOException {
FilterAggregationBuilder builder = new FilterAggregationBuilder("test", new MatchAllQueryBuilder());
FilterAggregator agg = createAggregator(builder, indexSearcher, fieldType);
agg.preCollection();
- LeafBucketCollector collector = agg.getLeafCollector(indexReader.leaves().get(0));
+ LeafBucketCollector collector = agg.getLeafCollector(
+ new AggregationExecutionContext(indexReader.leaves().get(0), null, null)
+ );
collector.collect(0, 0);
collector.collect(0, 0);
collector.collect(0, 1);
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregatorTests.java
index fda7b637d3b1f..ad80997a1d588 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregatorTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregatorTests.java
@@ -62,6 +62,7 @@
import org.elasticsearch.index.query.TermQueryBuilder;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.search.aggregations.AggregationBuilder;
+import org.elasticsearch.search.aggregations.AggregationExecutionContext;
import org.elasticsearch.search.aggregations.AggregationReduceContext;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorTestCase;
@@ -1611,7 +1612,7 @@ protected List<ObjectMapper> objectMappers() {
private Map<String, Object> collectAndGetFilterDebugInfo(IndexSearcher searcher, Aggregator aggregator) throws IOException {
aggregator.preCollection();
for (LeafReaderContext ctx : searcher.getIndexReader().leaves()) {
- LeafBucketCollector leafCollector = aggregator.getLeafCollector(ctx);
+ LeafBucketCollector leafCollector = aggregator.getLeafCollector(new AggregationExecutionContext(ctx, null, null));
assertTrue(leafCollector.isNoop());
}
Map<String, Object> debug = new HashMap<>();
diff --git a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregatorTests.java b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregatorTests.java
index efa7e591464ea..77c8fd7e27c8e 100644
--- a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregatorTests.java
+++ b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregatorTests.java
@@ -50,6 +50,7 @@
import org.elasticsearch.script.ScriptType;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.aggregations.AggregationBuilder;
+import org.elasticsearch.search.aggregations.AggregationExecutionContext;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorTestCase;
import org.elasticsearch.search.aggregations.CardinalityUpperBound;
@@ -355,7 +356,9 @@ public void testTonsOfBucketsTriggersBreaker() throws IOException {
Aggregator aggregator = builder.build(context, null).create(null, CardinalityUpperBound.ONE);
aggregator.preCollection();
assertThat(indexReader.leaves(), hasSize(1));
- LeafBucketCollector leaf = aggregator.getLeafCollector(indexReader.leaves().get(0));
+ LeafBucketCollector leaf = aggregator.getLeafCollector(
+ new AggregationExecutionContext(indexReader.leaves().get(0), null, null)
+ );
/*
* Collect some number of buckets that we *know* fit in the