"Fossies" - the Fresh Open Source Software Archive  

Source code changes of the file "server/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java" between
elasticsearch-6.8.2-src.tar.gz and elasticsearch-6.8.3-src.tar.gz

About: Elasticsearch is a distributed, RESTful search engine built on top of Apache Lucene. Source package (GitHub).

--- BestBucketsDeferringCollector.java  (elasticsearch-6.8.2-src)
+++ BestBucketsDeferringCollector.java  (elasticsearch-6.8.3-src)

skipping to change at line 23 (6.8.2) / line 23 (6.8.3)
  * software distributed under the License is distributed on an
  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
  * KIND, either express or implied. See the License for the
  * specific language governing permissions and limitations
  * under the License.
  */
 package org.elasticsearch.search.aggregations.bucket;
 import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.search.CollectionTerminatedException;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.Weight;
 import org.apache.lucene.util.packed.PackedInts;
 import org.apache.lucene.util.packed.PackedLongValues;
 import org.elasticsearch.common.util.BigArrays;
 import org.elasticsearch.common.util.LongHash;
 import org.elasticsearch.search.aggregations.Aggregator;
skipping to change at line 163 (6.8.2) / line 164 (6.8.3)
         }
         this.selectedBuckets = hash;
         boolean needsScores = needsScores();
         Weight weight = null;
         if (needsScores) {
             Query query = isGlobal ? new MatchAllDocsQuery() : searchContext.query();
             weight = searchContext.searcher().createNormalizedWeight(query, true);
         }
         for (Entry entry : entries) {
-            final LeafBucketCollector leafCollector = collector.getLeafCollector(entry.context);
-            DocIdSetIterator docIt = null;
-            if (needsScores && entry.docDeltas.size() > 0) {
-                Scorer scorer = weight.scorer(entry.context);
-                // We don't need to check if the scorer is null
-                // since we are sure that there are documents to replay (entry.docDeltas it not empty).
-                docIt = scorer.iterator();
-                leafCollector.setScorer(scorer);
-            }
-            final PackedLongValues.Iterator docDeltaIterator = entry.docDeltas.iterator();
-            final PackedLongValues.Iterator buckets = entry.buckets.iterator();
-            int doc = 0;
-            for (long i = 0, end = entry.docDeltas.size(); i < end; ++i) {
-                doc += docDeltaIterator.next();
-                final long bucket = buckets.next();
-                final long rebasedBucket = hash.find(bucket);
-                if (rebasedBucket != -1) {
-                    if (needsScores) {
-                        if (docIt.docID() < doc) {
-                            docIt.advance(doc);
-                        }
-                        // aggregations should only be replayed on matching documents
-                        assert docIt.docID() == doc;
-                    }
-                    leafCollector.collect(doc, rebasedBucket);
-                }
-            }
+            try {
+                final LeafBucketCollector leafCollector = collector.getLeafCollector(entry.context);
+                DocIdSetIterator docIt = null;
+                if (needsScores && entry.docDeltas.size() > 0) {
+                    Scorer scorer = weight.scorer(entry.context);
+                    // We don't need to check if the scorer is null
+                    // since we are sure that there are documents to replay (entry.docDeltas it not empty).
+                    docIt = scorer.iterator();
+                    leafCollector.setScorer(scorer);
+                }
+                final PackedLongValues.Iterator docDeltaIterator = entry.docDeltas.iterator();
+                final PackedLongValues.Iterator buckets = entry.buckets.iterator();
+                int doc = 0;
+                for (long i = 0, end = entry.docDeltas.size(); i < end; ++i) {
+                    doc += docDeltaIterator.next();
+                    final long bucket = buckets.next();
+                    final long rebasedBucket = hash.find(bucket);
+                    if (rebasedBucket != -1) {
+                        if (needsScores) {
+                            if (docIt.docID() < doc) {
+                                docIt.advance(doc);
+                            }
+                            // aggregations should only be replayed on matching documents
+                            assert docIt.docID() == doc;
+                        }
+                        leafCollector.collect(doc, rebasedBucket);
+                    }
+                }
+            } catch (CollectionTerminatedException e) {
+                // collection was terminated prematurely
+                // continue with the following leaf
+            }
         }
         collector.postCollection();
     }
     /**
      * Wrap the provided aggregator so that it behaves (almost) as if it had
      * been collected directly.
      */
End of changes. 5 change blocks. 27 lines changed or deleted, 34 lines changed or added.
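
The functional change in this diff is the import of org.apache.lucene.search.CollectionTerminatedException together with the new try/catch around the per-segment replay loop: an early-termination exception raised while replaying one leaf no longer aborts the whole replay, and the deferring collector moves on to the following leaf. The sketch below illustrates that per-leaf pattern in isolation; it is a minimal illustration, not the Elasticsearch implementation, and LeafReplay, replayAll and PerLeafReplaySketch are hypothetical names introduced only for this example.

import org.apache.lucene.search.CollectionTerminatedException;
import java.util.List;

class PerLeafReplaySketch {

    /** Hypothetical stand-in for one recorded segment's worth of deferred documents. */
    interface LeafReplay {
        // may throw CollectionTerminatedException (a RuntimeException in Lucene)
        void replay();
    }

    /** Replay every leaf; early termination of one leaf does not stop the others. */
    static void replayAll(List<LeafReplay> leaves) {
        for (LeafReplay leaf : leaves) {
            try {
                leaf.replay();
            } catch (CollectionTerminatedException e) {
                // collection was terminated prematurely for this leaf;
                // continue with the following leaf
            }
        }
    }
}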
