"Fossies" - the Fresh Open Source Software Archive  

Source code changes of the file "core/src/java/org/apache/lucene/index/IndexWriter.java" between
lucene-7.6.0-src.tgz and lucene-7.7.0-src.tgz

About: Lucene is a Java full-text search engine, provided not as a complete application but as a code library and API (Java source code).

--- IndexWriter.java (lucene-7.6.0-src.tgz)
+++ IndexWriter.java (lucene-7.7.0-src.tgz)

skipping to change at line 71 (7.6.0) / line 71 (7.7.0)

 import org.apache.lucene.store.LockObtainFailedException;
 import org.apache.lucene.store.LockValidatingDirectoryWrapper;
 import org.apache.lucene.store.MMapDirectory;
 import org.apache.lucene.store.MergeInfo;
 import org.apache.lucene.store.TrackingDirectoryWrapper;
 import org.apache.lucene.util.Accountable;
 import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.Constants;
+import org.apache.lucene.util.Counter;
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.InfoStream;
 import org.apache.lucene.util.StringHelper;
 import org.apache.lucene.util.ThreadInterruptedException;
 import org.apache.lucene.util.UnicodeUtil;
 import org.apache.lucene.util.Version;

 import static org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS;

 /**
skipping to change at line 784 (7.6.0) / line 785 (7.7.0)

         throw new IllegalArgumentException("cannot use IndexWriterConfig.setIndexCommit() with OpenMode.CREATE");
       } else {
         throw new IllegalArgumentException("cannot use IndexWriterConfig.setIndexCommit() when index has no commit");
       }
     }

     // Try to read first.  This is to allow create
     // against an index that's currently open for
     // searching.  In this case we write the next
     // segments_N file with no segments:
-    final SegmentInfos sis = new SegmentInfos(Version.LATEST.major);
+    final SegmentInfos sis = new SegmentInfos(config.getIndexCreatedVersionMajor());
     if (indexExists) {
       final SegmentInfos previous = SegmentInfos.readLatestCommit(directory);
       sis.updateGenerationVersionAndCounter(previous);
     }
     segmentInfos = sis;
     rollbackSegments = segmentInfos.createBackupSegmentInfos();

     // Record that we have a change (zero out all
     // segments) pending:
     changed();
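
On the 7.6.0 side a newly created index is always stamped with Version.LATEST.major; on the 7.7.0 side it honors the created-version major recorded in the IndexWriterConfig. A minimal sketch of how a caller might exercise this, assuming the experimental IndexWriterConfig#setIndexCreatedVersionMajor setter that pairs with the getter used above (the setter name is an assumption, not confirmed by this diff):

    import java.nio.file.Files;
    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;
    import org.apache.lucene.util.Version;

    public class CreatedVersionSketch {
      public static void main(String[] args) throws Exception {
        try (Directory dir = FSDirectory.open(Files.createTempDirectory("idx"))) {
          IndexWriterConfig config = new IndexWriterConfig(new StandardAnalyzer());
          config.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
          // Assumed experimental API: record the previous major as this index's created version.
          config.setIndexCreatedVersionMajor(Version.LATEST.major - 1);
          try (IndexWriter writer = new IndexWriter(dir, config)) {
            writer.commit(); // the new segments_N carries the configured created-version
          }
        }
      }
    }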
skipping to change at line 1126 (7.6.0) / line 1127 (7.7.0)

   /** Returns the analyzer used by this index. */
   public Analyzer getAnalyzer() {
     ensureOpen();
     return analyzer;
   }

   /** Returns total number of docs in this index, including
    *  docs not yet flushed (still in the RAM buffer),
    *  not counting deletions.
-   *  @see #numDocs */
+   *  @see #numDocs
+   *  @deprecated use {@link #getDocStats()} instead
+   * */
+  @Deprecated
   public synchronized int maxDoc() {
     ensureOpen();
     return docWriter.getNumDocs() + segmentInfos.totalMaxDoc();
   }

   /** If {@link SegmentInfos#getVersion} is below {@code newVersion} then update it to this value.
    *
    * @lucene.internal */
   public synchronized void advanceSegmentInfosVersion(long newVersion) {
     ensureOpen();
skipping to change at line 1148 (7.6.0) / line 1152 (7.7.0)

       segmentInfos.setVersion(newVersion);
     }
     changed();
   }

   /** Returns total number of docs in this index, including
    *  docs not yet flushed (still in the RAM buffer), and
    *  including deletions.  <b>NOTE:</b> buffered deletions
    *  are not counted.  If you really need these to be
    *  counted you should call {@link #commit()} first.
-   *  @see #numDocs */
+   *  @see #maxDoc
+   *  @deprecated use {@link #getDocStats()} instead
+   * */
+  @Deprecated
   public synchronized int numDocs() {
     ensureOpen();
     int count = docWriter.getNumDocs();
     for (final SegmentCommitInfo info : segmentInfos) {
       count += info.info.maxDoc() - numDeletedDocs(info);
     }
     return count;
   }

   /**
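
Both counters above are deprecated in 7.7.0 because calling them separately is racy: documents can be added or dropped between the two calls, so numDocs() can even exceed maxDoc(). A minimal migration sketch to the new synchronized snapshot (writer is any open IndexWriter):

    import org.apache.lucene.index.IndexWriter;

    final class DocStatsMigration {
      // Before (7.6.0): two separate reads that concurrent updates can interleave.
      static int[] racyCounts(IndexWriter writer) {
        return new int[] { writer.maxDoc(), writer.numDocs() };
      }

      // After (7.7.0): one snapshot, so maxDoc >= numDocs is guaranteed to hold.
      static IndexWriter.DocStats consistentCounts(IndexWriter writer) {
        return writer.getDocStats();
      }
    }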
skipping to change at line 2597 (7.6.0) / line 2604 (7.7.0)

     changeCount.incrementAndGet();
     segmentInfos.changed();
   }

   synchronized long publishFrozenUpdates(FrozenBufferedUpdates packet) {
     assert packet != null && packet.any();
     long nextGen = bufferedUpdatesStream.push(packet);
     // Do this as an event so it applies higher in the stack when we are not holding DocumentsWriterFlushQueue.purgeLock:
     eventQueue.add(w -> {
       try {
-        packet.apply(w);
+        // we call tryApply here since we don't want to block if a refresh or a flush is already applying the
+        // packet. The flush will retry this packet anyway to ensure all of them are applied
+        packet.tryApply(w);
       } catch (Throwable t) {
         try {
           w.onTragicEvent(t, "applyUpdatesPacket");
         } catch (Throwable t1) {
           t.addSuppressed(t1);
         }
         throw t;
       }
       w.flushDeletesCount.incrementAndGet();
     });
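
Switching from apply to tryApply turns a blocking wait into a best-effort attempt: if a concurrent refresh or flush already owns the packet, the event simply skips it, and the eventual flush retries every unapplied packet. A generic sketch of this pattern with hypothetical names (not the actual FrozenBufferedUpdates internals):

    import java.util.concurrent.locks.ReentrantLock;

    final class RetryablePacket {
      private final ReentrantLock applyLock = new ReentrantLock();
      private boolean applied; // guarded by applyLock

      // Best-effort: bail out immediately if another thread is already applying.
      boolean tryApply(Runnable work) {
        if (!applyLock.tryLock()) {
          return false; // someone else is on it; a later flush retries this packet
        }
        try {
          if (!applied) {
            work.run();
            applied = true;
          }
          return true;
        } finally {
          applyLock.unlock();
        }
      }
    }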
skipping to change at line 4341 (7.6.0) / line 4350 (7.7.0)

         release(rld);
         if (drop) {
           readerPool.drop(rld.info);
         }
       });
     } finally {
       Collections.fill(merge.readers, null);
     }
   }

+  private void countSoftDeletes(CodecReader reader, Bits wrappedLiveDocs, Bits hardLiveDocs, Counter softDeleteCounter,
+                                Counter hardDeleteCounter) throws IOException {
+    int hardDeleteCount = 0;
+    int softDeletesCount = 0;
+    DocIdSetIterator softDeletedDocs = DocValuesFieldExistsQuery.getDocValuesDocIdSetIterator(config.getSoftDeletesField(), reader);
+    if (softDeletedDocs != null) {
+      int docId;
+      while ((docId = softDeletedDocs.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
+        if (wrappedLiveDocs == null || wrappedLiveDocs.get(docId)) {
+          if (hardLiveDocs == null || hardLiveDocs.get(docId)) {
+            softDeletesCount++;
+          } else {
+            hardDeleteCount++;
+          }
+        }
+      }
+    }
+    softDeleteCounter.addAndGet(softDeletesCount);
+    hardDeleteCounter.addAndGet(hardDeleteCount);
+  }
+
+  private boolean assertSoftDeletesCount(CodecReader reader, int expectedCount) throws IOException {
+    Counter count = Counter.newCounter(false);
+    Counter hardDeletes = Counter.newCounter(false);
+    countSoftDeletes(reader, reader.getLiveDocs(), null, count, hardDeletes);
+    assert count.get() == expectedCount : "soft-deletes count mismatch expected: " + expectedCount + " but actual: " + count.get();
+    return true;
+  }
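
assertSoftDeletesCount always returns true and is only invoked behind an assert, so the whole recount runs solely when assertions are enabled (java -ea) and costs nothing in production. A standalone illustration of the idiom with made-up names:

    final class AssertionIdiom {
      // Expensive verification packaged so it executes only under 'java -ea'.
      private static boolean sumInvariantHolds(int expected, int[] data) {
        int sum = 0;
        for (int v : data) {
          sum += v;
        }
        assert sum == expected : "invariant broken: expected " + expected + " but got " + sum;
        return true; // always true, so the outer assert never fires on its own
      }

      static void useData(int expectedSum, int[] data) {
        assert sumInvariantHolds(expectedSum, data); // compiled to a no-op without -ea
        // ... proceed with the data ...
      }
    }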
   /** Does the actual (time-consuming) work of the merge,
    *  but without holding synchronized lock on IndexWriter
    *  instance */
   private int mergeMiddle(MergePolicy.OneMerge merge, MergePolicy mergePolicy) throws IOException {
     merge.checkAborted();

     Directory mergeDirectory = config.getMergeScheduler().wrapForMerge(merge, directory);
     List<SegmentCommitInfo> sourceSegments = merge.segments;

     IOContext context = new IOContext(merge.getStoreMergeInfo());

skipping to change at line 4389 (7.6.0) / line 4428 (7.7.0)

           infoStream.message("IW", "seg=" + segString(info) + " reader=" + reader);
         }
         merge.hardLiveDocs.add(mr.hardLiveDocs);
         merge.readers.add(reader);
         segUpto++;
       }

       // Let the merge wrap readers
       List<CodecReader> mergeReaders = new ArrayList<>();
-      int softDeleteCount = 0;
+      Counter softDeleteCount = Counter.newCounter(false);
       for (int r = 0; r < merge.readers.size(); r++) {
         SegmentReader reader = merge.readers.get(r);
         CodecReader wrappedReader = merge.wrapForMerge(reader);
         validateMergeReader(wrappedReader);
         if (softDeletesEnabled) {
           if (reader != wrappedReader) { // if we don't have a wrapped reader we won't preserve any soft-deletes
             Bits hardLiveDocs = merge.hardLiveDocs.get(r);
-            Bits wrappedLiveDocs = wrappedReader.getLiveDocs();
-            int hardDeleteCount = 0;
-            DocIdSetIterator softDeletedDocs = DocValuesFieldExistsQuery.getDocValuesDocIdSetIterator(config.getSoftDeletesField(), wrappedReader);
-            if (softDeletedDocs != null) {
-              int docId;
-              while ((docId = softDeletedDocs.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
-                if (wrappedLiveDocs == null || wrappedLiveDocs.get(docId)) {
-                  if (hardLiveDocs == null || hardLiveDocs.get(docId)) {
-                    softDeleteCount++;
-                  } else {
-                    hardDeleteCount++;
-                  }
-                }
-              }
-            }
-            // Wrap the wrapped reader again if we have excluded some hard-deleted docs
-            if (hardLiveDocs != null && hardDeleteCount > 0) {
-              Bits liveDocs = wrappedLiveDocs == null ? hardLiveDocs : new Bits() {
-                @Override
-                public boolean get(int index) {
-                  return hardLiveDocs.get(index) && wrappedLiveDocs.get(index);
-                }
-
-                @Override
-                public int length() {
-                  return hardLiveDocs.length();
-                }
-              };
-              wrappedReader = FilterCodecReader.wrapLiveDocs(wrappedReader, liveDocs, wrappedReader.numDocs() - hardDeleteCount);
-            }
+            if (hardLiveDocs != null) { // we only need to do this accounting if we have mixed deletes
+              Bits wrappedLiveDocs = wrappedReader.getLiveDocs();
+              Counter hardDeleteCounter = Counter.newCounter(false);
+              countSoftDeletes(wrappedReader, wrappedLiveDocs, hardLiveDocs, softDeleteCount, hardDeleteCounter);
+              int hardDeleteCount = Math.toIntExact(hardDeleteCounter.get());
+              // Wrap the wrapped reader again if we have excluded some hard-deleted docs
+              if (hardDeleteCount > 0) {
+                Bits liveDocs = wrappedLiveDocs == null ? hardLiveDocs : new Bits() {
+                  @Override
+                  public boolean get(int index) {
+                    return hardLiveDocs.get(index) && wrappedLiveDocs.get(index);
+                  }
+
+                  @Override
+                  public int length() {
+                    return hardLiveDocs.length();
+                  }
+                };
+                wrappedReader = FilterCodecReader.wrapLiveDocs(wrappedReader, liveDocs, wrappedReader.numDocs() - hardDeleteCount);
+              }
+            } else {
+              final int carryOverSoftDeletes = reader.getSegmentInfo().getSoftDelCount() - wrappedReader.numDeletedDocs();
+              assert carryOverSoftDeletes >= 0 : "carry-over soft-deletes must be positive";
+              assert assertSoftDeletesCount(wrappedReader, carryOverSoftDeletes);
+              softDeleteCount.addAndGet(carryOverSoftDeletes);
+            }
           }
         }
         mergeReaders.add(wrappedReader);
       }

       final SegmentMerger merger = new SegmentMerger(mergeReaders,
                                                      merge.info.info, infoStream, dirWrapper,
                                                      globalFieldNumberMap,
                                                      context);
-      merge.info.setSoftDelCount(softDeleteCount);
+      merge.info.setSoftDelCount(Math.toIntExact(softDeleteCount.get()));
       merge.checkAborted();

       merge.mergeStartNS = System.nanoTime();

       // This is where all the work happens:
       if (merger.shouldMerge()) {
         merger.merge();
       }

       MergeState mergeState = merger.mergeState;
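
The anonymous Bits in the new code masks the wrapped reader's live docs with the pre-existing hard deletes, so a document counts as live only if both views agree. A standalone sketch of that intersection, using FixedBitSet (which implements Bits) for the two inputs:

    import org.apache.lucene.util.Bits;
    import org.apache.lucene.util.FixedBitSet;

    final class IntersectBits {
      // Live only if live in BOTH inputs, mirroring the anonymous Bits above.
      static Bits intersection(Bits a, Bits b) {
        return new Bits() {
          @Override
          public boolean get(int index) {
            return a.get(index) && b.get(index);
          }

          @Override
          public int length() {
            return a.length();
          }
        };
      }

      public static void main(String[] args) {
        FixedBitSet hardLive = new FixedBitSet(4);
        FixedBitSet softLive = new FixedBitSet(4);
        hardLive.set(0); hardLive.set(1); // docs 0 and 1 survive hard deletes
        softLive.set(1); softLive.set(2); // docs 1 and 2 survive soft deletes
        Bits live = intersection(hardLive, softLive);
        System.out.println(live.get(1)); // true: live in both views
        System.out.println(live.get(0)); // false: soft-deleted
      }
    }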
skipping to change at line 5250 (7.6.0) / line 5286 (7.7.0)

   /** Returns an unmodifiable view of the list of all segments of the current segmentInfos */
   final synchronized List<SegmentCommitInfo> listOfSegmentCommitInfos() {
     return segmentInfos.asList();
   }

   /** Tests should use this method to snapshot the current segmentInfos to have a consistent view */
   final synchronized SegmentInfos cloneSegmentInfos() {
     return segmentInfos.clone();
   }

+  /**
+   * Returns accurate {@link DocStats} for this writer. This is equivalent to calling {@link #numDocs()} and {@link #maxDoc()}
+   * but is not subject to race-conditions. numDocs, for instance, can change after maxDoc is fetched, causing numDocs to be
+   * greater than maxDoc, which makes it hard to get accurate document stats from IndexWriter.
+   */
+  public synchronized DocStats getDocStats() {
+    ensureOpen();
+    int numDocs = docWriter.getNumDocs();
+    int maxDoc = numDocs;
+    for (final SegmentCommitInfo info : segmentInfos) {
+      maxDoc += info.info.maxDoc();
+      numDocs += info.info.maxDoc() - numDeletedDocs(info);
+    }
+    assert maxDoc >= numDocs : "maxDoc is less than numDocs: " + maxDoc + " < " + numDocs;
+    return new DocStats(maxDoc, numDocs);
+  }
+
+  /**
+   * DocStats for this index
+   */
+  public static final class DocStats {
+    /**
+     * The total number of docs in this index, including
+     * docs not yet flushed (still in the RAM buffer),
+     * not counting deletions.
+     */
+    public final int maxDoc;
+    /**
+     * The total number of docs in this index, including
+     * docs not yet flushed (still in the RAM buffer), and
+     * including deletions. <b>NOTE:</b> buffered deletions
+     * are not counted. If you really need these to be
+     * counted you should call {@link IndexWriter#commit()} first.
+     */
+    public final int numDocs;
+
+    private DocStats(int maxDoc, int numDocs) {
+      this.maxDoc = maxDoc;
+      this.numDocs = numDocs;
+    }
+  }
 }
End of changes. 12 change blocks. 37 lines changed or deleted, 130 lines changed or added.
