-
Notifications
You must be signed in to change notification settings - Fork 682
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
A new stat for bytes read off the disk #1702
Changes from 16 commits
fd60128
6d99c50
40e12f9
bb8fd0f
00d9e54
6a6bca4
3d77ee3
a950955
230159d
54590cc
14beae4
b1ef31f
342884d
4181e23
aae6b86
05c5ea1
6284a48
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -387,13 +387,23 @@ func (s *Scorch) Batch(batch *index.Batch) (err error) { | |
analysisResults := make([]index.Document, int(numUpdates)) | ||
var itemsDeQueued uint64 | ||
var totalAnalysisSize int | ||
analysisBytes := func(tokMap index.TokenFrequencies) (rv uint64) { | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Should we add a flag here to calculate this only when the flag is set? Feel like this is something we don't always necessarily need to do - hope none of this calculation shows up in CPU profiles? There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Not sure if this is enough, we have to record copies - stored values and doc values. Also term vectors. I don't see you doing this in your zap PR. There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Just to close the thread, these two changes are going to be included in upcoming PRs. |
||
for k := range tokMap { | ||
rv += uint64(len(k)) | ||
} | ||
return rv | ||
} | ||
for itemsDeQueued < numUpdates { | ||
result := <-resultChan | ||
resultSize := result.Size() | ||
atomic.AddUint64(&s.iStats.analysisBytesAdded, uint64(resultSize)) | ||
totalAnalysisSize += resultSize | ||
analysisResults[itemsDeQueued] = result | ||
itemsDeQueued++ | ||
result.VisitFields(func(f index.Field) { | ||
atomic.AddUint64(&s.stats.TotBytesIndexedAfterAnalysis, | ||
analysisBytes(f.AnalyzedTokenFrequencies())) | ||
}) | ||
} | ||
close(resultChan) | ||
defer atomic.AddUint64(&s.iStats.analysisBytesRemoved, uint64(totalAnalysisSize)) | ||
|
@@ -525,6 +535,10 @@ func (s *Scorch) Stats() json.Marshaler { | |
return &s.stats | ||
} | ||
|
||
// BytesReadQueryTime returns the cumulative number of bytes read off the
// disk while serving queries (the TotBytesReadAtQueryTime stat).
func (s *Scorch) BytesReadQueryTime() uint64 { ||
return s.stats.TotBytesReadAtQueryTime | ||
} ||
|
||
func (s *Scorch) diskFileStats(rootSegmentPaths map[string]struct{}) (uint64, | ||
uint64, uint64) { | ||
var numFilesOnDisk, numBytesUsedDisk, numBytesOnDiskByRoot uint64 | ||
|
@@ -582,7 +596,9 @@ func (s *Scorch) StatsMap() map[string]interface{} { | |
m["index_time"] = m["TotIndexTime"] | ||
m["term_searchers_started"] = m["TotTermSearchersStarted"] | ||
m["term_searchers_finished"] = m["TotTermSearchersFinished"] | ||
m["num_bytes_read_at_query_time"] = m["TotBytesReadAtQueryTime"] | ||
m["num_plain_text_bytes_indexed"] = m["TotIndexedPlainTextBytes"] | ||
m["num_bytes_indexed_after_analysis"] = m["TotBytesIndexedAfterAnalysis"] | ||
m["num_items_introduced"] = m["TotIntroducedItems"] | ||
m["num_items_persisted"] = m["TotPersistedItems"] | ||
m["num_recs_to_persist"] = m["TotItemsToPersist"] | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -60,6 +60,8 @@ var reflectStaticSizeIndexSnapshot int | |
// in the kvConfig. | ||
var DefaultFieldTFRCacheThreshold uint64 = 10 | ||
|
||
type diskStatsReporter segment.DiskStatsReporter | ||
|
||
func init() { | ||
var is interface{} = IndexSnapshot{} | ||
reflectStaticSizeIndexSnapshot = int(reflect.TypeOf(is).Size()) | ||
|
@@ -146,10 +148,19 @@ func (i *IndexSnapshot) newIndexSnapshotFieldDict(field string, | |
results := make(chan *asynchSegmentResult) | ||
for index, segment := range i.segment { | ||
go func(index int, segment *SegmentSnapshot) { | ||
var prevBytesRead uint64 | ||
seg, ok := segment.segment.(diskStatsReporter) | ||
if ok { | ||
prevBytesRead = seg.BytesRead() | ||
} | ||
dict, err := segment.segment.Dictionary(field) | ||
if err != nil { | ||
results <- &asynchSegmentResult{err: err} | ||
} else { | ||
if ok { | ||
atomic.AddUint64(&i.parent.stats.TotBytesReadAtQueryTime, | ||
seg.BytesRead()-prevBytesRead) | ||
} | ||
if randomLookup { | ||
results <- &asynchSegmentResult{dict: dict} | ||
} else { | ||
|
@@ -424,6 +435,11 @@ func (i *IndexSnapshot) Document(id string) (rv index.Document, err error) { | |
segmentIndex, localDocNum := i.segmentIndexAndLocalDocNumFromGlobal(docNum) | ||
|
||
rvd := document.NewDocument(id) | ||
var prevBytesRead uint64 | ||
seg, ok := i.segment[segmentIndex].segment.(segment.DiskStatsReporter) | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Rename ok to diskStatsAvailable so that at line 472 below we know why things are ok. |
||
if ok { | ||
prevBytesRead = seg.BytesRead() | ||
} | ||
err = i.segment[segmentIndex].VisitDocument(localDocNum, func(name string, typ byte, val []byte, pos []uint64) bool { | ||
if name == "_id" { | ||
return true | ||
|
@@ -453,7 +469,10 @@ func (i *IndexSnapshot) Document(id string) (rv index.Document, err error) { | |
if err != nil { | ||
return nil, err | ||
} | ||
|
||
if ok { | ||
delta := seg.BytesRead() - prevBytesRead | ||
atomic.AddUint64(&i.parent.stats.TotBytesReadAtQueryTime, delta) | ||
} | ||
return rvd, nil | ||
} | ||
|
||
|
@@ -505,18 +524,18 @@ func (i *IndexSnapshot) InternalID(id string) (rv index.IndexInternalID, err err | |
return next.ID, nil | ||
} | ||
|
||
func (i *IndexSnapshot) TermFieldReader(term []byte, field string, includeFreq, | ||
func (is *IndexSnapshot) TermFieldReader(term []byte, field string, includeFreq, | ||
includeNorm, includeTermVectors bool) (index.TermFieldReader, error) { | ||
rv := i.allocTermFieldReaderDicts(field) | ||
rv := is.allocTermFieldReaderDicts(field) | ||
|
||
rv.term = term | ||
rv.field = field | ||
rv.snapshot = i | ||
rv.snapshot = is | ||
if rv.postings == nil { | ||
rv.postings = make([]segment.PostingsList, len(i.segment)) | ||
rv.postings = make([]segment.PostingsList, len(is.segment)) | ||
} | ||
if rv.iterators == nil { | ||
rv.iterators = make([]segment.PostingsIterator, len(i.segment)) | ||
rv.iterators = make([]segment.PostingsIterator, len(is.segment)) | ||
} | ||
rv.segmentOffset = 0 | ||
rv.includeFreq = includeFreq | ||
|
@@ -526,25 +545,54 @@ func (i *IndexSnapshot) TermFieldReader(term []byte, field string, includeFreq, | |
rv.currID = rv.currID[:0] | ||
|
||
if rv.dicts == nil { | ||
rv.dicts = make([]segment.TermDictionary, len(i.segment)) | ||
for i, segment := range i.segment { | ||
rv.dicts = make([]segment.TermDictionary, len(is.segment)) | ||
for i, segment := range is.segment { | ||
var prevBytesRead uint64 | ||
segP, ok := segment.segment.(diskStatsReporter) | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Same here, rename ok to diskStatsAvailable. |
||
if ok { | ||
prevBytesRead = segP.BytesRead() | ||
} | ||
dict, err := segment.segment.Dictionary(field) | ||
if err != nil { | ||
return nil, err | ||
} | ||
if ok { | ||
atomic.AddUint64(&is.parent.stats.TotBytesReadAtQueryTime, segP.BytesRead()-prevBytesRead) | ||
} | ||
rv.dicts[i] = dict | ||
} | ||
} | ||
|
||
for i, segment := range i.segment { | ||
for i, segment := range is.segment { | ||
var prevBytesReadPL uint64 | ||
if postings, ok := rv.postings[i].(diskStatsReporter); ok { | ||
prevBytesReadPL = postings.BytesRead() | ||
} | ||
pl, err := rv.dicts[i].PostingsList(term, segment.deleted, rv.postings[i]) | ||
if err != nil { | ||
return nil, err | ||
} | ||
rv.postings[i] = pl | ||
|
||
var prevBytesReadItr uint64 | ||
if itr, ok := rv.iterators[i].(diskStatsReporter); ok { | ||
prevBytesReadItr = itr.BytesRead() | ||
} | ||
rv.iterators[i] = pl.Iterator(includeFreq, includeNorm, includeTermVectors, rv.iterators[i]) | ||
|
||
if postings, ok := pl.(diskStatsReporter); ok && | ||
prevBytesReadPL < postings.BytesRead() { | ||
atomic.AddUint64(&is.parent.stats.TotBytesReadAtQueryTime, | ||
postings.BytesRead()-prevBytesReadPL) | ||
} | ||
|
||
if itr, ok := rv.iterators[i].(diskStatsReporter); ok && | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Avoid repeated interface checks for the same vars. There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. There is a re-initialisation of the var rv.iterators[i], so I can't reuse the same variable over here, since the bytes read as part of the iterator initialisation won't be part of the old variable. |
||
prevBytesReadItr < itr.BytesRead() { | ||
atomic.AddUint64(&is.parent.stats.TotBytesReadAtQueryTime, | ||
itr.BytesRead()-prevBytesReadItr) | ||
} | ||
} | ||
atomic.AddUint64(&i.parent.stats.TotTermSearchersStarted, uint64(1)) | ||
atomic.AddUint64(&is.parent.stats.TotTermSearchersStarted, uint64(1)) | ||
return rv, nil | ||
} | ||
|
||
|
@@ -661,10 +709,18 @@ func (i *IndexSnapshot) documentVisitFieldTermsOnSegment( | |
} | ||
|
||
if ssvOk && ssv != nil && len(vFields) > 0 { | ||
var prevBytesRead uint64 | ||
ssvp, ok := ssv.(segment.DiskStatsReporter) | ||
if ok { | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. ditto |
||
prevBytesRead = ssvp.BytesRead() | ||
} | ||
dvs, err = ssv.VisitDocValues(localDocNum, fields, visitor, dvs) | ||
if err != nil { | ||
return nil, nil, err | ||
} | ||
if ok { | ||
atomic.AddUint64(&i.parent.stats.TotBytesReadAtQueryTime, ssvp.BytesRead()-prevBytesRead) | ||
} | ||
} | ||
|
||
if errCh != nil { | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -76,6 +76,11 @@ func (i *IndexSnapshotTermFieldReader) Next(preAlloced *index.TermFieldDoc) (*in | |
} | ||
// find the next hit | ||
for i.segmentOffset < len(i.iterators) { | ||
prevBytesRead := uint64(0) | ||
itr, ok := i.iterators[i.segmentOffset].(segment.DiskStatsReporter) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. ditto |
||
if ok { | ||
prevBytesRead = itr.BytesRead() | ||
} | ||
next, err := i.iterators[i.segmentOffset].Next() | ||
if err != nil { | ||
return nil, err | ||
|
@@ -89,6 +94,15 @@ func (i *IndexSnapshotTermFieldReader) Next(preAlloced *index.TermFieldDoc) (*in | |
|
||
i.currID = rv.ID | ||
i.currPosting = next | ||
// postingsIterators maintain the bytesRead stat in a cumulative fashion. | ||
// this is because there are chances of having a series of loadChunk calls, | ||
// and they have to be added together before sending the bytesRead at this point | ||
// upstream. | ||
if ok { | ||
delta := itr.BytesRead() - prevBytesRead | ||
atomic.AddUint64(&i.snapshot.parent.stats.TotBytesReadAtQueryTime, uint64(delta)) | ||
} | ||
|
||
return rv, nil | ||
} | ||
i.segmentOffset++ | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Per my latest conversation with Jon, we shouldn't be accounting for content written to disk by the merger. More conversation needed here I suppose.
https://docs.google.com/document/d/1pmrasugnCIAXUNgR14h_tix_ZxU3V28unEQYhnz1u34/edit#heading=h.w7mwv55b14ou
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
That is the case here as well: we are only accounting for the BytesRead from disk for the newly created merged segment. And this would anyway be needed/accounted for by the next query on this newly formed segment.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
We need to carry forward the stats from the older segments that got merged into the newly formed segment so that the accounting of stats is cumulative. Hence the need for the SetBytesRead() API.
If we fail to consider the stats from the pre-merge segments, then the bytes accounting goes wrong.
Have posted alternative thought in zapx PR.