Documentation ¶
Index ¶
- Constants
- Variables
- func NewScorch(storeName string, config map[string]interface{}, ...) (index.Index, error)
- func RegisterSegmentPlugin(plugin SegmentPlugin, makeDefault bool)
- func ResetSegmentPlugins()
- func Rollback(path string, to *RollbackPoint) error
- func SupportedSegmentTypeVersions(typ string) (rv []uint32)
- func SupportedSegmentTypes() (rv []string)
- type AsyncPanicError
- type Builder
- type DocValueReader
- type Event
- type EventKind
- type IndexSnapshot
- func (i *IndexSnapshot) AddRef()
- func (i *IndexSnapshot) Close() error
- func (is *IndexSnapshot) CopyTo(d index.Directory) error
- func (i *IndexSnapshot) DecRef() (err error)
- func (is *IndexSnapshot) DocCount() (uint64, error)
- func (is *IndexSnapshot) DocIDReaderAll() (index.DocIDReader, error)
- func (is *IndexSnapshot) DocIDReaderOnly(ids []string) (index.DocIDReader, error)
- func (is *IndexSnapshot) DocValueReader(fields []string) (index.DocValueReader, error)
- func (is *IndexSnapshot) Document(id string) (rv index.Document, err error)
- func (is *IndexSnapshot) DumpAll() chan interface{}
- func (is *IndexSnapshot) DumpDoc(id string) chan interface{}
- func (is *IndexSnapshot) DumpFields() chan interface{}
- func (is *IndexSnapshot) ExternalID(id index.IndexInternalID) (string, error)
- func (is *IndexSnapshot) FieldDict(field string) (index.FieldDict, error)
- func (is *IndexSnapshot) FieldDictContains(field string) (index.FieldDictContains, error)
- func (is *IndexSnapshot) FieldDictFuzzy(field string, term string, fuzziness int, prefix string) (index.FieldDict, error)
- func (is *IndexSnapshot) FieldDictPrefix(field string, termPrefix []byte) (index.FieldDict, error)
- func (is *IndexSnapshot) FieldDictRange(field string, startTerm []byte, endTerm []byte) (index.FieldDict, error)
- func (is *IndexSnapshot) FieldDictRegexp(field string, termRegex string) (index.FieldDict, error)
- func (is *IndexSnapshot) Fields() ([]string, error)
- func (is *IndexSnapshot) GetInternal(key []byte) ([]byte, error)
- func (is *IndexSnapshot) GetSpatialAnalyzerPlugin(typ string) (index.SpatialAnalyzerPlugin, error)
- func (i *IndexSnapshot) Internal() map[string][]byte
- func (is *IndexSnapshot) InternalID(id string) (rv index.IndexInternalID, err error)
- func (i *IndexSnapshot) Segments() []*SegmentSnapshot
- func (i *IndexSnapshot) Size() int
- func (is *IndexSnapshot) TermFieldReader(ctx context.Context, term []byte, field string, ...) (index.TermFieldReader, error)
- func (is *IndexSnapshot) UpdateIOStats(val uint64)
- type IndexSnapshotDocIDReader
- type IndexSnapshotFieldDict
- func (i *IndexSnapshotFieldDict) BytesRead() uint64
- func (i *IndexSnapshotFieldDict) Close() error
- func (i *IndexSnapshotFieldDict) Contains(key []byte) (bool, error)
- func (i *IndexSnapshotFieldDict) Len() int
- func (i *IndexSnapshotFieldDict) Less(a, b int) bool
- func (i *IndexSnapshotFieldDict) Next() (*index.DictEntry, error)
- func (i *IndexSnapshotFieldDict) Pop() interface{}
- func (i *IndexSnapshotFieldDict) Push(x interface{})
- func (i *IndexSnapshotFieldDict) Swap(a, b int)
- type IndexSnapshotTermFieldReader
- func (i *IndexSnapshotTermFieldReader) Advance(ID index.IndexInternalID, preAlloced *index.TermFieldDoc) (*index.TermFieldDoc, error)
- func (i *IndexSnapshotTermFieldReader) Close() error
- func (i *IndexSnapshotTermFieldReader) Count() uint64
- func (i *IndexSnapshotTermFieldReader) Next(preAlloced *index.TermFieldDoc) (*index.TermFieldDoc, error)
- func (s *IndexSnapshotTermFieldReader) Optimize(kind string, octx index.OptimizableContext) (index.OptimizableContext, error)
- func (i *IndexSnapshotTermFieldReader) Size() int
- type OptimizeTFRConjunction
- type OptimizeTFRConjunctionUnadorned
- type OptimizeTFRDisjunctionUnadorned
- type RollbackPoint
- type Scorch
- func (s *Scorch) AddEligibleForRemoval(epoch uint64)
- func (s *Scorch) Analyze(d index.Document)
- func (s *Scorch) Batch(batch *index.Batch) (err error)
- func (s *Scorch) BytesReadQueryTime() uint64
- func (s *Scorch) Close() (err error)
- func (s *Scorch) Delete(id string) error
- func (s *Scorch) DeleteInternal(key []byte) error
- func (s *Scorch) ForceMerge(ctx context.Context, mo *mergeplan.MergePlanOptions) error
- func (s *Scorch) LoadSnapshot(epoch uint64) (rv *IndexSnapshot, err error)
- func (s *Scorch) MemoryUsed() (memUsed uint64)
- func (s *Scorch) NumEventsBlocking() uint64
- func (s *Scorch) Open() error
- func (s *Scorch) Reader() (index.IndexReader, error)
- func (s *Scorch) ReportBytesWritten(bytesWritten uint64)
- func (s *Scorch) RootBoltSnapshotEpochs() ([]uint64, error)
- func (s *Scorch) SetInternal(key, val []byte) error
- func (s *Scorch) Stats() json.Marshaler
- func (s *Scorch) StatsMap() map[string]interface{}
- func (s *Scorch) Update(doc index.Document) error
- type SegmentPlugin
- type SegmentSnapshot
- func (s *SegmentSnapshot) Close() error
- func (s *SegmentSnapshot) Count() uint64
- func (s *SegmentSnapshot) Deleted() *roaring.Bitmap
- func (s *SegmentSnapshot) DocID(num uint64) ([]byte, error)
- func (s *SegmentSnapshot) DocNumbers(docIDs []string) (*roaring.Bitmap, error)
- func (s *SegmentSnapshot) DocNumbersLive() *roaring.Bitmap
- func (s *SegmentSnapshot) Fields() []string
- func (s *SegmentSnapshot) FullSize() int64
- func (s *SegmentSnapshot) Id() uint64
- func (s *SegmentSnapshot) LiveSize() int64
- func (s *SegmentSnapshot) Segment() segment.Segment
- func (s *SegmentSnapshot) Size() (rv int)
- func (s *SegmentSnapshot) VisitDocument(num uint64, visitor segment.StoredFieldValueVisitor) error
- type Stats
- type UnadornedPosting
Constants ¶
const DefaultBuilderBatchSize = 1000
const DefaultBuilderMergeMax = 10
const Name = "scorch"
const Version uint8 = 2
Variables ¶
var DefaultFieldTFRCacheThreshold uint64 = 10
DefaultFieldTFRCacheThreshold limits the number of TermFieldReaders (TFRs) cached per field in an index snapshot. Without this limit, when recycling TFRs, a very large number of TFRs could be added to the recycle cache, eventually leading to significant memory consumption. This threshold can be overridden by users at the library level by changing the exported variable, or at the index level by setting FieldTFRCacheThreshold in the kvConfig.
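For illustration, a minimal sketch of both override paths; the use of bleve.NewUsing to supply the kvConfig, and the value 50, are assumptions rather than recommendations:

```go
package main

import (
	"github.com/blevesearch/bleve/v2"
	"github.com/blevesearch/bleve/v2/index/scorch"
)

func main() {
	// Library-level override: affects every scorch index opened by this process.
	scorch.DefaultFieldTFRCacheThreshold = 50

	// Index-level override: FieldTFRCacheThreshold supplied through the kvConfig
	// map at index creation time (wiring via bleve.NewUsing is an assumption;
	// the numeric type expected by the config parser may also vary).
	kvConfig := map[string]interface{}{
		"FieldTFRCacheThreshold": 50,
	}
	idx, err := bleve.NewUsing("example.bleve", bleve.NewIndexMapping(),
		scorch.Name, scorch.Name, kvConfig)
	if err != nil {
		panic(err)
	}
	defer idx.Close()
}
```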
var DefaultMemoryPressurePauseThreshold uint64 = math.MaxUint64
var DefaultMinSegmentsForInMemoryMerge = 2
DefaultMinSegmentsForInMemoryMerge represents the default number of in-memory zap segments that persistSnapshotMaybeMerge() needs to see in an IndexSnapshot before it decides to merge and persist those segments.
var DefaultPersisterNapTimeMSec int = 0 // ms
DefaultPersisterNapTimeMSec is kept at zero because this helps with direct persistence of segments under the default safe-batch option. If the default safe-batch option results in a high number of files on disk, users may initialise this configuration parameter with a higher value so that the persister naps a bit within its work loop, favouring better in-memory merging of segments and thus fewer segment files on disk. That may come with an indexing performance overhead. Unsafe-batch users are advised to override this with a higher value for better performance, especially with high data density.
var DefaultPersisterNapUnderNumFiles int = 1000
DefaultPersisterNapUnderNumFiles helps control the pace of the persister. At times of slow merger progress with heavy file-merging operations, it is better to slow the persister down so that the merger can catch up within the range defined by this parameter. Fewer files on disk (as per the merge plan) keep file-handle usage under the limit and result in faster disk merging and a healthier index. It has been observed that such a loosely synced introducer-persister-merger trio yields better overall performance.
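A hedged sketch of tuning both pacing knobs at the library level before any index is opened; the values are illustrative only:

```go
package main

import "github.com/blevesearch/bleve/v2/index/scorch"

func main() {
	// Values are illustrative, not recommendations; set these before any
	// scorch index is opened in the process.

	// Let the persister nap up to 2s in its work loop, favouring in-memory
	// merging of segments and fewer segment files on disk.
	scorch.DefaultPersisterNapTimeMSec = 2000

	// The persister only naps while the on-disk file count stays below this
	// threshold, so a heavily backlogged merger still gets a chance to catch up.
	scorch.DefaultPersisterNapUnderNumFiles = 2000
}
```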
var ErrClosed = fmt.Errorf("scorch closed")
var EventKindBatchIntroduction = EventKind(6)
EventKindBatchIntroduction is fired when Batch() completes.
var EventKindBatchIntroductionStart = EventKind(5)
EventKindBatchIntroductionStart is fired when Batch() is invoked to introduce a new segment.
var EventKindClose = EventKind(2)
EventKindClose is fired when a scorch index has been fully closed.
var EventKindCloseStart = EventKind(1)
EventKindCloseStart is fired when a Scorch.Close() has begun.
var EventKindMergeTaskIntroduction = EventKind(8)
EventKindMergeTaskIntroduction is fired when the merger has completed the introduction of a merged segment from a single merge task.
var EventKindMergeTaskIntroductionStart = EventKind(7)
EventKindMergeTaskIntroductionStart is fired when the merger is about to start the introduction of a merged segment from a single merge task.
var EventKindMergerProgress = EventKind(3)
EventKindMergerProgress is fired when the merger has completed a round of merge processing.
var EventKindPersisterProgress = EventKind(4)
EventKindPersisterProgress is fired when the persister has completed a round of persistence processing.
var NumSnapshotsToKeep = 1
NumSnapshotsToKeep represents how many recent, old snapshots to keep around per Scorch instance. Useful for apps that require the ability to roll back.
var OptimizeConjunction = true
var OptimizeConjunctionUnadorned = true
var OptimizeDisjunctionUnadorned = true
var OptimizeDisjunctionUnadornedMinChildCardinality = uint64(256)
var OptimizeTFRConjunctionUnadornedField = "*"
var OptimizeTFRConjunctionUnadornedTerm = []byte("<conjunction:unadorned>")
var OptimizeTFRDisjunctionUnadornedField = "*"
var OptimizeTFRDisjunctionUnadornedTerm = []byte("<disjunction:unadorned>")
var RegistryAsyncErrorCallbacks = map[string]func(error, string){}
RegistryAsyncErrorCallbacks should be treated as read-only after process initialization (init()).
var RegistryEventCallbacks = map[string]func(Event){}
RegistryEventCallbacks should be treated as read-only after process initialization (init()).
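A minimal sketch of registering callbacks during init(); the package name myapp and the key "myApp" are hypothetical, and how an index selects a registered callback by name is configured elsewhere:

```go
// Package myapp is a hypothetical application package.
package myapp

import (
	"log"

	"github.com/blevesearch/bleve/v2/index/scorch"
)

func init() {
	// Register callbacks before any scorch index is opened; the registries
	// should be treated as read-only after init().
	scorch.RegistryEventCallbacks["myApp"] = func(e scorch.Event) {
		log.Printf("scorch event: %+v", e)
	}
	scorch.RegistryAsyncErrorCallbacks["myApp"] = func(err error, path string) {
		log.Printf("scorch async error at %q: %v", path, err)
	}
}
```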
var RollbackRetentionFactor = float64(0.5)
RollbackRetentionFactor controls what portion of the earlier rollback points to retain during an infrequent/sparse mutation scenario.
var RollbackSamplingInterval = 0 * time.Minute
RollbackSamplingInterval controls how far back we look in the history to collect rollback points. For example, a value of 10 minutes ensures that the protected snapshots (with NumSnapshotsToKeep = 3) are: the very latest snapshot (i.e. the current one), the snapshot persisted 10 minutes before the current one, and the snapshot persisted 20 minutes before the current one.
By default, however, this time-series way of protecting snapshots is disabled, and the latest three contiguous snapshots are protected instead.
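A sketch of enabling that time-series protection, mirroring the values in the example above:

```go
package main

import (
	"time"

	"github.com/blevesearch/bleve/v2/index/scorch"
)

func main() {
	// Protect the current snapshot plus the ones persisted roughly 10 and
	// 20 minutes earlier (values mirror the example in the comment above).
	scorch.NumSnapshotsToKeep = 3
	scorch.RollbackSamplingInterval = 10 * time.Minute
	// Retain half of the earlier rollback points under sparse mutations.
	scorch.RollbackRetentionFactor = 0.5
}
```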
var TermSeparator byte = 0xff
var TermSeparatorSplitSlice = []byte{TermSeparator}
var TotBytesWrittenKey = []byte("TotBytesWritten")
Functions ¶
func RegisterSegmentPlugin ¶
func RegisterSegmentPlugin(plugin SegmentPlugin, makeDefault bool)
func ResetSegmentPlugins ¶
func ResetSegmentPlugins()
func Rollback ¶
func Rollback(path string, to *RollbackPoint) error
Rollback atomically and durably brings the store back to the point in time as represented by the RollbackPoint. Rollback() should only be passed a RollbackPoint that came from the same store using the RollbackPoints() API along with the index path.
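A minimal sketch combining RollbackPoints and Rollback; the package and helper names are hypothetical, and the index is assumed not to be open at the time:

```go
package example

import "github.com/blevesearch/bleve/v2/index/scorch"

// rollbackToLatest is a hypothetical helper: it rolls the index at indexPath
// back to its most recent available rollback point, assuming the index is
// not currently open.
func rollbackToLatest(indexPath string) error {
	points, err := scorch.RollbackPoints(indexPath)
	if err != nil {
		return err
	}
	if len(points) == 0 {
		return nil // nothing to roll back to
	}
	// RollbackPoints returns more recent points (higher epochs) first.
	return scorch.Rollback(indexPath, points[0])
}
```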
func SupportedSegmentTypes ¶
func SupportedSegmentTypes() (rv []string)
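A minimal sketch pairing this with SupportedSegmentTypeVersions (listed in the index above) to enumerate the registered segment formats:

```go
package main

import (
	"fmt"

	"github.com/blevesearch/bleve/v2/index/scorch"
)

func main() {
	// List every segment type this build of scorch can use, with its versions.
	for _, typ := range scorch.SupportedSegmentTypes() {
		fmt.Println(typ, scorch.SupportedSegmentTypeVersions(typ))
	}
}
```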
Types ¶
type AsyncPanicError ¶
AsyncPanicError is passed to the scorch asyncErrorHandler when a panic occurs in a scorch background process.
func (*AsyncPanicError) Error ¶
func (e *AsyncPanicError) Error() string
type DocValueReader ¶
type DocValueReader struct {
// contains filtered or unexported fields
}
func (*DocValueReader) BytesRead ¶
func (dvr *DocValueReader) BytesRead() uint64
func (*DocValueReader) VisitDocValues ¶
func (dvr *DocValueReader) VisitDocValues(id index.IndexInternalID, visitor index.DocValueVisitor) (err error)
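A minimal sketch of visiting doc values for one document through a reader obtained from IndexSnapshot.DocValueReader; the package and helper names are hypothetical:

```go
package example

import (
	"fmt"

	"github.com/blevesearch/bleve/v2/index/scorch"
)

// printDocValues is a hypothetical helper: it prints the doc values of the
// requested fields for a single document, looked up by its external ID.
func printDocValues(is *scorch.IndexSnapshot, docID string, fields []string) error {
	internalID, err := is.InternalID(docID)
	if err != nil || internalID == nil {
		return err
	}
	dvr, err := is.DocValueReader(fields)
	if err != nil {
		return err
	}
	return dvr.VisitDocValues(internalID, func(field string, term []byte) {
		fmt.Printf("%s => %q\n", field, term)
	})
}
```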
type IndexSnapshot ¶
type IndexSnapshot struct {
// contains filtered or unexported fields
}
func (*IndexSnapshot) AddRef ¶
func (i *IndexSnapshot) AddRef()
func (*IndexSnapshot) Close ¶
func (i *IndexSnapshot) Close() error
func (*IndexSnapshot) DecRef ¶
func (i *IndexSnapshot) DecRef() (err error)
func (*IndexSnapshot) DocCount ¶
func (is *IndexSnapshot) DocCount() (uint64, error)
func (*IndexSnapshot) DocIDReaderAll ¶
func (is *IndexSnapshot) DocIDReaderAll() (index.DocIDReader, error)
func (*IndexSnapshot) DocIDReaderOnly ¶
func (is *IndexSnapshot) DocIDReaderOnly(ids []string) (index.DocIDReader, error)
func (*IndexSnapshot) DocValueReader ¶
func (is *IndexSnapshot) DocValueReader(fields []string) (index.DocValueReader, error)
func (*IndexSnapshot) Document ¶
func (is *IndexSnapshot) Document(id string) (rv index.Document, err error)
func (*IndexSnapshot) DumpAll ¶
func (is *IndexSnapshot) DumpAll() chan interface{}
func (*IndexSnapshot) DumpDoc ¶
func (is *IndexSnapshot) DumpDoc(id string) chan interface{}
func (*IndexSnapshot) DumpFields ¶
func (is *IndexSnapshot) DumpFields() chan interface{}
func (*IndexSnapshot) ExternalID ¶
func (is *IndexSnapshot) ExternalID(id index.IndexInternalID) (string, error)
func (*IndexSnapshot) FieldDict ¶
func (is *IndexSnapshot) FieldDict(field string) (index.FieldDict, error)
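A minimal sketch of walking a field's term dictionary with FieldDict and Next; the package and helper names are hypothetical:

```go
package example

import (
	"fmt"

	"github.com/blevesearch/bleve/v2/index/scorch"
)

// printTerms is a hypothetical helper: it walks the term dictionary of one
// field in a snapshot and prints each term with its document count.
func printTerms(is *scorch.IndexSnapshot, field string) error {
	dict, err := is.FieldDict(field)
	if err != nil {
		return err
	}
	defer dict.Close()

	entry, err := dict.Next()
	for err == nil && entry != nil {
		fmt.Printf("%s: %d docs\n", entry.Term, entry.Count)
		entry, err = dict.Next()
	}
	return err
}
```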
func (*IndexSnapshot) FieldDictContains ¶
func (is *IndexSnapshot) FieldDictContains(field string) (index.FieldDictContains, error)
func (*IndexSnapshot) FieldDictFuzzy ¶
func (is *IndexSnapshot) FieldDictFuzzy(field string, term string, fuzziness int, prefix string) (index.FieldDict, error)
func (*IndexSnapshot) FieldDictPrefix ¶
func (is *IndexSnapshot) FieldDictPrefix(field string, termPrefix []byte) (index.FieldDict, error)
func (*IndexSnapshot) FieldDictRange ¶
func (is *IndexSnapshot) FieldDictRange(field string, startTerm []byte, endTerm []byte) (index.FieldDict, error)
func (*IndexSnapshot) FieldDictRegexp ¶
func (is *IndexSnapshot) FieldDictRegexp(field string, termRegex string) (index.FieldDict, error)
func (*IndexSnapshot) Fields ¶
func (is *IndexSnapshot) Fields() ([]string, error)
func (*IndexSnapshot) GetInternal ¶
func (is *IndexSnapshot) GetInternal(key []byte) ([]byte, error)
func (*IndexSnapshot) GetSpatialAnalyzerPlugin ¶
func (is *IndexSnapshot) GetSpatialAnalyzerPlugin(typ string) (index.SpatialAnalyzerPlugin, error)
func (*IndexSnapshot) Internal ¶
func (i *IndexSnapshot) Internal() map[string][]byte
func (*IndexSnapshot) InternalID ¶
func (is *IndexSnapshot) InternalID(id string) (rv index.IndexInternalID, err error)
func (*IndexSnapshot) Segments ¶
func (i *IndexSnapshot) Segments() []*SegmentSnapshot
func (*IndexSnapshot) Size ¶
func (i *IndexSnapshot) Size() int
func (*IndexSnapshot) TermFieldReader ¶
func (is *IndexSnapshot) TermFieldReader(ctx context.Context, term []byte, field string, includeFreq, includeNorm, includeTermVectors bool) (index.TermFieldReader, error)
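A minimal sketch of iterating the postings of a single term via Scorch.Reader and TermFieldReader; the package and helper names are hypothetical:

```go
package example

import (
	"context"
	"fmt"

	"github.com/blevesearch/bleve/v2/index/scorch"
)

// printPostings is a hypothetical helper: it opens a reader on an online
// scorch index and walks the postings of one term in one field.
func printPostings(s *scorch.Scorch, field string, term []byte) error {
	reader, err := s.Reader() // the returned index.IndexReader is an *IndexSnapshot
	if err != nil {
		return err
	}
	defer reader.Close()

	tfr, err := reader.TermFieldReader(context.TODO(), term, field,
		true /* freq */, false /* norm */, false /* term vectors */)
	if err != nil {
		return err
	}
	defer tfr.Close()

	tfd, err := tfr.Next(nil)
	for err == nil && tfd != nil {
		extID, _ := reader.ExternalID(tfd.ID)
		fmt.Println(extID, tfd.Freq)
		tfd, err = tfr.Next(tfd)
	}
	return err
}
```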
func (*IndexSnapshot) UpdateIOStats ¶
func (is *IndexSnapshot) UpdateIOStats(val uint64)
type IndexSnapshotDocIDReader ¶
type IndexSnapshotDocIDReader struct {
// contains filtered or unexported fields
}
func (*IndexSnapshotDocIDReader) Advance ¶
func (i *IndexSnapshotDocIDReader) Advance(ID index.IndexInternalID) (index.IndexInternalID, error)
func (*IndexSnapshotDocIDReader) Close ¶
func (i *IndexSnapshotDocIDReader) Close() error
func (*IndexSnapshotDocIDReader) Next ¶
func (i *IndexSnapshotDocIDReader) Next() (index.IndexInternalID, error)
func (*IndexSnapshotDocIDReader) Size ¶
func (i *IndexSnapshotDocIDReader) Size() int
type IndexSnapshotFieldDict ¶
type IndexSnapshotFieldDict struct {
// contains filtered or unexported fields
}
func (*IndexSnapshotFieldDict) BytesRead ¶
func (i *IndexSnapshotFieldDict) BytesRead() uint64
func (*IndexSnapshotFieldDict) Close ¶
func (i *IndexSnapshotFieldDict) Close() error
func (*IndexSnapshotFieldDict) Contains ¶
func (i *IndexSnapshotFieldDict) Contains(key []byte) (bool, error)
func (*IndexSnapshotFieldDict) Len ¶
func (i *IndexSnapshotFieldDict) Len() int
func (*IndexSnapshotFieldDict) Less ¶
func (i *IndexSnapshotFieldDict) Less(a, b int) bool
func (*IndexSnapshotFieldDict) Next ¶
func (i *IndexSnapshotFieldDict) Next() (*index.DictEntry, error)
func (*IndexSnapshotFieldDict) Pop ¶
func (i *IndexSnapshotFieldDict) Pop() interface{}
func (*IndexSnapshotFieldDict) Push ¶
func (i *IndexSnapshotFieldDict) Push(x interface{})
func (*IndexSnapshotFieldDict) Swap ¶
func (i *IndexSnapshotFieldDict) Swap(a, b int)
type IndexSnapshotTermFieldReader ¶
type IndexSnapshotTermFieldReader struct {
// contains filtered or unexported fields
}
func (*IndexSnapshotTermFieldReader) Advance ¶
func (i *IndexSnapshotTermFieldReader) Advance(ID index.IndexInternalID, preAlloced *index.TermFieldDoc) (*index.TermFieldDoc, error)
func (*IndexSnapshotTermFieldReader) Close ¶
func (i *IndexSnapshotTermFieldReader) Close() error
func (*IndexSnapshotTermFieldReader) Count ¶
func (i *IndexSnapshotTermFieldReader) Count() uint64
func (*IndexSnapshotTermFieldReader) Next ¶
func (i *IndexSnapshotTermFieldReader) Next(preAlloced *index.TermFieldDoc) (*index.TermFieldDoc, error)
func (*IndexSnapshotTermFieldReader) Optimize ¶
func (s *IndexSnapshotTermFieldReader) Optimize(kind string, octx index.OptimizableContext) (index.OptimizableContext, error)
func (*IndexSnapshotTermFieldReader) Size ¶
func (i *IndexSnapshotTermFieldReader) Size() int
type OptimizeTFRConjunction ¶
type OptimizeTFRConjunction struct {
// contains filtered or unexported fields
}
type OptimizeTFRConjunctionUnadorned ¶
type OptimizeTFRConjunctionUnadorned struct {
// contains filtered or unexported fields
}
func (*OptimizeTFRConjunctionUnadorned) Finish ¶
func (o *OptimizeTFRConjunctionUnadorned) Finish() (rv index.Optimized, err error)
Finish of an unadorned conjunction optimization will compute a termFieldReader with an "actual" bitmap that represents the constituent bitmaps AND'ed together. This termFieldReader cannot provide any freq-norm or termVector associated information.
type OptimizeTFRDisjunctionUnadorned ¶
type OptimizeTFRDisjunctionUnadorned struct {
// contains filtered or unexported fields
}
func (*OptimizeTFRDisjunctionUnadorned) Finish ¶
func (o *OptimizeTFRDisjunctionUnadorned) Finish() (rv index.Optimized, err error)
Finish of an unadorned disjunction optimization will compute a termFieldReader with an "actual" bitmap that represents the constituent bitmaps OR'ed together. This termFieldReader cannot provide any freq-norm or termVector associated information.
type RollbackPoint ¶
type RollbackPoint struct {
// contains filtered or unexported fields
}
func RollbackPoints ¶
func RollbackPoints(path string) ([]*RollbackPoint, error)
RollbackPoints returns an array of rollback points available for the application to rollback to, with more recent rollback points (higher epochs) coming first.
func (*RollbackPoint) GetInternal ¶
func (r *RollbackPoint) GetInternal(key []byte) []byte
type Scorch ¶
type Scorch struct {
// contains filtered or unexported fields
}
func (*Scorch) AddEligibleForRemoval ¶
func (s *Scorch) AddEligibleForRemoval(epoch uint64)
func (*Scorch) BytesReadQueryTime ¶
func (s *Scorch) BytesReadQueryTime() uint64
func (*Scorch) DeleteInternal ¶
func (s *Scorch) DeleteInternal(key []byte) error
func (*Scorch) ForceMerge ¶
func (s *Scorch) ForceMerge(ctx context.Context, mo *mergeplan.MergePlanOptions) error
ForceMerge helps users trigger a merge operation on an online scorch index.
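A hedged sketch of calling ForceMerge; SingleSegmentMergePlanOptions is assumed to be an exported preset in the mergeplan subpackage, and any tuned *mergeplan.MergePlanOptions could be passed instead:

```go
package example

import (
	"context"

	"github.com/blevesearch/bleve/v2/index/scorch"
	"github.com/blevesearch/bleve/v2/index/scorch/mergeplan"
)

// compact is a hypothetical helper that merges an online scorch index down
// as far as the supplied merge plan allows.
func compact(ctx context.Context, s *scorch.Scorch) error {
	// SingleSegmentMergePlanOptions is assumed to be an exported preset in
	// mergeplan; substitute options tuned for your workload as needed.
	mo := mergeplan.SingleSegmentMergePlanOptions
	return s.ForceMerge(ctx, &mo)
}
```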
func (*Scorch) LoadSnapshot ¶
func (s *Scorch) LoadSnapshot(epoch uint64) (rv *IndexSnapshot, err error)
LoadSnapshot loads the index snapshot with the specified epoch. NOTE: this is currently ONLY intended to be used by the command-line tool.
func (*Scorch) MemoryUsed ¶
func (s *Scorch) MemoryUsed() (memUsed uint64)
func (*Scorch) NumEventsBlocking ¶
func (s *Scorch) NumEventsBlocking() uint64
func (*Scorch) Reader ¶
func (s *Scorch) Reader() (index.IndexReader, error)
Reader returns a low-level accessor on the index data. Close it to release associated resources.
func (*Scorch) ReportBytesWritten ¶
func (s *Scorch) ReportBytesWritten(bytesWritten uint64)
func (*Scorch) RootBoltSnapshotEpochs ¶
func (s *Scorch) RootBoltSnapshotEpochs() ([]uint64, error)
func (*Scorch) SetInternal ¶
func (s *Scorch) SetInternal(key, val []byte) error
type SegmentPlugin ¶
type SegmentPlugin interface {
// Type is the name for this segment plugin
Type() string
// Version is a numeric value identifying a specific version of this type.
// When incompatible changes are made to a particular type of plugin, the
// version must be incremented.
Version() uint32
// New takes a set of Documents and turns them into a new Segment
New(results []index.Document) (segment.Segment, uint64, error)
// Open attempts to open the file at the specified path and
// return the corresponding Segment
Open(path string) (segment.Segment, error)
// Merge takes a set of Segments, and creates a new segment on disk at
// the specified path.
// Drops is a set of bitmaps (one for each segment) indicating which
// documents can be dropped from the segments during the merge.
// If the closeCh channel is closed, Merge will cease doing work at
// the next opportunity, and return an error (closed).
// StatsReporter can optionally be provided, in which case progress
// made during the merge is reported while operation continues.
// Returns:
// A slice of new document numbers (one for each input segment),
// this allows the caller to know a particular document's new
// document number in the newly merged segment.
// The number of bytes written to the new segment file.
// An error, if any occurred.
Merge(segments []segment.Segment, drops []*roaring.Bitmap, path string,
closeCh chan struct{}, s segment.StatsReporter) (
[][]uint64, uint64, error)
}
SegmentPlugin represents the essential functions required by a package to plug in its segment implementation.
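A minimal sketch of registering a plugin; zapx's ZapPlugin is assumed here as one available implementation, and scorch already registers its bundled zap versions itself, so this only shows the call shape:

```go
// Package myapp is a hypothetical application package.
package myapp

import (
	zapv15 "github.com/blevesearch/zapx/v15"

	"github.com/blevesearch/bleve/v2/index/scorch"
)

func init() {
	// Register the zap v15 implementation (assumed to satisfy SegmentPlugin)
	// and make it the default segment format for newly created indexes.
	scorch.RegisterSegmentPlugin(&zapv15.ZapPlugin{}, true)
}
```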
type SegmentSnapshot ¶
type SegmentSnapshot struct {
// contains filtered or unexported fields
}
func (*SegmentSnapshot) Close ¶
func (s *SegmentSnapshot) Close() error
func (*SegmentSnapshot) Count ¶
func (s *SegmentSnapshot) Count() uint64
func (*SegmentSnapshot) Deleted ¶
func (s *SegmentSnapshot) Deleted() *roaring.Bitmap
func (*SegmentSnapshot) DocNumbers ¶
func (s *SegmentSnapshot) DocNumbers(docIDs []string) (*roaring.Bitmap, error)
func (*SegmentSnapshot) DocNumbersLive ¶
func (s *SegmentSnapshot) DocNumbersLive() *roaring.Bitmap
DocNumbersLive returns a bitmap containing doc numbers for all live docs
func (*SegmentSnapshot) Fields ¶
func (s *SegmentSnapshot) Fields() []string
func (*SegmentSnapshot) FullSize ¶
func (s *SegmentSnapshot) FullSize() int64
func (*SegmentSnapshot) Id ¶
func (s *SegmentSnapshot) Id() uint64
func (*SegmentSnapshot) LiveSize ¶
func (s *SegmentSnapshot) LiveSize() int64
func (*SegmentSnapshot) Segment ¶
func (s *SegmentSnapshot) Segment() segment.Segment
func (*SegmentSnapshot) Size ¶
func (s *SegmentSnapshot) Size() (rv int)
func (*SegmentSnapshot) VisitDocument ¶
func (s *SegmentSnapshot) VisitDocument(num uint64, visitor segment.StoredFieldValueVisitor) error
type Stats ¶
type Stats struct {
TotUpdates uint64
TotDeletes uint64
TotBatches uint64
TotBatchesEmpty uint64
TotBatchIntroTime uint64
MaxBatchIntroTime uint64
CurRootEpoch uint64
LastPersistedEpoch uint64
LastMergedEpoch uint64
TotOnErrors uint64
TotAnalysisTime uint64
TotIndexTime uint64
TotIndexedPlainTextBytes uint64
TotBytesReadAtQueryTime uint64
TotBytesWrittenAtIndexTime uint64
TotTermSearchersStarted uint64
TotTermSearchersFinished uint64
TotEventTriggerStarted uint64
TotEventTriggerCompleted uint64
TotIntroduceLoop uint64
TotIntroduceSegmentBeg uint64
TotIntroduceSegmentEnd uint64
TotIntroducePersistBeg uint64
TotIntroducePersistEnd uint64
TotIntroduceMergeBeg uint64
TotIntroduceMergeEnd uint64
TotIntroduceRevertBeg uint64
TotIntroduceRevertEnd uint64
TotIntroducedItems uint64
TotIntroducedSegmentsBatch uint64
TotIntroducedSegmentsMerge uint64
TotPersistLoopBeg uint64
TotPersistLoopErr uint64
TotPersistLoopProgress uint64
TotPersistLoopWait uint64
TotPersistLoopWaitNotified uint64
TotPersistLoopEnd uint64
TotPersistedItems uint64
TotItemsToPersist uint64
TotPersistedSegments uint64
TotPersisterSlowMergerPause uint64
TotPersisterSlowMergerResume uint64
TotPersisterNapPauseCompleted uint64
TotPersisterMergerNapBreak uint64
TotFileMergeLoopBeg uint64
TotFileMergeLoopErr uint64
TotFileMergeLoopEnd uint64
TotFileMergeForceOpsStarted uint64
TotFileMergeForceOpsCompleted uint64
TotFileMergePlan uint64
TotFileMergePlanErr uint64
TotFileMergePlanNone uint64
TotFileMergePlanOk uint64
TotFileMergePlanTasks uint64
TotFileMergePlanTasksDone uint64
TotFileMergePlanTasksErr uint64
TotFileMergePlanTasksSegments uint64
TotFileMergePlanTasksSegmentsEmpty uint64
TotFileMergeSegmentsEmpty uint64
TotFileMergeSegments uint64
TotFileSegmentsAtRoot uint64
TotFileMergeWrittenBytes uint64
TotFileMergeZapBeg uint64
TotFileMergeZapEnd uint64
TotFileMergeZapTime uint64
MaxFileMergeZapTime uint64
TotFileMergeZapIntroductionTime uint64
MaxFileMergeZapIntroductionTime uint64
TotFileMergeIntroductions uint64
TotFileMergeIntroductionsDone uint64
TotFileMergeIntroductionsSkipped uint64
TotFileMergeIntroductionsObsoleted uint64
CurFilesIneligibleForRemoval uint64
TotSnapshotsRemovedFromMetaStore uint64
TotMemMergeBeg uint64
TotMemMergeErr uint64
TotMemMergeDone uint64
TotMemMergeZapBeg uint64
TotMemMergeZapEnd uint64
TotMemMergeZapTime uint64
MaxMemMergeZapTime uint64
TotMemMergeSegments uint64
TotMemorySegmentsAtRoot uint64
}
Stats tracks statistics about the index, fields that are prefixed like CurXxxx are gauges (can go up and down), and fields that are prefixed like TotXxxx are monotonically increasing counters.
func (*Stats) MarshalJSON ¶
func (s *Stats) MarshalJSON() ([]byte, error)
MarshalJSON implements json.Marshaler and, in contrast to standard JSON marshaling, provides atomic safety.
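A minimal sketch of capturing these statistics as JSON via Scorch.Stats; the package and helper names are hypothetical:

```go
package example

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/blevesearch/bleve/v2/index/scorch"
)

// dumpStats is a hypothetical helper that prints a point-in-time JSON view
// of an open scorch index's statistics.
func dumpStats(s *scorch.Scorch) {
	buf, err := json.Marshal(s.Stats()) // Stats() returns an atomically-safe json.Marshaler
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(buf))
}
```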
type UnadornedPosting ¶
type UnadornedPosting uint64
func (UnadornedPosting) Frequency ¶
func (p UnadornedPosting) Frequency() uint64
func (UnadornedPosting) Locations ¶
func (p UnadornedPosting) Locations() []segment.Location
func (UnadornedPosting) Norm ¶
func (p UnadornedPosting) Norm() float64
func (UnadornedPosting) Number ¶
func (p UnadornedPosting) Number() uint64
func (UnadornedPosting) Size ¶
func (p UnadornedPosting) Size() int
Source Files ¶
Directories ¶
| Path | Synopsis |
|---|---|
| mergeplan | Package mergeplan provides a segment merge planning approach that's inspired by Lucene's TieredMergePolicy.java and descriptions like http://blog.mikemccandless.com/2011/02/visualizing-lucenes-segment-merges.html |