Documentation ¶
Index ¶
- Constants
- Variables
- func AsBinaryDocValues(iterator DocValuesFieldUpdatesIterator) index.BinaryDocValues
- func AsNumericDocValues(iterator DocValuesFieldUpdatesIterator) index.NumericDocValues
- func Compare[T int | int32 | int64 | float32 | float64](x, y T) int
- func CreateCompoundFile(ctx context.Context, directory *store.TrackingDirectoryWrapper, ...) error
- func DirectoryReaderOpen(ctx context.Context, writer *IndexWriter) (index.DirectoryReader, error)
- func DirectoryReaderOpenV1(ctx context.Context, writer *IndexWriter, ...) (index.DirectoryReader, error)
- func FeatureRequested(flags, feature int) bool
- func FileNameFromGeneration(base, ext string, gen int64) string
- func GenerationFromSegmentsFileName(fileName string) (int64, error)
- func GetActualMaxDocs() int
- func GetCodecByName(name string) (index.Codec, bool)
- func GetLastCommitGeneration(files []string) (int64, error)
- func GetLastCommitSegmentsFileName(files []string) (string, error)
- func GetNumeric(reader index.LeafReader, field string) (index.NumericDocValues, error)
- func GetSortFieldProviderByName(name string) index.SortFieldProvider
- func GetSorted(reader index.LeafReader, field string) (index.SortedDocValues, error)
- func ImpactComparator(c1, c2 index.Impact) int
- func IsCacheable(ctx index.LeafReaderContext, fields ...string) bool
- func IsIndexExists(dir store.Directory) (bool, error)
- func MergeFromReaders(ctx context.Context, consumer index.FieldsConsumer, mergeState *MergeState, ...) error
- func NewFieldInfos(infos []*document.FieldInfo) index.FieldInfos
- func NewImpact(freq int, norm int64) index.Impact
- func NewLeafMetaData(createdVersionMajor int, minVersion *version.Version, sort index.Sort) index.LeafMetaData
- func NewLeafReaderContext(leafReader index.LeafReader) index.LeafReaderContext
- func NewLeafReaderContextV1(parent *CompositeReaderContext, reader index.LeafReader, ...) index.LeafReaderContext
- func NewSort(fields []index.SortField) index.Sort
- func NewTerm(field string, values []byte) index.Term
- func OpenDirectoryReader(ctx context.Context, directory store.Directory, commit IndexCommit, ...) (index.DirectoryReader, error)
- func OpenStandardDirectoryReader(writer *IndexWriter, ...) (index.DirectoryReader, error)
- func ParseGeneration(filename string) int64
- func ParseSegmentName(filename string) string
- func Partition(config *bkd.Config, maxDoc, splitDim, commonPrefixLen int, ...)
- func ReadCodec(ctx context.Context, input store.DataInput) (index.Codec, error)
- func RegisterCodec(codec index.Codec)
- func RegisterSortFieldProvider(provider index.SortFieldProvider)
- func SegmentFileName(segmentName, segmentSuffix, ext string) string
- func SetDiagnostics(info index.SegmentInfo, source string, details map[string]string) error
- func SortByComparator(maxDoc int, comparator index.DocComparator) index.DocMap
- func SortByComparators(maxDoc int, comparators []index.DocComparator) (index.DocMap, error)
- func SortByDim(config *bkd.Config, sortedDim int, commonPrefixLengths []int, ...)
- func SortFreqProxTermsWriterPerField(fields []*FreqProxTermsWriterPerField)
- func SortTermVectorsConsumerPerField(fields []*TermVectorsConsumerPerField)
- func StripExtension(filename string) string
- func StripSegmentName(filename string) string
- func SubIndex(n int, docStarts []int) int
- func TestLiveDocs(reader index.CodecReader) error
- func WriteSortField(sf index.SortField, output store.DataOutput) error
- type AcceptStatus
- type AutomatonTermsEnum
- type BaseBinaryDocValues
- func (n *BaseBinaryDocValues) Advance(ctx context.Context, target int) (int, error)
- func (n *BaseBinaryDocValues) AdvanceExact(target int) (bool, error)
- func (n *BaseBinaryDocValues) BinaryValue() ([]byte, error)
- func (n *BaseBinaryDocValues) Cost() int64
- func (n *BaseBinaryDocValues) DocID() int
- func (n *BaseBinaryDocValues) NextDoc(ctx context.Context) (int, error)
- func (n *BaseBinaryDocValues) SlowAdvance(ctx context.Context, target int) (int, error)
- type BaseCodecReader
- func (c *BaseCodecReader) CheckIntegrity() error
- func (r BaseCodecReader) Close() error
- func (r BaseCodecReader) DecRef() error
- func (r BaseCodecReader) Document(ctx context.Context, docID int) (*document.Document, error)
- func (r BaseCodecReader) DocumentWithFields(ctx context.Context, docID int, fieldsToLoad []string) (*document.Document, error)
- func (c *BaseCodecReader) DocumentWithVisitor(ctx context.Context, docID int, visitor document.StoredFieldVisitor) error
- func (c *BaseCodecReader) GetBinaryDocValues(field string) (index.BinaryDocValues, error)
- func (c *BaseCodecReader) GetNormValues(field string) (index.NumericDocValues, error)
- func (c *BaseCodecReader) GetNumericDocValues(field string) (index.NumericDocValues, error)
- func (c *BaseCodecReader) GetPointValues(field string) (types.PointValues, bool)
- func (r BaseCodecReader) GetRefCount() int
- func (c *BaseCodecReader) GetSortedDocValues(field string) (index.SortedDocValues, error)
- func (c *BaseCodecReader) GetSortedNumericDocValues(field string) (index.SortedNumericDocValues, error)
- func (c *BaseCodecReader) GetSortedSetDocValues(field string) (index.SortedSetDocValues, error)
- func (r BaseCodecReader) GetTermVector(docID int, field string) (index.Terms, error)
- func (c *BaseCodecReader) GetTermVectors(docID int) (index.Fields, error)
- func (r BaseCodecReader) HasDeletions() bool
- func (r BaseCodecReader) IncRef() error
- func (r BaseCodecReader) Leaves() ([]index.LeafReaderContext, error)
- func (r BaseCodecReader) NotifyReaderClosedListeners() error
- func (r BaseCodecReader) NumDeletedDocs() int
- func (r BaseCodecReader) RegisterParentReader(reader index.IndexReader)
- func (c *BaseCodecReader) Terms(field string) (index.Terms, error)
- func (r BaseCodecReader) TryIncRef() bool
- type BaseCompoundDirectory
- func (*BaseCompoundDirectory) CreateOutput(ctx context.Context, name string) (store.IndexOutput, error)
- func (*BaseCompoundDirectory) CreateTempOutput(ctx context.Context, prefix, suffix string) (store.IndexOutput, error)
- func (*BaseCompoundDirectory) DeleteFile(ctx context.Context, name string) error
- func (*BaseCompoundDirectory) ObtainLock(name string) (store.Lock, error)
- func (*BaseCompoundDirectory) Rename(ctx context.Context, source, dest string) error
- func (*BaseCompoundDirectory) Sync(names map[string]struct{}) error
- func (*BaseCompoundDirectory) SyncMetaData(ctx context.Context) error
- type BaseDocValuesFieldUpdates
- func (d *BaseDocValuesFieldUpdates) Any() bool
- func (d *BaseDocValuesFieldUpdates) Field() string
- func (d *BaseDocValuesFieldUpdates) Finish() error
- func (d *BaseDocValuesFieldUpdates) GetFinished() bool
- func (d *BaseDocValuesFieldUpdates) Grow(size int) error
- func (d *BaseDocValuesFieldUpdates) Resize(size int) error
- func (d *BaseDocValuesFieldUpdates) Size() int
- func (d *BaseDocValuesFieldUpdates) Swap(i, j int) error
- type BaseFieldsConsumer
- type BaseIndexReaderContext
- type BaseLeafReader
- func (r BaseLeafReader) Close() error
- func (r BaseLeafReader) DecRef() error
- func (r *BaseLeafReader) DocFreq(ctx context.Context, term index.Term) (int, error)
- func (r BaseLeafReader) Document(ctx context.Context, docID int) (*document.Document, error)
- func (r BaseLeafReader) DocumentWithFields(ctx context.Context, docID int, fieldsToLoad []string) (*document.Document, error)
- func (r *BaseLeafReader) GetContext() (index.IndexReaderContext, error)
- func (r *BaseLeafReader) GetDocCount(field string) (int, error)
- func (r BaseLeafReader) GetRefCount() int
- func (r *BaseLeafReader) GetSumDocFreq(field string) (int64, error)
- func (r *BaseLeafReader) GetSumTotalTermFreq(field string) (int64, error)
- func (r BaseLeafReader) GetTermVector(docID int, field string) (index.Terms, error)
- func (r BaseLeafReader) HasDeletions() bool
- func (r BaseLeafReader) IncRef() error
- func (r BaseLeafReader) Leaves() ([]index.LeafReaderContext, error)
- func (r BaseLeafReader) NotifyReaderClosedListeners() error
- func (r BaseLeafReader) NumDeletedDocs() int
- func (r *BaseLeafReader) Postings(ctx context.Context, term index.Term, flags int) (index.PostingsEnum, error)
- func (r BaseLeafReader) RegisterParentReader(reader index.IndexReader)
- func (r *BaseLeafReader) TotalTermFreq(ctx context.Context, term index.Term) (int64, error)
- func (r BaseLeafReader) TryIncRef() bool
- type BaseMultiLevelSkipListReader
- func (m *BaseMultiLevelSkipListReader) ReadChildPointer(skipStream store.IndexInput) (int64, error)
- func (m *BaseMultiLevelSkipListReader) ReadLevelLength(skipStream store.IndexInput) (int64, error)
- func (m *BaseMultiLevelSkipListReader) ReadSkipData(level int, skipStream store.IndexInput) (int64, error)
- type BaseMultiLevelSkipListWriter
- type BaseMultiLevelSkipListWriterConfig
- type BaseParallelPostingsArray
- func (p *BaseParallelPostingsArray) AddressOffset() []int
- func (p *BaseParallelPostingsArray) ByteStarts() []int
- func (p *BaseParallelPostingsArray) GetAddressOffset(index int) int
- func (p *BaseParallelPostingsArray) GetByteStarts(index int) int
- func (p *BaseParallelPostingsArray) GetTextStarts(index int) int
- func (p *BaseParallelPostingsArray) Grow()
- func (p *BaseParallelPostingsArray) SetAddressOffset(termID, v int)
- func (p *BaseParallelPostingsArray) SetByteStarts(termID, v int)
- func (p *BaseParallelPostingsArray) SetTextStarts(termID, v int)
- func (p *BaseParallelPostingsArray) TextStarts() []uint32
- type BasePointsWriter
- type BaseSimScorer
- type BaseSortField
- func (s *BaseSortField) Equals(other index.SortField) bool
- func (s *BaseSortField) GetBytesComparator() index.BytesComparator
- func (s *BaseSortField) GetCanUsePoints() bool
- func (s *BaseSortField) GetComparator(numHits, sortPos int) index.FieldComparator
- func (s *BaseSortField) GetComparatorSource() index.FieldComparatorSource
- func (s *BaseSortField) GetField() string
- func (s *BaseSortField) GetIndexSorter() index.IndexSorter
- func (s *BaseSortField) GetMissingValue() any
- func (s *BaseSortField) GetReverse() bool
- func (s *BaseSortField) GetType() index.SortFieldType
- func (s *BaseSortField) NeedsScores() bool
- func (s *BaseSortField) Serialize(ctx context.Context, out store.DataOutput) error
- func (s *BaseSortField) SetBytesComparator(fn index.BytesComparator)
- func (s *BaseSortField) SetCanUsePoints()
- func (s *BaseSortField) SetMissingValue(missingValue any) error
- func (s *BaseSortField) String() string
- type BaseSortedDocValues
- type BaseTerms
- type BaseTermsEnum
- func (b *BaseTermsEnum) Attributes() *attribute.Source
- func (b *BaseTermsEnum) SeekExact(ctx context.Context, text []byte) (bool, error)
- func (b *BaseTermsEnum) SeekExactExpert(ctx context.Context, term []byte, state index.TermState) error
- func (b *BaseTermsEnum) TermState() (index.TermState, error)
- type BaseTermsEnumConfig
- type BaseTermsHash
- func (h *BaseTermsHash) Abort() error
- func (h *BaseTermsHash) FinishDocument(ctx context.Context, docID int) error
- func (h *BaseTermsHash) Flush(ctx context.Context, fieldsToFlush map[string]TermsHashPerField, ...) error
- func (h *BaseTermsHash) GetBytePool() *bytesref.BlockPool
- func (h *BaseTermsHash) GetIntPool() *ints.BlockPool
- func (h *BaseTermsHash) GetTermBytePool() *bytesref.BlockPool
- func (h *BaseTermsHash) Reset() error
- func (h *BaseTermsHash) StartDocument() error
- type BinaryDocValuesFieldUpdates
- func (b *BinaryDocValuesFieldUpdates) AddBytes(doc int, value []byte) error
- func (b *BinaryDocValuesFieldUpdates) AddInt64(doc int, value int64) error
- func (b *BinaryDocValuesFieldUpdates) AddIterator(doc int, it DocValuesFieldUpdatesIterator) error
- func (b *BinaryDocValuesFieldUpdates) EnsureFinished() error
- func (b *BinaryDocValuesFieldUpdates) Finish() error
- func (b *BinaryDocValuesFieldUpdates) Grow(size int) error
- func (b *BinaryDocValuesFieldUpdates) Iterator() (DocValuesFieldUpdatesIterator, error)
- func (b *BinaryDocValuesFieldUpdates) Reset(doc int) error
- func (b *BinaryDocValuesFieldUpdates) Resize(size int) error
- func (b *BinaryDocValuesFieldUpdates) Swap(i, j int) error
- type BinaryDocValuesWriter
- type BitSetIterator
- func (b *BitSetIterator) Advance(ctx context.Context, target int) (int, error)
- func (b *BitSetIterator) Cost() int64
- func (b *BitSetIterator) DocID() int
- func (b *BitSetIterator) GetBitSet() *bitset.BitSet
- func (b *BitSetIterator) NextDoc(ctx context.Context) (int, error)
- func (b *BitSetIterator) SlowAdvance(ctx context.Context, target int) (int, error)
- type BufferedBinaryDocValues
- func (b *BufferedBinaryDocValues) Advance(ctx context.Context, target int) (int, error)
- func (b *BufferedBinaryDocValues) AdvanceExact(target int) (bool, error)
- func (b *BufferedBinaryDocValues) BinaryValue() ([]byte, error)
- func (b *BufferedBinaryDocValues) Cost() int64
- func (b *BufferedBinaryDocValues) DocID() int
- func (b *BufferedBinaryDocValues) NextDoc(ctx context.Context) (int, error)
- func (b *BufferedBinaryDocValues) SlowAdvance(ctx context.Context, target int) (int, error)
- type BufferedNorms
- func (b *BufferedNorms) Advance(ctx context.Context, target int) (int, error)
- func (b *BufferedNorms) AdvanceExact(target int) (bool, error)
- func (b *BufferedNorms) Cost() int64
- func (b *BufferedNorms) DocID() int
- func (b *BufferedNorms) LongValue() (int64, error)
- func (b *BufferedNorms) NextDoc(ctx context.Context) (int, error)
- func (b *BufferedNorms) SlowAdvance(ctx context.Context, target int) (int, error)
- type BufferedNumericDocValues
- func (b *BufferedNumericDocValues) Advance(ctx context.Context, target int) (int, error)
- func (b *BufferedNumericDocValues) AdvanceExact(target int) (bool, error)
- func (b *BufferedNumericDocValues) Cost() int64
- func (b *BufferedNumericDocValues) DocID() int
- func (b *BufferedNumericDocValues) LongValue() (int64, error)
- func (b *BufferedNumericDocValues) NextDoc(ctx context.Context) (int, error)
- func (b *BufferedNumericDocValues) SlowAdvance(ctx context.Context, target int) (int, error)
- type BufferedUpdatesStream
- type ByteSliceReader
- type ClosedListener
- type CommitPoint
- func (c *CommitPoint) CompareTo(commit IndexCommit) int
- func (c *CommitPoint) Delete() error
- func (c *CommitPoint) GetDirectory() store.Directory
- func (c *CommitPoint) GetFileNames() (map[string]struct{}, error)
- func (c *CommitPoint) GetGeneration() int64
- func (c *CommitPoint) GetReader() *StandardDirectoryReader
- func (c *CommitPoint) GetSegmentCount() int
- func (c *CommitPoint) GetSegmentsFileName() string
- func (c *CommitPoint) GetUserData() (map[string]string, error)
- func (c *CommitPoint) IsDeleted() bool
- type CompareIndexReader
- type CompareLeafReader
- type CompetitiveImpactAccumulator
- type CompositeReaderBuilder
- type CompositeReaderContext
- type CompositeReaderContextOption
- func WithCompositeReaderContextV1(reader index.CompositeReader) CompositeReaderContextOption
- func WithCompositeReaderContextV2(parent *CompositeReaderContext, reader index.CompositeReader, ...) CompositeReaderContextOption
- func WithCompositeReaderContextV3(reader index.CompositeReader, ...) CompositeReaderContextOption
- type DVFUIterator
- type DataFields
- type DataPostingsEnum
- func (d *DataPostingsEnum) Advance(ctx context.Context, target int) (int, error)
- func (d *DataPostingsEnum) Cost() int64
- func (d *DataPostingsEnum) DocID() int
- func (d *DataPostingsEnum) EndOffset() (int, error)
- func (d *DataPostingsEnum) Freq() (int, error)
- func (d *DataPostingsEnum) GetPayload() ([]byte, error)
- func (d *DataPostingsEnum) NextDoc(ctx context.Context) (int, error)
- func (d *DataPostingsEnum) NextPosition() (int, error)
- func (d *DataPostingsEnum) SlowAdvance(ctx context.Context, target int) (int, error)
- func (d *DataPostingsEnum) StartOffset() (int, error)
- type DataTerms
- func (d *DataTerms) GetDocCount() (int, error)
- func (d *DataTerms) GetSumDocFreq() (int64, error)
- func (d *DataTerms) GetSumTotalTermFreq() (int64, error)
- func (d *DataTerms) HasFreqs() bool
- func (d *DataTerms) HasOffsets() bool
- func (d *DataTerms) HasPayloads() bool
- func (d *DataTerms) HasPositions() bool
- func (d *DataTerms) Iterator() (index.TermsEnum, error)
- func (d *DataTerms) Size() (int, error)
- type DataTermsEnum
- func (d *DataTermsEnum) DocFreq() (int, error)
- func (d *DataTermsEnum) Impacts(flags int) (index.ImpactsEnum, error)
- func (d *DataTermsEnum) Next(context.Context) ([]byte, error)
- func (d *DataTermsEnum) Ord() (int64, error)
- func (d *DataTermsEnum) Postings(reuse index.PostingsEnum, flags int) (index.PostingsEnum, error)
- func (d *DataTermsEnum) SeekCeil(ctx context.Context, text []byte) (index.SeekStatus, error)
- func (d *DataTermsEnum) SeekExactByOrd(ctx context.Context, ord int64) error
- func (d *DataTermsEnum) Term() ([]byte, error)
- func (d *DataTermsEnum) TotalTermFreq() (int64, error)
- type DefaultIndexingChain
- func (d *DefaultIndexingChain) Abort() error
- func (d *DefaultIndexingChain) Flush(ctx context.Context, state *index.SegmentWriteState) (index.DocMap, error)
- func (d *DefaultIndexingChain) GetHasDocValues(field string) types.DocIdSetIterator
- func (d *DefaultIndexingChain) NewPerField(indexCreatedVersionMajor int, fieldInfo *document.FieldInfo, invert bool, ...) (*PerField, error)
- func (d *DefaultIndexingChain) ProcessDocument(ctx context.Context, docId int, doc *document.Document) error
- type DeleteSlice
- type DirectoryReaderBuilder
- type DocValueSorter
- type DocValues
- type DocValuesFieldUpdates
- type DocValuesFieldUpdatesIterator
- type DocValuesLeafReader
- func (d *DocValuesLeafReader) CheckIntegrity() error
- func (r DocValuesLeafReader) Close() error
- func (r DocValuesLeafReader) DecRef() error
- func (d *DocValuesLeafReader) DoClose() error
- func (r DocValuesLeafReader) Document(ctx context.Context, docID int) (*document.Document, error)
- func (r DocValuesLeafReader) DocumentWithFields(ctx context.Context, docID int, fieldsToLoad []string) (*document.Document, error)
- func (d *DocValuesLeafReader) DocumentWithVisitor(ctx context.Context, docID int, visitor document.StoredFieldVisitor) error
- func (d *DocValuesLeafReader) GetBinaryDocValues(field string) (index.BinaryDocValues, error)
- func (d *DocValuesLeafReader) GetFieldInfos() index.FieldInfos
- func (d *DocValuesLeafReader) GetLiveDocs() util.Bits
- func (d *DocValuesLeafReader) GetMetaData() index.LeafMetaData
- func (d *DocValuesLeafReader) GetNormValues(field string) (index.NumericDocValues, error)
- func (d *DocValuesLeafReader) GetNumericDocValues(field string) (index.NumericDocValues, error)
- func (d *DocValuesLeafReader) GetPointValues(field string) (types.PointValues, bool)
- func (d *DocValuesLeafReader) GetReaderCacheHelper() index.CacheHelper
- func (r DocValuesLeafReader) GetRefCount() int
- func (d *DocValuesLeafReader) GetSortedDocValues(field string) (index.SortedDocValues, error)
- func (d *DocValuesLeafReader) GetSortedNumericDocValues(field string) (index.SortedNumericDocValues, error)
- func (d *DocValuesLeafReader) GetSortedSetDocValues(field string) (index.SortedSetDocValues, error)
- func (r DocValuesLeafReader) GetTermVector(docID int, field string) (index.Terms, error)
- func (d *DocValuesLeafReader) GetTermVectors(docID int) (index.Fields, error)
- func (r DocValuesLeafReader) HasDeletions() bool
- func (r DocValuesLeafReader) IncRef() error
- func (r DocValuesLeafReader) Leaves() ([]index.LeafReaderContext, error)
- func (d *DocValuesLeafReader) MaxDoc() int
- func (r DocValuesLeafReader) NotifyReaderClosedListeners() error
- func (r DocValuesLeafReader) NumDeletedDocs() int
- func (d *DocValuesLeafReader) NumDocs() int
- func (r DocValuesLeafReader) RegisterParentReader(reader index.IndexReader)
- func (d *DocValuesLeafReader) Terms(field string) (index.Terms, error)
- func (r DocValuesLeafReader) TryIncRef() bool
- type DocValuesUpdatesNode
- type DocValuesWriter
- type DocsWithFieldSet
- type DocumentsWriter
- type DocumentsWriterDeleteQueue
- type DocumentsWriterFlushControl
- func (d *DocumentsWriterFlushControl) DoAfterFlush(dwpt *DocumentsWriterPerThread) error
- func (d *DocumentsWriterFlushControl) MarkForFullFlush() int64
- func (d *DocumentsWriterFlushControl) NextPendingFlush() *DocumentsWriterPerThread
- func (d *DocumentsWriterFlushControl) ObtainAndLock() *DocumentsWriterPerThread
- type DocumentsWriterFlushQueue
- type DocumentsWriterPerThread
- type DocumentsWriterPerThreadPool
- type DoubleComparableProvider
- type DoubleDocComparator
- type DoubleSorter
- type EmptyDocComparator
- type EmptyDocValuesProducer
- func (e *EmptyDocValuesProducer) CheckIntegrity() error
- func (e *EmptyDocValuesProducer) Close() error
- func (e *EmptyDocValuesProducer) GetBinary(ctx context.Context, field *document.FieldInfo) (index.BinaryDocValues, error)
- func (e *EmptyDocValuesProducer) GetMergeInstance() index.DocValuesProducer
- func (e *EmptyDocValuesProducer) GetNumeric(ctx context.Context, field *document.FieldInfo) (index.NumericDocValues, error)
- func (e *EmptyDocValuesProducer) GetSorted(ctx context.Context, fieldInfo *document.FieldInfo) (index.SortedDocValues, error)
- func (e *EmptyDocValuesProducer) GetSortedNumeric(ctx context.Context, field *document.FieldInfo) (index.SortedNumericDocValues, error)
- func (e *EmptyDocValuesProducer) GetSortedSet(ctx context.Context, field *document.FieldInfo) (index.SortedSetDocValues, error)
- type EmptyNumericDocValuesProvider
- type EmptySortedDocValuesProvider
- type Event
- type EventQueue
- type FieldData
- type FieldDataList
- type FieldDimensions
- type FieldInfosBuilder
- func (f *FieldInfosBuilder) Add(other *fieldInfos) error
- func (f *FieldInfosBuilder) AddFieldInfo(fi *document.FieldInfo) (*document.FieldInfo, error)
- func (f *FieldInfosBuilder) AddFieldInfoV(fi *document.FieldInfo, dvGen int64) (*document.FieldInfo, error)
- func (f *FieldInfosBuilder) Finish() index.FieldInfos
- func (f *FieldInfosBuilder) GetOrAdd(name string) (*document.FieldInfo, error)
- type FieldNumbers
- type FieldTermIterator
- type FilteredTermsEnum
- type FilteredTermsEnumBase
- func (f *FilteredTermsEnumBase) Attributes() *attribute.Source
- func (f *FilteredTermsEnumBase) DocFreq() (int, error)
- func (f *FilteredTermsEnumBase) Impacts(flags int) (index.ImpactsEnum, error)
- func (f *FilteredTermsEnumBase) Next(context.Context) ([]byte, error)
- func (f *FilteredTermsEnumBase) Ord() (int64, error)
- func (f *FilteredTermsEnumBase) Postings(reuse index.PostingsEnum, flags int) (index.PostingsEnum, error)
- func (f *FilteredTermsEnumBase) SeekCeil(ctx context.Context, text []byte) (index.SeekStatus, error)
- func (f *FilteredTermsEnumBase) SeekExact(ctx context.Context, text []byte) (bool, error)
- func (f *FilteredTermsEnumBase) SeekExactByOrd(ctx context.Context, ord int64) error
- func (f *FilteredTermsEnumBase) SeekExactExpert(ctx context.Context, term []byte, state index.TermState) error
- func (f *FilteredTermsEnumBase) Term() ([]byte, error)
- func (f *FilteredTermsEnumBase) TermState() (index.TermState, error)
- func (f *FilteredTermsEnumBase) TotalTermFreq() (int64, error)
- type FilteredTermsEnumDefaultConfig
- type FindSegmentsFile
- type FinishedSegments
- type FloatComparableProvider
- type FloatDocComparator
- type FloatSorter
- type FlushPolicy
- type FlushTicket
- type FlushedSegment
- type FreqProxFields
- type FreqProxPostingsArray
- func (f *FreqProxPostingsArray) BytesPerPosting() int
- func (f *FreqProxPostingsArray) NewInstance() ParallelPostingsArray
- func (f *FreqProxPostingsArray) SetLastDocCodes(termID, v int)
- func (f *FreqProxPostingsArray) SetLastDocIDs(termID, v int)
- func (f *FreqProxPostingsArray) SetLastOffsets(termID, v int)
- func (f *FreqProxPostingsArray) SetLastPositions(termID, v int)
- func (f *FreqProxPostingsArray) SetTermFreqs(termID, v int)
- type FreqProxPostingsEnum
- func (f *FreqProxPostingsEnum) Advance(ctx context.Context, target int) (int, error)
- func (f *FreqProxPostingsEnum) Cost() int64
- func (f *FreqProxPostingsEnum) DocID() int
- func (f *FreqProxPostingsEnum) EndOffset() (int, error)
- func (f *FreqProxPostingsEnum) Freq() (int, error)
- func (f *FreqProxPostingsEnum) GetPayload() ([]byte, error)
- func (f *FreqProxPostingsEnum) NextDoc(context.Context) (int, error)
- func (f *FreqProxPostingsEnum) NextPosition() (int, error)
- func (f *FreqProxPostingsEnum) SlowAdvance(ctx context.Context, target int) (int, error)
- func (f *FreqProxPostingsEnum) StartOffset() (int, error)
- type FreqProxTerms
- func (f *FreqProxTerms) GetDocCount() (int, error)
- func (f *FreqProxTerms) GetMax() ([]byte, error)
- func (f *FreqProxTerms) GetMin() ([]byte, error)
- func (f *FreqProxTerms) GetSumDocFreq() (int64, error)
- func (f *FreqProxTerms) GetSumTotalTermFreq() (int64, error)
- func (f *FreqProxTerms) HasFreqs() bool
- func (f *FreqProxTerms) HasOffsets() bool
- func (f *FreqProxTerms) HasPayloads() bool
- func (f *FreqProxTerms) HasPositions() bool
- func (f *FreqProxTerms) Intersect(compiled *automaton.CompiledAutomaton, startTerm []byte) (index.TermsEnum, error)
- func (f *FreqProxTerms) Iterator() (index.TermsEnum, error)
- func (f *FreqProxTerms) Size() (int, error)
- type FreqProxTermsEnum
- func (f *FreqProxTermsEnum) DocFreq() (int, error)
- func (f *FreqProxTermsEnum) Impacts(flags int) (index.ImpactsEnum, error)
- func (f *FreqProxTermsEnum) Next(context.Context) ([]byte, error)
- func (f *FreqProxTermsEnum) Ord() (int64, error)
- func (f *FreqProxTermsEnum) Postings(reuse index.PostingsEnum, flags int) (index.PostingsEnum, error)
- func (f *FreqProxTermsEnum) SeekCeil(ctx context.Context, text []byte) (index.SeekStatus, error)
- func (f *FreqProxTermsEnum) SeekExactByOrd(ctx context.Context, ord int64) error
- func (f *FreqProxTermsEnum) Term() ([]byte, error)
- func (f *FreqProxTermsEnum) TotalTermFreq() (int64, error)
- type FreqProxTermsWriter
- func (f *FreqProxTermsWriter) AddField(invertState *index.FieldInvertState, fieldInfo *document.FieldInfo) (TermsHashPerField, error)
- func (f *FreqProxTermsWriter) Flush(ctx context.Context, fieldsToFlush map[string]TermsHashPerField, ...) error
- func (f *FreqProxTermsWriter) SetTermBytePool(termBytePool *bytesref.BlockPool)
- type FreqProxTermsWriterPerField
- func (t FreqProxTermsWriterPerField) Add(termBytes []byte, docID int) error
- func (t FreqProxTermsWriterPerField) Add2nd(textStart, docID int) error
- func (f *FreqProxTermsWriterPerField) AddTerm(termID, docID int) error
- func (f *FreqProxTermsWriterPerField) CreatePostingsArray(size int) ParallelPostingsArray
- func (f *FreqProxTermsWriterPerField) Finish() error
- func (t FreqProxTermsWriterPerField) GetNextPerField() TermsHashPerField
- func (t FreqProxTermsWriterPerField) GetPostingsArray() ParallelPostingsArray
- func (f *FreqProxTermsWriterPerField) NewPostingsArray()
- func (f *FreqProxTermsWriterPerField) NewTerm(termID, docID int) error
- func (t FreqProxTermsWriterPerField) Reset() error
- func (t FreqProxTermsWriterPerField) SetPostingsArray(v ParallelPostingsArray)
- func (f *FreqProxTermsWriterPerField) Start(field document.IndexableField, first bool) bool
- type FreqProxTermsWriterPerFields
- type FrozenBufferedUpdates
- type IndexCommit
- type IndexCommits
- type IndexDeletionPolicy
- type IndexFileDeleter
- func (r *IndexFileDeleter) Checkpoint(segmentInfos *SegmentInfos, isCommit bool) error
- func (r *IndexFileDeleter) DecRef(files map[string]struct{}) error
- func (r *IndexFileDeleter) IncRef(segmentInfos *SegmentInfos, isCommit bool) error
- func (r *IndexFileDeleter) IncRefFiles(files map[string]struct{}) error
- type IndexReaderSPI
- type IndexWriter
- func (w *IndexWriter) AddDocument(ctx context.Context, doc *document.Document) (int64, error)
- func (w *IndexWriter) AddIndexesFromReaders(readers ...index.CodecReader) (int64, error)
- func (w *IndexWriter) Changed()
- func (w *IndexWriter) Close() error
- func (w *IndexWriter) Commit(ctx context.Context) error
- func (w *IndexWriter) GetConfig() *IndexWriterConfig
- func (w *IndexWriter) GetDirectory() store.Directory
- func (w *IndexWriter) GetReader(ctx context.Context, applyAllDeletes bool, writeAllDeletes bool) (index.DirectoryReader, error)
- func (w *IndexWriter) IncRefDeleter(segmentInfos *SegmentInfos) error
- func (w *IndexWriter) IsClosed() bool
- func (w *IndexWriter) MaybeMerge() error
- func (w *IndexWriter) Release(readersAndUpdates *ReadersAndUpdates) error
- func (w *IndexWriter) SoftUpdateDocument(ctx context.Context, term index.Term, doc *document.Document, ...) (int64, error)
- func (w *IndexWriter) UpdateDocument(ctx context.Context, term index.Term, doc *document.Document) (int64, error)
- type IndexWriterConfig
- func (r IndexWriterConfig) GetAnalyzer() analysis.Analyzer
- func (r IndexWriterConfig) GetCodec() index.Codec
- func (c *IndexWriterConfig) GetCommitOnClose() bool
- func (c *IndexWriterConfig) GetFlushPolicy() FlushPolicy
- func (c *IndexWriterConfig) GetIndexCommit() IndexCommit
- func (c *IndexWriterConfig) GetIndexCreatedVersionMajor() int
- func (r IndexWriterConfig) GetIndexDeletionPolicy() IndexDeletionPolicy
- func (r IndexWriterConfig) GetIndexSort() index.Sort
- func (r IndexWriterConfig) GetIndexSortFields() map[string]struct{}
- func (r IndexWriterConfig) GetIndexingChain() IndexingChain
- func (r IndexWriterConfig) GetLeafSorter() func(a, b index.LeafReader) int
- func (r IndexWriterConfig) GetMaxBufferedDocs() int
- func (r IndexWriterConfig) GetMaxFullFlushMergeWaitMillis() int64
- func (r IndexWriterConfig) GetMergePolicy() MergePolicy
- func (c *IndexWriterConfig) GetMergeScheduler() MergeScheduler
- func (r IndexWriterConfig) GetMergedSegmentWarmer() ReaderWarmer
- func (c *IndexWriterConfig) GetOpenMode() OpenMode
- func (r IndexWriterConfig) GetReaderPooling() bool
- func (r IndexWriterConfig) GetSimilarity() index.Similarity
- func (r IndexWriterConfig) GetSoftDeletesField() string
- func (r IndexWriterConfig) GetUseCompoundFile() bool
- func (r IndexWriterConfig) IsCheckPendingFlushOnUpdate() bool
- func (r IndexWriterConfig) SetCheckPendingFlushUpdate(checkPendingFlushOnUpdate bool) LiveIndexWriterConfig
- func (c *IndexWriterConfig) SetIndexSort(sort index.Sort) error
- func (r IndexWriterConfig) SetMaxBufferedDocs(maxBufferedDocs int) LiveIndexWriterConfig
- func (r IndexWriterConfig) SetMergePolicy(mergePolicy MergePolicy) LiveIndexWriterConfig
- func (r IndexWriterConfig) SetMergedSegmentWarmer(mergeSegmentWarmer ReaderWarmer) LiveIndexWriterConfig
- func (r IndexWriterConfig) SetUseCompoundFile(useCompoundFile bool) LiveIndexWriterConfig
- type IndexingChain
- type IntComparableProvider
- type IntDocComparator
- type IntSorter
- type KV
- type KeepOnlyLastCommitDeletionPolicy
- type LeafAndDocID
- type LeafReaderBaseInner
- type LeafReaderContextImpl
- func (l *LeafReaderContextImpl) Children() []index.IndexReaderContext
- func (l *LeafReaderContextImpl) DocBase() int
- func (l *LeafReaderContextImpl) Identity() string
- func (l *LeafReaderContextImpl) LeafReader() index.LeafReader
- func (l *LeafReaderContextImpl) Leaves() ([]index.LeafReaderContext, error)
- func (l *LeafReaderContextImpl) Ord() int
- func (l *LeafReaderContextImpl) Reader() index.IndexReader
- type LiveIndexWriterConfig
- type Locker
- type LongComparableProvider
- type LongDocComparator
- type LongSorter
- type MergeContext
- type MergePolicy
- type MergePolicyBase
- func (m *MergePolicyBase) FindFullFlushMerges(mergeTrigger MergeTrigger, segmentInfos *SegmentInfos, ...) (*MergeSpecification, error)
- func (m *MergePolicyBase) KeepFullyDeletedSegment(func() index.CodecReader) bool
- func (m *MergePolicyBase) UseCompoundFile(infos *SegmentInfos, mergedInfo index.SegmentCommitInfo, ...) (bool, error)
- type MergePolicySPI
- type MergeReader
- type MergeScheduler
- type MergeSource
- type MergeSpecification
- type MergeState
- type MergeStateDocMap
- type MergeTrigger
- type Merges
- type MultiLevelSkipListReader
- type MultiLevelSkipListReaderContext
- func (m *MultiLevelSkipListReaderContext) Close() error
- func (m *MultiLevelSkipListReaderContext) GetDoc() int
- func (m *MultiLevelSkipListReaderContext) GetSkipDoc(idx int) int
- func (m *MultiLevelSkipListReaderContext) Init(ctx context.Context, skipPointer int64, df int, ...) error
- func (m *MultiLevelSkipListReaderContext) MaxNumberOfSkipLevels() int
- func (m *MultiLevelSkipListReaderContext) SkipToWithSPI(ctx context.Context, target int, spi MultiLevelSkipListReaderSPI) (int, error)
- type MultiLevelSkipListReaderSPI
- type MultiLevelSkipListWriter
- type MultiLevelSkipListWriterContext
- type MultiLevelSkipListWriterSPI
- type NoMergePolicy
- func (n *NoMergePolicy) FindForcedDeletesMerges(segmentInfos *SegmentInfos, mergeContext MergeContext) (*MergeSpecification, error)
- func (n *NoMergePolicy) FindForcedMerges(segmentInfos *SegmentInfos, maxSegmentCount int, ...) (*MergeSpecification, error)
- func (n *NoMergePolicy) FindFullFlushMerges(mergeTrigger MergeTrigger, segmentInfos *SegmentInfos, ...) (*MergeSpecification, error)
- func (n *NoMergePolicy) FindMerges(mergeTrigger MergeTrigger, segmentInfos *SegmentInfos, ...) (*MergeSpecification, error)
- func (n *NoMergePolicy) GetNoCFSRatio() float64
- func (n *NoMergePolicy) Size(info index.SegmentCommitInfo, mergeContext MergeContext) (int64, error)
- func (n *NoMergePolicy) UseCompoundFile(infos *SegmentInfos, newSegment index.SegmentCommitInfo, ...) (bool, error)
- type NoMergeScheduler
- type Node
- type NodeApply
- type NormValuesWriter
- type NormsConsumer
- type NormsConsumerDefault
- type NumericDVs
- type NumericDocValuesDefault
- func (n *NumericDocValuesDefault) Advance(ctx context.Context, target int) (int, error)
- func (n *NumericDocValuesDefault) AdvanceExact(target int) (bool, error)
- func (n *NumericDocValuesDefault) Cost() int64
- func (n *NumericDocValuesDefault) DocID() int
- func (n *NumericDocValuesDefault) LongValue() (int64, error)
- func (n *NumericDocValuesDefault) NextDoc(ctx context.Context) (int, error)
- func (n *NumericDocValuesDefault) SlowAdvance(ctx context.Context, target int) (int, error)
- type NumericDocValuesProvider
- type NumericDocValuesSub
- type NumericDocValuesWriter
- type OneMerge
- type OneMergeProgress
- type OpenMode
- type OrdTermState
- type OrdinalMap
- type PagedBytes
- func (r *PagedBytes) CloneWithoutBlocks() *PagedBytes
- func (r *PagedBytes) CopyV1(in store.IndexInput, byteCount int) error
- func (r *PagedBytes) CopyV2(bytes []byte, out *bytes.Buffer) error
- func (r *PagedBytes) Freeze(trim bool) (*PagedBytesReader, error)
- func (r *PagedBytes) GetDataInput() *PagedBytesDataInput
- func (r *PagedBytes) GetDataOutput() *PagedBytesDataOutput
- func (r *PagedBytes) GetPointer() int64
- type PagedBytesDataInput
- type PagedBytesDataOutput
- type PagedBytesReader
- type ParallelPostingsArray
- type PauseReason
- type PendingDeletes
- type PendingSoftDeletes
- func (p *PendingSoftDeletes) Delete(docID int) (bool, error)
- func (p *PendingSoftDeletes) DropChanges()
- func (p PendingSoftDeletes) GetDelCount() int
- func (p *PendingSoftDeletes) GetHardLiveDocs() util.Bits
- func (p PendingSoftDeletes) GetLiveDocs() util.Bits
- func (p PendingSoftDeletes) GetMutableBits() *bitset.BitSet
- func (p *PendingSoftDeletes) IsFullyDeleted(ctx context.Context, readerIOSupplier func() index.CodecReader) (bool, error)
- func (p *PendingSoftDeletes) MustInitOnDelete() bool
- func (p PendingSoftDeletes) NeedsRefresh(reader index.CodecReader) bool
- func (p PendingSoftDeletes) NumDocs() (int, error)
- func (p *PendingSoftDeletes) NumPendingDeletes() int
- func (p *PendingSoftDeletes) OnDocValuesUpdate(info *document.FieldInfo, iterator DocValuesFieldUpdatesIterator)
- func (p *PendingSoftDeletes) OnNewReader(reader index.CodecReader, info index.SegmentCommitInfo) error
- func (p *PendingSoftDeletes) WriteLiveDocs(ctx context.Context, dir store.Directory) (bool, error)
- type PerField
- type PointValuesWriter
- type PositionData
- type PostingsBytesStartArray
- type PrefixCodedTerms
- type ReaderCommit
- func (r *ReaderCommit) CompareTo(commit index.IndexCommit) int
- func (r *ReaderCommit) Delete() error
- func (r *ReaderCommit) GetDirectory() store.Directory
- func (r *ReaderCommit) GetFileNames() (map[string]struct{}, error)
- func (r *ReaderCommit) GetGeneration() int64
- func (r *ReaderCommit) GetReader() index.DirectoryReader
- func (r *ReaderCommit) GetSegmentCount() int
- func (r *ReaderCommit) GetSegmentsFileName() string
- func (r *ReaderCommit) GetUserData() (map[string]string, error)
- func (r *ReaderCommit) IsDeleted() bool
- type ReaderPool
- type ReaderSorter
- type ReaderWarmer
- type ReadersAndUpdates
- func (r *ReadersAndUpdates) AddDVUpdate(update DocValuesFieldUpdates) error
- func (r *ReadersAndUpdates) DecRef()
- func (r *ReadersAndUpdates) GetDelCount() int
- func (r *ReadersAndUpdates) GetNumDVUpdates() int
- func (r *ReadersAndUpdates) GetReader(ctx context.Context, ioContext *store.IOContext) (*SegmentReader, error)
- func (r *ReadersAndUpdates) IncRef()
- func (r *ReadersAndUpdates) IsFullyDeleted() (bool, error)
- func (r *ReadersAndUpdates) RefCount() int64
- func (r *ReadersAndUpdates) Release(sr *SegmentReader) error
- type RefCount
- type SegmentCoreReaders
- type SegmentDocValues
- type SegmentDocValuesProducer
- func (s *SegmentDocValuesProducer) CheckIntegrity() error
- func (s *SegmentDocValuesProducer) Close() error
- func (s *SegmentDocValuesProducer) GetBinary(ctx context.Context, field *document.FieldInfo) (index.BinaryDocValues, error)
- func (s *SegmentDocValuesProducer) GetMergeInstance() index.DocValuesProducer
- func (s *SegmentDocValuesProducer) GetNumeric(ctx context.Context, field *document.FieldInfo) (index.NumericDocValues, error)
- func (s *SegmentDocValuesProducer) GetSorted(ctx context.Context, fieldInfo *document.FieldInfo) (index.SortedDocValues, error)
- func (s *SegmentDocValuesProducer) GetSortedNumeric(ctx context.Context, field *document.FieldInfo) (index.SortedNumericDocValues, error)
- func (s *SegmentDocValuesProducer) GetSortedSet(ctx context.Context, field *document.FieldInfo) (index.SortedSetDocValues, error)
- type SegmentInfo
- func (s *SegmentInfo) AddFile(file string) error
- func (s *SegmentInfo) Dir() store.Directory
- func (s *SegmentInfo) Files() map[string]struct{}
- func (s *SegmentInfo) FilesNum() int
- func (s *SegmentInfo) GetAttributes() map[string]string
- func (s *SegmentInfo) GetCodec() index.Codec
- func (s *SegmentInfo) GetDiagnostics() map[string]string
- func (s *SegmentInfo) GetID() []byte
- func (s *SegmentInfo) GetIndexSort() index.Sort
- func (s *SegmentInfo) GetMinVersion() *version.Version
- func (s *SegmentInfo) GetUseCompoundFile() bool
- func (s *SegmentInfo) GetVersion() *version.Version
- func (s *SegmentInfo) MaxDoc() (int, error)
- func (s *SegmentInfo) Name() string
- func (s *SegmentInfo) NamedForThisSegment(file string) string
- func (s *SegmentInfo) PutAttribute(key, value string) string
- func (s *SegmentInfo) SetCodec(codec index.Codec)
- func (s *SegmentInfo) SetDiagnostics(diagnostics map[string]string)
- func (s *SegmentInfo) SetFiles(files map[string]struct{})
- func (s *SegmentInfo) SetMaxDoc(maxDoc int) error
- func (s *SegmentInfo) SetUseCompoundFile(isCompoundFile bool)
- type SegmentInfos
- func NewSegmentInfos(indexCreatedVersionMajor int) *SegmentInfos
- func ReadCommit(ctx context.Context, directory store.Directory, segmentFileName string) (*SegmentInfos, error)
- func ReadCommitFromChecksumIndexInput(ctx context.Context, directory store.Directory, input store.ChecksumIndexInput, ...) (*SegmentInfos, error)
- func ReadLatestCommit(ctx context.Context, directory store.Directory) (*SegmentInfos, error)
- func (s *SegmentInfos) Add(si index.SegmentCommitInfo) error
- func (s *SegmentInfos) AddAll(sis []index.SegmentCommitInfo) error
- func (s *SegmentInfos) AsList() []index.SegmentCommitInfo
- func (s *SegmentInfos) Changed()
- func (s *SegmentInfos) Clone() *SegmentInfos
- func (s *SegmentInfos) Commit(ctx context.Context, dir store.Directory) error
- func (s *SegmentInfos) CreateBackupSegmentInfos() []index.SegmentCommitInfo
- func (s *SegmentInfos) Files(includeSegmentsFile bool) (map[string]struct{}, error)
- func (s *SegmentInfos) GetGeneration() int64
- func (s *SegmentInfos) GetLastGeneration() int64
- func (s *SegmentInfos) GetSegmentsFileName() string
- func (s *SegmentInfos) GetUserData() map[string]string
- func (s *SegmentInfos) GetVersion() int64
- func (s *SegmentInfos) Info(j int) index.SegmentCommitInfo
- func (s *SegmentInfos) Remove(index int)
- func (s *SegmentInfos) Replace(other *SegmentInfos) error
- func (s *SegmentInfos) RollbackCommit(directory store.Directory) error
- func (s *SegmentInfos) SetNextWriteGeneration(generation int64)
- func (s *SegmentInfos) SetUserData(data map[string]string, b bool)
- func (s *SegmentInfos) Size() int
- func (s *SegmentInfos) TotalMaxDoc() int64
- func (s *SegmentInfos) UpdateGeneration(other *SegmentInfos)
- func (s *SegmentInfos) UpdateGenerationVersionAndCounter(other *SegmentInfos)
- type SegmentMap
- type SegmentMerger
- type SegmentReader
- func (r SegmentReader) Close() error
- func (r SegmentReader) DecRef() error
- func (s *SegmentReader) Directory() store.Directory
- func (s *SegmentReader) DoClose() error
- func (r SegmentReader) Document(ctx context.Context, docID int) (*document.Document, error)
- func (r SegmentReader) DocumentWithFields(ctx context.Context, docID int, fieldsToLoad []string) (*document.Document, error)
- func (s *SegmentReader) GetDocValuesReader() index.DocValuesProducer
- func (s *SegmentReader) GetFieldInfos() index.FieldInfos
- func (s *SegmentReader) GetFieldsReader() index.StoredFieldsReader
- func (s *SegmentReader) GetHardLiveDocs() util.Bits
- func (s *SegmentReader) GetLiveDocs() util.Bits
- func (s *SegmentReader) GetMetaData() index.LeafMetaData
- func (s *SegmentReader) GetNormsReader() index.NormsProducer
- func (s *SegmentReader) GetOriginalSegmentInfo() index.SegmentCommitInfo
- func (s *SegmentReader) GetPointsReader() index.PointsReader
- func (s *SegmentReader) GetPostingsReader() index.FieldsProducer
- func (s *SegmentReader) GetReaderCacheHelper() index.CacheHelper
- func (r SegmentReader) GetRefCount() int
- func (r SegmentReader) GetTermVector(docID int, field string) (index.Terms, error)
- func (s *SegmentReader) GetTermVectorsReader() index.TermVectorsReader
- func (r SegmentReader) HasDeletions() bool
- func (r SegmentReader) IncRef() error
- func (r SegmentReader) Leaves() ([]index.LeafReaderContext, error)
- func (s *SegmentReader) MaxDoc() int
- func (s *SegmentReader) New(si index.SegmentCommitInfo, liveDocs, hardLiveDocs util.Bits, numDocs int, ...) (*SegmentReader, error)
- func (s *SegmentReader) NewReadersAndUpdates(indexCreatedVersionMajor int, pendingDeletes PendingDeletes) (*ReadersAndUpdates, error)
- func (r SegmentReader) NotifyReaderClosedListeners() error
- func (r SegmentReader) NumDeletedDocs() int
- func (s *SegmentReader) NumDocs() int
- func (r SegmentReader) RegisterParentReader(reader index.IndexReader)
- func (r SegmentReader) TryIncRef() bool
- type SegmentState
- type SimScorerSPI
- type SingleTermsEnum
- type SingleValueDocValuesFieldUpdates
- type SkipBuffer
- func (s *SkipBuffer) Clone() store.CloneReader
- func (s *SkipBuffer) Close() error
- func (s *SkipBuffer) GetFilePointer() int64
- func (s *SkipBuffer) Length() int64
- func (s *SkipBuffer) Read(b []byte) (int, error)
- func (s *SkipBuffer) ReadByte() (byte, error)
- func (s *SkipBuffer) Seek(pos int64, whence int) (int64, error)
- func (s *SkipBuffer) Slice(sliceDescription string, offset, length int64) (store.IndexInput, error)
- type SlowImpactsEnum
- func (s *SlowImpactsEnum) Advance(ctx context.Context, target int) (int, error)
- func (s *SlowImpactsEnum) AdvanceShallow(ctx context.Context, target int) error
- func (s *SlowImpactsEnum) Cost() int64
- func (s *SlowImpactsEnum) DocID() int
- func (s *SlowImpactsEnum) EndOffset() (int, error)
- func (s *SlowImpactsEnum) Freq() (int, error)
- func (s *SlowImpactsEnum) GetImpacts() (index.Impacts, error)
- func (s *SlowImpactsEnum) GetPayload() ([]byte, error)
- func (s *SlowImpactsEnum) NextDoc(ctx context.Context) (int, error)
- func (s *SlowImpactsEnum) NextPosition() (int, error)
- func (s *SlowImpactsEnum) SlowAdvance(ctx context.Context, target int) (int, error)
- func (s *SlowImpactsEnum) StartOffset() (int, error)
- type SortedDocValuesDefaultConfig
- type SortedDocValuesProvider
- type SortedDocValuesTermsEnum
- func (s *SortedDocValuesTermsEnum) Attributes() *attribute.Source
- func (s *SortedDocValuesTermsEnum) DocFreq() (int, error)
- func (s *SortedDocValuesTermsEnum) Impacts(flags int) (index.ImpactsEnum, error)
- func (s *SortedDocValuesTermsEnum) Next(context.Context) ([]byte, error)
- func (s *SortedDocValuesTermsEnum) Ord() (int64, error)
- func (s *SortedDocValuesTermsEnum) Postings(reuse index.PostingsEnum, flags int) (index.PostingsEnum, error)
- func (s *SortedDocValuesTermsEnum) SeekCeil(ctx context.Context, text []byte) (index.SeekStatus, error)
- func (s *SortedDocValuesTermsEnum) SeekExact(ctx context.Context, text []byte) (bool, error)
- func (s *SortedDocValuesTermsEnum) SeekExactByOrd(ctx context.Context, ord int64) error
- func (s *SortedDocValuesTermsEnum) SeekExactExpert(ctx context.Context, term []byte, state index.TermState) error
- func (s *SortedDocValuesTermsEnum) Term() ([]byte, error)
- func (s *SortedDocValuesTermsEnum) TermState() (index.TermState, error)
- func (s *SortedDocValuesTermsEnum) TotalTermFreq() (int64, error)
- type SortedDocValuesWriter
- type SortedNumericDocValuesWriter
- type SortedSetDocValuesWriter
- type SortedSetSelector
- type SortedSetSelectorType
- type SortedSetSortField
- type SortedSetSortFieldProvider
- type SorterDefault
- type SortingNumericDocValues
- func (s *SortingNumericDocValues) Advance(ctx context.Context, target int) (int, error)
- func (s *SortingNumericDocValues) AdvanceExact(target int) (bool, error)
- func (s *SortingNumericDocValues) Cost() int64
- func (s *SortingNumericDocValues) DocID() int
- func (s *SortingNumericDocValues) LongValue() (int64, error)
- func (s *SortingNumericDocValues) NextDoc(ctx context.Context) (int, error)
- func (s *SortingNumericDocValues) SlowAdvance(ctx context.Context, target int) (int, error)
- type StandardDirectoryReader
- func (d StandardDirectoryReader) Directory() store.Directory
- func (s *StandardDirectoryReader) GetIndexCommit() (index.IndexCommit, error)
- func (s *StandardDirectoryReader) GetSegmentInfos() *SegmentInfos
- func (s *StandardDirectoryReader) GetVersion() int64
- func (s *StandardDirectoryReader) IsCurrent(ctx context.Context) (bool, error)
- type StoredFieldsConsumer
- func (s *StoredFieldsConsumer) Finish(ctx context.Context, maxDoc int) error
- func (s *StoredFieldsConsumer) FinishDocument() error
- func (s *StoredFieldsConsumer) Flush(ctx context.Context, state *index.SegmentWriteState, sortMap index.DocMap) error
- func (s *StoredFieldsConsumer) StartDocument(ctx context.Context, docID int) error
- type StringDocComparator
- type StringSorter
- type TermData
- type TermDataList
- type TermDocsIterator
- type TermNode
- type TermStates
- func (r *TermStates) AccumulateStatistics(docFreq int, totalTermFreq int64)
- func (r *TermStates) DocFreq() (int, error)
- func (r *TermStates) Get(ctx index.LeafReaderContext) (index.TermState, error)
- func (r *TermStates) Register(state index.TermState, ord, docFreq int, totalTermFreq int64)
- func (r *TermStates) Register2(state index.TermState, ord int)
- func (r *TermStates) TotalTermFreq() (int64, error)
- func (r *TermStates) WasBuiltFor(context index.IndexReaderContext) bool
- type TermVectorsConsumer
- func (t *TermVectorsConsumer) AddField(invertState *index.FieldInvertState, fieldInfo *document.FieldInfo) (TermsHashPerField, error)
- func (t *TermVectorsConsumer) FinishDocument(ctx context.Context, docID int) error
- func (t *TermVectorsConsumer) Flush(ctx context.Context, fieldsToFlush map[string]TermsHashPerField, ...) error
- func (t *TermVectorsConsumer) SetTermBytePool(termBytePool *bytesref.BlockPool)
- type TermVectorsConsumerPerField
- func (t TermVectorsConsumerPerField) Add(termBytes []byte, docID int) error
- func (t TermVectorsConsumerPerField) Add2nd(textStart, docID int) error
- func (t *TermVectorsConsumerPerField) AddTerm(termID, docID int) error
- func (t *TermVectorsConsumerPerField) CreatePostingsArray(size int) ParallelPostingsArray
- func (t *TermVectorsConsumerPerField) Finish() error
- func (t *TermVectorsConsumerPerField) FinishDocument() error
- func (t TermVectorsConsumerPerField) GetNextPerField() TermsHashPerField
- func (t TermVectorsConsumerPerField) GetPostingsArray() ParallelPostingsArray
- func (t *TermVectorsConsumerPerField) NewPostingsArray()
- func (t *TermVectorsConsumerPerField) NewTerm(termID, docID int) error
- func (t *TermVectorsConsumerPerField) Reset() error
- func (t TermVectorsConsumerPerField) SetPostingsArray(v ParallelPostingsArray)
- func (t *TermVectorsConsumerPerField) Start(field document.IndexableField, first bool) bool
- type TermVectorsConsumerPerFields
- type TermVectorsPostingsArray
- func (t *TermVectorsPostingsArray) BytesPerPosting() int
- func (t *TermVectorsPostingsArray) Grow()
- func (t *TermVectorsPostingsArray) NewInstance() ParallelPostingsArray
- func (t *TermVectorsPostingsArray) SetFreqs(termID, v int)
- func (t *TermVectorsPostingsArray) SetLastOffsets(termID, v int)
- func (t *TermVectorsPostingsArray) SetLastPositions(termID, v int)
- type TermsEnumIndex
- type TermsHash
- type TermsHashPerField
- type TermsSPI
Constants ¶
const (
    HAS_VALUE_MASK    = 1
    HAS_NO_VALUE_MASK = 0
    SHIFT             = 1
)
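These masks appear to pack a document ID together with a has-value bit into one integer, as in Lucene's DocValuesFieldUpdates. A minimal, self-contained sketch of that encoding; the packing scheme itself is an assumption, not taken from this package:

package main

import "fmt"

// Mirrors the constants above.
const (
    HAS_VALUE_MASK    = 1
    HAS_NO_VALUE_MASK = 0
    SHIFT             = 1
)

func main() {
    // Assumed encoding: the doc ID is shifted left by SHIFT and the
    // low bit records whether the doc carries a value.
    doc := 42
    packed := doc<<SHIFT | HAS_VALUE_MASK

    // Decoding reverses the shift and tests the low bit.
    fmt.Println(packed>>SHIFT, packed&HAS_VALUE_MASK == HAS_VALUE_MASK) // 42 true
}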
const (
    // ACCEPT_STATUS_YES Accept the term and position the enum at the next term.
    ACCEPT_STATUS_YES = AcceptStatus(iota)
    // ACCEPT_STATUS_YES_AND_SEEK Accept the term and advance (nextSeekTerm(BytesRef)) to the next term.
    ACCEPT_STATUS_YES_AND_SEEK
    // ACCEPT_STATUS_NO Reject the term and position the enum at the next term.
    ACCEPT_STATUS_NO
    // ACCEPT_STATUS_NO_AND_SEEK Reject the term and advance (nextSeekTerm(BytesRef)) to the next term.
    ACCEPT_STATUS_NO_AND_SEEK
    // ACCEPT_STATUS_END Reject the term and stop enumerating.
    ACCEPT_STATUS_END
)
const (
    // SEGMENTS Name of the index segments file.
    SEGMENTS = "segments"
    // PENDING_SEGMENTS Name of the pending index segments file.
    PENDING_SEGMENTS = "pending_segments"
    // OLD_SEGMENTS_GEN Name of the generation reference file.
    OLD_SEGMENTS_GEN = "segments.gen"
)
const (
    // MAX_DOCS Hard limit on the maximum number of documents that may be added to the index.
    // Adding more than this will cause an IllegalArgumentException.
    MAX_DOCS = math.MaxInt32 - 128

    // MAX_POSITION Maximum value of a token position in an indexed field.
    MAX_POSITION = math.MaxInt32 - 128

    UNBOUNDED_MAX_MERGE_SEGMENTS = -1

    // WRITE_LOCK_NAME Name of the write lock in the index.
    WRITE_LOCK_NAME = "write.lock"

    // SOURCE Key for the source of a segment in the diagnostics.
    SOURCE = "source"

    // SOURCE_MERGE Source of a segment that results from a merge of other segments.
    SOURCE_MERGE = "merge"

    // SOURCE_FLUSH Source of a segment that results from a flush.
    SOURCE_FLUSH = "Flush"

    // SOURCE_ADDINDEXES_READERS Source of a segment that results from a call to addIndexes(CodecReader...).
    SOURCE_ADDINDEXES_READERS = "addIndexes(CodecReader...)"

    BYTE_BLOCK_SHIFT = 15
    BYTE_BLOCK_SIZE  = 1 << BYTE_BLOCK_SHIFT

    // MAX_TERM_LENGTH Absolute hard maximum length for a term, in bytes once encoded as UTF-8.
    // If a term arrives from the analyzer longer than this length, an IllegalArgumentException
    // is thrown and a message is printed to infoStream, if set (see IndexWriterConfig.setInfoStream(InfoStream)).
    MAX_TERM_LENGTH = BYTE_BLOCK_SIZE - 2

    MAX_STORED_STRING_LENGTH = math.MaxInt
)
const (
    // DISABLE_AUTO_FLUSH Denotes that a flush trigger is disabled.
    DISABLE_AUTO_FLUSH = -1

    // DEFAULT_MAX_BUFFERED_DELETE_TERMS Disabled by default (because IndexWriter flushes by RAM usage by default).
    DEFAULT_MAX_BUFFERED_DELETE_TERMS = DISABLE_AUTO_FLUSH

    // DEFAULT_MAX_BUFFERED_DOCS Disabled by default (because IndexWriter flushes by RAM usage by default).
    DEFAULT_MAX_BUFFERED_DOCS = DISABLE_AUTO_FLUSH

    // DEFAULT_RAM_BUFFER_SIZE_MB Default value is 16 MB (flush when buffered docs consume approximately 16 MB of RAM).
    DEFAULT_RAM_BUFFER_SIZE_MB = 16.0

    // DEFAULT_READER_POOLING Default setting (true) for setReaderPooling.
    // This default was changed to true with concurrent deletes/updates (LUCENE-7868),
    // because we would otherwise need to open and close segment readers more frequently.
    // False is still supported, but will have worse performance since readers will
    // be forced to aggressively move all state to disk.
    DEFAULT_READER_POOLING = true

    // DEFAULT_RAM_PER_THREAD_HARD_LIMIT_MB Default value is 1945. Change using setRAMPerThreadHardLimitMB(int).
    DEFAULT_RAM_PER_THREAD_HARD_LIMIT_MB = 1945

    // DEFAULT_USE_COMPOUND_FILE_SYSTEM Default value for the compound file system for newly
    // written segments (set to true). For batch indexing with very large RAM buffers use false.
    DEFAULT_USE_COMPOUND_FILE_SYSTEM = true

    // DEFAULT_COMMIT_ON_CLOSE Default value for whether calls to IndexWriter.close() include a commit.
    DEFAULT_COMMIT_ON_CLOSE = true

    // DEFAULT_MAX_FULL_FLUSH_MERGE_WAIT_MILLIS Default value for the time to wait for merges
    // on commit or getReader (when using a MergePolicy that implements MergePolicy.findFullFlushMerges).
    DEFAULT_MAX_FULL_FLUSH_MERGE_WAIT_MILLIS = 0
)
const (
    // CREATE Creates a new index or overwrites an existing one.
    CREATE = OpenMode(iota)
    // APPEND Opens an existing index.
    APPEND
    // CREATE_OR_APPEND Creates a new index if one does not exist,
    // otherwise it opens the index and documents will be appended.
    CREATE_OR_APPEND
)
const (
    DEFAULT_NO_CFS_RATIO         = 1.0
    DEFAULT_MAX_CFS_SEGMENT_SIZE = math.MaxInt64
)
const (
    STOPPED = PauseReason(iota) // Stopped (typically because the throughput rate was set to 0).
    PAUSED                      // Temporarily paused because the throughput rate was exceeded.
    OTHER                       // Other reason.
)
const (
    // MERGE_TRIGGER_SEGMENT_FLUSH Merge was triggered by a segment flush.
    MERGE_TRIGGER_SEGMENT_FLUSH = MergeTrigger(iota)
    // MERGE_TRIGGER_FULL_FLUSH Merge was triggered by a full flush. Full flushes can be caused
    // by a commit, an NRT reader reopen, or a close call on the index writer.
    MERGE_TRIGGER_FULL_FLUSH
    // MERGE_TRIGGER_EXPLICIT Merge has been triggered explicitly by the user.
    MERGE_TRIGGER_EXPLICIT
    // MERGE_TRIGGER_MERGE_FINISHED Merge was triggered by a successfully finished merge.
    MERGE_TRIGGER_MERGE_FINISHED
    // MERGE_TRIGGER_CLOSING Merge was triggered by a closing IndexWriter.
    MERGE_TRIGGER_CLOSING
    // MERGE_TRIGGER_COMMIT Merge was triggered on commit.
    MERGE_TRIGGER_COMMIT
    // MERGE_TRIGGER_GET_READER Merge was triggered on opening NRT readers.
    MERGE_TRIGGER_GET_READER
)
const (
    POSTINGS_ENUM_NONE      = 0
    POSTINGS_ENUM_FREQS     = 1 << 3
    POSTINGS_ENUM_POSITIONS = POSTINGS_ENUM_FREQS | 1<<4
    POSTINGS_ENUM_OFFSETS   = POSTINGS_ENUM_POSITIONS | 1<<5
    POSTINGS_ENUM_PAYLOADS  = POSTINGS_ENUM_POSITIONS | 1<<6
    POSTINGS_ENUM_ALL       = POSTINGS_ENUM_OFFSETS | POSTINGS_ENUM_PAYLOADS
)
const (
    // SegmentInfoNO Used by some member fields to mean not present
    // (e.g., no norms; no deletes).
    SegmentInfoNO = -1
    // SegmentInfoYES Used by some member fields to mean present
    // (e.g., have norms; have deletes).
    SegmentInfoYES = 1
)
const (
    // VERSION_70 The version that added information about the Lucene version
    // at the time the index was created.
    VERSION_70 = 7
    // VERSION_72 The version that updated the segment name counter to be long instead of int.
    VERSION_72 = 8
    // VERSION_74 The version that recorded softDelCount.
    VERSION_74 = 9
    // VERSION_86 The version that recorded SegmentCommitInfo IDs.
    VERSION_86 = 10

    VERSION_CURRENT = VERSION_86
)
const (
    // MIN Selects the minimum value in the set.
    MIN = SortedSetSelectorType(iota)
    // MAX Selects the maximum value in the set.
    MAX
    // MIDDLE_MIN Selects the middle value in the set. If the set has an even
    // number of values, the lower of the middle two is chosen.
    MIDDLE_MIN
    // MIDDLE_MAX Selects the middle value in the set. If the set has an even
    // number of values, the higher of the middle two is chosen.
    MIDDLE_MAX
)
const (
    BINARY_SORT_THRESHOLD    = 20
    INSERTION_SORT_THRESHOLD = 16
)
const (
    STRING_FIRST = "SortField.STRING_FIRST"
    STRING_LAST  = "SortField.STRING_LAST"
)
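These sentinels mirror Lucene's SortField.STRING_FIRST / STRING_LAST missing-value markers. A hedged sketch of setting one on a sort field: NewSortField's second argument and the index.STRING constant are assumptions inferred from the FIELD_SCORE / FIELD_DOC variables below, and it is assumed the returned value exposes BaseSortField's SetMissingValue:

// Sort by a string field; documents missing the field sort last.
sf := NewSortField("title", index.STRING) // index.STRING is a hypothetical sort type constant
if err := sf.SetMissingValue(STRING_LAST); err != nil {
    // The string sentinels are only valid for string sorts.
    panic(err)
}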
const BYTES_PER_POSTING = 3 * 4
const HASH_INIT_SIZE = 4
const (
ID_LENGTH = 16
)
const (
INFO_VERBOSE = false
)
const (
NO_MORE_ORDS = -1
)
const (
ProviderName = "SortField"
)
const (
	// VERBOSE_REF_COUNTS Change to true to see details of reference counts when infoStream is enabled.
	VERBOSE_REF_COUNTS = false
)
Variables ¶
var (
	FIELD_SCORE = NewSortField("", index.SCORE)
	FIELD_DOC   = NewSortField("", index.DOC)
)
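A short sketch of building sort fields with the constructors documented below; the reversed variant is purely illustrative:

func ExampleSortField() {
	// FIELD_DOC sorts by index order; NewSortFieldV1 builds the same sort reversed.
	reversedDoc := NewSortFieldV1("", index.DOC, true)
	fmt.Println(reversedDoc.GetReverse()) // true
}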
var (
CODEC_FILE_PATTERN = regexp.MustCompilePOSIX("_[a-z0-9]+(_.*)?\\..*")
)
CODEC_FILE_PATTERN All files created by codecs must match this pattern (checked in SegmentInfo).
var (
EMPTY_TERMSTATE index.TermState
)
var EmptyTermsEnum = &emptyTermsEnum{}
var (
ErrUnsupportedOperation = errors.New("unsupported operation exception")
)
Functions ¶
func AsBinaryDocValues ¶
func AsBinaryDocValues(iterator DocValuesFieldUpdatesIterator) index.BinaryDocValues
func AsNumericDocValues ¶
func AsNumericDocValues(iterator DocValuesFieldUpdatesIterator) index.NumericDocValues
func CreateCompoundFile ¶
func CreateCompoundFile(ctx context.Context, directory *store.TrackingDirectoryWrapper, info index.SegmentInfo, ioContext *store.IOContext, deleteFiles func(files map[string]struct{})) error
func DirectoryReaderOpen ¶
func DirectoryReaderOpen(ctx context.Context, writer *IndexWriter) (index.DirectoryReader, error)
func DirectoryReaderOpenV1 ¶
func DirectoryReaderOpenV1(ctx context.Context, writer *IndexWriter, applyAllDeletes, writeAllDeletes bool) (index.DirectoryReader, error)
func FeatureRequested ¶
FeatureRequested Returns true if the given feature is requested in the flags, false otherwise.
func FileNameFromGeneration ¶
func GenerationFromSegmentsFileName ¶
GenerationFromSegmentsFileName Parse the generation off the segments file name and return it.
func GetActualMaxDocs ¶
func GetActualMaxDocs() int
func GetLastCommitGeneration ¶
GetLastCommitGeneration Get the generation of the most recent commit to the list of index files (N in the segments_N file). files: array of file names to check
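A hedged sketch of the segments-file helpers (the directory listing is hypothetical; expected outputs follow from the documented behavior):

func ExampleCommitGeneration() {
	files := []string{"_0.cfs", "segments_2", "segments_3"}

	gen, err := GetLastCommitGeneration(files)
	if err != nil {
		panic(err)
	}
	fmt.Println(gen) // 3: segments_3 is the most recent commit

	g, err := GenerationFromSegmentsFileName("segments_3")
	if err != nil {
		panic(err)
	}
	fmt.Println(g) // 3
}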
func GetNumeric ¶
func GetNumeric(reader index.LeafReader, field string) (index.NumericDocValues, error)
func GetSortFieldProviderByName ¶
func GetSortFieldProviderByName(name string) index.SortFieldProvider
func GetSorted ¶
func GetSorted(reader index.LeafReader, field string) (index.SortedDocValues, error)
func ImpactComparator ¶
func IsCacheable ¶
func IsCacheable(ctx index.LeafReaderContext, fields ...string) bool
IsCacheable Returns true if the specified docvalues fields have not been updated
func IsIndexExists ¶
IsIndexExists Returns true if an index likely exists at the specified directory. Note that the answer may be unreliable if a corrupt index exists, or if an index is in the process of committing. Params: directory – the directory to check for an index Returns: true if an index exists; false otherwise
func MergeFromReaders ¶
func MergeFromReaders(ctx context.Context, consumer index.FieldsConsumer, mergeState *MergeState, norms index.NormsProducer) error
MergeFromReaders Merges in the fields from the readers in mergeState. The default implementation skips and maps around deleted documents, and calls write(Fields, NormsProducer). Implementations can override this method for more sophisticated merging (bulk-byte copying, etc).
func NewFieldInfos ¶
func NewFieldInfos(infos []*document.FieldInfo) index.FieldInfos
func NewLeafMetaData ¶
func NewLeafReaderContext ¶
func NewLeafReaderContext(leafReader index.LeafReader) index.LeafReaderContext
func NewLeafReaderContextV1 ¶
func NewLeafReaderContextV1(parent *CompositeReaderContext, reader index.LeafReader, ord, docBase, leafOrd, leafDocBase int) index.LeafReaderContext
func OpenDirectoryReader ¶
func OpenDirectoryReader(ctx context.Context, directory store.Directory, commit IndexCommit, compareFunc CompareLeafReader) (index.DirectoryReader, error)
OpenDirectoryReader called from DirectoryReader.open(...) methods
func OpenStandardDirectoryReader ¶
func OpenStandardDirectoryReader(writer *IndexWriter, readerFunction func(index.SegmentCommitInfo) (*SegmentReader, error), infos *SegmentInfos, applyAllDeletes, writeAllDeletes bool) (index.DirectoryReader, error)
OpenStandardDirectoryReader Used by near real-time search
func ParseGeneration ¶
ParseGeneration Returns the generation from this file name, or 0 if there is no generation.
func ParseSegmentName ¶
ParseSegmentName Parses the segment name out of the given file name. Returns: the segment name only, or the filename itself if it does not contain a '.' or '_'.
func Partition ¶
func Partition(config *bkd.Config, maxDoc, splitDim, commonPrefixLen int, reader types.MutablePointValues, from, to, mid int, _scratch1, _scratch2 *bytes.Buffer)
Partition points around mid. All values on the left must be less than or equal to it and all values on the right must be greater than or equal to it.
func RegisterCodec ¶
func RegisterSortFieldProvider ¶
func RegisterSortFieldProvider(provider index.SortFieldProvider)
func SegmentFileName ¶
SegmentFileName Returns a file name that includes the given segment name, your own custom name and extension. The format of the filename is: <segmentName>(_<name>)(.<ext>).
NOTE: .<ext> is added to the result file name only if ext is not empty.
NOTE: _<segmentSuffix> is added to the result file name only if it's not the empty string.
NOTE: all custom files should be named using this method, or otherwise some structures may fail to handle them properly (such as if they are added to compound files).
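A small sketch of the documented filename format; the segment name, suffix, and extension here are made up:

func ExampleSegmentFileName() {
	fmt.Println(SegmentFileName("_0", "", "si"))            // _0.si: no suffix, extension appended
	fmt.Println(SegmentFileName("_0", "Lucene90_0", "doc")) // _0_Lucene90_0.doc: suffix and extension
}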
func SetDiagnostics ¶
func SortByComparator ¶
func SortByComparator(maxDoc int, comparator index.DocComparator) index.DocMap
func SortByComparators ¶
func SortByDim ¶
func SortByDim(config *bkd.Config, sortedDim int, commonPrefixLengths []int, reader types.MutablePointValues, from, to int, scratch1, scratch2 *bytes.Buffer)
SortByDim Sort points on the given dimension.
func SortFreqProxTermsWriterPerField ¶
func SortFreqProxTermsWriterPerField(fields []*FreqProxTermsWriterPerField)
func SortTermVectorsConsumerPerField ¶
func SortTermVectorsConsumerPerField(fields []*TermVectorsConsumerPerField)
func StripExtension ¶
StripExtension Removes the extension (anything after the first '.'), otherwise returns the original filename.
func StripSegmentName ¶
StripSegmentName Strips the segment name out of the given file name. If you used segmentFileName or fileNameFromGeneration to create your files, then this method simply removes whatever comes before the first '.', or the second '_' (excluding both). Returns: the filename with the segment name removed, or the given filename if it does not contain a '.' and '_'.
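Taken together, the filename helpers decompose a generation-stamped file name. A sketch with a hypothetical per-segment deletes file; the expected results follow the documented behavior:

func ExampleFileNameHelpers() {
	name := "_5_2.del" // segment _5, generation 2, extension "del" (hypothetical)
	fmt.Println(StripSegmentName(name)) // _2.del
	fmt.Println(StripExtension(name))   // _5_2
	fmt.Println(ParseSegmentName(name)) // _5
	fmt.Println(ParseGeneration(name))  // 2
}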
func SubIndex ¶
SubIndex Returns index of the searcher/reader for document n in the array used to construct this searcher/reader.
func TestLiveDocs ¶
func TestLiveDocs(reader index.CodecReader) error
func WriteSortField ¶
func WriteSortField(sf index.SortField, output store.DataOutput) error
Types ¶
type AcceptStatus ¶
type AcceptStatus int
AcceptStatus Return value indicating whether a term should be accepted or the iteration should end. The *_SEEK values denote that after handling the current term the enum should call nextSeekTerm and step forward. See Also: accept(BytesRef)
type AutomatonTermsEnum ¶
type AutomatonTermsEnum struct { *FilteredTermsEnumBase // contains filtered or unexported fields }
AutomatonTermsEnum A FilteredTermsEnum that enumerates terms based upon what is accepted by a DFA. The algorithm is such: As long as matches are successful, keep reading sequentially. When a match fails, skip to the next string in lexicographic order that does not enter a reject state. The algorithm does not attempt to actually skip to the next string that is completely accepted. This is not possible when the language accepted by the FSM is not finite (i.e. * operator). lucene.internal
func (*AutomatonTermsEnum) Accept ¶
func (a *AutomatonTermsEnum) Accept(term []byte) (AcceptStatus, error)
type BaseBinaryDocValues ¶
type BaseBinaryDocValues struct {
	FnDocID        func() int
	FnNextDoc      func(ctx context.Context) (int, error)
	FnAdvance      func(ctx context.Context, target int) (int, error)
	FnSlowAdvance  func(ctx context.Context, target int) (int, error)
	FnCost         func() int64
	FnAdvanceExact func(target int) (bool, error)
	FnBinaryValue  func() ([]byte, error)
}
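The Fn fields make this a function-valued adapter; assuming each method delegates to its corresponding Fn field (as the naming suggests), a single-document stub looks like this (values are made up):

func ExampleBaseBinaryDocValues() {
	stub := &BaseBinaryDocValues{
		FnDocID: func() int { return 0 },
		FnCost:  func() int64 { return 1 },
		FnAdvanceExact: func(target int) (bool, error) {
			return target == 0, nil // only doc 0 carries a value
		},
		FnBinaryValue: func() ([]byte, error) { return []byte("payload"), nil },
	}
	ok, _ := stub.AdvanceExact(0)
	value, _ := stub.BinaryValue()
	fmt.Println(ok, string(value)) // true payload
}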
func (*BaseBinaryDocValues) AdvanceExact ¶
func (n *BaseBinaryDocValues) AdvanceExact(target int) (bool, error)
func (*BaseBinaryDocValues) BinaryValue ¶
func (n *BaseBinaryDocValues) BinaryValue() ([]byte, error)
func (*BaseBinaryDocValues) Cost ¶
func (n *BaseBinaryDocValues) Cost() int64
func (*BaseBinaryDocValues) DocID ¶
func (n *BaseBinaryDocValues) DocID() int
func (*BaseBinaryDocValues) NextDoc ¶
func (n *BaseBinaryDocValues) NextDoc(ctx context.Context) (int, error)
func (*BaseBinaryDocValues) SlowAdvance ¶
type BaseCodecReader ¶
type BaseCodecReader struct {
	*BaseLeafReader
	index.CodecReaderSPI
}
func NewBaseCodecReader ¶
func NewBaseCodecReader(reader index.CodecReader) *BaseCodecReader
func (*BaseCodecReader) CheckIntegrity ¶
func (c *BaseCodecReader) CheckIntegrity() error
func (BaseCodecReader) DocumentWithFields ¶
func (*BaseCodecReader) DocumentWithVisitor ¶
func (c *BaseCodecReader) DocumentWithVisitor(ctx context.Context, docID int, visitor document.StoredFieldVisitor) error
func (*BaseCodecReader) GetBinaryDocValues ¶
func (c *BaseCodecReader) GetBinaryDocValues(field string) (index.BinaryDocValues, error)
func (*BaseCodecReader) GetNormValues ¶
func (c *BaseCodecReader) GetNormValues(field string) (index.NumericDocValues, error)
func (*BaseCodecReader) GetNumericDocValues ¶
func (c *BaseCodecReader) GetNumericDocValues(field string) (index.NumericDocValues, error)
func (*BaseCodecReader) GetPointValues ¶
func (c *BaseCodecReader) GetPointValues(field string) (types.PointValues, bool)
func (BaseCodecReader) GetRefCount ¶
func (r BaseCodecReader) GetRefCount() int
func (*BaseCodecReader) GetSortedDocValues ¶
func (c *BaseCodecReader) GetSortedDocValues(field string) (index.SortedDocValues, error)
func (*BaseCodecReader) GetSortedNumericDocValues ¶
func (c *BaseCodecReader) GetSortedNumericDocValues(field string) (index.SortedNumericDocValues, error)
func (*BaseCodecReader) GetSortedSetDocValues ¶
func (c *BaseCodecReader) GetSortedSetDocValues(field string) (index.SortedSetDocValues, error)
func (BaseCodecReader) GetTermVector ¶
func (*BaseCodecReader) GetTermVectors ¶
func (c *BaseCodecReader) GetTermVectors(docID int) (index.Fields, error)
func (BaseCodecReader) HasDeletions ¶
func (r BaseCodecReader) HasDeletions() bool
func (BaseCodecReader) Leaves ¶
func (r BaseCodecReader) Leaves() ([]index.LeafReaderContext, error)
func (BaseCodecReader) NotifyReaderClosedListeners ¶
func (r BaseCodecReader) NotifyReaderClosedListeners() error
NotifyReaderClosedListeners overridden by StandardDirectoryReader and SegmentReader
func (BaseCodecReader) NumDeletedDocs ¶
func (r BaseCodecReader) NumDeletedDocs() int
func (BaseCodecReader) RegisterParentReader ¶
func (r BaseCodecReader) RegisterParentReader(reader index.IndexReader)
RegisterParentReader Expert: This method is called by IndexReaders which wrap other readers (e.g. CompositeReader or FilterLeafReader) to register the parent at the child (this reader) on construction of the parent. When this reader is closed, it will mark all registered parents as closed, too. The references to parent readers are weak only, so they can be GCed once they are no longer in use.
type BaseCompoundDirectory ¶
type BaseCompoundDirectory struct { }
func (*BaseCompoundDirectory) CreateOutput ¶
func (*BaseCompoundDirectory) CreateOutput(ctx context.Context, name string) (store.IndexOutput, error)
func (*BaseCompoundDirectory) CreateTempOutput ¶
func (*BaseCompoundDirectory) CreateTempOutput(ctx context.Context, prefix, suffix string) (store.IndexOutput, error)
func (*BaseCompoundDirectory) DeleteFile ¶
func (*BaseCompoundDirectory) DeleteFile(ctx context.Context, name string) error
func (*BaseCompoundDirectory) ObtainLock ¶
func (*BaseCompoundDirectory) ObtainLock(name string) (store.Lock, error)
func (*BaseCompoundDirectory) Rename ¶
func (*BaseCompoundDirectory) Rename(ctx context.Context, source, dest string) error
func (*BaseCompoundDirectory) Sync ¶
func (*BaseCompoundDirectory) Sync(names map[string]struct{}) error
func (*BaseCompoundDirectory) SyncMetaData ¶
func (*BaseCompoundDirectory) SyncMetaData(ctx context.Context) error
type BaseDocValuesFieldUpdates ¶
type BaseDocValuesFieldUpdates struct {
// contains filtered or unexported fields
}
func (*BaseDocValuesFieldUpdates) Any ¶
func (d *BaseDocValuesFieldUpdates) Any() bool
Any Returns true if this instance contains any updates.
func (*BaseDocValuesFieldUpdates) Field ¶
func (d *BaseDocValuesFieldUpdates) Field() string
func (*BaseDocValuesFieldUpdates) Finish ¶
func (d *BaseDocValuesFieldUpdates) Finish() error
func (*BaseDocValuesFieldUpdates) GetFinished ¶
func (d *BaseDocValuesFieldUpdates) GetFinished() bool
func (*BaseDocValuesFieldUpdates) Grow ¶
func (d *BaseDocValuesFieldUpdates) Grow(size int) error
func (*BaseDocValuesFieldUpdates) Resize ¶
func (d *BaseDocValuesFieldUpdates) Resize(size int) error
func (*BaseDocValuesFieldUpdates) Size ¶
func (d *BaseDocValuesFieldUpdates) Size() int
func (*BaseDocValuesFieldUpdates) Swap ¶
func (d *BaseDocValuesFieldUpdates) Swap(i, j int) error
type BaseFieldsConsumer ¶
type BaseFieldsConsumer struct { }
func (*BaseFieldsConsumer) Merge ¶
func (f *BaseFieldsConsumer) Merge(ctx context.Context, mergeState *MergeState, norms index.NormsProducer) error
type BaseIndexReaderContext ¶
type BaseIndexReaderContext struct {
// contains filtered or unexported fields
}
func NewBaseIndexReaderContext ¶
func NewBaseIndexReaderContext(parent *CompositeReaderContext, ordInParent, docBaseInParent int) *BaseIndexReaderContext
func (*BaseIndexReaderContext) Identity ¶
func (r *BaseIndexReaderContext) Identity() string
type BaseLeafReader ¶
type BaseLeafReader struct {
	LeafReaderBaseInner
	// contains filtered or unexported fields
}
func NewBaseLeafReader ¶
func NewBaseLeafReader(reader index.LeafReader) *BaseLeafReader
func (BaseLeafReader) DocumentWithFields ¶
func (*BaseLeafReader) GetContext ¶
func (r *BaseLeafReader) GetContext() (index.IndexReaderContext, error)
func (*BaseLeafReader) GetDocCount ¶
func (r *BaseLeafReader) GetDocCount(field string) (int, error)
func (BaseLeafReader) GetRefCount ¶
func (r BaseLeafReader) GetRefCount() int
func (*BaseLeafReader) GetSumDocFreq ¶
func (r *BaseLeafReader) GetSumDocFreq(field string) (int64, error)
func (*BaseLeafReader) GetSumTotalTermFreq ¶
func (r *BaseLeafReader) GetSumTotalTermFreq(field string) (int64, error)
func (BaseLeafReader) GetTermVector ¶
func (BaseLeafReader) HasDeletions ¶
func (r BaseLeafReader) HasDeletions() bool
func (BaseLeafReader) Leaves ¶
func (r BaseLeafReader) Leaves() ([]index.LeafReaderContext, error)
func (BaseLeafReader) NotifyReaderClosedListeners ¶
func (r BaseLeafReader) NotifyReaderClosedListeners() error
NotifyReaderClosedListeners overridden by StandardDirectoryReader and SegmentReader
func (BaseLeafReader) NumDeletedDocs ¶
func (r BaseLeafReader) NumDeletedDocs() int
func (*BaseLeafReader) Postings ¶
func (r *BaseLeafReader) Postings(ctx context.Context, term index.Term, flags int) (index.PostingsEnum, error)
func (BaseLeafReader) RegisterParentReader ¶
func (r BaseLeafReader) RegisterParentReader(reader index.IndexReader)
RegisterParentReader Expert: This method is called by IndexReaders which wrap other readers (e.g. CompositeReader or FilterLeafReader) to register the parent at the child (this reader) on construction of the parent. When this reader is closed, it will mark all registered parents as closed, too. The references to parent readers are weak only, so they can be GCed once they are no longer in use.
func (*BaseLeafReader) TotalTermFreq ¶
type BaseMultiLevelSkipListReader ¶
type BaseMultiLevelSkipListReader struct {
// contains filtered or unexported fields
}
func (*BaseMultiLevelSkipListReader) ReadChildPointer ¶
func (m *BaseMultiLevelSkipListReader) ReadChildPointer(skipStream store.IndexInput) (int64, error)
ReadChildPointer read the child pointer written via MultiLevelSkipListWriter.writeChildPointer(long, DataOutput). Params: skipStream – the IndexInput the child pointer shall be read from Returns: child pointer
func (*BaseMultiLevelSkipListReader) ReadLevelLength ¶
func (m *BaseMultiLevelSkipListReader) ReadLevelLength(skipStream store.IndexInput) (int64, error)
ReadLevelLength read the length of the current level written via MultiLevelSkipListWriter.writeLevelLength(long, IndexOutput). Params: skipStream – the IndexInput the length shall be read from Returns: level length
func (*BaseMultiLevelSkipListReader) ReadSkipData ¶
func (m *BaseMultiLevelSkipListReader) ReadSkipData(level int, skipStream store.IndexInput) (int64, error)
ReadSkipData Subclasses must implement the actual skip data encoding in this method. Params: level – the level the skip data shall be read from; skipStream – the skip stream to read from
type BaseMultiLevelSkipListWriter ¶
type BaseMultiLevelSkipListWriter struct {
// contains filtered or unexported fields
}
func NewBaseMultiLevelSkipListWriter ¶
func NewBaseMultiLevelSkipListWriter(cfg *BaseMultiLevelSkipListWriterConfig) *BaseMultiLevelSkipListWriter
func (*BaseMultiLevelSkipListWriter) NumberOfSkipLevels ¶
func (m *BaseMultiLevelSkipListWriter) NumberOfSkipLevels() int
func (*BaseMultiLevelSkipListWriter) SetNumberOfSkipLevels ¶
func (m *BaseMultiLevelSkipListWriter) SetNumberOfSkipLevels(numberOfSkipLevels int)
type BaseMultiLevelSkipListWriterConfig ¶
type BaseMultiLevelSkipListWriterConfig struct {
	SkipInterval      int
	SkipMultiplier    int
	MaxSkipLevels     int
	DF                int
	WriteSkipData     func(level int, skipBuffer store.IndexOutput) error
	WriteLevelLength  func(levelLength int64, output store.IndexOutput) error
	WriteChildPointer func(childPointer int64, skipBuffer store.DataOutput) error
}
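A hedged sketch of wiring the callback-style config into NewBaseMultiLevelSkipListWriter; the numeric settings are illustrative only, and the callbacks are no-ops standing in for real encoders:

func newNoopSkipListWriter() *BaseMultiLevelSkipListWriter {
	cfg := &BaseMultiLevelSkipListWriterConfig{
		SkipInterval:   16,   // hypothetical: one skip entry per 16 docs
		SkipMultiplier: 8,    // hypothetical: each level covers 8x the docs of the one below
		MaxSkipLevels:  10,
		DF:             1000, // hypothetical document frequency
		WriteSkipData: func(level int, skipBuffer store.IndexOutput) error {
			return nil // a real implementation encodes per-level skip data here
		},
		WriteLevelLength: func(levelLength int64, output store.IndexOutput) error {
			return nil // see MultiLevelSkipListWriter.writeLevelLength
		},
		WriteChildPointer: func(childPointer int64, skipBuffer store.DataOutput) error {
			return nil // see MultiLevelSkipListWriter.writeChildPointer
		},
	}
	return NewBaseMultiLevelSkipListWriter(cfg)
}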
type BaseParallelPostingsArray ¶
type BaseParallelPostingsArray struct {
// contains filtered or unexported fields
}
func NewBaseParallelPostingsArray ¶
func NewBaseParallelPostingsArray() *BaseParallelPostingsArray
func (*BaseParallelPostingsArray) AddressOffset ¶
func (p *BaseParallelPostingsArray) AddressOffset() []int
func (*BaseParallelPostingsArray) ByteStarts ¶
func (p *BaseParallelPostingsArray) ByteStarts() []int
func (*BaseParallelPostingsArray) GetAddressOffset ¶
func (p *BaseParallelPostingsArray) GetAddressOffset(index int) int
func (*BaseParallelPostingsArray) GetByteStarts ¶
func (p *BaseParallelPostingsArray) GetByteStarts(index int) int
func (*BaseParallelPostingsArray) GetTextStarts ¶
func (p *BaseParallelPostingsArray) GetTextStarts(index int) int
func (*BaseParallelPostingsArray) Grow ¶
func (p *BaseParallelPostingsArray) Grow()
func (*BaseParallelPostingsArray) SetAddressOffset ¶
func (p *BaseParallelPostingsArray) SetAddressOffset(termID, v int)
func (*BaseParallelPostingsArray) SetByteStarts ¶
func (p *BaseParallelPostingsArray) SetByteStarts(termID, v int)
func (*BaseParallelPostingsArray) SetTextStarts ¶
func (p *BaseParallelPostingsArray) SetTextStarts(termID, v int)
func (*BaseParallelPostingsArray) TextStarts ¶
func (p *BaseParallelPostingsArray) TextStarts() []uint32
type BasePointsWriter ¶
type BasePointsWriter struct {
	WriteField func(ctx context.Context, fieldInfo *document.FieldInfo, values index.PointsReader) error
	Finish     func() error
}
func (*BasePointsWriter) Merge ¶
func (p *BasePointsWriter) Merge(mergeState *MergeState) error
Merge Default merge implementation to merge incoming points readers by visiting all their points and adding to this writer
func (*BasePointsWriter) MergeOneField ¶
func (p *BasePointsWriter) MergeOneField(ctx context.Context, mergeState *MergeState, fieldInfo *document.FieldInfo) error
MergeOneField Default naive merge implementation for one field: it just re-indexes all the values from the incoming segment. The default codec overrides this for 1D fields and uses a faster but more complex implementation.
type BaseSimScorer ¶
type BaseSimScorer struct {
SimScorerSPI
}
func NewBaseSimScorer ¶
func NewBaseSimScorer(simScorerSPI SimScorerSPI) *BaseSimScorer
func (*BaseSimScorer) Explain ¶
func (s *BaseSimScorer) Explain(freq types.Explanation, norm int64) (types.Explanation, error)
type BaseSortField ¶
type BaseSortField struct {
// contains filtered or unexported fields
}
BaseSortField Stores information about how to sort documents by terms in an individual field. Fields must be indexed in order to sort by them. Created: Feb 11, 2004 1:25:29 PM Since: lucene 1.4 See Also: Sort
func NewSortField ¶
func NewSortField(field string, _type index.SortFieldType) *BaseSortField
func NewSortFieldV1 ¶
func NewSortFieldV1(field string, _type index.SortFieldType, reverse bool) *BaseSortField
func (*BaseSortField) GetBytesComparator ¶
func (s *BaseSortField) GetBytesComparator() index.BytesComparator
func (*BaseSortField) GetCanUsePoints ¶
func (s *BaseSortField) GetCanUsePoints() bool
func (*BaseSortField) GetComparator ¶
func (s *BaseSortField) GetComparator(numHits, sortPos int) index.FieldComparator
func (*BaseSortField) GetComparatorSource ¶
func (s *BaseSortField) GetComparatorSource() index.FieldComparatorSource
func (*BaseSortField) GetField ¶
func (s *BaseSortField) GetField() string
GetField Returns the name of the field. Could return null if the sort is by SCORE or DOC. Returns: Name of field, possibly null.
func (*BaseSortField) GetIndexSorter ¶
func (s *BaseSortField) GetIndexSorter() index.IndexSorter
GetIndexSorter Returns an IndexSorter used for sorting index segments by this SortField. If the SortField cannot be used for index sorting (for example, if it uses scores or other query-dependent values), this method should return null. SortFields that implement this method should also implement a companion SortFieldProvider to serialize and deserialize the sort in index segment headers. lucene.experimental
func (*BaseSortField) GetMissingValue ¶
func (s *BaseSortField) GetMissingValue() any
GetMissingValue Return the value to use for documents that don't have a value. A value of null indicates that the default should be used.
func (*BaseSortField) GetReverse ¶
func (s *BaseSortField) GetReverse() bool
func (*BaseSortField) GetType ¶
func (s *BaseSortField) GetType() index.SortFieldType
GetType Returns the type of contents in the field. Returns: One of the constants SCORE, DOC, STRING, INT or FLOAT.
func (*BaseSortField) NeedsScores ¶
func (s *BaseSortField) NeedsScores() bool
NeedsScores Whether the relevance score is needed to sort documents.
func (*BaseSortField) Serialize ¶
func (s *BaseSortField) Serialize(ctx context.Context, out store.DataOutput) error
func (*BaseSortField) SetBytesComparator ¶
func (s *BaseSortField) SetBytesComparator(fn index.BytesComparator)
func (*BaseSortField) SetCanUsePoints ¶
func (s *BaseSortField) SetCanUsePoints()
SetCanUsePoints For numeric sort fields, setting this field indicates that the same numeric data has been indexed with two fields: doc values and points, and that these fields have the same name. This enables the sort optimization and allows skipping non-competitive documents.
func (*BaseSortField) SetMissingValue ¶
func (s *BaseSortField) SetMissingValue(missingValue any) error
SetMissingValue Set the value to use for documents that don't have a value.
func (*BaseSortField) String ¶
func (s *BaseSortField) String() string
type BaseSortedDocValues ¶
type BaseSortedDocValues struct {
	BaseBinaryDocValues
	FnOrdValue      func() (int, error)
	FnLookupOrd     func(ord int) ([]byte, error)
	FnGetValueCount func() int
}
func NewBaseSortedDocValues ¶
func NewBaseSortedDocValues(cfg *SortedDocValuesDefaultConfig) *BaseSortedDocValues
func (*BaseSortedDocValues) BinaryValue ¶
func (r *BaseSortedDocValues) BinaryValue() ([]byte, error)
func (*BaseSortedDocValues) Intersect ¶
func (r *BaseSortedDocValues) Intersect(automaton *automaton.CompiledAutomaton) (index.TermsEnum, error)
func (*BaseSortedDocValues) LookupTerm ¶
func (r *BaseSortedDocValues) LookupTerm(key []byte) (int, error)
type BaseTerms ¶
type BaseTermsEnum ¶
type BaseTermsEnum struct {
// contains filtered or unexported fields
}
BaseTermsEnum A base TermsEnum that adds default implementations for attributes(), termState(), seekExact(BytesRef), and seekExact(BytesRef, TermState). In some cases, the default implementation may be slow and consume a large amount of memory, so subclasses SHOULD provide their own implementation if possible.
func NewBaseTermsEnum ¶
func NewBaseTermsEnum(cfg *BaseTermsEnumConfig) *BaseTermsEnum
func (*BaseTermsEnum) Attributes ¶
func (b *BaseTermsEnum) Attributes() *attribute.Source
func (*BaseTermsEnum) SeekExactExpert ¶
type BaseTermsEnumConfig ¶
type BaseTermsHash ¶
type BaseTermsHash struct {
// contains filtered or unexported fields
}
func NewBaseTermsHash ¶
func NewBaseTermsHash(intBlockAllocator ints.IntsAllocator, byteBlockAllocator bytesref.Allocator, nextTermsHash TermsHash) *BaseTermsHash
func (*BaseTermsHash) Abort ¶
func (h *BaseTermsHash) Abort() error
func (*BaseTermsHash) FinishDocument ¶
func (h *BaseTermsHash) FinishDocument(ctx context.Context, docID int) error
func (*BaseTermsHash) Flush ¶
func (h *BaseTermsHash) Flush(ctx context.Context, fieldsToFlush map[string]TermsHashPerField, state *index.SegmentWriteState, sortMap index.DocMap, norms index.NormsProducer) error
func (*BaseTermsHash) GetBytePool ¶
func (h *BaseTermsHash) GetBytePool() *bytesref.BlockPool
func (*BaseTermsHash) GetIntPool ¶
func (h *BaseTermsHash) GetIntPool() *ints.BlockPool
func (*BaseTermsHash) GetTermBytePool ¶
func (h *BaseTermsHash) GetTermBytePool() *bytesref.BlockPool
func (*BaseTermsHash) Reset ¶
func (h *BaseTermsHash) Reset() error
func (*BaseTermsHash) StartDocument ¶
func (h *BaseTermsHash) StartDocument() error
type BinaryDocValuesFieldUpdates ¶
type BinaryDocValuesFieldUpdates struct {
	*BaseDocValuesFieldUpdates
	// contains filtered or unexported fields
}
BinaryDocValuesFieldUpdates A DocValuesFieldUpdates which holds updates of documents, of a single BinaryDocValuesField. lucene.experimental
func (*BinaryDocValuesFieldUpdates) AddBytes ¶
func (b *BinaryDocValuesFieldUpdates) AddBytes(doc int, value []byte) error
func (*BinaryDocValuesFieldUpdates) AddInt64 ¶
func (b *BinaryDocValuesFieldUpdates) AddInt64(doc int, value int64) error
func (*BinaryDocValuesFieldUpdates) AddIterator ¶
func (b *BinaryDocValuesFieldUpdates) AddIterator(doc int, it DocValuesFieldUpdatesIterator) error
func (*BinaryDocValuesFieldUpdates) EnsureFinished ¶
func (b *BinaryDocValuesFieldUpdates) EnsureFinished() error
func (*BinaryDocValuesFieldUpdates) Finish ¶
func (b *BinaryDocValuesFieldUpdates) Finish() error
func (*BinaryDocValuesFieldUpdates) Grow ¶
func (b *BinaryDocValuesFieldUpdates) Grow(size int) error
func (*BinaryDocValuesFieldUpdates) Iterator ¶
func (b *BinaryDocValuesFieldUpdates) Iterator() (DocValuesFieldUpdatesIterator, error)
func (*BinaryDocValuesFieldUpdates) Reset ¶
func (b *BinaryDocValuesFieldUpdates) Reset(doc int) error
func (*BinaryDocValuesFieldUpdates) Resize ¶
func (b *BinaryDocValuesFieldUpdates) Resize(size int) error
func (*BinaryDocValuesFieldUpdates) Swap ¶
func (b *BinaryDocValuesFieldUpdates) Swap(i, j int) error
type BinaryDocValuesWriter ¶
type BinaryDocValuesWriter struct {
// contains filtered or unexported fields
}
func NewBinaryDocValuesWriter ¶
func NewBinaryDocValuesWriter(fieldInfo *document.FieldInfo) *BinaryDocValuesWriter
func (*BinaryDocValuesWriter) AddValue ¶
func (b *BinaryDocValuesWriter) AddValue(docID int, value []byte) error
func (*BinaryDocValuesWriter) Flush ¶
func (b *BinaryDocValuesWriter) Flush(state *index.SegmentWriteState, sortMap index.DocMap, consumer index.DocValuesConsumer) error
func (*BinaryDocValuesWriter) GetDocValues ¶
func (b *BinaryDocValuesWriter) GetDocValues() types.DocIdSetIterator
type BitSetIterator ¶
type BitSetIterator struct {
// contains filtered or unexported fields
}
func NewBitSetIterator ¶
func NewBitSetIterator(bits *bitset.BitSet, cost int64) *BitSetIterator
func (*BitSetIterator) Cost ¶
func (b *BitSetIterator) Cost() int64
func (*BitSetIterator) DocID ¶
func (b *BitSetIterator) DocID() int
func (*BitSetIterator) GetBitSet ¶
func (b *BitSetIterator) GetBitSet() *bitset.BitSet
func (*BitSetIterator) SlowAdvance ¶
type BufferedBinaryDocValues ¶
type BufferedBinaryDocValues struct {
// contains filtered or unexported fields
}
func NewBufferedBinaryDocValues ¶
func NewBufferedBinaryDocValues(values [][]byte, docsWithField types.DocIdSetIterator) *BufferedBinaryDocValues
func (*BufferedBinaryDocValues) AdvanceExact ¶
func (b *BufferedBinaryDocValues) AdvanceExact(target int) (bool, error)
func (*BufferedBinaryDocValues) BinaryValue ¶
func (b *BufferedBinaryDocValues) BinaryValue() ([]byte, error)
func (*BufferedBinaryDocValues) Cost ¶
func (b *BufferedBinaryDocValues) Cost() int64
func (*BufferedBinaryDocValues) DocID ¶
func (b *BufferedBinaryDocValues) DocID() int
func (*BufferedBinaryDocValues) NextDoc ¶
func (b *BufferedBinaryDocValues) NextDoc(ctx context.Context) (int, error)
func (*BufferedBinaryDocValues) SlowAdvance ¶
type BufferedNorms ¶
type BufferedNorms struct {
// contains filtered or unexported fields
}
func (*BufferedNorms) AdvanceExact ¶
func (b *BufferedNorms) AdvanceExact(target int) (bool, error)
func (*BufferedNorms) Cost ¶
func (b *BufferedNorms) Cost() int64
func (*BufferedNorms) DocID ¶
func (b *BufferedNorms) DocID() int
func (*BufferedNorms) LongValue ¶
func (b *BufferedNorms) LongValue() (int64, error)
func (*BufferedNorms) SlowAdvance ¶
type BufferedNumericDocValues ¶
type BufferedNumericDocValues struct {
// contains filtered or unexported fields
}
func NewBufferedNumericDocValues ¶
func NewBufferedNumericDocValues(values *packed.PackedLongValues, docsWithFields types.DocIdSetIterator) *BufferedNumericDocValues
func (*BufferedNumericDocValues) AdvanceExact ¶
func (b *BufferedNumericDocValues) AdvanceExact(target int) (bool, error)
func (*BufferedNumericDocValues) Cost ¶
func (b *BufferedNumericDocValues) Cost() int64
func (*BufferedNumericDocValues) DocID ¶
func (b *BufferedNumericDocValues) DocID() int
func (*BufferedNumericDocValues) LongValue ¶
func (b *BufferedNumericDocValues) LongValue() (int64, error)
func (*BufferedNumericDocValues) NextDoc ¶
func (b *BufferedNumericDocValues) NextDoc(ctx context.Context) (int, error)
func (*BufferedNumericDocValues) SlowAdvance ¶
type BufferedUpdatesStream ¶
BufferedUpdatesStream Tracks the stream of FrozenBufferedUpdates. When DocumentsWriterPerThread flushes, its buffered deletes and updates are appended to this stream and immediately resolved (to actual docIDs, per segment) using the indexing thread that triggered the flush for concurrency. When a merge kicks off, we sync to ensure all resolving packets complete. We also apply to all segments when NRT reader is pulled, commit/close is called, or when too many deletes or updates are buffered and must be flushed (by RAM usage or by count). Each packet is assigned a generation, and each flushed or merged segment is also assigned a generation, so we can track which BufferedDeletes packets to apply to any given segment.
func NewBufferedUpdatesStream ¶
func NewBufferedUpdatesStream() *BufferedUpdatesStream
func (*BufferedUpdatesStream) FinishedSegment ¶
func (b *BufferedUpdatesStream) FinishedSegment(delGen int64)
func (*BufferedUpdatesStream) GetCompletedDelGen ¶
func (b *BufferedUpdatesStream) GetCompletedDelGen() int64
GetCompletedDelGen All frozen packets up to and including this del gen are guaranteed to be finished.
func (*BufferedUpdatesStream) GetNextGen ¶
func (b *BufferedUpdatesStream) GetNextGen() int64
type ByteSliceReader ¶
type ByteSliceReader struct {
	*store.BaseDataInput
	// contains filtered or unexported fields
}
ByteSliceReader IndexInput that knows how to read the byte slices written by Posting and PostingVector. We read the bytes in each slice until we hit the end of that slice at which point we read the forwarding address of the next slice and then jump to it.
func NewByteSliceReader ¶
func NewByteSliceReader() *ByteSliceReader
func (*ByteSliceReader) Clone ¶
func (b *ByteSliceReader) Clone() store.CloneReader
func (*ByteSliceReader) EOF ¶
func (b *ByteSliceReader) EOF() bool
type ClosedListener ¶
type ClosedListener interface { }
type CommitPoint ¶
type CommitPoint struct {
// contains filtered or unexported fields
}
CommitPoint Holds details for each commit point. This class is also passed to the deletion policy. Note: this class has a natural ordering that is inconsistent with equals.
func NewCommitPoint ¶
func NewCommitPoint(commitsToDelete *[]*CommitPoint, directoryOrig store.Directory, segmentInfos *SegmentInfos) (*CommitPoint, error)
func (*CommitPoint) CompareTo ¶
func (c *CommitPoint) CompareTo(commit IndexCommit) int
func (*CommitPoint) Delete ¶
func (c *CommitPoint) Delete() error
Delete Called only by the deletion policy, to remove this commit point from the index.
func (*CommitPoint) GetDirectory ¶
func (c *CommitPoint) GetDirectory() store.Directory
func (*CommitPoint) GetFileNames ¶
func (c *CommitPoint) GetFileNames() (map[string]struct{}, error)
func (*CommitPoint) GetGeneration ¶
func (c *CommitPoint) GetGeneration() int64
func (*CommitPoint) GetReader ¶
func (c *CommitPoint) GetReader() *StandardDirectoryReader
func (*CommitPoint) GetSegmentCount ¶
func (c *CommitPoint) GetSegmentCount() int
func (*CommitPoint) GetSegmentsFileName ¶
func (c *CommitPoint) GetSegmentsFileName() string
func (*CommitPoint) GetUserData ¶
func (c *CommitPoint) GetUserData() (map[string]string, error)
func (*CommitPoint) IsDeleted ¶
func (c *CommitPoint) IsDeleted() bool
type CompareIndexReader ¶
type CompareIndexReader func(a, b index.IndexReader) int
type CompareLeafReader ¶
type CompareLeafReader func(a, b index.LeafReader) int
type CompetitiveImpactAccumulator ¶
type CompetitiveImpactAccumulator struct {
// contains filtered or unexported fields
}
CompetitiveImpactAccumulator accumulates the (freq, norm) pairs that may produce competitive scores.
func NewCompetitiveImpactAccumulator ¶
func NewCompetitiveImpactAccumulator() *CompetitiveImpactAccumulator
func (*CompetitiveImpactAccumulator) Add ¶
func (c *CompetitiveImpactAccumulator) Add(freq int, norm int64)
Add Accumulate a (freq,norm) pair, updating this structure if there is no equivalent or more competitive entry already.
func (*CompetitiveImpactAccumulator) AddAll ¶
func (c *CompetitiveImpactAccumulator) AddAll(acc *CompetitiveImpactAccumulator)
AddAll Merge acc into this.
func (*CompetitiveImpactAccumulator) Clear ¶
func (c *CompetitiveImpactAccumulator) Clear()
func (*CompetitiveImpactAccumulator) GetCompetitiveFreqNormPairs ¶
func (c *CompetitiveImpactAccumulator) GetCompetitiveFreqNormPairs() []index.Impact
GetCompetitiveFreqNormPairs Get the set of competitive freq and norm pairs, ordered by increasing freq and norm.
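A short usage sketch; the freq/norm values are invented:

func ExampleCompetitiveImpactAccumulator() {
	acc := NewCompetitiveImpactAccumulator()
	acc.Add(3, 12) // (freq=3, norm=12)
	acc.Add(5, 12) // same norm, higher freq: strictly more competitive
	acc.Add(2, 8)  // shorter doc, may stay competitive despite lower freq

	for _, impact := range acc.GetCompetitiveFreqNormPairs() {
		_ = impact // index.Impact entries, ordered by increasing freq and norm
	}
}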
type CompositeReaderBuilder ¶
type CompositeReaderBuilder struct {
// contains filtered or unexported fields
}
func NewCompositeReaderBuilder ¶
func NewCompositeReaderBuilder(reader index.CompositeReader) *CompositeReaderBuilder
func (*CompositeReaderBuilder) Build ¶
func (c *CompositeReaderBuilder) Build() (*CompositeReaderContext, error)
type CompositeReaderContext ¶
type CompositeReaderContext struct {
	*BaseIndexReaderContext
	// contains filtered or unexported fields
}
CompositeReaderContext IndexReaderContext for CompositeReader instance.
func NewCompositeReaderContext ¶
func NewCompositeReaderContext(fn CompositeReaderContextOption) (*CompositeReaderContext, error)
func (*CompositeReaderContext) Children ¶
func (c *CompositeReaderContext) Children() []index.IndexReaderContext
func (*CompositeReaderContext) Leaves ¶
func (c *CompositeReaderContext) Leaves() ([]index.LeafReaderContext, error)
func (*CompositeReaderContext) Reader ¶
func (c *CompositeReaderContext) Reader() index.IndexReader
type CompositeReaderContextOption ¶
type CompositeReaderContextOption func(*compositeReaderContextOption)
func WithCompositeReaderContextV1 ¶
func WithCompositeReaderContextV1(reader index.CompositeReader) CompositeReaderContextOption
func WithCompositeReaderContextV2 ¶
func WithCompositeReaderContextV2(parent *CompositeReaderContext, reader index.CompositeReader, ordInParent, docbaseInParent int, children *arraylist.List[index.IndexReaderContext]) CompositeReaderContextOption
func WithCompositeReaderContextV3 ¶
func WithCompositeReaderContextV3(reader index.CompositeReader, children, leaves *arraylist.List[index.IndexReaderContext]) CompositeReaderContextOption
type DVFUIterator ¶
type DVFUIterator struct { }
func (*DVFUIterator) AdvanceExact ¶
func (*DVFUIterator) AdvanceExact(target int) (bool, error)
func (*DVFUIterator) Cost ¶
func (*DVFUIterator) Cost() int64
type DataFields ¶
type DataFields struct {
// contains filtered or unexported fields
}
func NewDataFields ¶
func NewDataFields(fields []*FieldData) *DataFields
func (*DataFields) Names ¶
func (d *DataFields) Names() []string
func (*DataFields) Size ¶
func (d *DataFields) Size() int
type DataPostingsEnum ¶
type DataPostingsEnum struct {
// contains filtered or unexported fields
}
func (*DataPostingsEnum) Cost ¶
func (d *DataPostingsEnum) Cost() int64
func (*DataPostingsEnum) DocID ¶
func (d *DataPostingsEnum) DocID() int
func (*DataPostingsEnum) EndOffset ¶
func (d *DataPostingsEnum) EndOffset() (int, error)
func (*DataPostingsEnum) Freq ¶
func (d *DataPostingsEnum) Freq() (int, error)
func (*DataPostingsEnum) GetPayload ¶
func (d *DataPostingsEnum) GetPayload() ([]byte, error)
func (*DataPostingsEnum) NextDoc ¶
func (d *DataPostingsEnum) NextDoc(ctx context.Context) (int, error)
func (*DataPostingsEnum) NextPosition ¶
func (d *DataPostingsEnum) NextPosition() (int, error)
func (*DataPostingsEnum) SlowAdvance ¶
func (*DataPostingsEnum) StartOffset ¶
func (d *DataPostingsEnum) StartOffset() (int, error)
type DataTerms ¶
type DataTerms struct {
	*BaseTerms
	// contains filtered or unexported fields
}
func NewDataTerms ¶
func (*DataTerms) GetDocCount ¶
func (*DataTerms) GetSumDocFreq ¶
func (*DataTerms) GetSumTotalTermFreq ¶
func (*DataTerms) HasOffsets ¶
func (*DataTerms) HasPayloads ¶
func (*DataTerms) HasPositions ¶
type DataTermsEnum ¶
type DataTermsEnum struct {
	*BaseTermsEnum
	// contains filtered or unexported fields
}
func NewDataTermsEnum ¶
func NewDataTermsEnum(fieldData *FieldData) *DataTermsEnum
func (*DataTermsEnum) DocFreq ¶
func (d *DataTermsEnum) DocFreq() (int, error)
func (*DataTermsEnum) Impacts ¶
func (d *DataTermsEnum) Impacts(flags int) (index.ImpactsEnum, error)
func (*DataTermsEnum) Ord ¶
func (d *DataTermsEnum) Ord() (int64, error)
func (*DataTermsEnum) Postings ¶
func (d *DataTermsEnum) Postings(reuse index.PostingsEnum, flags int) (index.PostingsEnum, error)
func (*DataTermsEnum) SeekCeil ¶
func (d *DataTermsEnum) SeekCeil(ctx context.Context, text []byte) (index.SeekStatus, error)
func (*DataTermsEnum) SeekExactByOrd ¶
func (d *DataTermsEnum) SeekExactByOrd(ctx context.Context, ord int64) error
func (*DataTermsEnum) Term ¶
func (d *DataTermsEnum) Term() ([]byte, error)
func (*DataTermsEnum) TotalTermFreq ¶
func (d *DataTermsEnum) TotalTermFreq() (int64, error)
type DefaultIndexingChain ¶
type DefaultIndexingChain struct {
// contains filtered or unexported fields
}
DefaultIndexingChain Default general-purpose indexing chain, which handles indexing all types of fields.
func NewDefaultIndexingChain ¶
func NewDefaultIndexingChain(indexCreatedVersionMajor int, segmentInfo *SegmentInfo, dir store.Directory, fieldInfos *FieldInfosBuilder, indexWriterConfig *liveIndexWriterConfig) *DefaultIndexingChain
func (*DefaultIndexingChain) Abort ¶
func (d *DefaultIndexingChain) Abort() error
func (*DefaultIndexingChain) Flush ¶
func (d *DefaultIndexingChain) Flush(ctx context.Context, state *index.SegmentWriteState) (index.DocMap, error)
func (*DefaultIndexingChain) GetHasDocValues ¶
func (d *DefaultIndexingChain) GetHasDocValues(field string) types.DocIdSetIterator
func (*DefaultIndexingChain) NewPerField ¶
func (*DefaultIndexingChain) ProcessDocument ¶
type DeleteSlice ¶
type DeleteSlice struct {
// contains filtered or unexported fields
}
func NewDeleteSlice ¶
func NewDeleteSlice(currentTail *Node) *DeleteSlice
func (*DeleteSlice) Apply ¶
func (d *DeleteSlice) Apply(del *index.BufferedUpdates, docIDUpto int) error
func (*DeleteSlice) Reset ¶
func (d *DeleteSlice) Reset()
type DirectoryReaderBuilder ¶
type DirectoryReaderBuilder struct { }
type DocValueSorter ¶
type DocValueSorter struct {
// contains filtered or unexported fields
}
func NewDocValueSorter ¶
func NewDocValueSorter(docs []int, comparator index.DocComparator) *DocValueSorter
func (*DocValueSorter) Len ¶
func (d *DocValueSorter) Len() int
func (*DocValueSorter) Less ¶
func (d *DocValueSorter) Less(i, j int) bool
func (*DocValueSorter) Swap ¶
func (d *DocValueSorter) Swap(i, j int)
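Because DocValueSorter implements Len, Less, and Swap, it satisfies the standard library's sort.Interface. A hedged sketch, assuming index.DocComparator is satisfied by a Compare(docID1, docID2 int) int method (mirroring DoubleDocComparator below); byDocID is a hypothetical comparator:

// byDocID is a hypothetical comparator ordering documents by docID.
type byDocID struct{}

func (byDocID) Compare(docID1, docID2 int) int {
	switch {
	case docID1 < docID2:
		return -1
	case docID1 > docID2:
		return 1
	}
	return 0
}

func sortDocs(docs []int) {
	sort.Sort(NewDocValueSorter(docs, byDocID{}))
}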
type DocValuesFieldUpdates ¶
type DocValuesFieldUpdates interface {
	Field() string

	AddInt64(doc int, value int64) error

	AddBytes(doc int, value []byte) error

	// AddIterator Adds the value for the given docID. This method avoids conditional calls to
	// DocValuesFieldUpdates.Iterator.longValue() or DocValuesFieldUpdates.Iterator.binaryValue(),
	// since the implementation knows whether it is a long-value or a binary-value iterator.
	AddIterator(doc int, it DocValuesFieldUpdatesIterator) error

	// Iterator Returns a DocValuesFieldUpdates.Iterator over the updated documents and their values.
	Iterator() (DocValuesFieldUpdatesIterator, error)

	// Finish Freezes internal data structures and sorts updates by docID for efficient iteration.
	Finish() error

	// Any Returns true if this instance contains any updates.
	Any() bool

	Size() int

	// Reset Adds an update that resets the document's value.
	// Params: doc – the doc to update
	Reset(doc int) error

	Swap(i, j int) error

	Grow(i int) error

	Resize(i int) error

	EnsureFinished() error

	GetFinished() bool
}
DocValuesFieldUpdates holds updates of a single docvalues field, for a set of documents within one segment.
type DocValuesFieldUpdatesIterator ¶
type DocValuesFieldUpdatesIterator interface {
	types.DocValuesIterator

	// LongValue Returns the long value for the current document if this iterator is a long-value iterator.
	LongValue() (int64, error)

	// BinaryValue Returns the binary value for the current document if this iterator is a binary-value iterator.
	BinaryValue() ([]byte, error)

	// DelGen Returns the delGen for this packet.
	DelGen() int64

	// HasValue Returns true if this doc has a value.
	HasValue() bool
}
DocValuesFieldUpdatesIterator An iterator over documents and their updated values. Only documents with updates are returned by this iterator, and the documents are returned in increasing order.
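A hedged sketch of draining a long-value iterator by adapting it with AsNumericDocValues (documented above). The exhaustion sentinel is an assumption mirroring Lucene's NO_MORE_DOCS; check this port's DocIdSetIterator contract before relying on it:

func drainNumericUpdates(ctx context.Context, it DocValuesFieldUpdatesIterator) error {
	values := AsNumericDocValues(it)
	for {
		doc, err := values.NextDoc(ctx)
		if err != nil {
			return err
		}
		if doc == math.MaxInt32 { // assumed NO_MORE_DOCS sentinel
			return nil
		}
		v, err := values.LongValue()
		if err != nil {
			return err
		}
		_ = v // apply the updated value for doc here
	}
}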
type DocValuesLeafReader ¶
type DocValuesLeafReader struct {
*BaseLeafReader
}
func NewDocValuesLeafReader ¶
func NewDocValuesLeafReader() *DocValuesLeafReader
func (*DocValuesLeafReader) CheckIntegrity ¶
func (d *DocValuesLeafReader) CheckIntegrity() error
func (*DocValuesLeafReader) DoClose ¶
func (d *DocValuesLeafReader) DoClose() error
func (DocValuesLeafReader) DocumentWithFields ¶
func (*DocValuesLeafReader) DocumentWithVisitor ¶
func (d *DocValuesLeafReader) DocumentWithVisitor(ctx context.Context, docID int, visitor document.StoredFieldVisitor) error
func (*DocValuesLeafReader) GetBinaryDocValues ¶
func (d *DocValuesLeafReader) GetBinaryDocValues(field string) (index.BinaryDocValues, error)
func (*DocValuesLeafReader) GetFieldInfos ¶
func (d *DocValuesLeafReader) GetFieldInfos() index.FieldInfos
func (*DocValuesLeafReader) GetLiveDocs ¶
func (d *DocValuesLeafReader) GetLiveDocs() util.Bits
func (*DocValuesLeafReader) GetMetaData ¶
func (d *DocValuesLeafReader) GetMetaData() index.LeafMetaData
func (*DocValuesLeafReader) GetNormValues ¶
func (d *DocValuesLeafReader) GetNormValues(field string) (index.NumericDocValues, error)
func (*DocValuesLeafReader) GetNumericDocValues ¶
func (d *DocValuesLeafReader) GetNumericDocValues(field string) (index.NumericDocValues, error)
func (*DocValuesLeafReader) GetPointValues ¶
func (d *DocValuesLeafReader) GetPointValues(field string) (types.PointValues, bool)
func (*DocValuesLeafReader) GetReaderCacheHelper ¶
func (d *DocValuesLeafReader) GetReaderCacheHelper() index.CacheHelper
func (DocValuesLeafReader) GetRefCount ¶
func (r DocValuesLeafReader) GetRefCount() int
func (*DocValuesLeafReader) GetSortedDocValues ¶
func (d *DocValuesLeafReader) GetSortedDocValues(field string) (index.SortedDocValues, error)
func (*DocValuesLeafReader) GetSortedNumericDocValues ¶
func (d *DocValuesLeafReader) GetSortedNumericDocValues(field string) (index.SortedNumericDocValues, error)
func (*DocValuesLeafReader) GetSortedSetDocValues ¶
func (d *DocValuesLeafReader) GetSortedSetDocValues(field string) (index.SortedSetDocValues, error)
func (DocValuesLeafReader) GetTermVector ¶
func (*DocValuesLeafReader) GetTermVectors ¶
func (d *DocValuesLeafReader) GetTermVectors(docID int) (index.Fields, error)
func (DocValuesLeafReader) HasDeletions ¶
func (r DocValuesLeafReader) HasDeletions() bool
func (DocValuesLeafReader) Leaves ¶
func (r DocValuesLeafReader) Leaves() ([]index.LeafReaderContext, error)
func (*DocValuesLeafReader) MaxDoc ¶
func (d *DocValuesLeafReader) MaxDoc() int
func (DocValuesLeafReader) NotifyReaderClosedListeners ¶
func (r DocValuesLeafReader) NotifyReaderClosedListeners() error
NotifyReaderClosedListeners overridden by StandardDirectoryReader and SegmentReader
func (DocValuesLeafReader) NumDeletedDocs ¶
func (r DocValuesLeafReader) NumDeletedDocs() int
func (*DocValuesLeafReader) NumDocs ¶
func (d *DocValuesLeafReader) NumDocs() int
func (DocValuesLeafReader) RegisterParentReader ¶
func (r DocValuesLeafReader) RegisterParentReader(reader index.IndexReader)
RegisterParentReader Expert: This method is called by IndexReaders which wrap other readers (e.g. CompositeReader or FilterLeafReader) to register the parent at the child (this reader) on construction of the parent. When this reader is closed, it will mark all registered parents as closed, too. The references to parent readers are weak only, so they can be GCed once they are no longer in use.
type DocValuesUpdatesNode ¶
type DocValuesUpdatesNode struct {
// contains filtered or unexported fields
}
func NewDocValuesUpdatesNode ¶
func NewDocValuesUpdatesNode(updates []index.DocValuesUpdate) *DocValuesUpdatesNode
func (*DocValuesUpdatesNode) Apply ¶
func (d *DocValuesUpdatesNode) Apply(bufferedDeletes *index.BufferedUpdates, docIDUpto int) error
func (*DocValuesUpdatesNode) IsDelete ¶
func (d *DocValuesUpdatesNode) IsDelete() bool
type DocValuesWriter ¶
type DocValuesWriter interface {
	Flush(state *index.SegmentWriteState, sortMap index.DocMap, consumer index.DocValuesConsumer) error
	GetDocValues() types.DocIdSetIterator
}
type DocsWithFieldSet ¶
type DocsWithFieldSet struct {
// contains filtered or unexported fields
}
DocsWithFieldSet Accumulator for documents that have a value for a field. This is optimized for the case that all documents have a value.
func NewDocsWithFieldSet ¶
func NewDocsWithFieldSet() *DocsWithFieldSet
func (*DocsWithFieldSet) Add ¶
func (d *DocsWithFieldSet) Add(docID int) error
func (*DocsWithFieldSet) Iterator ¶
func (d *DocsWithFieldSet) Iterator() (types.DocIdSetIterator, error)
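A small usage sketch (doc IDs invented; they are added in increasing order, matching the accumulator's indexing-time usage):

func ExampleDocsWithFieldSet() {
	set := NewDocsWithFieldSet()
	for _, docID := range []int{0, 1, 2, 5} { // docs that have a value for the field
		if err := set.Add(docID); err != nil {
			panic(err)
		}
	}
	it, err := set.Iterator()
	if err != nil {
		panic(err)
	}
	_ = it // iterate matching documents via the returned DocIdSetIterator
}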
type DocumentsWriter ¶
type DocumentsWriter struct {
// contains filtered or unexported fields
}
DocumentsWriter This class accepts multiple added documents and directly writes segment files. Each added document is passed to the indexing chain, which in turn processes the document into the different codec formats. Some formats write bytes to files immediately, e.g. stored fields and term vectors, while others are buffered by the indexing chain and written only on Flush. Once we have used our allowed RAM buffer, or the number of added docs is large enough (in the case we are flushing by doc count instead of RAM usage), we create a real segment and Flush it to the Directory.

Threads: Multiple threads are allowed into addDocument at once. There is an initial synchronized call to DocumentsWriterFlushControl.ObtainAndLock() which allocates a DWPT for this indexing thread. The same thread will not necessarily get the same DWPT over time. Then updateDocuments is called on that DWPT without synchronization (most of the "heavy lifting" is in this call). Once a DWPT fills up enough RAM or holds enough documents in memory, the DWPT is checked out for Flush and all changes are written to the directory. Each DWPT corresponds to one segment being written. When Flush is called by IndexWriter we check out all DWPTs that are associated with the current DocumentsWriterDeleteQueue out of the DocumentsWriterPerThreadPool and write them to disk. The Flush process can piggy-back on incoming indexing threads or even block them from adding documents if flushing can't keep up with new documents being added. Unless the stall control kicks in to block indexing threads, flushes happen concurrently with actual index requests.

Exceptions: Because this class directly updates in-memory posting lists, and flushes stored fields and term vectors directly to files in the directory, there are certain limited times when an exception can corrupt this state. For example, a disk full while flushing stored fields leaves this file in a corrupt state. Or, an OOM exception while appending to the in-memory posting lists can corrupt that posting list. We call such exceptions "aborting exceptions". In these cases we must call abort() to discard all docs added since the last Flush. All other exceptions ("non-aborting exceptions") can still partially update the index structures. These updates are consistent, but they represent only a part of the document seen up until the exception was hit. When this happens, we immediately mark the document as deleted so that the document is always atomically ("all or none") added to the index.
func NewDocumentsWriter ¶
func NewDocumentsWriter(flushNotifications index.FlushNotifications, indexCreatedVersionMajor int, pendingNumDocs *atomic.Int64, enableTestPoints bool, segmentName string, config *liveIndexWriterConfig, directoryOrig, directory store.Directory, globalFieldNumberMap *FieldNumbers) *DocumentsWriter
func (*DocumentsWriter) Close ¶
func (d *DocumentsWriter) Close() error
type DocumentsWriterDeleteQueue ¶
type DocumentsWriterDeleteQueue struct {
// contains filtered or unexported fields
}
DocumentsWriterDeleteQueue is a non-blocking linked pending deletes queue. In contrast to other queue implementation we only maintain the tail of the queue. A delete queue is always used in a context of a set of DWPTs and a global delete pool. Each of the DWPT and the global pool need to maintain their 'own' head of the queue (as a DeleteSlice instance per DocumentsWriterPerThread).
The difference between the DWPT and the global pool is that the DWPT starts maintaining a head once it has added its first document since for its segments private deletes only the deletes after that document are relevant. The global pool instead starts maintaining the head once this instance is created by taking the sentinel instance as its initial head.
Since each DocumentsWriterDeleteQueue.DeleteSlice maintains its own head and the list is only single linked the garbage collector takes care of pruning the list for us. All nodes in the list that are still relevant should be either directly or indirectly referenced by one of the DWPT's private DocumentsWriterDeleteQueue.DeleteSlice or by the global BufferedUpdates slice. Each DWPT as well as the global delete pool maintain their private DeleteSlice instance. In the DWPT case updating a slice is equivalent to atomically finishing the document. The slice update guarantees a "happens before" relationship to all other updates in the same indexing session. When a DWPT updates a document it:
1. consumes a document and finishes its processing
2. updates its private DocumentsWriterDeleteQueue.DeleteSlice either by calling updateSlice(DocumentsWriterDeleteQueue.DeleteSlice) or add(DocumentsWriterDeleteQueue.Node, DocumentsWriterDeleteQueue.DeleteSlice) (if the document has a delTerm)
3. applies all deletes in the slice to its private BufferedUpdates and resets it
4. increments its internal document id
The DWPT also doesn't apply its current document's delete term until it has updated its delete slice, which ensures the consistency of the update. If the update fails before the DeleteSlice has been updated, the deleteTerm will not be added to its private deletes nor to the global deletes.
func NewDocumentsWriterDeleteQueue ¶
func NewDocumentsWriterDeleteQueue() *DocumentsWriterDeleteQueue
func (*DocumentsWriterDeleteQueue) Add ¶
func (d *DocumentsWriterDeleteQueue) Add(deleteNode *Node, slice *DeleteSlice) int64
func (*DocumentsWriterDeleteQueue) Close ¶
func (d *DocumentsWriterDeleteQueue) Close()
func (*DocumentsWriterDeleteQueue) UpdateSlice ¶
func (d *DocumentsWriterDeleteQueue) UpdateSlice(slice *DeleteSlice) int64
UpdateSlice Negative result means there were new deletes since we last applied
type DocumentsWriterFlushControl ¶
type DocumentsWriterFlushControl struct {
// contains filtered or unexported fields
}
DocumentsWriterFlushControl This class controls DocumentsWriterPerThread flushing during indexing. It tracks the memory consumption per DocumentsWriterPerThread and uses a configured FlushPolicy to decide if a DocumentsWriterPerThread must Flush.
In addition to the FlushPolicy the Flush control might set certain DocumentsWriterPerThread as Flush pending iff a DocumentsWriterPerThread exceeds the IndexWriterConfig.getRAMPerThreadHardLimitMB() to prevent address space exhaustion.
func (*DocumentsWriterFlushControl) DoAfterFlush ¶
func (d *DocumentsWriterFlushControl) DoAfterFlush(dwpt *DocumentsWriterPerThread) error
func (*DocumentsWriterFlushControl) MarkForFullFlush ¶
func (d *DocumentsWriterFlushControl) MarkForFullFlush() int64
func (*DocumentsWriterFlushControl) NextPendingFlush ¶
func (d *DocumentsWriterFlushControl) NextPendingFlush() *DocumentsWriterPerThread
func (*DocumentsWriterFlushControl) ObtainAndLock ¶
func (d *DocumentsWriterFlushControl) ObtainAndLock() *DocumentsWriterPerThread
type DocumentsWriterFlushQueue ¶
type DocumentsWriterFlushQueue struct {
// contains filtered or unexported fields
}
DocumentsWriterFlushQueue lucene.internal
func NewDocumentsWriterFlushQueue ¶
func NewDocumentsWriterFlushQueue() *DocumentsWriterFlushQueue
func (*DocumentsWriterFlushQueue) AddDeletes ¶
func (q *DocumentsWriterFlushQueue) AddDeletes(queue *DocumentsWriterDeleteQueue) (bool, error)
func (*DocumentsWriterFlushQueue) AddFlushTicket ¶
func (q *DocumentsWriterFlushQueue) AddFlushTicket(dwpt *DocumentsWriterPerThread) (*FlushTicket, error)
func (*DocumentsWriterFlushQueue) AddSegment ¶
func (q *DocumentsWriterFlushQueue) AddSegment(ticket *FlushTicket, segment *FlushedSegment)
type DocumentsWriterPerThread ¶
type DocumentsWriterPerThread struct {
// contains filtered or unexported fields
}
func NewDocumentsWriterPerThread ¶
func NewDocumentsWriterPerThread(indexVersionCreated int, segmentName string, dirOrig, dir store.Directory, indexWriterConfig *liveIndexWriterConfig, deleteQueue *DocumentsWriterDeleteQueue, fieldInfos *FieldInfosBuilder, pendingNumDocs *atomic.Int64, enableTestPoints bool) *DocumentsWriterPerThread
func (*DocumentsWriterPerThread) Flush ¶
func (d *DocumentsWriterPerThread) Flush(ctx context.Context) error
func (*DocumentsWriterPerThread) GetNumDocsInRAM ¶
func (d *DocumentsWriterPerThread) GetNumDocsInRAM() int
func (*DocumentsWriterPerThread) GetSegmentInfo ¶
func (d *DocumentsWriterPerThread) GetSegmentInfo() *SegmentInfo
func (*DocumentsWriterPerThread) PendingFilesToDelete ¶
func (d *DocumentsWriterPerThread) PendingFilesToDelete() map[string]struct{}
type DocumentsWriterPerThreadPool ¶
type DocumentsWriterPerThreadPool struct { }
DocumentsWriterPerThreadPool controls DocumentsWriterPerThread instances and their thread assignments during indexing. Each DocumentsWriterPerThread, once obtained from the pool, is used exclusively for indexing a single document or list of documents by the obtaining thread. Each indexing thread must obtain such a DocumentsWriterPerThread to make progress. Depending on the DocumentsWriterPerThreadPool implementation, DocumentsWriterPerThread assignments might differ from document to document. Once a DocumentsWriterPerThread is selected for Flush, it is checked out of the thread pool and won't be reused for indexing. See checkout(DocumentsWriterPerThread).
type DoubleComparableProvider ¶
type DoubleComparableProvider struct {
// contains filtered or unexported fields
}
func (*DoubleComparableProvider) GetAsComparableLong ¶
func (d *DoubleComparableProvider) GetAsComparableLong(docID int) (int64, error)
type DoubleDocComparator ¶
type DoubleDocComparator struct {
// contains filtered or unexported fields
}
func (*DoubleDocComparator) Compare ¶
func (d *DoubleDocComparator) Compare(docID1, docID2 int) int
type DoubleSorter ¶
type DoubleSorter struct {
// contains filtered or unexported fields
}
DoubleSorter Sorts documents based on double values from a NumericDocValues instance
func NewDoubleSorter ¶
func NewDoubleSorter(providerName string, missingValue float64, reverse bool, valuesProvider NumericDocValuesProvider) *DoubleSorter
func (*DoubleSorter) GetComparableProviders ¶
func (d *DoubleSorter) GetComparableProviders(readers []index.LeafReader) ([]index.ComparableProvider, error)
func (*DoubleSorter) GetDocComparator ¶
func (d *DoubleSorter) GetDocComparator(reader index.LeafReader, maxDoc int) (index.DocComparator, error)
func (*DoubleSorter) GetProviderName ¶
func (d *DoubleSorter) GetProviderName() string
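A wiring sketch: sorting by a numeric doc-values field, reading values through EmptyNumericDocValuesProvider's FnGet hook. The field name "price" and the provider name "double" are illustrative choices, not package constants; GetNumeric is the package-level helper listed above.

    func priceSorter() *DoubleSorter {
        provider := &EmptyNumericDocValuesProvider{
            FnGet: func(reader index.LeafReader) (index.NumericDocValues, error) {
                return GetNumeric(reader, "price") // illustrative field name
            },
        }
        // missingValue 0, ascending order
        return NewDoubleSorter("double", 0, false, provider)
    }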
type EmptyDocComparator ¶
func (*EmptyDocComparator) Compare ¶
func (e *EmptyDocComparator) Compare(docID1, docID2 int) int
type EmptyDocValuesProducer ¶
type EmptyDocValuesProducer struct {
    FnGetNumeric       func(ctx context.Context, field *document.FieldInfo) (index.NumericDocValues, error)
    FnGetBinary        func(ctx context.Context, field *document.FieldInfo) (index.BinaryDocValues, error)
    FnGetSorted        func(ctx context.Context, field *document.FieldInfo) (index.SortedDocValues, error)
    FnGetSortedNumeric func(ctx context.Context, field *document.FieldInfo) (index.SortedNumericDocValues, error)
    FnGetSortedSet     func(ctx context.Context, field *document.FieldInfo) (index.SortedSetDocValues, error)
    FnCheckIntegrity   func() error
}
func (*EmptyDocValuesProducer) CheckIntegrity ¶
func (e *EmptyDocValuesProducer) CheckIntegrity() error
func (*EmptyDocValuesProducer) Close ¶
func (e *EmptyDocValuesProducer) Close() error
func (*EmptyDocValuesProducer) GetBinary ¶
func (e *EmptyDocValuesProducer) GetBinary(ctx context.Context, field *document.FieldInfo) (index.BinaryDocValues, error)
func (*EmptyDocValuesProducer) GetMergeInstance ¶
func (e *EmptyDocValuesProducer) GetMergeInstance() index.DocValuesProducer
func (*EmptyDocValuesProducer) GetNumeric ¶
func (e *EmptyDocValuesProducer) GetNumeric(ctx context.Context, field *document.FieldInfo) (index.NumericDocValues, error)
func (*EmptyDocValuesProducer) GetSorted ¶
func (e *EmptyDocValuesProducer) GetSorted(ctx context.Context, fieldInfo *document.FieldInfo) (index.SortedDocValues, error)
func (*EmptyDocValuesProducer) GetSortedNumeric ¶
func (e *EmptyDocValuesProducer) GetSortedNumeric(ctx context.Context, field *document.FieldInfo) (index.SortedNumericDocValues, error)
func (*EmptyDocValuesProducer) GetSortedSet ¶
func (e *EmptyDocValuesProducer) GetSortedSet(ctx context.Context, field *document.FieldInfo) (index.SortedSetDocValues, error)
type EmptyNumericDocValuesProvider ¶
type EmptyNumericDocValuesProvider struct {
FnGet func(reader index.LeafReader) (index.NumericDocValues, error)
}
func (*EmptyNumericDocValuesProvider) Get ¶
func (e *EmptyNumericDocValuesProvider) Get(reader index.LeafReader) (index.NumericDocValues, error)
type EmptySortedDocValuesProvider ¶
type EmptySortedDocValuesProvider struct {
FnGet func(reader index.LeafReader) (index.SortedDocValues, error)
}
func (*EmptySortedDocValuesProvider) Get ¶
func (e *EmptySortedDocValuesProvider) Get(reader index.LeafReader) (index.SortedDocValues, error)
type Event ¶
type Event func(writer *IndexWriter) error
type EventQueue ¶
type EventQueue struct {
// contains filtered or unexported fields
}
func NewEventQueue ¶
func NewEventQueue(writer *IndexWriter) *EventQueue
func (*EventQueue) Add ¶
func (e *EventQueue) Add(event Event) bool
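A minimal sketch of the Event mechanism: an Event is just a deferred callback against the owning IndexWriter, executed when the writer's machinery drains the queue. MaybeMerge is a real method on IndexWriter; constructing a standalone queue here is purely to demonstrate the API, since in practice the writer owns its queue.

    func queueMergeCheck(w *IndexWriter) bool {
        q := NewEventQueue(w)
        // Add reports whether the event was accepted.
        return q.Add(func(w *IndexWriter) error {
            return w.MaybeMerge() // runs later, when the queue is processed
        })
    }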
type FieldData ¶
type FieldData struct {
// contains filtered or unexported fields
}
func NewFieldData ¶
type FieldDataList ¶
type FieldDataList []FieldData
func (FieldDataList) Len ¶
func (f FieldDataList) Len() int
func (FieldDataList) Less ¶
func (f FieldDataList) Less(i, j int) bool
func (FieldDataList) Swap ¶
func (f FieldDataList) Swap(i, j int)
type FieldDimensions ¶
func NewFieldDimensions ¶
func NewFieldDimensions(dimensionCount, indexDimensionCount, dimensionNumBytes int) *FieldDimensions
type FieldInfosBuilder ¶
type FieldInfosBuilder struct {
// contains filtered or unexported fields
}
func NewFieldInfosBuilder ¶
func NewFieldInfosBuilder(globalFieldNumbers *FieldNumbers) *FieldInfosBuilder
func (*FieldInfosBuilder) Add ¶
func (f *FieldInfosBuilder) Add(other *fieldInfos) error
func (*FieldInfosBuilder) AddFieldInfo ¶
func (*FieldInfosBuilder) AddFieldInfoV ¶
func (*FieldInfosBuilder) Finish ¶
func (f *FieldInfosBuilder) Finish() index.FieldInfos
type FieldNumbers ¶
func NewFieldNumbers ¶
func NewFieldNumbers(softDeletesFieldName string) *FieldNumbers
func (*FieldNumbers) AddOrGet ¶
func (f *FieldNumbers) AddOrGet(fieldName string, preferredFieldNumber int, indexOptions document.IndexOptions, dvType document.DocValuesType, dimensionCount, indexDimensionCount, dimensionNumBytes int, isSoftDeletesField bool) (int, error)
AddOrGet Returns the global field number for the given field name. If the name does not yet exist, it is added with the given preferred field number if that number is unassigned; otherwise the first unassigned field number is used.
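The assignment rule reads more clearly as a toy (purely illustrative, not the package's implementation):

    // assign reuses the preferred number when it is free, otherwise takes
    // the lowest unassigned number.
    func assign(taken map[int]bool, preferred int) int {
        if preferred >= 0 && !taken[preferred] {
            taken[preferred] = true
            return preferred
        }
        n := 0
        for taken[n] {
            n++
        }
        taken[n] = true
        return n
    }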
func (*FieldNumbers) SetDimensions ¶
func (f *FieldNumbers) SetDimensions(number int, name string, dimensionCount, indexDimensionCount, dimensionNumBytes int)
type FieldTermIterator ¶
type FieldTermIterator interface {
    bytesref.BytesIterator

    // Field
    // Returns the current field.
    // This method should not be called after iteration is done.
    // Note that you may use == to detect a change in field.
    Field() string

    // DelGen
    // Del gen of the current term.
    DelGen() int64
}
FieldTermIterator Iterates over terms across multiple fields. The caller must check Field after each Next call to see if the field changed; comparing with == is sufficient because the iterator returns the same string value for a given field.
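A hedged consumption sketch. The embedded bytesref.BytesIterator signature is not shown in this listing, so the Next() ([]byte, error) contract (nil term at exhaustion) assumed below may differ from the real one; Field and DelGen are as declared above.

    func consumeTerms(it FieldTermIterator, process func(field string, term []byte, delGen int64)) error {
        var lastField string
        for {
            term, err := it.Next() // assumed BytesIterator contract
            if err != nil {
                return err
            }
            if term == nil {
                return nil // iteration done
            }
            if f := it.Field(); f != lastField {
                lastField = f // field changed; switch per-field state here
            }
            process(lastField, term, it.DelGen())
        }
    }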
type FilteredTermsEnum ¶
type FilteredTermsEnum interface {
    index.TermsEnum

    // Accept
    // Reports whether a term is accepted, not accepted, or whether the
    // iteration should end (possibly after a seek).
    Accept(term []byte) (AcceptStatus, error)
}
type FilteredTermsEnumBase ¶
type FilteredTermsEnumBase struct {
    Accept       func(term []byte) (AcceptStatus, error)
    NextSeekTerm func(currentTerm []byte) ([]byte, error)
    // contains filtered or unexported fields
}
func NewFilteredTermsEnumDefault ¶
func NewFilteredTermsEnumDefault(cfg *FilteredTermsEnumDefaultConfig) *FilteredTermsEnumBase
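A hedged sketch of a prefix filter built on the Accept hook. The FilteredTermsEnumDefaultConfig fields and the AcceptStatus constant names are not shown in this listing, so the config and the acceptYes/acceptEnd values are taken as parameters rather than invented (assumes the standard bytes package):

    func newPrefixEnum(cfg *FilteredTermsEnumDefaultConfig, prefix []byte,
        acceptYes, acceptEnd AcceptStatus) *FilteredTermsEnumBase {

        enum := NewFilteredTermsEnumDefault(cfg)
        enum.Accept = func(term []byte) (AcceptStatus, error) {
            if bytes.HasPrefix(term, prefix) {
                return acceptYes, nil
            }
            // Terms arrive in sorted order, so the first miss after the
            // prefix range means iteration can end.
            return acceptEnd, nil
        }
        return enum
    }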
func (*FilteredTermsEnumBase) Attributes ¶
func (f *FilteredTermsEnumBase) Attributes() *attribute.Source
Attributes Returns the related attributes; the returned AttributeSource is shared with the delegate TermsEnum.
func (*FilteredTermsEnumBase) DocFreq ¶
func (f *FilteredTermsEnumBase) DocFreq() (int, error)
func (*FilteredTermsEnumBase) Impacts ¶
func (f *FilteredTermsEnumBase) Impacts(flags int) (index.ImpactsEnum, error)
func (*FilteredTermsEnumBase) Next ¶
func (f *FilteredTermsEnumBase) Next(context.Context) ([]byte, error)
func (*FilteredTermsEnumBase) Ord ¶
func (f *FilteredTermsEnumBase) Ord() (int64, error)
func (*FilteredTermsEnumBase) Postings ¶
func (f *FilteredTermsEnumBase) Postings(reuse index.PostingsEnum, flags int) (index.PostingsEnum, error)
func (*FilteredTermsEnumBase) SeekCeil ¶
func (f *FilteredTermsEnumBase) SeekCeil(ctx context.Context, text []byte) (index.SeekStatus, error)
func (*FilteredTermsEnumBase) SeekExactByOrd ¶
func (f *FilteredTermsEnumBase) SeekExactByOrd(ctx context.Context, ord int64) error
func (*FilteredTermsEnumBase) SeekExactExpert ¶
func (f *FilteredTermsEnumBase) SeekExactExpert(ctx context.Context, term []byte, state index.TermState) error
SeekExactExpert This enum does not support seeking! Throws: ErrUnsupportedOperation – In general, subclasses do not support seeking.
func (*FilteredTermsEnumBase) Term ¶
func (f *FilteredTermsEnumBase) Term() ([]byte, error)
func (*FilteredTermsEnumBase) TermState ¶
func (f *FilteredTermsEnumBase) TermState() (index.TermState, error)
TermState Returns the filtered enum's term state.
func (*FilteredTermsEnumBase) TotalTermFreq ¶
func (f *FilteredTermsEnumBase) TotalTermFreq() (int64, error)
type FindSegmentsFile ¶
type FindSegmentsFile[T any] struct {
    // contains filtered or unexported fields
}
FindSegmentsFile Utility class for executing code that needs to do something with the current segments file. This is necessary with lock-less commits because from the time you locate the current segments file name, until you actually open it, read its contents, or check modified time, etc., it could have been deleted due to a writer commit finishing.
func NewFindSegmentsFile ¶
func NewFindSegmentsFile[T any](directory store.Directory) *FindSegmentsFile[T]
func (*FindSegmentsFile[T]) Run ¶
func (f *FindSegmentsFile[T]) Run(ctx context.Context) (T, error)
Run Locate the most recent segments file and run doBody on it.
func (*FindSegmentsFile[T]) RunWithCommit ¶
func (f *FindSegmentsFile[T]) RunWithCommit(ctx context.Context, commit IndexCommit) (T, error)
RunWithCommit Run doBody on the provided commit.
func (*FindSegmentsFile[T]) SetFuncDoBody ¶
func (f *FindSegmentsFile[T]) SetFuncDoBody(fnDoBody func(ctx context.Context, segmentFileName string) (T, error))
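A usage sketch: reading the current SegmentInfos under lock-less commits by routing the read through FindSegmentsFile, so that a segments file deleted by a concurrent commit can be retried against the newest generation. The parseInfos helper is hypothetical and stands in for whatever decoder the caller uses.

    func currentInfos(ctx context.Context, dir store.Directory) (*SegmentInfos, error) {
        fsf := NewFindSegmentsFile[*SegmentInfos](dir)
        fsf.SetFuncDoBody(func(ctx context.Context, segmentFileName string) (*SegmentInfos, error) {
            return parseInfos(ctx, dir, segmentFileName) // hypothetical decoder
        })
        // Run locates the most recent segments file and applies doBody to it.
        return fsf.Run(ctx)
    }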
type FinishedSegments ¶
FinishedSegments Tracks the contiguous range of packets that have finished resolving. We need this because packets are resolved concurrently, and only the contiguous prefix of completed packets can be written to disk. A toy sketch of this frontier follows the method listing below.
func NewFinishedSegments ¶
func NewFinishedSegments() *FinishedSegments
func (*FinishedSegments) Clear ¶
func (f *FinishedSegments) Clear()
func (*FinishedSegments) FinishedSegment ¶
func (f *FinishedSegments) FinishedSegment(delGen int64)
func (*FinishedSegments) GetCompletedDelGen ¶
func (f *FinishedSegments) GetCompletedDelGen() int64
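The contiguous-frontier idea, as a self-contained toy (illustrative only; none of these names exist in the package): packets finish out of order, and only delGens up to the first gap are safe to write.

    type frontier struct {
        completed int64          // highest contiguous finished delGen
        pending   map[int64]bool // finished, but not yet contiguous
    }

    func (f *frontier) finish(delGen int64) {
        f.pending[delGen] = true
        // Advance the frontier across any now-contiguous delGens.
        for f.pending[f.completed+1] {
            f.completed++
            delete(f.pending, f.completed)
        }
    }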
type FloatComparableProvider ¶
type FloatComparableProvider struct {
// contains filtered or unexported fields
}
func (*FloatComparableProvider) GetAsComparableLong ¶
func (r *FloatComparableProvider) GetAsComparableLong(docID int) (int64, error)
type FloatDocComparator ¶
type FloatDocComparator struct {
// contains filtered or unexported fields
}
func (*FloatDocComparator) Compare ¶
func (f *FloatDocComparator) Compare(docID1, docID2 int) int
type FloatSorter ¶
type FloatSorter struct {
// contains filtered or unexported fields
}
FloatSorter Sorts documents based on float values from a NumericDocValues instance
func NewFloatSorter ¶
func NewFloatSorter(providerName string, missingValue float32, reverse bool, valuesProvider NumericDocValuesProvider) *FloatSorter
func (*FloatSorter) GetComparableProviders ¶
func (f *FloatSorter) GetComparableProviders(readers []index.LeafReader) ([]index.ComparableProvider, error)
func (*FloatSorter) GetDocComparator ¶
func (f *FloatSorter) GetDocComparator(reader index.LeafReader, maxDoc int) (index.DocComparator, error)
func (*FloatSorter) GetProviderName ¶
func (f *FloatSorter) GetProviderName() string
type FlushPolicy ¶
type FlushPolicy interface { }
type FlushTicket ¶
func NewFlushTicket ¶
func NewFlushTicket(frozenUpdates *FrozenBufferedUpdates, hasSegment bool) *FlushTicket
type FlushedSegment ¶
type FlushedSegment struct {
// contains filtered or unexported fields
}
type FreqProxFields ¶
type FreqProxFields struct {
// contains filtered or unexported fields
}
FreqProxFields Implements limited (iterators only, no stats) Fields interface over the in-RAM buffered fields/terms/postings, to flush postings through the PostingsFormat.
func NewFreqProxFields ¶
func NewFreqProxFields(fieldList []*FreqProxTermsWriterPerField) *FreqProxFields
func (*FreqProxFields) Names ¶
func (f *FreqProxFields) Names() []string
func (*FreqProxFields) Size ¶
func (f *FreqProxFields) Size() int
type FreqProxPostingsArray ¶
type FreqProxPostingsArray struct {
    *BaseParallelPostingsArray
    // contains filtered or unexported fields
}
func NewFreqProxPostingsArray ¶
func NewFreqProxPostingsArray(writeFreqs, writeProx, writeOffsets bool) *FreqProxPostingsArray
func (*FreqProxPostingsArray) BytesPerPosting ¶
func (f *FreqProxPostingsArray) BytesPerPosting() int
func (*FreqProxPostingsArray) NewInstance ¶
func (f *FreqProxPostingsArray) NewInstance() ParallelPostingsArray
func (*FreqProxPostingsArray) SetLastDocCodes ¶
func (f *FreqProxPostingsArray) SetLastDocCodes(termID, v int)
func (*FreqProxPostingsArray) SetLastDocIDs ¶
func (f *FreqProxPostingsArray) SetLastDocIDs(termID, v int)
func (*FreqProxPostingsArray) SetLastOffsets ¶
func (f *FreqProxPostingsArray) SetLastOffsets(termID, v int)
func (*FreqProxPostingsArray) SetLastPositions ¶
func (f *FreqProxPostingsArray) SetLastPositions(termID, v int)
func (*FreqProxPostingsArray) SetTermFreqs ¶
func (f *FreqProxPostingsArray) SetTermFreqs(termID, v int)
type FreqProxPostingsEnum ¶
type FreqProxPostingsEnum struct {
// contains filtered or unexported fields
}
func (*FreqProxPostingsEnum) Cost ¶
func (f *FreqProxPostingsEnum) Cost() int64
func (*FreqProxPostingsEnum) DocID ¶
func (f *FreqProxPostingsEnum) DocID() int
func (*FreqProxPostingsEnum) EndOffset ¶
func (f *FreqProxPostingsEnum) EndOffset() (int, error)
func (*FreqProxPostingsEnum) Freq ¶
func (f *FreqProxPostingsEnum) Freq() (int, error)
func (*FreqProxPostingsEnum) GetPayload ¶
func (f *FreqProxPostingsEnum) GetPayload() ([]byte, error)
func (*FreqProxPostingsEnum) NextDoc ¶
func (f *FreqProxPostingsEnum) NextDoc(context.Context) (int, error)
func (*FreqProxPostingsEnum) NextPosition ¶
func (f *FreqProxPostingsEnum) NextPosition() (int, error)
func (*FreqProxPostingsEnum) SlowAdvance ¶
func (*FreqProxPostingsEnum) StartOffset ¶
func (f *FreqProxPostingsEnum) StartOffset() (int, error)
type FreqProxTerms ¶
type FreqProxTerms struct {
// contains filtered or unexported fields
}
func NewFreqProxTerms ¶
func NewFreqProxTerms(terms *FreqProxTermsWriterPerField) *FreqProxTerms
func (*FreqProxTerms) GetDocCount ¶
func (f *FreqProxTerms) GetDocCount() (int, error)
func (*FreqProxTerms) GetMax ¶
func (f *FreqProxTerms) GetMax() ([]byte, error)
func (*FreqProxTerms) GetMin ¶
func (f *FreqProxTerms) GetMin() ([]byte, error)
func (*FreqProxTerms) GetSumDocFreq ¶
func (f *FreqProxTerms) GetSumDocFreq() (int64, error)
func (*FreqProxTerms) GetSumTotalTermFreq ¶
func (f *FreqProxTerms) GetSumTotalTermFreq() (int64, error)
func (*FreqProxTerms) HasFreqs ¶
func (f *FreqProxTerms) HasFreqs() bool
func (*FreqProxTerms) HasOffsets ¶
func (f *FreqProxTerms) HasOffsets() bool
func (*FreqProxTerms) HasPayloads ¶
func (f *FreqProxTerms) HasPayloads() bool
func (*FreqProxTerms) HasPositions ¶
func (f *FreqProxTerms) HasPositions() bool
func (*FreqProxTerms) Intersect ¶
func (f *FreqProxTerms) Intersect(compiled *automaton.CompiledAutomaton, startTerm []byte) (index.TermsEnum, error)
func (*FreqProxTerms) Size ¶
func (f *FreqProxTerms) Size() (int, error)
type FreqProxTermsEnum ¶
type FreqProxTermsEnum struct {
    *BaseTermsEnum
    // contains filtered or unexported fields
}
func NewFreqProxTermsEnum ¶
func NewFreqProxTermsEnum(terms *FreqProxTermsWriterPerField) *FreqProxTermsEnum
func (*FreqProxTermsEnum) DocFreq ¶
func (f *FreqProxTermsEnum) DocFreq() (int, error)
func (*FreqProxTermsEnum) Impacts ¶
func (f *FreqProxTermsEnum) Impacts(flags int) (index.ImpactsEnum, error)
func (*FreqProxTermsEnum) Ord ¶
func (f *FreqProxTermsEnum) Ord() (int64, error)
func (*FreqProxTermsEnum) Postings ¶
func (f *FreqProxTermsEnum) Postings(reuse index.PostingsEnum, flags int) (index.PostingsEnum, error)
func (*FreqProxTermsEnum) SeekCeil ¶
func (f *FreqProxTermsEnum) SeekCeil(ctx context.Context, text []byte) (index.SeekStatus, error)
func (*FreqProxTermsEnum) SeekExactByOrd ¶
func (f *FreqProxTermsEnum) SeekExactByOrd(ctx context.Context, ord int64) error
func (*FreqProxTermsEnum) Term ¶
func (f *FreqProxTermsEnum) Term() ([]byte, error)
func (*FreqProxTermsEnum) TotalTermFreq ¶
func (f *FreqProxTermsEnum) TotalTermFreq() (int64, error)
type FreqProxTermsWriter ¶
type FreqProxTermsWriter struct {
*BaseTermsHash
}
func NewFreqProxTermsWriter ¶
func NewFreqProxTermsWriter(intBlockAllocator ints.IntsAllocator, byteBlockAllocator bytesref.Allocator, nextTermsHash TermsHash) *FreqProxTermsWriter
func (*FreqProxTermsWriter) AddField ¶
func (f *FreqProxTermsWriter) AddField(invertState *index.FieldInvertState, fieldInfo *document.FieldInfo) (TermsHashPerField, error)
func (*FreqProxTermsWriter) Flush ¶
func (f *FreqProxTermsWriter) Flush(ctx context.Context, fieldsToFlush map[string]TermsHashPerField, state *index.SegmentWriteState, sortMap index.DocMap, norms index.NormsProducer) error
func (*FreqProxTermsWriter) SetTermBytePool ¶
func (f *FreqProxTermsWriter) SetTermBytePool(termBytePool *bytesref.BlockPool)
type FreqProxTermsWriterPerField ¶
type FreqProxTermsWriterPerField struct {
// contains filtered or unexported fields
}
FreqProxTermsWriterPerField TODO: break into separate freq and prox writers as codecs; make separate container (tii/tis/skip/*) that can be configured as any number of files 1..N
func NewFreqProxTermsWriterPerField ¶
func NewFreqProxTermsWriterPerField(invertState *index.FieldInvertState, termsHash TermsHash, fieldInfo *document.FieldInfo, nextPerField TermsHashPerField) (*FreqProxTermsWriterPerField, error)
func (FreqProxTermsWriterPerField) Add ¶
Add Called once per inverted token. This is the primary entry point (for first TermsHash); postings use this API.
func (FreqProxTermsWriterPerField) Add2nd ¶
Add2nd Secondary entry point (for the 2nd and subsequent TermsHash), because token text has already been "interned" into textStart, so we hash by textStart. Term vectors use this API.
func (*FreqProxTermsWriterPerField) AddTerm ¶
func (f *FreqProxTermsWriterPerField) AddTerm(termID, docID int) error
func (*FreqProxTermsWriterPerField) CreatePostingsArray ¶
func (f *FreqProxTermsWriterPerField) CreatePostingsArray(size int) ParallelPostingsArray
func (*FreqProxTermsWriterPerField) Finish ¶
func (f *FreqProxTermsWriterPerField) Finish() error
func (FreqProxTermsWriterPerField) GetNextPerField ¶
func (t FreqProxTermsWriterPerField) GetNextPerField() TermsHashPerField
func (FreqProxTermsWriterPerField) GetPostingsArray ¶
func (t FreqProxTermsWriterPerField) GetPostingsArray() ParallelPostingsArray
func (*FreqProxTermsWriterPerField) NewPostingsArray ¶
func (f *FreqProxTermsWriterPerField) NewPostingsArray()
func (*FreqProxTermsWriterPerField) NewTerm ¶
func (f *FreqProxTermsWriterPerField) NewTerm(termID, docID int) error
func (FreqProxTermsWriterPerField) SetPostingsArray ¶
func (t FreqProxTermsWriterPerField) SetPostingsArray(v ParallelPostingsArray)
func (*FreqProxTermsWriterPerField) Start ¶
func (f *FreqProxTermsWriterPerField) Start(field document.IndexableField, first bool) bool
type FreqProxTermsWriterPerFields ¶
type FreqProxTermsWriterPerFields []*FreqProxTermsWriterPerField
func (FreqProxTermsWriterPerFields) Len ¶
func (p FreqProxTermsWriterPerFields) Len() int
func (FreqProxTermsWriterPerFields) Less ¶
func (p FreqProxTermsWriterPerFields) Less(i, j int) bool
func (FreqProxTermsWriterPerFields) Swap ¶
func (p FreqProxTermsWriterPerFields) Swap(i, j int)
type FrozenBufferedUpdates ¶
FrozenBufferedUpdates Holds buffered deletes and updates by term or query, once pushed. Pushed deletes/updates are write-once, so we shift to a more memory-efficient data structure to hold them. We don't hold docIDs because these are applied on flush.
func NewFrozenBufferedUpdates ¶
func NewFrozenBufferedUpdates(updates *index.BufferedUpdates, privateSegment index.SegmentCommitInfo) *FrozenBufferedUpdates
NewFrozenBufferedUpdates TODO: fix it
func (*FrozenBufferedUpdates) Any ¶
func (f *FrozenBufferedUpdates) Any() bool
func (*FrozenBufferedUpdates) Apply ¶
func (f *FrozenBufferedUpdates) Apply(segStates []*SegmentState) (int, error)
Apply Applies pending delete-by-term, delete-by-query and doc values updates to all segments in the index, returning the number of new deleted or updated documents.
type IndexCommit ¶
type IndexCommit interface {
    // GetSegmentsFileName
    // Get the segments file (segments_N) associated with this commit point.
    GetSegmentsFileName() string

    // GetFileNames
    // Returns all index files referenced by this commit point.
    GetFileNames() (map[string]struct{}, error)

    // GetDirectory
    // Returns the Directory for the index.
    GetDirectory() store.Directory

    // Delete
    // Delete this commit point. This only applies when using the commit point in the context of
    // IndexWriter's IndexDeletionPolicy.
    // Upon calling this, the writer is notified that this commit point should be deleted.
    // The decision that a commit point should be deleted is taken by the IndexDeletionPolicy in effect,
    // and therefore this should only be called by its onInit() or onCommit() methods.
    Delete() error

    // IsDeleted
    // Returns true if this commit should be deleted;
    // this is only used by IndexWriter after invoking the IndexDeletionPolicy.
    IsDeleted() bool

    // GetSegmentCount
    // Returns the number of segments referenced by this commit.
    GetSegmentCount() int

    // GetGeneration
    // Returns the generation (the _N in segments_N) for this IndexCommit.
    GetGeneration() int64

    // GetUserData
    // Returns userData, previously passed to IndexWriter.setLiveCommitData(Iterable) for this commit.
    GetUserData() (map[string]string, error)

    CompareTo(commit IndexCommit) int

    // GetReader
    // Package-private API for IndexWriter to init from a commit point pulled from an NRT or non-NRT reader.
    GetReader() *StandardDirectoryReader
}
IndexCommit Expert: represents a single commit into an index as seen by the IndexDeletionPolicy or IndexReader. Changes to the content of an index are made visible only after the writer who made that change commits by writing a new segments file (segments_N). This point in time, when the action of writing of a new segments file to the directory is completed, is an index commit.
Each index commit point has a unique segments file associated with it. The segments file associated with a later index commit point would have a larger N.
lucene.experimental
TODO: this is now a poor name, because this class also represents a point-in-time view from an NRT reader
type IndexCommits ¶
type IndexCommits []IndexCommit
func (IndexCommits) Len ¶
func (list IndexCommits) Len() int
func (IndexCommits) Less ¶
func (list IndexCommits) Less(i, j int) bool
func (IndexCommits) Swap ¶
func (list IndexCommits) Swap(i, j int)
type IndexDeletionPolicy ¶
type IndexDeletionPolicy interface {
    // OnInit
    // This is called once when a writer is first instantiated to give the policy a chance to remove old
    // commit points. The writer locates all index commits present in the index directory and calls this
    // method. The policy may choose to delete some of the commit points, doing so by calling the delete()
    // method of IndexCommit.
    // Note: the last CommitPoint is the most recent one, i.e. the "front index state". Be careful not to
    // delete it, unless you know for sure what you are doing, and unless you can afford to lose the index
    // content while doing that.
    // Params: commits – List of current point-in-time commits, sorted by age (the 0th one is the oldest
    // commit). Note that for a new index this method is invoked with an empty list.
    OnInit(commits []IndexCommit) error

    OnCommit(commits []IndexCommit) error
}
IndexDeletionPolicy Expert: policy for deletion of stale index commits. Implement this interface and set it on IndexWriterConfig.setIndexDeletionPolicy(IndexDeletionPolicy) to customize when older point-in-time commits are deleted from the index directory. The default deletion policy is KeepOnlyLastCommitDeletionPolicy, which always removes old commits as soon as a new commit is done (this matches the behavior before 2.2). One expected use case for this (and the reason why it was first created) is to work around problems with an index directory accessed via filesystems like NFS, because NFS does not provide the "delete on last close" semantics that Lucene's "point in time" search normally relies on. By implementing a custom deletion policy, such as "a commit is only removed once it has been stale for more than X minutes", you can give your readers time to refresh to the new commit before IndexWriter removes the old commits. Note that doing so will increase the storage requirements of the index. See LUCENE-710 for details. A hedged sketch of such a grace-period policy follows.
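A minimal sketch of that grace-period idea, assuming the standard time package. The local generation-to-timestamp bookkeeping is an assumption of this sketch, since IndexCommit does not expose a commit age:

    type gracePeriodPolicy struct {
        grace time.Duration
        seen  map[int64]time.Time // generation -> first time we saw it
    }

    func newGracePeriodPolicy(grace time.Duration) *gracePeriodPolicy {
        return &gracePeriodPolicy{grace: grace, seen: map[int64]time.Time{}}
    }

    func (p *gracePeriodPolicy) OnInit(commits []IndexCommit) error {
        return p.OnCommit(commits)
    }

    func (p *gracePeriodPolicy) OnCommit(commits []IndexCommit) error {
        now := time.Now()
        for i, c := range commits {
            gen := c.GetGeneration()
            if _, ok := p.seen[gen]; !ok {
                p.seen[gen] = now
            }
            // Never delete the most recent commit (commits are sorted by age).
            if i == len(commits)-1 {
                continue
            }
            // Delete only once the commit has been stale past the grace period.
            if now.Sub(p.seen[gen]) > p.grace {
                if err := c.Delete(); err != nil {
                    return err
                }
            }
        }
        return nil
    }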
type IndexFileDeleter ¶
IndexFileDeleter This class keeps track of each SegmentInfos instance that is still "live", either because it corresponds to a segments_N file in the Directory (a "commit", i.e. a committed SegmentInfos) or because it's an in-memory SegmentInfos that a writer is actively updating but has not yet committed. This class uses simple reference counting to map the live SegmentInfos instances to individual files in the Directory.
The same directory file may be referenced by more than one IndexCommit, i.e. more than one SegmentInfos. Therefore we count how many commits reference each file. When all the commits referencing a certain file have been deleted, the refcount for that file becomes zero, and the file is deleted.
A separate deletion policy interface (IndexDeletionPolicy) is consulted on creation (onInit) and once per commit (onCommit), to decide when a commit should be removed.
It is the business of the IndexDeletionPolicy to choose when to delete commit points. The actual mechanics of file deletion, retrying, etc, derived from the deletion of commit points is the business of the IndexFileDeleter.
The current default deletion policy is KeepOnlyLastCommitDeletionPolicy, which removes all prior commits when a new commit has completed. This matches the behavior before 2.2.
Note that you must hold the write.lock before instantiating this class. It opens segments_N file(s) directly with no retry logic.
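The reference-counting rule described above, as a self-contained toy (illustrative only; the real deleter's bookkeeping is unexported):

    type refCounter struct {
        counts map[string]int
    }

    // incRef is called with the files a new commit references.
    func (r *refCounter) incRef(files map[string]struct{}) {
        for f := range files {
            r.counts[f]++
        }
    }

    // decRef is called when a commit is deleted; files whose count reaches
    // zero are no longer referenced by any commit and may be removed from
    // the Directory.
    func (r *refCounter) decRef(files map[string]struct{}) (toDelete []string) {
        for f := range files {
            r.counts[f]--
            if r.counts[f] <= 0 {
                delete(r.counts, f)
                toDelete = append(toDelete, f)
            }
        }
        return toDelete
    }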
func NewIndexFileDeleter ¶
func NewIndexFileDeleter(ctx context.Context, files []string, directoryOrig, directory store.Directory, policy IndexDeletionPolicy, segmentInfos *SegmentInfos, writer *IndexWriter, initialIndexExists, isReaderInit bool) (*IndexFileDeleter, error)
NewIndexFileDeleter Initialize the deleter: find all previous commits in the Directory, incref the files they reference, call the policy to let it delete commits. This will remove any files not referenced by any of the commits. Throws: IOException – if there is a low-level IO error
func (*IndexFileDeleter) Checkpoint ¶
func (r *IndexFileDeleter) Checkpoint(segmentInfos *SegmentInfos, isCommit bool) error
Checkpoint For definition of "check point" see IndexWriter comments: "Clarification: Check Points (and commits)". IndexWriter calls this when it has made a "consistent change" to the index, meaning new files are written to the index and the in-memory SegmentInfos have been modified to point to those files. This may or may not be a commit (segments_N may or may not have been written). We simply incref the files referenced by the new SegmentInfos and decref the files we had previously seen (if any). If this is a commit, we also call the policy to give it a chance to remove other commits. If any commits are removed, we decref their files as well.
func (*IndexFileDeleter) DecRef ¶
func (r *IndexFileDeleter) DecRef(files map[string]struct{}) error
func (*IndexFileDeleter) IncRef ¶
func (r *IndexFileDeleter) IncRef(segmentInfos *SegmentInfos, isCommit bool) error
func (*IndexFileDeleter) IncRefFiles ¶
func (r *IndexFileDeleter) IncRefFiles(files map[string]struct{}) error
type IndexReaderSPI ¶
type IndexWriter ¶
type IndexWriter struct {
// contains filtered or unexported fields
}
func NewIndexWriter ¶
func NewIndexWriter(ctx context.Context, dir store.Directory, conf *IndexWriterConfig) (*IndexWriter, error)
func (*IndexWriter) AddDocument ¶
AddDocument Adds a document to this index. Note that if an Exception is hit (for example disk full) then the index will be consistent, but this document may not have been added. Furthermore, it's possible the index will have one segment in non-compound format even when using compound files (when a merge has partially succeeded).
This method periodically flushes pending documents to the Directory (see above), and also periodically triggers segment merges in the index according to the MergePolicy in use.
Merges temporarily consume space in the directory. The amount of space required is up to 1X the size of all segments being merged, when no readers/searchers are open against the index, and up to 2X the size of all segments being merged when readers/searchers are open against the index (see forceMerge(int) for details). The sequence of primitive merge operations performed is governed by the merge policy.
Note that each term in the document can be no longer than MAX_TERM_LENGTH in bytes, otherwise an IllegalArgumentException will be thrown.
Note that it's possible to create an invalid Unicode string in Java if a UTF-16 surrogate pair is malformed. In this case, the invalid characters are silently replaced with the Unicode replacement character U+FFFD.
Returns: The sequence number for this operation Throws: CorruptIndexException – if the index is corrupt
IOException – if there is a low-level IO error
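An end-to-end usage sketch under stated assumptions: the caller supplies a store.Directory, a codec, a similarity, and a populated *document.Document, since their constructors are not part of this listing; the AddDocument signature (ctx, doc) is inferred from UpdateDocument and may differ.

    func indexOne(ctx context.Context, dir store.Directory, codec index.Codec,
        sim index.Similarity, doc *document.Document) (int64, error) {

        w, err := NewIndexWriter(ctx, dir, NewIndexWriterConfig(codec, sim))
        if err != nil {
            return 0, err
        }
        defer w.Close() // commits first when commitOnClose is enabled

        return w.AddDocument(ctx, doc) // signature assumed; see note above
    }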
func (*IndexWriter) AddIndexesFromReaders ¶
func (w *IndexWriter) AddIndexesFromReaders(readers ...index.CodecReader) (int64, error)
AddIndexesFromReaders Merges the provided indexes into this index. The provided IndexReaders are not closed. See addIndexes for details on transactional semantics, temporary free space required in the Directory, and non-CFS segments on an Exception.
NOTE: empty segments are dropped by this method and not added to this index.
NOTE: this merges all given LeafReaders in one merge. If you intend to merge a large number of readers, it may be better to call this method multiple times, each time with a small set of readers. In principle, if you use a merge policy with a mergeFactor or maxMergeAtOnce parameter, you should pass that many readers in one call.
NOTE: this method does not call or make use of the MergeScheduler, so any custom bandwidth throttling is at the moment ignored.
func (*IndexWriter) Changed ¶
func (w *IndexWriter) Changed()
func (*IndexWriter) Close ¶
func (w *IndexWriter) Close() error
Close Closes all open resources and releases the write lock. If IndexWriterConfig.commitOnClose is true, this will attempt to gracefully shut down by writing any changes, waiting for any running merges, committing, and closing. In this case, note that:
If you called prepareCommit but failed to call commit, this method will throw IllegalStateException and the IndexWriter will not be closed.
If this method throws any other exception, the IndexWriter will be closed, but changes may have been lost.
Note that this may be a costly operation, so try to re-use a single writer instead of closing and opening a new one. See commit() for caveats about write caching done by some IO devices.
NOTE: You must ensure no other threads are still making changes at the same time that this method is invoked.
func (*IndexWriter) GetConfig ¶
func (w *IndexWriter) GetConfig() *IndexWriterConfig
func (*IndexWriter) GetDirectory ¶
func (w *IndexWriter) GetDirectory() store.Directory
GetDirectory Returns the Directory used by this index.
func (*IndexWriter) GetReader ¶
func (w *IndexWriter) GetReader(ctx context.Context, applyAllDeletes bool, writeAllDeletes bool) (index.DirectoryReader, error)
func (*IndexWriter) IncRefDeleter ¶
func (w *IndexWriter) IncRefDeleter(segmentInfos *SegmentInfos) error
func (*IndexWriter) IsClosed ¶
func (w *IndexWriter) IsClosed() bool
func (*IndexWriter) MaybeMerge ¶
func (w *IndexWriter) MaybeMerge() error
func (*IndexWriter) Release ¶
func (w *IndexWriter) Release(readersAndUpdates *ReadersAndUpdates) error
func (*IndexWriter) SoftUpdateDocument ¶
func (w *IndexWriter) SoftUpdateDocument(ctx context.Context, term index.Term, doc *document.Document, softDeletes ...document.IndexableField) (int64, error)
SoftUpdateDocument Expert: Updates a document by first updating the document(s) containing term with the given doc-values fields and then adding the new document. The doc-values update and then add are atomic as seen by a reader on the same index (flush may happen only after the add). One use of this API is to retain older versions of documents instead of replacing them. The existing documents can be updated to reflect they are no longer current while atomically adding new documents at the same time. In contrast to updateDocument(Term, Iterable) this method will not delete documents in the index matching the given term but instead update them with the given doc-values fields which can be used as a soft-delete mechanism. See addDocuments(Iterable) and updateDocuments(Term, Iterable).
Returns: The sequence number for this operation
Throws: CorruptIndexException – if the index is corrupt
IOException – if there is a low-level IO error
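A sketch of the soft-delete flow described above. The softDel field must target the writer's configured soft-deletes field; its construction is left to the caller because the document package's field constructors are not part of this listing, and "id" as the unique-key field is an assumption.

    func softReplace(ctx context.Context, w *IndexWriter, id string,
        doc *document.Document, softDel document.IndexableField) (int64, error) {

        term := NewTerm("id", []byte(id)) // assumed unique-key field
        // Marks matching docs via the soft-deletes field and atomically adds doc.
        return w.SoftUpdateDocument(ctx, term, doc, softDel)
    }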
func (*IndexWriter) UpdateDocument ¶
func (w *IndexWriter) UpdateDocument(ctx context.Context, term index.Term, doc *document.Document) (int64, error)
UpdateDocument Updates a document by first deleting the document(s) containing term and then adding the new document. The delete and then add are atomic as seen by a reader on the same index (Flush may happen only after the add).
term: the term to identify the document(s) to be deleted doc: the document to be added
Returns: The sequence number for this operation
Throws: CorruptIndexException – if the index is corrupt
IOException – if there is a low-level IO error
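A short usage sketch of the delete-then-add semantics; "id" as the unique-key field is an assumption, and NewTerm is the package-level constructor listed in the index.

    // replaceByID atomically deletes any document(s) matching the "id" term
    // and adds the new document.
    func replaceByID(ctx context.Context, w *IndexWriter, id string, doc *document.Document) (int64, error) {
        return w.UpdateDocument(ctx, NewTerm("id", []byte(id)), doc)
    }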
type IndexWriterConfig ¶
type IndexWriterConfig struct {
// contains filtered or unexported fields
}
func NewIndexWriterConfig ¶
func NewIndexWriterConfig(codec index.Codec, similarity index.Similarity) *IndexWriterConfig
func (IndexWriterConfig) GetAnalyzer ¶
func (*IndexWriterConfig) GetCommitOnClose ¶
func (c *IndexWriterConfig) GetCommitOnClose() bool
GetCommitOnClose Returns true if IndexWriter.close() should first commit before closing.
func (*IndexWriterConfig) GetFlushPolicy ¶
func (c *IndexWriterConfig) GetFlushPolicy() FlushPolicy
func (*IndexWriterConfig) GetIndexCommit ¶
func (c *IndexWriterConfig) GetIndexCommit() IndexCommit
GetIndexCommit Returns the IndexCommit as specified in IndexWriterConfig.setIndexCommit(IndexCommit) or the default, null, which specifies opening the latest index commit point.
func (*IndexWriterConfig) GetIndexCreatedVersionMajor ¶
func (c *IndexWriterConfig) GetIndexCreatedVersionMajor() int
func (IndexWriterConfig) GetIndexDeletionPolicy ¶
func (r IndexWriterConfig) GetIndexDeletionPolicy() IndexDeletionPolicy
func (IndexWriterConfig) GetIndexSort ¶
func (IndexWriterConfig) GetIndexSortFields ¶
func (r IndexWriterConfig) GetIndexSortFields() map[string]struct{}
func (IndexWriterConfig) GetIndexingChain ¶
func (r IndexWriterConfig) GetIndexingChain() IndexingChain
func (IndexWriterConfig) GetLeafSorter ¶
func (r IndexWriterConfig) GetLeafSorter() func(a, b index.LeafReader) int
func (IndexWriterConfig) GetMaxBufferedDocs ¶
func (r IndexWriterConfig) GetMaxBufferedDocs() int
func (IndexWriterConfig) GetMaxFullFlushMergeWaitMillis ¶
func (r IndexWriterConfig) GetMaxFullFlushMergeWaitMillis() int64
func (IndexWriterConfig) GetMergePolicy ¶
func (r IndexWriterConfig) GetMergePolicy() MergePolicy
func (*IndexWriterConfig) GetMergeScheduler ¶
func (c *IndexWriterConfig) GetMergeScheduler() MergeScheduler
func (IndexWriterConfig) GetMergedSegmentWarmer ¶
func (r IndexWriterConfig) GetMergedSegmentWarmer() ReaderWarmer
func (*IndexWriterConfig) GetOpenMode ¶
func (c *IndexWriterConfig) GetOpenMode() OpenMode
func (IndexWriterConfig) GetReaderPooling ¶
func (r IndexWriterConfig) GetReaderPooling() bool
func (IndexWriterConfig) GetSimilarity ¶
func (r IndexWriterConfig) GetSimilarity() index.Similarity
GetSimilarity Expert: returns the Similarity implementation used by this IndexWriter.
func (IndexWriterConfig) GetSoftDeletesField ¶
func (r IndexWriterConfig) GetSoftDeletesField() string
func (IndexWriterConfig) GetUseCompoundFile ¶
func (r IndexWriterConfig) GetUseCompoundFile() bool
func (IndexWriterConfig) IsCheckPendingFlushOnUpdate ¶
func (r IndexWriterConfig) IsCheckPendingFlushOnUpdate() bool
func (IndexWriterConfig) SetCheckPendingFlushUpdate ¶
func (r IndexWriterConfig) SetCheckPendingFlushUpdate(checkPendingFlushOnUpdate bool) LiveIndexWriterConfig
func (*IndexWriterConfig) SetIndexSort ¶
func (c *IndexWriterConfig) SetIndexSort(sort index.Sort) error
SetIndexSort Set the Sort order to use for all (flushed and merged) segments.
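A configuration sketch: the sort field is taken as a parameter because the package's index.SortField constructors are not shown in this listing; NewSort is the package-level constructor listed in the index.

    func configureSortedWriter(codec index.Codec, sim index.Similarity,
        sf index.SortField) (*IndexWriterConfig, error) {

        conf := NewIndexWriterConfig(codec, sim)
        // All flushed and merged segments will be sorted by sf.
        if err := conf.SetIndexSort(NewSort([]index.SortField{sf})); err != nil {
            return nil, err
        }
        return conf, nil
    }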
func (IndexWriterConfig) SetMaxBufferedDocs ¶
func (r IndexWriterConfig) SetMaxBufferedDocs(maxBufferedDocs int) LiveIndexWriterConfig
func (IndexWriterConfig) SetMergePolicy ¶
func (r IndexWriterConfig) SetMergePolicy(mergePolicy MergePolicy) LiveIndexWriterConfig
func (IndexWriterConfig) SetMergedSegmentWarmer ¶
func (r IndexWriterConfig) SetMergedSegmentWarmer(mergeSegmentWarmer ReaderWarmer) LiveIndexWriterConfig
func (IndexWriterConfig) SetUseCompoundFile ¶
func (r IndexWriterConfig) SetUseCompoundFile(useCompoundFile bool) LiveIndexWriterConfig
type IndexingChain ¶
type IndexingChain interface {
    GetChain(indexCreatedVersionMajor int, segmentInfo *SegmentInfo, directory store.Directory,
        fieldInfos *FieldInfosBuilder, indexWriterConfig *liveIndexWriterConfig) index.DocConsumer
}
type IntComparableProvider ¶
type IntComparableProvider struct {
// contains filtered or unexported fields
}
func (*IntComparableProvider) GetAsComparableLong ¶
func (r *IntComparableProvider) GetAsComparableLong(docID int) (int64, error)
type IntDocComparator ¶
type IntDocComparator struct {
// contains filtered or unexported fields
}
func (*IntDocComparator) Compare ¶
func (r *IntDocComparator) Compare(docID1, docID2 int) int
type IntSorter ¶
type IntSorter struct {
// contains filtered or unexported fields
}
IntSorter Sorts documents based on integer values from a NumericDocValues instance
func NewIntSorter ¶
func NewIntSorter(providerName string, missingValue int32, reverse bool, valuesProvider NumericDocValuesProvider) *IntSorter
func (*IntSorter) GetComparableProviders ¶
func (i *IntSorter) GetComparableProviders(readers []index.LeafReader) ([]index.ComparableProvider, error)
func (*IntSorter) GetDocComparator ¶
func (i *IntSorter) GetDocComparator(reader index.LeafReader, maxDoc int) (index.DocComparator, error)
func (*IntSorter) GetProviderName ¶
type KeepOnlyLastCommitDeletionPolicy ¶
type KeepOnlyLastCommitDeletionPolicy struct { }
KeepOnlyLastCommitDeletionPolicy An IndexDeletionPolicy implementation that keeps only the most recent commit and immediately removes all prior commits after a new commit is done. This is the default deletion policy.
func NewKeepOnlyLastCommitDeletionPolicy ¶
func NewKeepOnlyLastCommitDeletionPolicy() *KeepOnlyLastCommitDeletionPolicy
func (*KeepOnlyLastCommitDeletionPolicy) OnCommit ¶
func (k *KeepOnlyLastCommitDeletionPolicy) OnCommit(commits []IndexCommit) error
func (*KeepOnlyLastCommitDeletionPolicy) OnInit ¶
func (k *KeepOnlyLastCommitDeletionPolicy) OnInit(commits []IndexCommit) error
type LeafAndDocID ¶
type LeafAndDocID struct {
// contains filtered or unexported fields
}
func NewLeafAndDocID ¶
type LeafReaderBaseInner ¶
type LeafReaderContextImpl ¶
type LeafReaderContextImpl struct {
    *BaseIndexReaderContext
    // contains filtered or unexported fields
}
LeafReaderContextImpl IndexReaderContext for LeafReader instances.
func (*LeafReaderContextImpl) Children ¶
func (l *LeafReaderContextImpl) Children() []index.IndexReaderContext
func (*LeafReaderContextImpl) DocBase ¶
func (l *LeafReaderContextImpl) DocBase() int
func (*LeafReaderContextImpl) Identity ¶
func (l *LeafReaderContextImpl) Identity() string
func (*LeafReaderContextImpl) LeafReader ¶
func (l *LeafReaderContextImpl) LeafReader() index.LeafReader
func (*LeafReaderContextImpl) Leaves ¶
func (l *LeafReaderContextImpl) Leaves() ([]index.LeafReaderContext, error)
func (*LeafReaderContextImpl) Ord ¶
func (l *LeafReaderContextImpl) Ord() int
func (*LeafReaderContextImpl) Reader ¶
func (l *LeafReaderContextImpl) Reader() index.IndexReader
type LiveIndexWriterConfig ¶
type LiveIndexWriterConfig interface {
    GetAnalyzer() analysis.Analyzer

    SetMaxBufferedDocs(maxBufferedDocs int) LiveIndexWriterConfig

    // GetMaxBufferedDocs Returns the number of buffered added documents that will trigger a flush if enabled.
    // See Also: setMaxBufferedDocs(int)
    GetMaxBufferedDocs() int

    // SetMergePolicy
    // Expert: MergePolicy is invoked whenever there are changes to the segments in the index.
    // Its role is to select which merges to do, if any, and return a MergePolicy.MergeSpecification
    // describing the merges. It also selects merges to do for forceMerge.
    // Takes effect on subsequent merge selections. Any merges in flight or any merges already registered
    // by the previous MergePolicy are not affected.
    SetMergePolicy(mergePolicy MergePolicy) LiveIndexWriterConfig

    // SetMergedSegmentWarmer
    // Set the merged segment warmer. See IndexWriter.ReaderWarmer.
    // Takes effect on the next merge.
    SetMergedSegmentWarmer(mergeSegmentWarmer ReaderWarmer) LiveIndexWriterConfig

    // GetMergedSegmentWarmer Returns the current merged segment warmer. See IndexWriter.ReaderWarmer.
    GetMergedSegmentWarmer() ReaderWarmer

    // GetIndexCreatedVersionMajor Returns the compatibility version to use for this index.
    // See Also: IndexWriterConfig.setIndexCreatedVersionMajor
    GetIndexCreatedVersionMajor() int

    // GetIndexDeletionPolicy Returns the IndexDeletionPolicy specified in
    // IndexWriterConfig.setIndexDeletionPolicy(IndexDeletionPolicy) or the default
    // KeepOnlyLastCommitDeletionPolicy.
    GetIndexDeletionPolicy() IndexDeletionPolicy

    // GetIndexCommit Returns the IndexCommit as specified in IndexWriterConfig.setIndexCommit(IndexCommit)
    // or the default, null, which specifies opening the latest index commit point.
    GetIndexCommit() IndexCommit

    // GetSimilarity Expert: returns the Similarity implementation used by this IndexWriter.
    GetSimilarity() index.Similarity

    // GetMergeScheduler Returns the MergeScheduler that was set by
    // IndexWriterConfig.setMergeScheduler(MergeScheduler).
    GetMergeScheduler() MergeScheduler

    // GetCodec Returns the current Codec.
    GetCodec() index.Codec

    // GetMergePolicy Returns the current MergePolicy in use by this writer.
    // See Also: IndexWriterConfig.setMergePolicy(MergePolicy)
    GetMergePolicy() MergePolicy

    // GetReaderPooling Returns true if IndexWriter should pool readers even if
    // DirectoryReader.open(IndexWriter) has not been called.
    GetReaderPooling() bool

    // GetIndexingChain Returns the indexing chain.
    GetIndexingChain() IndexingChain

    // GetFlushPolicy See Also: IndexWriterConfig.setFlushPolicy(FlushPolicy)
    GetFlushPolicy() FlushPolicy

    // SetUseCompoundFile
    // Sets if the IndexWriter should pack newly written segments in a compound file.
    // Default is true.
    // Use false for batch indexing with very large ram buffer settings.
    // Note: To control compound file usage during segment merges see MergePolicy.setNoCFSRatio(double)
    // and MergePolicy.setMaxCFSSegmentSizeMB(double). This setting only applies to newly created segments.
    SetUseCompoundFile(useCompoundFile bool) LiveIndexWriterConfig

    // GetUseCompoundFile Returns true iff the IndexWriter packs newly written segments in a compound file.
    // Default is true.
    GetUseCompoundFile() bool

    // GetCommitOnClose Returns true if IndexWriter.close() should first commit before closing.
    GetCommitOnClose() bool

    // GetIndexSort Get the index-time Sort order, applied to all (flushed and merged) segments.
    GetIndexSort() index.Sort

    // GetIndexSortFields Returns the field names involved in the index sort.
    GetIndexSortFields() map[string]struct{}

    // GetLeafSorter Returns a comparator for sorting leaf readers. If not null, this comparator is
    // used to sort leaf readers within a DirectoryReader opened from the IndexWriter of this configuration.
    // Returns: a comparator for sorting leaf readers
    GetLeafSorter() func(a, b index.LeafReader) int

    // IsCheckPendingFlushOnUpdate Expert: Returns true if indexing threads check for pending flushes on
    // update in order to help flush indexing buffers to disk.
    // lucene.experimental
    IsCheckPendingFlushOnUpdate() bool

    // SetCheckPendingFlushUpdate
    // Expert: sets if indexing threads check for pending flushes on update in order to help flush indexing
    // buffers to disk. As a consequence, threads calling
    // DirectoryReader.openIfChanged(DirectoryReader, IndexWriter) or IndexWriter.flush() will be the only
    // threads writing segments to disk unless flushes are falling behind. If indexing is stalled due to too
    // many pending flushes, indexing threads will help write pending segment flushes to disk.
    // lucene.experimental
    SetCheckPendingFlushUpdate(checkPendingFlushOnUpdate bool) LiveIndexWriterConfig

    // GetSoftDeletesField Returns the soft deletes field or null if soft-deletes are disabled.
    // See IndexWriterConfig.setSoftDeletesField(String) for details.
    GetSoftDeletesField() string

    // GetMaxFullFlushMergeWaitMillis Expert: returns the amount of time to wait for merges returned by
    // MergePolicy.findFullFlushMerges(...). If this time is reached, we proceed with the commit based on
    // segments merged up to that point. The merges are not cancelled, and may still run to completion
    // independent of the commit.
    GetMaxFullFlushMergeWaitMillis() int64

    GetOpenMode() OpenMode
}
type LongComparableProvider ¶
type LongComparableProvider struct {
// contains filtered or unexported fields
}
func (*LongComparableProvider) GetAsComparableLong ¶
func (r *LongComparableProvider) GetAsComparableLong(docID int) (int64, error)
type LongDocComparator ¶
type LongDocComparator struct {
// contains filtered or unexported fields
}
func (*LongDocComparator) Compare ¶
func (r *LongDocComparator) Compare(docID1, docID2 int) int
type LongSorter ¶
type LongSorter struct {
// contains filtered or unexported fields
}
LongSorter Sorts documents based on long values from a NumericDocValues instance
func NewLongSorter ¶
func NewLongSorter(providerName string, missingValue int64, reverse bool, valuesProvider NumericDocValuesProvider) *LongSorter
func (*LongSorter) GetComparableProviders ¶
func (i *LongSorter) GetComparableProviders(readers []index.LeafReader) ([]index.ComparableProvider, error)
func (*LongSorter) GetDocComparator ¶
func (i *LongSorter) GetDocComparator(reader index.LeafReader, maxDoc int) (index.DocComparator, error)
func (*LongSorter) GetProviderName ¶
func (i *LongSorter) GetProviderName() string
type MergeContext ¶
type MergeContext interface {
    // NumDeletesToMerge Returns the number of deletes a merge would claim back if the given segment is merged.
    // Params: info – the segment to get the number of deletes for
    // See Also: numDeletesToMerge(SegmentCommitInfo, int, IOSupplier)
    NumDeletesToMerge(info index.SegmentCommitInfo) (int, error)

    // NumDeletedDocs Returns the number of deleted documents in the given segment.
    NumDeletedDocs(info index.SegmentCommitInfo) int

    // GetMergingSegments Returns an unmodifiable set of segments that are currently merging.
    GetMergingSegments() []index.SegmentCommitInfo
}
MergeContext This interface represents the current context of the merge selection process. It provides access to real-time information such as the currently merging segments or how many deletes a segment would claim back if merged. This context might be stateful and change during the execution of a merge policy's selection process. lucene.experimental
type MergePolicy ¶
type MergePolicy interface {
    // FindMerges
    // Determine what set of merge operations are now necessary on the index.
    // IndexWriter calls this whenever there is a change to the segments.
    // This call is always synchronized on the IndexWriter instance so only one thread at a time will call
    // this method.
    // mergeTrigger: the event that triggered the merge
    // segmentInfos: the total set of segments in the index
    // mergeContext: the IndexWriter to find the merges on
    FindMerges(mergeTrigger MergeTrigger, segmentInfos *SegmentInfos, mergeContext MergeContext) (*MergeSpecification, error)

    // FindForcedMerges
    // Determine what set of merge operations is necessary in order to merge to <= the specified segment
    // count. IndexWriter calls this when its IndexWriter.forceMerge method is called. This call is always
    // synchronized on the IndexWriter instance so only one thread at a time will call this method.
    // Params:
    // segmentInfos – the total set of segments in the index
    // maxSegmentCount – requested maximum number of segments in the index (currently this is always 1)
    // segmentsToMerge – contains the specific SegmentInfo instances that must be merged away.
    // This may be a subset of all SegmentInfos. If the item is true for a given SegmentInfo,
    // that means this segment was an original segment present in the to-be-merged index;
    // else, it was a segment produced by a cascaded merge.
    // mergeContext – the MergeContext to find the merges on
    FindForcedMerges(segmentInfos *SegmentInfos, maxSegmentCount int, segmentsToMerge map[index.SegmentCommitInfo]bool, mergeContext MergeContext) (*MergeSpecification, error)

    // FindForcedDeletesMerges
    // Determine what set of merge operations is necessary in order to expunge all deletes from the index.
    // Params:
    // segmentInfos – the total set of segments in the index
    // mergeContext – the MergeContext to find the merges on
    FindForcedDeletesMerges(segmentInfos *SegmentInfos, mergeContext MergeContext) (*MergeSpecification, error)

    // FindFullFlushMerges
    // Identifies merges that we want to execute (synchronously) on commit.
    // By default, this will do no merging on commit. If you implement this method in your MergePolicy you
    // must also set a non-zero timeout using IndexWriterConfig.setMaxFullFlushMergeWaitMillis.
    // Any merges returned here will make IndexWriter.commit(), IndexWriter.prepareCommit() or
    // IndexWriter.getReader(boolean, boolean) block until the merges complete or until
    // IndexWriterConfig.getMaxFullFlushMergeWaitMillis() has elapsed.
    // This may be used to merge small segments that have just been flushed, reducing the number of
    // segments in the point-in-time snapshot. If a merge does not complete in the allotted time, it will
    // continue to execute, and eventually finish and apply to future point-in-time snapshots, but will not
    // be reflected in the current one. If a MergePolicy.OneMerge in the returned
    // MergePolicy.MergeSpecification includes a segment already included in a registered merge, then
    // IndexWriter.commit() or IndexWriter.prepareCommit() will throw an IllegalStateException.
    // Use MergePolicy.MergeContext.getMergingSegments() to determine which segments are currently
    // registered to merge.
    // Params:
    // mergeTrigger – the event that triggered the merge (COMMIT or GET_READER).
    // segmentInfos – the total set of segments in the index (while preparing the commit)
    // mergeContext – the MergeContext to find the merges on, which should be used to determine which
    // segments are already in a registered merge (see MergePolicy.MergeContext.getMergingSegments()).
    FindFullFlushMerges(mergeTrigger MergeTrigger, segmentInfos *SegmentInfos, mergeContext MergeContext) (*MergeSpecification, error)

    // UseCompoundFile
    // Returns true if a new segment (regardless of its origin) should use the compound file format.
    // The default implementation returns true iff the size of the given mergedInfo is less than or equal
    // to getMaxCFSSegmentSizeMB() and the size is less than or equal to TotalIndexSize * getNoCFSRatio(),
    // otherwise false.
    UseCompoundFile(infos *SegmentInfos, mergedInfo index.SegmentCommitInfo, mergeContext MergeContext) (bool, error)

    KeepFullyDeletedSegment(func() index.CodecReader) bool

    MergePolicySPI
}
MergePolicy Expert: a MergePolicy determines the sequence of primitive merge operations. Whenever the segments in an index have been altered by IndexWriter, either the addition of a newly flushed segment, addition of many segments from addIndexes* calls, or a previous merge that may now need to cascade, IndexWriter invokes findMerges to give the MergePolicy a chance to pick merges that are now required. This method returns a MergePolicy.MergeSpecification instance describing the set of merges that should be done, or null if no merges are necessary. When IndexWriter.forceMerge is called, it calls findForcedMerges(SegmentInfos, int, Map, MergePolicy.MergeContext) and the MergePolicy should then return the necessary merges.
Note that the policy can return more than one merge at a time. In this case, if the writer is using SerialMergeScheduler, the merges will be run sequentially but if it is using ConcurrentMergeScheduler they will be run concurrently.
The default MergePolicy is TieredMergePolicy.
lucene.experimental
type MergePolicyBase ¶
type MergePolicyBase struct {
    MergePolicySPI
    // contains filtered or unexported fields
}
func NewMergePolicy ¶
func NewMergePolicy(spi MergePolicySPI) *MergePolicyBase
func (*MergePolicyBase) FindFullFlushMerges ¶
func (m *MergePolicyBase) FindFullFlushMerges(mergeTrigger MergeTrigger, segmentInfos *SegmentInfos, mergeContext MergeContext) (*MergeSpecification, error)
func (*MergePolicyBase) KeepFullyDeletedSegment ¶
func (m *MergePolicyBase) KeepFullyDeletedSegment(func() index.CodecReader) bool
func (*MergePolicyBase) UseCompoundFile ¶
func (m *MergePolicyBase) UseCompoundFile(infos *SegmentInfos, mergedInfo index.SegmentCommitInfo, mergeContext MergeContext) (bool, error)
type MergePolicySPI ¶
type MergePolicySPI interface {
    Size(info index.SegmentCommitInfo, mergeContext MergeContext) (int64, error)
    GetNoCFSRatio() float64
}
type MergeReader ¶
type MergeReader struct {
// contains filtered or unexported fields
}
func NewMergeReader ¶
func NewMergeReader(reader *SegmentReader, hardLiveDocs util.Bits) *MergeReader
type MergeScheduler ¶
type MergeScheduler interface {
    io.Closer

    // Merge Run the merges provided by MergeScheduler.MergeSource.getNextMerge().
    // Params:
    // mergeSource – the IndexWriter to obtain the merges from.
    // trigger – the MergeTrigger that caused this merge to happen
    Merge(mergeSource MergeSource, trigger MergeTrigger) error

    // Initialize IndexWriter calls this on init.
    Initialize(dir store.Directory)
}
MergeScheduler Expert: IndexWriter uses an instance implementing this interface to execute the merges selected by a MergePolicy. The default MergeScheduler is ConcurrentMergeScheduler. lucene.experimental
type MergeSource ¶
type MergeSource interface {
    // GetNextMerge
    // The MergeScheduler calls this method to retrieve the next merge requested by the MergePolicy.
    GetNextMerge() (*OneMerge, error)

    // OnMergeFinished
    // Does finishing work for a merge.
    OnMergeFinished(merge *OneMerge) error

    // HasPendingMerges
    // Expert: returns true if there are merges waiting to be scheduled.
    HasPendingMerges() bool

    // Merge
    // Merges the indicated segments, replacing them in the stack with a single segment.
    Merge(merge *OneMerge) error
}
type MergeSpecification ¶
type MergeSpecification struct {
// contains filtered or unexported fields
}
A MergeSpecification instance provides the information necessary to perform multiple merges. It simply contains a list of MergePolicy.OneMerge instances.
func NewMergeSpecification ¶
func NewMergeSpecification() *MergeSpecification
NewMergeSpecification Sole constructor. Use add(MergePolicy.OneMerge) to add merges.
func (*MergeSpecification) Add ¶
func (m *MergeSpecification) Add(merge *OneMerge)
type MergeState ¶
type MergeState struct {
    // Maps document IDs from old segments to document IDs in the new segment
    DocMaps []MergeStateDocMap

    // SegmentInfo of the newly merged segment.
    SegmentInfo index.SegmentInfo

    // FieldInfos of the newly merged segment.
    MergeFieldInfos index.FieldInfos

    // Stored field producers being merged
    StoredFieldsReaders []index.StoredFieldsReader

    // Term vector producers being merged
    TermVectorsReaders []index.TermVectorsReader

    // Norms producers being merged
    NormsProducers []index.NormsProducer

    // DocValues producers being merged
    DocValuesProducers []index.DocValuesProducer

    // FieldInfos being merged
    FieldInfos []index.FieldInfos

    // Live docs for each reader
    LiveDocs []util.Bits

    // Postings to merge
    FieldsProducers []index.FieldsProducer

    // Point readers to merge
    PointsReaders []index.PointsReader

    // Max docs per reader
    MaxDocs []int

    // Indicates if the index needs to be sorted
    NeedsIndexSort bool
}
MergeState Holds common state used during segment merging.
func NewMergeState ¶
func NewMergeState(readers []index.CodecReader, segmentInfo *SegmentInfo) (*MergeState, error)
type MergeStateDocMap ¶
func SortCodecReader ¶
func SortCodecReader(sort index.Sort, readers []index.CodecReader) ([]MergeStateDocMap, error)
SortCodecReader Does a merge sort of the leaves of the incoming readers, returning one MergeStateDocMap per leaf to map that leaf's documents into the merged segment. The documents for each incoming leaf reader must already be sorted by the same sort! Returns nil if the merge sort is not needed (the segments are already in index sort order).
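A hedged usage sketch, assuming the readers were produced under the same index sort:

	func docMapsForMerge(indexSort index.Sort, readers []index.CodecReader) ([]MergeStateDocMap, error) {
		docMaps, err := SortCodecReader(indexSort, readers)
		if err != nil {
			return nil, err
		}
		// nil docMaps means the segments are already in index sort order
		// and no remapping is needed
		return docMaps, nil
	}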
type MergeTrigger ¶
type MergeTrigger int
type MultiLevelSkipListReader ¶
type MultiLevelSkipListReader interface {
	// ReadSkipData
	// Subclasses must implement the actual skip data encoding in this method.
	// level: the level the skip data shall be read from
	// skipStream: the skip stream to read from
	ReadSkipData(level int, skipStream store.IndexInput) (int64, error)
}
MultiLevelSkipListReader This interface reads skip lists with multiple levels. See MultiLevelSkipListWriter for the information about the encoding of the multi level skip lists. Subclasses must implement the abstract method readSkipData(int, IndexInput) which defines the actual format of the skip data.
type MultiLevelSkipListReaderContext ¶
type MultiLevelSkipListReaderContext struct {
// contains filtered or unexported fields
}
func NewMultiLevelSkipListReaderContext ¶
func NewMultiLevelSkipListReaderContext(skipStream store.IndexInput, maxSkipLevels, skipInterval, skipMultiplier int) *MultiLevelSkipListReaderContext
func (*MultiLevelSkipListReaderContext) Close ¶
func (m *MultiLevelSkipListReaderContext) Close() error
func (*MultiLevelSkipListReaderContext) GetDoc ¶
func (m *MultiLevelSkipListReaderContext) GetDoc() int
func (*MultiLevelSkipListReaderContext) GetSkipDoc ¶
func (m *MultiLevelSkipListReaderContext) GetSkipDoc(idx int) int
func (*MultiLevelSkipListReaderContext) Init ¶
func (m *MultiLevelSkipListReaderContext) Init(ctx context.Context, skipPointer int64, df int, spi MultiLevelSkipListReaderSPI) error
func (*MultiLevelSkipListReaderContext) MaxNumberOfSkipLevels ¶
func (m *MultiLevelSkipListReaderContext) MaxNumberOfSkipLevels() int
func (*MultiLevelSkipListReaderContext) SkipToWithSPI ¶
func (m *MultiLevelSkipListReaderContext) SkipToWithSPI(ctx context.Context, target int, spi MultiLevelSkipListReaderSPI) (int, error)
type MultiLevelSkipListReaderSPI ¶
type MultiLevelSkipListReaderSPI interface {
	// ReadSkipData
	// Subclasses must implement the actual skip data encoding in this method.
	ReadSkipData(ctx context.Context, level int, skipStream store.IndexInput, mtx *MultiLevelSkipListReaderContext) (int64, error)

	// ReadLevelLength
	// Reads the length of the current level written via MultiLevelSkipListWriter.writeLevelLength(long, IndexOutput).
	ReadLevelLength(ctx context.Context, skipStream store.IndexInput, mtx *MultiLevelSkipListReaderContext) (int64, error)

	// ReadChildPointer
	// Reads the child pointer written via MultiLevelSkipListWriter.writeChildPointer(long, DataOutput).
	ReadChildPointer(ctx context.Context, skipStream store.IndexInput, mtx *MultiLevelSkipListReaderContext) (int64, error)
}
type MultiLevelSkipListWriter ¶
type MultiLevelSkipListWriter interface {
	// WriteSkipData
	// Subclasses must implement the actual skip data encoding in this method.
	// level: the level the skip data shall be written for
	// skipBuffer: the skip buffer to write to
	WriteSkipData(level int, skipBuffer store.IndexOutput) error

	// Init
	// Allocates internal skip buffers.
	Init()

	// ResetSkip
	// Creates new buffers or empties the existing ones.
	ResetSkip()

	// BufferSkip
	// Writes the current skip data to the buffers. The current document frequency
	// determines the max level the skip data is written to.
	// df: the current document frequency
	// Throws: IOException – if an I/O error occurs
	BufferSkip(df int) error

	// WriteSkip
	// Writes the buffered skip lists to the given output.
	// output: the IndexOutput the skip lists shall be written to
	// Returns: the pointer where the skip list starts
	WriteSkip(output store.IndexOutput) (int64, error)

	// WriteLevelLength
	// Writes the length of a level to the given output.
	// levelLength: the length of a level
	// output: the IndexOutput the length shall be written to
	WriteLevelLength(levelLength int64, output store.IndexOutput) error

	// WriteChildPointer
	// Writes the child pointer of a block to the given output.
	// childPointer: the block of a higher level pointing to the lower level
	// skipBuffer: the skip buffer to write to
	WriteChildPointer(childPointer int64, skipBuffer store.DataOutput) error
}
MultiLevelSkipListWriter This abstract class writes skip lists with multiple levels.
Example for skipInterval = 3:

	                                                    c            (skip level 2)
	                c                 c                 c            (skip level 1)
	    x     x     x     x     x     x     x     x     x     x      (skip level 0)
	d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d  (posting list)
	    3     6     9     12    15    18    21    24    27    30     (df)

	d - document
	x - skip data
	c - skip data with child pointer

Skip level i contains every skipInterval-th entry from skip level i-1. Therefore the number of entries on level i is: floor(df / skipInterval^(i+1)). Each skip entry on a level i > 0 contains a pointer to the corresponding skip entry in list i-1. This guarantees a logarithmic amount of skips to find the target document. While this class takes care of writing the different skip levels, subclasses must define the actual format of the skip data.
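As a quick check of the entry-count formula above: with df = 32 and skipInterval = 3, level 0 holds floor(32/3) = 10 entries and level 1 holds floor(32/9) = 3, matching the diagram. The helper below is an illustrative sketch, not part of this package:

	// skipEntries returns floor(df / skipInterval^(level+1)), computed by
	// repeated integer division to avoid overflow for large levels.
	func skipEntries(df, skipInterval, level int) int {
		n := df
		for i := 0; i <= level; i++ {
			n /= skipInterval
		}
		return n
	}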
type MultiLevelSkipListWriterContext ¶
type MultiLevelSkipListWriterContext struct {
	NumberOfSkipLevels int
	SkipInterval       int
	SkipMultiplier     int
	SkipBuffer         []*store.BufferOutput
}
func NewMultiLevelSkipListWriterContext ¶
func NewMultiLevelSkipListWriterContext(skipInterval, skipMultiplier, maxSkipLevels, df int) *MultiLevelSkipListWriterContext
func (*MultiLevelSkipListWriterContext) BufferSkip ¶
func (m *MultiLevelSkipListWriterContext) BufferSkip(ctx context.Context, df int, spi MultiLevelSkipListWriterSPI) error
func (*MultiLevelSkipListWriterContext) ResetSkip ¶
func (m *MultiLevelSkipListWriterContext) ResetSkip()
type MultiLevelSkipListWriterSPI ¶
type MultiLevelSkipListWriterSPI interface {
	ResetSkip(mwc *MultiLevelSkipListWriterContext) error

	// WriteSkipData
	// Subclasses must implement the actual skip data encoding in this method.
	// level: the level the skip data shall be written for
	// skipBuffer: the skip buffer to write to
	WriteSkipData(ctx context.Context, level int, skipBuffer store.IndexOutput, mwc *MultiLevelSkipListWriterContext) error

	// WriteSkip
	// Writes the buffered skip lists to the given output.
	// output: the IndexOutput the skip lists shall be written to
	// Returns: the pointer where the skip list starts
	WriteSkip(ctx context.Context, output store.IndexOutput, mwc *MultiLevelSkipListWriterContext) (int64, error)

	// WriteLevelLength
	// Writes the length of a level to the given output.
	// levelLength: the length of a level
	// output: the IndexOutput the length shall be written to
	WriteLevelLength(ctx context.Context, levelLength int64, output store.IndexOutput) error

	// WriteChildPointer
	// Writes the child pointer of a block to the given output.
	// childPointer: the block of a higher level pointing to the lower level
	// skipBuffer: the skip buffer to write to
	WriteChildPointer(ctx context.Context, childPointer int64, skipBuffer store.DataOutput) error
}
type NoMergePolicy ¶
type NoMergePolicy struct {
*MergePolicyBase
}
func NewNoMergePolicy ¶
func NewNoMergePolicy() *NoMergePolicy
func (*NoMergePolicy) FindForcedDeletesMerges ¶
func (n *NoMergePolicy) FindForcedDeletesMerges(segmentInfos *SegmentInfos, mergeContext MergeContext) (*MergeSpecification, error)
func (*NoMergePolicy) FindForcedMerges ¶
func (n *NoMergePolicy) FindForcedMerges(segmentInfos *SegmentInfos, maxSegmentCount int, segmentsToMerge map[index.SegmentCommitInfo]bool, mergeContext MergeContext) (*MergeSpecification, error)
func (*NoMergePolicy) FindFullFlushMerges ¶
func (n *NoMergePolicy) FindFullFlushMerges(mergeTrigger MergeTrigger, segmentInfos *SegmentInfos, mergeContext MergeContext) (*MergeSpecification, error)
func (*NoMergePolicy) FindMerges ¶
func (n *NoMergePolicy) FindMerges(mergeTrigger MergeTrigger, segmentInfos *SegmentInfos, mergeContext MergeContext) (*MergeSpecification, error)
func (*NoMergePolicy) GetNoCFSRatio ¶
func (n *NoMergePolicy) GetNoCFSRatio() float64
func (*NoMergePolicy) Size ¶
func (n *NoMergePolicy) Size(info index.SegmentCommitInfo, mergeContext MergeContext) (int64, error)
func (*NoMergePolicy) UseCompoundFile ¶
func (n *NoMergePolicy) UseCompoundFile(infos *SegmentInfos, newSegment index.SegmentCommitInfo, mergeContext MergeContext) (bool, error)
type NoMergeScheduler ¶
type NoMergeScheduler struct { }
func NewNoMergeScheduler ¶
func NewNoMergeScheduler() *NoMergeScheduler
func (*NoMergeScheduler) Close ¶
func (n *NoMergeScheduler) Close() error
func (*NoMergeScheduler) Initialize ¶
func (n *NoMergeScheduler) Initialize(dir store.Directory)
func (*NoMergeScheduler) Merge ¶
func (n *NoMergeScheduler) Merge(mergeSource MergeSource, trigger MergeTrigger) error
type NodeApply ¶
type NodeApply interface {
	Apply(bufferedDeletes *index.BufferedUpdates, docIDUpto int) error
	IsDelete() bool
}
type NormValuesWriter ¶
type NormValuesWriter struct {
// contains filtered or unexported fields
}
NormValuesWriter Buffers up pending long values per doc, then flushes when the segment flushes.
func NewNormValuesWriter ¶
func NewNormValuesWriter(fieldInfo *document.FieldInfo) *NormValuesWriter
func (*NormValuesWriter) AddValue ¶
func (n *NormValuesWriter) AddValue(docID int, value int64) error
func (*NormValuesWriter) Finish ¶
func (n *NormValuesWriter) Finish(maxDoc int)
func (*NormValuesWriter) Flush ¶
func (n *NormValuesWriter) Flush(ctx context.Context, state *index.SegmentWriteState, sortMap index.DocMap, normsConsumer index.NormsConsumer) error
type NormsConsumer ¶
type NormsConsumer interface {
	io.Closer

	// AddNormsField
	// Writes normalization values for a field.
	// field: field information
	// normsProducer: NormsProducer of the numeric norm values
	// Throws: IOException – if an I/O error occurred.
	AddNormsField(ctx context.Context, field *document.FieldInfo, normsProducer index.NormsProducer) error

	// Merge
	// Merges in the fields from the readers in mergeState.
	// The default implementation calls MergeNormsField for each field,
	// filling segments with missing norms for the field with zeros.
	// Implementations can override this method for more sophisticated merging
	// (bulk-byte copying, etc).
	Merge(ctx context.Context, mergeState *index.MergeState) error

	// MergeNormsField
	// Merges the norms from toMerge.
	// The default implementation calls FnAddNormsField, passing an Iterable
	// that merges and filters deleted documents on the fly.
	MergeNormsField(ctx context.Context, mergeFieldInfo *document.FieldInfo, mergeState *index.MergeState) error
}
NormsConsumer Abstract API that consumes normalization values. Concrete implementations of this actually do "something" with the norms (write it into the index in a specific format). The lifecycle is: NormsConsumer is created by NormsFormat.normsConsumer(SegmentWriteState). FnAddNormsField is called for each field with normalization values. The API is a "pull" rather than "push", and the implementation is free to iterate over the values multiple times (Iterable.iterator()). After all fields are added, the consumer is closed.
type NormsConsumerDefault ¶
type NormsConsumerDefault struct {
FnAddNormsField func(ctx context.Context, field *document.FieldInfo, normsProducer index.NormsProducer) error
}
func (*NormsConsumerDefault) Merge ¶
func (n *NormsConsumerDefault) Merge(ctx context.Context, mergeState *index.MergeState) error
func (*NormsConsumerDefault) MergeNormsField ¶
func (n *NormsConsumerDefault) MergeNormsField(ctx context.Context, mergeFieldInfo *document.FieldInfo, mergeState *index.MergeState) error
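Since NormsConsumerDefault supplies the generic Merge/MergeNormsField plumbing, a caller only plugs in the format-specific encoding via FnAddNormsField. A hedged sketch with a no-op encoder:

	func mergeNorms(ctx context.Context, mergeState *index.MergeState) error {
		consumer := &NormsConsumerDefault{
			FnAddNormsField: func(ctx context.Context, field *document.FieldInfo, normsProducer index.NormsProducer) error {
				// a real codec would pull the per-document norms from
				// normsProducer here and encode them in its own format
				return nil
			},
		}
		return consumer.Merge(ctx, mergeState)
	}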
type NumericDVs ¶
type NumericDVs struct {
// contains filtered or unexported fields
}
func NewNumericDVs ¶
func NewNumericDVs(values []int64, docsWithField *bitset.BitSet) *NumericDVs
func SortDocValues ¶
func SortDocValues(maxDoc int, sortMap index.DocMap, oldDocValues index.NumericDocValues) *NumericDVs
type NumericDocValuesDefault ¶
type NumericDocValuesDefault struct {
	FnDocID        func() int
	FnNextDoc      func(ctx context.Context) (int, error)
	FnAdvance      func(ctx context.Context, target int) (int, error)
	FnSlowAdvance  func(ctx context.Context, target int) (int, error)
	FnCost         func() int64
	FnAdvanceExact func(target int) (bool, error)
	FnLongValue    func() (int64, error)
}
func (*NumericDocValuesDefault) AdvanceExact ¶
func (n *NumericDocValuesDefault) AdvanceExact(target int) (bool, error)
func (*NumericDocValuesDefault) Cost ¶
func (n *NumericDocValuesDefault) Cost() int64
func (*NumericDocValuesDefault) DocID ¶
func (n *NumericDocValuesDefault) DocID() int
func (*NumericDocValuesDefault) LongValue ¶
func (n *NumericDocValuesDefault) LongValue() (int64, error)
func (*NumericDocValuesDefault) NextDoc ¶
func (n *NumericDocValuesDefault) NextDoc(ctx context.Context) (int, error)
func (*NumericDocValuesDefault) SlowAdvance ¶
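The Fn fields make it easy to stub out an iterator. Below is a sketch of a NumericDocValuesDefault that reports the same value for every document; the exhaustion behavior past maxDoc is left out, since the sentinel constant isn't shown in this documentation:

	func constValues(value int64, maxDoc int) *NumericDocValuesDefault {
		doc := -1
		return &NumericDocValuesDefault{
			FnDocID: func() int { return doc },
			FnNextDoc: func(ctx context.Context) (int, error) {
				doc++
				return doc, nil // a real iterator would signal exhaustion past maxDoc
			},
			FnAdvanceExact: func(target int) (bool, error) {
				doc = target
				return true, nil
			},
			FnCost:      func() int64 { return int64(maxDoc) },
			FnLongValue: func() (int64, error) { return value, nil },
		}
	}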
type NumericDocValuesProvider ¶
type NumericDocValuesProvider interface {
Get(reader index.LeafReader) (index.NumericDocValues, error)
}
NumericDocValuesProvider Provide a NumericDocValues instance for a LeafReader
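A provider for a named field can simply delegate to the package-level GetNumeric helper; the type below is an illustrative sketch:

	type fieldNumericProvider struct {
		field string
	}

	func (p fieldNumericProvider) Get(reader index.LeafReader) (index.NumericDocValues, error) {
		return GetNumeric(reader, p.field)
	}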
type NumericDocValuesSub ¶
type NumericDocValuesSub struct {
// contains filtered or unexported fields
}
NumericDocValuesSub Tracks state of one numeric sub-reader that we are merging
type NumericDocValuesWriter ¶
type NumericDocValuesWriter struct {
// contains filtered or unexported fields
}
func NewNumericDocValuesWriter ¶
func NewNumericDocValuesWriter(fieldInfo *document.FieldInfo) *NumericDocValuesWriter
func (*NumericDocValuesWriter) AddValue ¶
func (n *NumericDocValuesWriter) AddValue(docID int, value int64) error
func (*NumericDocValuesWriter) Flush ¶
func (n *NumericDocValuesWriter) Flush(state *index.SegmentWriteState, sortMap index.DocMap, consumer index.DocValuesConsumer) error
func (*NumericDocValuesWriter) GetDocValues ¶
func (n *NumericDocValuesWriter) GetDocValues() types.DocIdSetIterator
type OneMerge ¶
type OneMerge struct {
// contains filtered or unexported fields
}
OneMerge provides the information necessary to perform an individual primitive merge operation, resulting in a single new segment. The merge spec includes the subset of segments to be merged as well as whether the new segment should use the compound file format. lucene.experimental
type OneMergeProgress ¶
type OneMergeProgress struct {
// contains filtered or unexported fields
}
OneMergeProgress Progress and state for an executing merge. This class encapsulates the logic to pause and resume the merge thread or to abort the merge entirely. lucene.experimental
type OrdTermState ¶
type OrdTermState struct {
Ord int64
}
func NewOrdTermState ¶
func NewOrdTermState() *OrdTermState
func (*OrdTermState) CopyFrom ¶
func (r *OrdTermState) CopyFrom(other index.TermState)
type OrdinalMap ¶
type OrdinalMap struct {
// contains filtered or unexported fields
}
OrdinalMap Maps per-segment ordinals to/from global ordinal space, using a compact packed-ints representation. NOTE: this is a costly operation, as it must merge sort all terms, and may require non-trivial RAM once done. It's better to operate in segment-private ordinal space instead when possible. lucene.internal
func NewOrdinalMap ¶
func NewOrdinalMap(subs []index.TermsEnum, segmentMap *SegmentMap, acceptableOverheadRatio float64) (*OrdinalMap, error)
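A hedged construction sketch; the per-segment weights and the acceptable overhead ratio of 0.25 are illustrative assumptions, not documented defaults:

	func buildGlobalOrds(subs []index.TermsEnum, weights []int64) (*OrdinalMap, error) {
		segMap := NewSegmentMap(weights) // weights typically reflect per-segment sizes
		return NewOrdinalMap(subs, segMap, 0.25)
	}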
type PagedBytes ¶
type PagedBytes struct {
// contains filtered or unexported fields
}
PagedBytes Represents a logical byte[] as a series of pages. You can write-once into the logical byte[] (append only), using copy, and then retrieve slices (BytesRef) into it using fill. lucene.internal TODO: refactor this, byteblockpool, fst.bytestore, and any other "shift/mask big arrays". there are too many of these classes!
func NewPagedBytes ¶
func NewPagedBytes(blockBits int) *PagedBytes
func (*PagedBytes) CloneWithoutBlocks ¶
func (r *PagedBytes) CloneWithoutBlocks() *PagedBytes
func (*PagedBytes) CopyV1 ¶
func (r *PagedBytes) CopyV1(in store.IndexInput, byteCount int) error
CopyV1 Read this many bytes from in
func (*PagedBytes) CopyV2 ¶
func (r *PagedBytes) CopyV2(bytes []byte, out *bytes.Buffer) error
CopyV2 Copy BytesRef in, setting BytesRef out to the result. Do not use this if you will use freeze(true). This only supports bytes.length <= blockSize
func (*PagedBytes) Freeze ¶
func (r *PagedBytes) Freeze(trim bool) (*PagedBytesReader, error)
Freeze Commits final byte[], trimming it if necessary and if trim=true
func (*PagedBytes) GetDataInput ¶
func (r *PagedBytes) GetDataInput() *PagedBytesDataInput
func (*PagedBytes) GetDataOutput ¶
func (r *PagedBytes) GetDataOutput() *PagedBytesDataOutput
func (*PagedBytes) GetPointer ¶
func (r *PagedBytes) GetPointer() int64
type PagedBytesDataInput ¶
type PagedBytesDataInput struct {
	*store.BaseDataInput
	*PagedBytes
	// contains filtered or unexported fields
}
func NewPagedBytesDataInput ¶
func NewPagedBytesDataInput(pageBytes *PagedBytes) *PagedBytesDataInput
func (*PagedBytesDataInput) Clone ¶
func (r *PagedBytesDataInput) Clone() store.CloneReader
func (*PagedBytesDataInput) ReadByte ¶
func (r *PagedBytesDataInput) ReadByte() (byte, error)
type PagedBytesDataOutput ¶
type PagedBytesDataOutput struct {
	*store.BaseDataOutput
	*PagedBytes
}
func (*PagedBytesDataOutput) GetPosition ¶
func (r *PagedBytesDataOutput) GetPosition() int64
GetPosition Return the current byte position.
func (*PagedBytesDataOutput) Write ¶
func (r *PagedBytesDataOutput) Write(bs []byte) (n int, err error)
func (*PagedBytesDataOutput) WriteByte ¶
func (r *PagedBytesDataOutput) WriteByte(b byte) error
type PagedBytesReader ¶
type PagedBytesReader struct {
// contains filtered or unexported fields
}
PagedBytesReader Provides methods to read BytesRefs from a frozen PagedBytes.
func NewPagedBytesReader ¶
func NewPagedBytesReader(pagedBytes *PagedBytes) *PagedBytesReader
func (*PagedBytesReader) FillSlice ¶
func (p *PagedBytesReader) FillSlice(b *bytes.Buffer, start, length int)
FillSlice Gets a slice out of PagedBytes starting at start with a given length. Iff the slice spans across a block border this method will allocate sufficient resources and copy the paged data. Slices spanning more than two blocks are not supported. lucene.internal
func (*PagedBytesReader) GetByte ¶
func (p *PagedBytesReader) GetByte(o int64) byte
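The write-once/freeze/fill lifecycle described above, as a hedged sketch (the interpretation of blockBits as a power-of-two block size exponent is an assumption):

	func pagedBytesRoundTrip() error {
		pb := NewPagedBytes(10) // assumed: 2^10-byte blocks
		out := pb.GetDataOutput()
		if _, err := out.Write([]byte("hello world")); err != nil {
			return err
		}
		reader, err := pb.Freeze(true) // trim the final block
		if err != nil {
			return err
		}
		var buf bytes.Buffer
		reader.FillSlice(&buf, 0, 5) // buf now holds "hello"
		return nil
	}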
type ParallelPostingsArray ¶
type PendingDeletes ¶
type PendingDeletes interface {
	GetMutableBits() *bitset.BitSet

	// Delete
	// Marks a document as deleted in this segment and returns true if the document was actually deleted or
	// if the document was already deleted.
	Delete(docID int) (bool, error)

	// GetLiveDocs
	// Returns a snapshot of the current live docs.
	GetLiveDocs() util.Bits

	// GetHardLiveDocs
	// Returns a snapshot of the hard live docs.
	GetHardLiveDocs() util.Bits

	// NumPendingDeletes
	// Returns the number of pending deletes that are not written to disk.
	NumPendingDeletes() int

	// OnNewReader
	// Called once a new reader is opened for this segment, i.e. when deletes or updates are applied.
	OnNewReader(reader index.CodecReader, info index.SegmentCommitInfo) error

	// DropChanges
	// Resets the pending docs.
	DropChanges()

	// WriteLiveDocs
	// Writes the live docs to disk and returns true if any new docs were written.
	WriteLiveDocs(ctx context.Context, dir store.Directory) (bool, error)

	// IsFullyDeleted
	// Returns true iff the segment represented by this PendingDeletes is fully deleted.
	IsFullyDeleted(ctx context.Context, readerIOSupplier func() index.CodecReader) (bool, error)

	// OnDocValuesUpdate
	// Called for every field update for the given field at flush time.
	// info: the field info of the field that's updated
	// iterator: the values to apply
	OnDocValuesUpdate(info *document.FieldInfo, iterator DocValuesFieldUpdatesIterator)

	// NeedsRefresh
	// Returns true if the given reader needs to be refreshed in order to see the latest deletes.
	NeedsRefresh(reader index.CodecReader) bool

	// GetDelCount
	// Returns the number of deleted docs in the segment.
	GetDelCount() int

	// NumDocs
	// Returns the number of live documents in this segment.
	NumDocs() (int, error)

	// MustInitOnDelete
	// Returns true if we have to initialize this PendingDeletes before Delete(int);
	// otherwise this PendingDeletes is ready to accept deletes. A PendingDeletes can
	// be initialized by providing it a reader via OnNewReader(CodecReader, SegmentCommitInfo).
	MustInitOnDelete() bool
}
func NewPendingDeletes ¶
func NewPendingDeletes(reader *SegmentReader, info index.SegmentCommitInfo) PendingDeletes
func NewPendingDeletesV1 ¶
func NewPendingDeletesV1(info index.SegmentCommitInfo) PendingDeletes
func NewPendingDeletesV2 ¶
func NewPendingDeletesV2(info index.SegmentCommitInfo, liveDocs util.Bits, liveDocsInitialized bool) PendingDeletes
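A sketch honoring the MustInitOnDelete contract before the first delete; the helper name is illustrative:

	func deleteOne(pd PendingDeletes, reader index.CodecReader, info index.SegmentCommitInfo, docID int) (bool, error) {
		if pd.MustInitOnDelete() {
			if err := pd.OnNewReader(reader, info); err != nil {
				return false, err
			}
		}
		return pd.Delete(docID)
	}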
type PendingSoftDeletes ¶
type PendingSoftDeletes struct {
// contains filtered or unexported fields
}
func NewPendingSoftDeletes ¶
func NewPendingSoftDeletes(field string, info index.SegmentCommitInfo) *PendingSoftDeletes
func NewPendingSoftDeletesV1 ¶
func NewPendingSoftDeletesV1(field string, reader *SegmentReader, info index.SegmentCommitInfo) *PendingSoftDeletes
func (*PendingSoftDeletes) DropChanges ¶
func (p *PendingSoftDeletes) DropChanges()
func (PendingSoftDeletes) GetDelCount ¶
func (p PendingSoftDeletes) GetDelCount() int
func (*PendingSoftDeletes) GetHardLiveDocs ¶
func (p *PendingSoftDeletes) GetHardLiveDocs() util.Bits
func (PendingSoftDeletes) GetLiveDocs ¶
func (PendingSoftDeletes) GetMutableBits ¶
func (*PendingSoftDeletes) IsFullyDeleted ¶
func (p *PendingSoftDeletes) IsFullyDeleted(ctx context.Context, readerIOSupplier func() index.CodecReader) (bool, error)
func (*PendingSoftDeletes) MustInitOnDelete ¶
func (p *PendingSoftDeletes) MustInitOnDelete() bool
func (PendingSoftDeletes) NeedsRefresh ¶
func (p PendingSoftDeletes) NeedsRefresh(reader index.CodecReader) bool
func (*PendingSoftDeletes) NumPendingDeletes ¶
func (p *PendingSoftDeletes) NumPendingDeletes() int
func (*PendingSoftDeletes) OnDocValuesUpdate ¶
func (p *PendingSoftDeletes) OnDocValuesUpdate(info *document.FieldInfo, iterator DocValuesFieldUpdatesIterator)
func (*PendingSoftDeletes) OnNewReader ¶
func (p *PendingSoftDeletes) OnNewReader(reader index.CodecReader, info index.SegmentCommitInfo) error
func (*PendingSoftDeletes) WriteLiveDocs ¶
type PerField ¶
type PerField struct {
// contains filtered or unexported fields
}
PerField NOTE: not static: accesses at least docState, termsHash.
type PointValuesWriter ¶
type PointValuesWriter struct {
// contains filtered or unexported fields
}
PointValuesWriter Buffers up pending byte[][] item(s) per doc, then flushes when segment flushes.
func NewPointValuesWriter ¶
func NewPointValuesWriter(fieldInfo *document.FieldInfo) *PointValuesWriter
func (*PointValuesWriter) AddPackedValue ¶
func (p *PointValuesWriter) AddPackedValue(docID int, value []byte) error
AddPackedValue TODO: if exactly the same item is added to exactly the same doc, should we dedup?
func (*PointValuesWriter) Flush ¶
func (p *PointValuesWriter) Flush(ctx context.Context, state *index.SegmentWriteState, docMap index.DocMap, writer index.PointsWriter) error
type PositionData ¶
func NewPositionData ¶
func NewPositionData(pos int, payload []byte) *PositionData
type PostingsBytesStartArray ¶
type PostingsBytesStartArray struct {
// contains filtered or unexported fields
}
func NewPostingsBytesStartArray ¶
func NewPostingsBytesStartArray(perField TermsHashPerField) *PostingsBytesStartArray
func (*PostingsBytesStartArray) Clear ¶
func (p *PostingsBytesStartArray) Clear() []uint32
func (*PostingsBytesStartArray) Grow ¶
func (p *PostingsBytesStartArray) Grow() []uint32
func (*PostingsBytesStartArray) Init ¶
func (p *PostingsBytesStartArray) Init() []uint32
type PrefixCodedTerms ¶
type PrefixCodedTerms struct {
// contains filtered or unexported fields
}
PrefixCodedTerms Prefix codes term instances (prefixes are shared). This is expected to be faster to build than a FST and might also be more compact if there are no common suffixes. lucene.internal
func (*PrefixCodedTerms) Iterator ¶
func (t *PrefixCodedTerms) Iterator() interface{}
Iterator TODO: fix it
func (*PrefixCodedTerms) Size ¶
func (t *PrefixCodedTerms) Size() int
type ReaderCommit ¶
type ReaderCommit struct {
// contains filtered or unexported fields
}
func NewReaderCommit ¶
func NewReaderCommit(reader *StandardDirectoryReader, infos *SegmentInfos, dir store.Directory) (*ReaderCommit, error)
func (*ReaderCommit) CompareTo ¶
func (r *ReaderCommit) CompareTo(commit index.IndexCommit) int
func (*ReaderCommit) Delete ¶
func (r *ReaderCommit) Delete() error
func (*ReaderCommit) GetDirectory ¶
func (r *ReaderCommit) GetDirectory() store.Directory
func (*ReaderCommit) GetFileNames ¶
func (r *ReaderCommit) GetFileNames() (map[string]struct{}, error)
func (*ReaderCommit) GetGeneration ¶
func (r *ReaderCommit) GetGeneration() int64
func (*ReaderCommit) GetReader ¶
func (r *ReaderCommit) GetReader() index.DirectoryReader
func (*ReaderCommit) GetSegmentCount ¶
func (r *ReaderCommit) GetSegmentCount() int
func (*ReaderCommit) GetSegmentsFileName ¶
func (r *ReaderCommit) GetSegmentsFileName() string
func (*ReaderCommit) GetUserData ¶
func (r *ReaderCommit) GetUserData() (map[string]string, error)
func (*ReaderCommit) IsDeleted ¶
func (r *ReaderCommit) IsDeleted() bool
type ReaderPool ¶
type ReaderPool struct {
// contains filtered or unexported fields
}
ReaderPool Holds shared SegmentReader instances. IndexWriter uses SegmentReaders for 1) applying deletes/DV updates, 2) doing merges, 3) handing out a real-time reader. This pool reuses instances of the SegmentReaders in all these places if it is in "near real-time mode" (getReader() has been called on this instance).
func NewReaderPool ¶
func NewReaderPool(directory, originalDirectory store.Directory, segmentInfos *SegmentInfos, fieldNumbers *FieldNumbers, completedDelGenSupplier func() int64, softDeletesField string, reader *StandardDirectoryReader) (*ReaderPool, error)
func (*ReaderPool) Get ¶
func (p *ReaderPool) Get(info index.SegmentCommitInfo, create bool) (*ReadersAndUpdates, error)
Get Obtain a ReadersAndLiveDocs instance from the readerPool. If create is true, you must later call release(ReadersAndUpdates, boolean).
type ReaderSorter ¶
type ReaderSorter struct {
	Readers   []index.IndexReader
	FnCompare func(a, b index.LeafReader) int
}
func (*ReaderSorter) Len ¶
func (r *ReaderSorter) Len() int
func (*ReaderSorter) Less ¶
func (r *ReaderSorter) Less(i, j int) bool
func (*ReaderSorter) Swap ¶
func (r *ReaderSorter) Swap(i, j int)
type ReaderWarmer ¶
type ReaderWarmer interface {
Warm(reader index.LeafReader) error
}
ReaderWarmer If DirectoryReader.open(IndexWriter) has been called (ie, this writer is in near real-time mode), then after a merge completes, this class can be invoked to warm the reader on the newly merged segment, before the merge commits. This is not required for near real-time search, but will reduce search latency on opening a new near real-time reader after a merge completes.
lucene.experimental
NOTE: Warm(LeafReader) is called before any deletes have been carried over to the merged segment.
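A minimal warmer sketch; what to touch during warming is application-specific, so the body here is only a placeholder:

	type logWarmer struct{}

	func (w logWarmer) Warm(reader index.LeafReader) error {
		// run representative queries or walk hot data structures here so the
		// first near real-time search on the merged segment is not the one
		// paying the warm-up cost
		return nil
	}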
type ReadersAndUpdates ¶
type ReadersAndUpdates struct {
// contains filtered or unexported fields
}
ReadersAndUpdates Used by IndexWriter to hold open SegmentReaders (for searching or merging), plus pending deletes and updates, for a given segment
func NewReadersAndUpdates ¶
func NewReadersAndUpdates(indexCreatedVersionMajor int, info index.SegmentCommitInfo, pendingDeletes PendingDeletes) *ReadersAndUpdates
func (*ReadersAndUpdates) AddDVUpdate ¶
func (r *ReadersAndUpdates) AddDVUpdate(update DocValuesFieldUpdates) error
AddDVUpdate Adds a new resolved (meaning it maps docIDs to new values) doc values packet. We buffer these in RAM and write to disk when too much RAM is used or when a merge needs to kick off, or a commit/refresh.
func (*ReadersAndUpdates) DecRef ¶
func (r *ReadersAndUpdates) DecRef()
func (*ReadersAndUpdates) GetDelCount ¶
func (r *ReadersAndUpdates) GetDelCount() int
func (*ReadersAndUpdates) GetNumDVUpdates ¶
func (r *ReadersAndUpdates) GetNumDVUpdates() int
func (*ReadersAndUpdates) GetReader ¶
func (r *ReadersAndUpdates) GetReader(ctx context.Context, ioContext *store.IOContext) (*SegmentReader, error)
func (*ReadersAndUpdates) IncRef ¶
func (r *ReadersAndUpdates) IncRef()
func (*ReadersAndUpdates) IsFullyDeleted ¶
func (r *ReadersAndUpdates) IsFullyDeleted() (bool, error)
func (*ReadersAndUpdates) RefCount ¶
func (r *ReadersAndUpdates) RefCount() int64
func (*ReadersAndUpdates) Release ¶
func (r *ReadersAndUpdates) Release(sr *SegmentReader) error
type RefCount ¶
type RefCount struct {
// contains filtered or unexported fields
}
func NewRefCount ¶
type SegmentCoreReaders ¶
type SegmentCoreReaders struct {
// contains filtered or unexported fields
}
SegmentCoreReaders Holds core readers that are shared (unchanged) when SegmentReader is cloned or reopened
func NewSegmentCoreReaders ¶
func NewSegmentCoreReaders(ctx context.Context, dir store.Directory, si index.SegmentCommitInfo, ioContext *store.IOContext) (*SegmentCoreReaders, error)
type SegmentDocValues ¶
type SegmentDocValues struct {
// contains filtered or unexported fields
}
SegmentDocValues Manages the DocValuesProducer held by SegmentReader and keeps track of their reference counting.
func NewSegmentDocValues ¶
func NewSegmentDocValues() *SegmentDocValues
func (*SegmentDocValues) GetDocValuesProducer ¶
func (s *SegmentDocValues) GetDocValuesProducer(gen int64, si index.SegmentCommitInfo, dir store.Directory, infos index.FieldInfos) (index.DocValuesProducer, error)
type SegmentDocValuesProducer ¶
type SegmentDocValuesProducer struct {
// contains filtered or unexported fields
}
func NewSegmentDocValuesProducer ¶
func NewSegmentDocValuesProducer(si index.SegmentCommitInfo, dir store.Directory, coreInfos, allInfos index.FieldInfos, segDocValues *SegmentDocValues) (*SegmentDocValuesProducer, error)
func (*SegmentDocValuesProducer) CheckIntegrity ¶
func (s *SegmentDocValuesProducer) CheckIntegrity() error
func (*SegmentDocValuesProducer) Close ¶
func (s *SegmentDocValuesProducer) Close() error
func (*SegmentDocValuesProducer) GetBinary ¶
func (s *SegmentDocValuesProducer) GetBinary(ctx context.Context, field *document.FieldInfo) (index.BinaryDocValues, error)
func (*SegmentDocValuesProducer) GetMergeInstance ¶
func (s *SegmentDocValuesProducer) GetMergeInstance() index.DocValuesProducer
func (*SegmentDocValuesProducer) GetNumeric ¶
func (s *SegmentDocValuesProducer) GetNumeric(ctx context.Context, field *document.FieldInfo) (index.NumericDocValues, error)
func (*SegmentDocValuesProducer) GetSorted ¶
func (s *SegmentDocValuesProducer) GetSorted(ctx context.Context, fieldInfo *document.FieldInfo) (index.SortedDocValues, error)
func (*SegmentDocValuesProducer) GetSortedNumeric ¶
func (s *SegmentDocValuesProducer) GetSortedNumeric(ctx context.Context, field *document.FieldInfo) (index.SortedNumericDocValues, error)
func (*SegmentDocValuesProducer) GetSortedSet ¶
func (s *SegmentDocValuesProducer) GetSortedSet(ctx context.Context, field *document.FieldInfo) (index.SortedSetDocValues, error)
type SegmentInfo ¶
type SegmentInfo struct {
// contains filtered or unexported fields
}
SegmentInfo Information about a segment such as its name, directory, and files related to the segment.
func NewSegmentInfo ¶
func (*SegmentInfo) AddFile ¶
func (s *SegmentInfo) AddFile(file string) error
AddFile Add this file to the set of files written for this segment.
func (*SegmentInfo) Dir ¶
func (s *SegmentInfo) Dir() store.Directory
func (*SegmentInfo) Files ¶
func (s *SegmentInfo) Files() map[string]struct{}
Files Return all files referenced by this SegmentInfo.
func (*SegmentInfo) GetAttributes ¶
func (s *SegmentInfo) GetAttributes() map[string]string
GetAttributes Returns the internal codec attributes map. Returns: internal codec attributes map.
func (*SegmentInfo) GetCodec ¶
func (s *SegmentInfo) GetCodec() index.Codec
func (*SegmentInfo) GetDiagnostics ¶
func (s *SegmentInfo) GetDiagnostics() map[string]string
GetDiagnostics Returns diagnostics saved into the segment when it was written. The map is immutable.
func (*SegmentInfo) GetID ¶
func (s *SegmentInfo) GetID() []byte
func (*SegmentInfo) GetIndexSort ¶
func (s *SegmentInfo) GetIndexSort() index.Sort
func (*SegmentInfo) GetMinVersion ¶
func (s *SegmentInfo) GetMinVersion() *version.Version
func (*SegmentInfo) GetUseCompoundFile ¶
func (s *SegmentInfo) GetUseCompoundFile() bool
GetUseCompoundFile Returns true if this segment is stored as a compound file; else, false.
func (*SegmentInfo) GetVersion ¶
func (s *SegmentInfo) GetVersion() *version.Version
func (*SegmentInfo) MaxDoc ¶
func (s *SegmentInfo) MaxDoc() (int, error)
MaxDoc Returns number of documents in this segment (deletions are not taken into account).
func (*SegmentInfo) Name ¶
func (s *SegmentInfo) Name() string
func (*SegmentInfo) NamedForThisSegment ¶
func (s *SegmentInfo) NamedForThisSegment(file string) string
NamedForThisSegment strips any segment name from the file and names it with this segment. This is because "segment names" can change, e.g. by addIndexes(Dir).
func (*SegmentInfo) PutAttribute ¶
func (s *SegmentInfo) PutAttribute(key, value string) string
PutAttribute Puts a codec attribute item. This is a key-item mapping for the field that the codec can use to store additional metadata, and will be available to the codec when reading the segment via getAttribute(String). If an item already exists for the field, it will be replaced with the new item. This method makes a copy on write for every attribute change.
func (*SegmentInfo) SetCodec ¶
func (s *SegmentInfo) SetCodec(codec index.Codec)
func (*SegmentInfo) SetDiagnostics ¶
func (s *SegmentInfo) SetDiagnostics(diagnostics map[string]string)
func (*SegmentInfo) SetFiles ¶
func (s *SegmentInfo) SetFiles(files map[string]struct{})
func (*SegmentInfo) SetMaxDoc ¶
func (s *SegmentInfo) SetMaxDoc(maxDoc int) error
func (*SegmentInfo) SetUseCompoundFile ¶
func (s *SegmentInfo) SetUseCompoundFile(isCompoundFile bool)
SetUseCompoundFile Mark whether this segment is stored as a compound file. Params: isCompoundFile – true if this is a compound file; else, false
type SegmentInfos ¶
type SegmentInfos struct {
// contains filtered or unexported fields
}
SegmentInfos A collection of segmentInfo objects with methods for operating on those segments in relation to the file system. The active segments in the index are stored in the segment info file, segments_N. There may be one or more segments_N files in the index; however, the one with the largest generation is the active one (when older segments_N files are present it's because they temporarily cannot be deleted, or a custom IndexDeletionPolicy is in use). This file lists each segment by name and has details about the codec and generation of deletes.
Files:
- segments_N: Header, LuceneVersion, Version, NameCounter, SegCount, MinSegmentLuceneVersion, <SegName, SegID, SegCodec, DelGen, DeletionCount, FieldInfosGen, DocValuesGen, UpdatesFiles>^SegCount, CommitUserData, Footer
Data types:
- Header --> IndexHeader
- LuceneVersion --> Which Lucene code Version was used for this commit, written as three vInt: major, minor, bugfix
- MinSegmentLuceneVersion --> Lucene code Version of the oldest segment, written as three vInt: major, minor, bugfix; this is written only if there's at least one segment
- NameCounter, SegCount, DeletionCount --> Int32
- Generation, Version, DelGen, Checksum, FieldInfosGen, DocValuesGen --> Int64
- SegID --> Int8^ID_LENGTH
- SegName, SegCodec --> String
- CommitUserData --> Map<String,String>
- UpdatesFiles --> Map<Int32, Set<String>>
- Footer --> CodecFooter
Field Descriptions:
- Version counts how often the index has been changed by adding or deleting documents.
- NameCounter is used to generate names for new segment files.
- SegName is the name of the segment, and is used as the file name prefix for all of the files that compose the segment's index.
- DelGen is the generation count of the deletes file. If this is -1, there are no deletes. Anything above zero means there are deletes stored by LiveDocsFormat.
- DeletionCount records the number of deleted documents in this segment.
- SegCodec is the name of the Codec that encoded this segment.
- SegID is the identifier of the Codec that encoded this segment.
- CommitUserData stores an optional user-supplied opaque Map<String,String> that was passed to IndexWriter.setLiveCommitData(Iterable).
- FieldInfosGen is the generation count of the fieldInfos file. If this is -1, there are no updates to the fieldInfos in that segment. Anything above zero means there are updates to fieldInfos stored by FieldInfosFormat .
- DocValuesGen is the generation count of the updatable DocValues. If this is -1, there are no updates to DocValues in that segment. Anything above zero means there are updates to DocValues stored by DocValuesFormat.
- UpdatesFiles stores the set of files that were updated in that segment per field.
lucene.experimental
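For illustration, the active segments of a commit point can be enumerated through the indexed accessors documented below:

	func segmentCommitInfos(infos *SegmentInfos) []index.SegmentCommitInfo {
		out := make([]index.SegmentCommitInfo, 0, infos.Size())
		for i := 0; i < infos.Size(); i++ {
			out = append(out, infos.Info(i))
		}
		return out
	}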
func NewSegmentInfos ¶
func NewSegmentInfos(indexCreatedVersionMajor int) *SegmentInfos
func ReadCommit ¶
func ReadCommitFromChecksumIndexInput ¶
func ReadCommitFromChecksumIndexInput(ctx context.Context, directory store.Directory, input store.ChecksumIndexInput, generation int64) (*SegmentInfos, error)
ReadCommitFromChecksumIndexInput Read the commit from the provided ChecksumIndexInput.
func ReadLatestCommit ¶
ReadLatestCommit Find the latest commit (segments_N file) and load all SegmentCommitInfos.
func (*SegmentInfos) Add ¶
func (s *SegmentInfos) Add(si index.SegmentCommitInfo) error
func (*SegmentInfos) AddAll ¶
func (s *SegmentInfos) AddAll(sis []index.SegmentCommitInfo) error
func (*SegmentInfos) AsList ¶
func (s *SegmentInfos) AsList() []index.SegmentCommitInfo
func (*SegmentInfos) Changed ¶
func (s *SegmentInfos) Changed()
Changed Call this before committing if changes have been made to the segments.
func (*SegmentInfos) Clone ¶
func (s *SegmentInfos) Clone() *SegmentInfos
Clone Returns a copy of this instance, also copying each SegmentInfo.
func (*SegmentInfos) CreateBackupSegmentInfos ¶
func (s *SegmentInfos) CreateBackupSegmentInfos() []index.SegmentCommitInfo
func (*SegmentInfos) Files ¶
func (s *SegmentInfos) Files(includeSegmentsFile bool) (map[string]struct{}, error)
func (*SegmentInfos) GetGeneration ¶
func (s *SegmentInfos) GetGeneration() int64
func (*SegmentInfos) GetLastGeneration ¶
func (s *SegmentInfos) GetLastGeneration() int64
func (*SegmentInfos) GetSegmentsFileName ¶
func (s *SegmentInfos) GetSegmentsFileName() string
func (*SegmentInfos) GetUserData ¶
func (s *SegmentInfos) GetUserData() map[string]string
func (*SegmentInfos) GetVersion ¶
func (s *SegmentInfos) GetVersion() int64
func (*SegmentInfos) Info ¶
func (s *SegmentInfos) Info(j int) index.SegmentCommitInfo
func (*SegmentInfos) Remove ¶
func (s *SegmentInfos) Remove(index int)
func (*SegmentInfos) Replace ¶
func (s *SegmentInfos) Replace(other *SegmentInfos) error
func (*SegmentInfos) RollbackCommit ¶
func (s *SegmentInfos) RollbackCommit(directory store.Directory) error
func (*SegmentInfos) SetNextWriteGeneration ¶
func (s *SegmentInfos) SetNextWriteGeneration(generation int64)
func (*SegmentInfos) SetUserData ¶
func (s *SegmentInfos) SetUserData(data map[string]string, b bool)
func (*SegmentInfos) Size ¶
func (s *SegmentInfos) Size() int
func (*SegmentInfos) TotalMaxDoc ¶
func (s *SegmentInfos) TotalMaxDoc() int64
func (*SegmentInfos) UpdateGeneration ¶
func (s *SegmentInfos) UpdateGeneration(other *SegmentInfos)
func (*SegmentInfos) UpdateGenerationVersionAndCounter ¶
func (s *SegmentInfos) UpdateGenerationVersionAndCounter(other *SegmentInfos)
type SegmentMap ¶
type SegmentMap struct {
// contains filtered or unexported fields
}
func NewSegmentMap ¶
func NewSegmentMap(weight []int64) *SegmentMap
func (*SegmentMap) NewToOld ¶
func (s *SegmentMap) NewToOld(segment int) int
func (*SegmentMap) OldToNew ¶
func (s *SegmentMap) OldToNew(segment int) int
type SegmentMerger ¶
type SegmentMerger struct {
// contains filtered or unexported fields
}
The SegmentMerger class combines two or more Segments, represented by an IndexReader, into a single Segment. Call the merge method to combine the segments.
func NewSegmentMerger ¶
func NewSegmentMerger(readers []index.CodecReader, segmentInfo *SegmentInfo, dir store.Directory, fieldNumbers *FieldNumbers, ioCtx *store.IOContext) (*SegmentMerger, error)
func (*SegmentMerger) ShouldMerge ¶
func (s *SegmentMerger) ShouldMerge() bool
type SegmentReader ¶
type SegmentReader struct {
	*BaseCodecReader
	// contains filtered or unexported fields
}
SegmentReader IndexReader implementation over a single segment. Instances pointing to the same segment (but with different deletes, etc) may share the same core data. lucene.experimental
func NewSegmentReader ¶
func NewSegmentReader(ctx context.Context, si index.SegmentCommitInfo, createdVersionMajor int, ioContext *store.IOContext) (*SegmentReader, error)
NewSegmentReader Constructs a new SegmentReader with a new core.
func (*SegmentReader) Directory ¶
func (s *SegmentReader) Directory() store.Directory
func (*SegmentReader) DoClose ¶
func (s *SegmentReader) DoClose() error
func (SegmentReader) DocumentWithFields ¶
func (*SegmentReader) GetDocValuesReader ¶
func (s *SegmentReader) GetDocValuesReader() index.DocValuesProducer
func (*SegmentReader) GetFieldInfos ¶
func (s *SegmentReader) GetFieldInfos() index.FieldInfos
func (*SegmentReader) GetFieldsReader ¶
func (s *SegmentReader) GetFieldsReader() index.StoredFieldsReader
func (*SegmentReader) GetHardLiveDocs ¶
func (s *SegmentReader) GetHardLiveDocs() util.Bits
GetHardLiveDocs Returns the live docs that are not hard-deleted. This is an expert API to be used with soft-deletes to filter out documents that were hard-deleted, for instance due to aborted documents, or to distinguish soft-deleted and hard-deleted documents, i.e. a rolled-back tombstone. lucene.experimental
func (*SegmentReader) GetLiveDocs ¶
func (s *SegmentReader) GetLiveDocs() util.Bits
func (*SegmentReader) GetMetaData ¶
func (s *SegmentReader) GetMetaData() index.LeafMetaData
func (*SegmentReader) GetNormsReader ¶
func (s *SegmentReader) GetNormsReader() index.NormsProducer
func (*SegmentReader) GetOriginalSegmentInfo ¶
func (s *SegmentReader) GetOriginalSegmentInfo() index.SegmentCommitInfo
GetOriginalSegmentInfo Returns the original SegmentInfo passed to the segment reader on creation time. getSegmentInfo() returns a clone of this instance.
func (*SegmentReader) GetPointsReader ¶
func (s *SegmentReader) GetPointsReader() index.PointsReader
func (*SegmentReader) GetPostingsReader ¶
func (s *SegmentReader) GetPostingsReader() index.FieldsProducer
func (*SegmentReader) GetReaderCacheHelper ¶
func (s *SegmentReader) GetReaderCacheHelper() index.CacheHelper
func (SegmentReader) GetRefCount ¶
func (r SegmentReader) GetRefCount() int
func (SegmentReader) GetTermVector ¶
func (*SegmentReader) GetTermVectorsReader ¶
func (s *SegmentReader) GetTermVectorsReader() index.TermVectorsReader
func (SegmentReader) HasDeletions ¶
func (r SegmentReader) HasDeletions() bool
func (SegmentReader) Leaves ¶
func (r SegmentReader) Leaves() ([]index.LeafReaderContext, error)
func (*SegmentReader) MaxDoc ¶
func (s *SegmentReader) MaxDoc() int
func (*SegmentReader) New ¶
func (s *SegmentReader) New(si index.SegmentCommitInfo, liveDocs, hardLiveDocs util.Bits, numDocs int, isNRT bool) (*SegmentReader, error)
New Create new SegmentReader sharing core from a previous SegmentReader and using the provided liveDocs, and recording whether those liveDocs were carried in ram (isNRT=true).
func (*SegmentReader) NewReadersAndUpdates ¶
func (s *SegmentReader) NewReadersAndUpdates(indexCreatedVersionMajor int, pendingDeletes PendingDeletes) (*ReadersAndUpdates, error)
NewReadersAndUpdates Init from a previously opened SegmentReader. NOTE: steals incoming ref from reader.
func (SegmentReader) NotifyReaderClosedListeners ¶
func (r SegmentReader) NotifyReaderClosedListeners() error
NotifyReaderClosedListeners overridden by StandardDirectoryReader and SegmentReader
func (SegmentReader) NumDeletedDocs ¶
func (r SegmentReader) NumDeletedDocs() int
func (*SegmentReader) NumDocs ¶
func (s *SegmentReader) NumDocs() int
func (SegmentReader) RegisterParentReader ¶
func (r SegmentReader) RegisterParentReader(reader index.IndexReader)
RegisterParentReader Expert: This method is called by IndexReaders which wrap other readers (e.g. CompositeReader or FilterLeafReader) to register the parent at the child (this reader) on construction of the parent. When this reader is closed, it will mark all registered parents as closed, too. The references to parent readers are weak only, so they can be GCed once they are no longer in use.
type SegmentState ¶
type SegmentState struct {
// contains filtered or unexported fields
}
func (*SegmentState) Close ¶
func (s *SegmentState) Close() error
type SimScorerSPI ¶
type SingleTermsEnum ¶
type SingleTermsEnum struct {
	*FilteredTermsEnumBase
	// contains filtered or unexported fields
}
SingleTermsEnum Subclass of FilteredTermsEnum for enumerating a single term. For example, this can be used by MultiTermQuerys that need only visit one term, but want to preserve MultiTermQuery semantics such as MultiTermQuery.getRewriteMethod.
func NewSingleTermsEnum ¶
func NewSingleTermsEnum(tenum index.TermsEnum, termText []byte) *SingleTermsEnum
func (*SingleTermsEnum) Accept ¶
func (s *SingleTermsEnum) Accept(term []byte) (AcceptStatus, error)
type SingleValueDocValuesFieldUpdates ¶
type SingleValueDocValuesFieldUpdates struct { }
type SkipBuffer ¶
type SkipBuffer struct {
	*store.BaseIndexInput
	// contains filtered or unexported fields
}
SkipBuffer used to buffer the top skip levels
func NewSkipBuffer ¶
func NewSkipBuffer(in store.IndexInput, length int) (*SkipBuffer, error)
func (*SkipBuffer) Clone ¶
func (s *SkipBuffer) Clone() store.CloneReader
func (*SkipBuffer) Close ¶
func (s *SkipBuffer) Close() error
func (*SkipBuffer) GetFilePointer ¶
func (s *SkipBuffer) GetFilePointer() int64
func (*SkipBuffer) Length ¶
func (s *SkipBuffer) Length() int64
func (*SkipBuffer) ReadByte ¶
func (s *SkipBuffer) ReadByte() (byte, error)
func (*SkipBuffer) Slice ¶
func (s *SkipBuffer) Slice(sliceDescription string, offset, length int64) (store.IndexInput, error)
type SlowImpactsEnum ¶
type SlowImpactsEnum struct {
// contains filtered or unexported fields
}
func NewSlowImpactsEnum ¶
func NewSlowImpactsEnum(delegate index.PostingsEnum) *SlowImpactsEnum
func (*SlowImpactsEnum) AdvanceShallow ¶
func (s *SlowImpactsEnum) AdvanceShallow(ctx context.Context, target int) error
func (*SlowImpactsEnum) Cost ¶
func (s *SlowImpactsEnum) Cost() int64
func (*SlowImpactsEnum) DocID ¶
func (s *SlowImpactsEnum) DocID() int
func (*SlowImpactsEnum) EndOffset ¶
func (s *SlowImpactsEnum) EndOffset() (int, error)
func (*SlowImpactsEnum) Freq ¶
func (s *SlowImpactsEnum) Freq() (int, error)
func (*SlowImpactsEnum) GetImpacts ¶
func (s *SlowImpactsEnum) GetImpacts() (index.Impacts, error)
func (*SlowImpactsEnum) GetPayload ¶
func (s *SlowImpactsEnum) GetPayload() ([]byte, error)
func (*SlowImpactsEnum) NextDoc ¶
func (s *SlowImpactsEnum) NextDoc(ctx context.Context) (int, error)
func (*SlowImpactsEnum) NextPosition ¶
func (s *SlowImpactsEnum) NextPosition() (int, error)
func (*SlowImpactsEnum) SlowAdvance ¶
func (*SlowImpactsEnum) StartOffset ¶
func (s *SlowImpactsEnum) StartOffset() (int, error)
type SortedDocValuesProvider ¶
type SortedDocValuesProvider interface {
Get(reader index.LeafReader) (index.SortedDocValues, error)
}
SortedDocValuesProvider Provide a SortedDocValues instance for a LeafReader
type SortedDocValuesTermsEnum ¶
type SortedDocValuesTermsEnum struct { }
SortedDocValuesTermsEnum Implements a TermsEnum over the provided SortedDocValues.
func NewSortedDocValuesTermsEnum ¶
func NewSortedDocValuesTermsEnum(values index.SortedDocValues) *SortedDocValuesTermsEnum
func (*SortedDocValuesTermsEnum) Attributes ¶
func (s *SortedDocValuesTermsEnum) Attributes() *attribute.Source
func (*SortedDocValuesTermsEnum) DocFreq ¶
func (s *SortedDocValuesTermsEnum) DocFreq() (int, error)
func (*SortedDocValuesTermsEnum) Impacts ¶
func (s *SortedDocValuesTermsEnum) Impacts(flags int) (index.ImpactsEnum, error)
func (*SortedDocValuesTermsEnum) Next ¶
func (s *SortedDocValuesTermsEnum) Next(context.Context) ([]byte, error)
func (*SortedDocValuesTermsEnum) Ord ¶
func (s *SortedDocValuesTermsEnum) Ord() (int64, error)
func (*SortedDocValuesTermsEnum) Postings ¶
func (s *SortedDocValuesTermsEnum) Postings(reuse index.PostingsEnum, flags int) (index.PostingsEnum, error)
func (*SortedDocValuesTermsEnum) SeekCeil ¶
func (s *SortedDocValuesTermsEnum) SeekCeil(ctx context.Context, text []byte) (index.SeekStatus, error)
func (*SortedDocValuesTermsEnum) SeekExactByOrd ¶
func (s *SortedDocValuesTermsEnum) SeekExactByOrd(ctx context.Context, ord int64) error
func (*SortedDocValuesTermsEnum) SeekExactExpert ¶
func (*SortedDocValuesTermsEnum) Term ¶
func (s *SortedDocValuesTermsEnum) Term() ([]byte, error)
func (*SortedDocValuesTermsEnum) TermState ¶
func (s *SortedDocValuesTermsEnum) TermState() (index.TermState, error)
func (*SortedDocValuesTermsEnum) TotalTermFreq ¶
func (s *SortedDocValuesTermsEnum) TotalTermFreq() (int64, error)
type SortedDocValuesWriter ¶
type SortedDocValuesWriter struct { }
func (*SortedDocValuesWriter) Flush ¶
func (s *SortedDocValuesWriter) Flush(state *index.SegmentWriteState, sortMap index.DocMap, consumer index.DocValuesConsumer) error
func (*SortedDocValuesWriter) GetDocValues ¶
func (s *SortedDocValuesWriter) GetDocValues() types.DocIdSetIterator
type SortedNumericDocValuesWriter ¶
type SortedNumericDocValuesWriter struct { }
func (*SortedNumericDocValuesWriter) Flush ¶
func (s *SortedNumericDocValuesWriter) Flush(state *index.SegmentWriteState, sortMap index.DocMap, consumer index.DocValuesConsumer) error
func (*SortedNumericDocValuesWriter) GetDocValues ¶
func (s *SortedNumericDocValuesWriter) GetDocValues() types.DocIdSetIterator
type SortedSetDocValuesWriter ¶
type SortedSetDocValuesWriter struct { }
func (*SortedSetDocValuesWriter) Flush ¶
func (s *SortedSetDocValuesWriter) Flush(state *index.SegmentWriteState, sortMap index.DocMap, consumer index.DocValuesConsumer) error
func (*SortedSetDocValuesWriter) GetDocValues ¶
func (s *SortedSetDocValuesWriter) GetDocValues() types.DocIdSetIterator
type SortedSetSelector ¶
type SortedSetSelector struct { }
SortedSetSelector Selects an item from the document's set to use as the representative item.
type SortedSetSelectorType ¶
type SortedSetSelectorType int
type SortedSetSortField ¶
type SortedSetSortField struct {
	*BaseSortField
	// contains filtered or unexported fields
}
SortedSetSortField SortField for SortedSetDocValues. A SortedSetDocValues contains multiple values for a field, so sorting with this technique "selects" an item as the representative sort item for the document. By default, the minimum item in the set is selected as the sort item, but this can be customized. Selectors other than the default do have some limitations to ensure that all selections happen in constant time, for performance. Like sorting by string, this also supports sorting missing values as first or last, via setMissingValue(Object). See Also: SortedSetSelector
func NewSortedSetSortField ¶
func NewSortedSetSortField(field string, reverse bool) *SortedSetSortField
func NewSortedSetSortFieldV1 ¶
func NewSortedSetSortFieldV1(field string, reverse bool, selector SortedSetSelectorType) *SortedSetSortField
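A hedged sketch of building an index sort over a multi-valued field. The field name is illustrative, the default selector (the minimum item) is used, and it assumes *SortedSetSortField satisfies index.SortField via its embedded BaseSortField:

	func categorySort() index.Sort {
		sf := NewSortedSetSortField("category", false) // ascending
		return NewSort([]index.SortField{sf})
	}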
type SortedSetSortFieldProvider ¶
type SortedSetSortFieldProvider struct { }
func NewSortedSetSortFieldProvider ¶
func NewSortedSetSortFieldProvider() *SortedSetSortFieldProvider
func (*SortedSetSortFieldProvider) GetName ¶
func (s *SortedSetSortFieldProvider) GetName() string
func (*SortedSetSortFieldProvider) ReadSortField ¶
func (*SortedSetSortFieldProvider) WriteSortField ¶
func (s *SortedSetSortFieldProvider) WriteSortField(ctx context.Context, sf index.SortField, out store.DataOutput) error
type SorterDefault ¶
type SorterDefault struct {
// contains filtered or unexported fields
}
type SortingNumericDocValues ¶
type SortingNumericDocValues struct {
// contains filtered or unexported fields
}
func NewSortingNumericDocValues ¶
func NewSortingNumericDocValues(dvs *NumericDVs) *SortingNumericDocValues
func (*SortingNumericDocValues) AdvanceExact ¶
func (s *SortingNumericDocValues) AdvanceExact(target int) (bool, error)
func (*SortingNumericDocValues) Cost ¶
func (s *SortingNumericDocValues) Cost() int64
func (*SortingNumericDocValues) DocID ¶
func (s *SortingNumericDocValues) DocID() int
func (*SortingNumericDocValues) LongValue ¶
func (s *SortingNumericDocValues) LongValue() (int64, error)
func (*SortingNumericDocValues) NextDoc ¶
func (s *SortingNumericDocValues) NextDoc(ctx context.Context) (int, error)
func (*SortingNumericDocValues) SlowAdvance ¶
type StandardDirectoryReader ¶
type StandardDirectoryReader struct {
// contains filtered or unexported fields
}
StandardDirectoryReader Default implementation of DirectoryReader.
func NewStandardDirectoryReader ¶
func NewStandardDirectoryReader(directory store.Directory, readers []index.IndexReader, writer *IndexWriter, sis *SegmentInfos, compareFunc CompareLeafReader, applyAllDeletes, writeAllDeletes bool) (*StandardDirectoryReader, error)
NewStandardDirectoryReader package private constructor, called only from static open() methods
func (*StandardDirectoryReader) GetIndexCommit ¶
func (s *StandardDirectoryReader) GetIndexCommit() (index.IndexCommit, error)
func (*StandardDirectoryReader) GetSegmentInfos ¶
func (s *StandardDirectoryReader) GetSegmentInfos() *SegmentInfos
func (*StandardDirectoryReader) GetVersion ¶
func (s *StandardDirectoryReader) GetVersion() int64
type StoredFieldsConsumer ¶
type StoredFieldsConsumer struct {
// contains filtered or unexported fields
}
func NewStoredFieldsConsumer ¶
func NewStoredFieldsConsumer(codec index.Codec, dir store.Directory, info *SegmentInfo) *StoredFieldsConsumer
func (*StoredFieldsConsumer) Finish ¶
func (s *StoredFieldsConsumer) Finish(ctx context.Context, maxDoc int) error
func (*StoredFieldsConsumer) FinishDocument ¶
func (s *StoredFieldsConsumer) FinishDocument() error
func (*StoredFieldsConsumer) Flush ¶
func (s *StoredFieldsConsumer) Flush(ctx context.Context, state *index.SegmentWriteState, sortMap index.DocMap) error
func (*StoredFieldsConsumer) StartDocument ¶
func (s *StoredFieldsConsumer) StartDocument(ctx context.Context, docID int) error
type StringDocComparator ¶
type StringDocComparator struct {
// contains filtered or unexported fields
}
func (*StringDocComparator) Compare ¶
func (s *StringDocComparator) Compare(docID1, docID2 int) int
type StringSorter ¶
type StringSorter struct {
// contains filtered or unexported fields
}
func NewStringSorter ¶
func NewStringSorter(providerName, missingValue string, reverse bool, valuesProvider SortedDocValuesProvider) *StringSorter
func (*StringSorter) GetComparableProviders ¶
func (s *StringSorter) GetComparableProviders(readers []index.LeafReader) ([]index.ComparableProvider, error)
func (*StringSorter) GetDocComparator ¶
func (s *StringSorter) GetDocComparator(reader index.LeafReader, maxDoc int) (index.DocComparator, error)
func (*StringSorter) GetProviderName ¶
func (s *StringSorter) GetProviderName() string
type TermData ¶
type TermData struct {
// contains filtered or unexported fields
}
func NewTermData ¶
func NewTermData(text string, docs []int, positions [][]PositionData) *TermData
type TermDataList ¶
type TermDataList []*TermData
func (TermDataList) Len ¶
func (t TermDataList) Len() int
func (TermDataList) Less ¶
func (t TermDataList) Less(i, j int) bool
func (TermDataList) Swap ¶
func (t TermDataList) Swap(i, j int)
type TermDocsIterator ¶
type TermDocsIterator struct { }
type TermNode ¶
type TermNode struct {
// contains filtered or unexported fields
}
func NewTermNode ¶
type TermStates ¶
type TermStates struct {
// contains filtered or unexported fields
}
TermStates Maintains a TermState view over IndexReader instances containing a single term. The TermStates doesn't track whether the given TermState objects are valid, nor whether the TermState instances refer to the same terms in the associated readers.
func BuildTermStates ¶
func BuildTermStates(context index.IndexReaderContext, term index.Term, needsStats bool) (*TermStates, error)
BuildTermStates Creates a TermStates from a top-level IndexReaderContext and the given Term. This method looks up the given term in all of the context's leaf readers and registers each reader containing the term in the returned TermStates, keyed by the leaf reader's ordinal. Note: the given context must be a top-level context. Params: needsStats – if true, all leaf contexts are visited up front to collect term statistics; otherwise, the TermState objects are built only when requested.
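A minimal sketch, assuming topContext is the reader's top-level index.IndexReaderContext (how it is obtained from the reader is not shown here) and that the field name and term text are purely illustrative:

    term := NewTerm("body", []byte("lucene"))
    states, err := BuildTermStates(topContext, term, true) // needsStats: collect stats up front
    if err != nil {
        return err
    }
    df, err := states.DocFreq() // aggregated across all leaves that contain the term
    if err != nil {
        return err
    }
    _ = df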
func NewTermStates ¶
func NewTermStates(term index.Term, context index.IndexReaderContext) *TermStates
func (*TermStates) AccumulateStatistics ¶
func (r *TermStates) AccumulateStatistics(docFreq int, totalTermFreq int64)
AccumulateStatistics Expert: Accumulate term statistics.
func (*TermStates) DocFreq ¶
func (r *TermStates) DocFreq() (int, error)
DocFreq Returns the accumulated document frequency of all TermState instances passed to Register(state, ord, docFreq, totalTermFreq).
func (*TermStates) Get ¶
func (r *TermStates) Get(ctx index.LeafReaderContext) (index.TermState, error)
Get Returns the TermState for a leaf reader context, or nil if no TermState for the context was registered. Params: ctx – the LeafReaderContext to get the TermState for. Returns: the TermState for the given reader's ordinal, or nil if no TermState for the reader was registered.
func (*TermStates) Register ¶
func (r *TermStates) Register(state index.TermState, ord, docFreq int, totalTermFreq int64)
func (*TermStates) Register2 ¶
func (r *TermStates) Register2(state index.TermState, ord int)
Register2 Expert: Registers and associates a TermState with a leaf ordinal. The leaf ordinal should be derived from an IndexReaderContext's leaf ord. Unlike Register(state, ord, docFreq, totalTermFreq), this method does NOT update term statistics.
func (*TermStates) TotalTermFreq ¶
func (r *TermStates) TotalTermFreq() (int64, error)
TotalTermFreq Returns the accumulated total term frequency of all TermState instances passed to Register(state, ord, docFreq, totalTermFreq).
func (*TermStates) WasBuiltFor ¶
func (r *TermStates) WasBuiltFor(context index.IndexReaderContext) bool
type TermVectorsConsumer ¶
type TermVectorsConsumer struct {
	*BaseTermsHash
	// contains filtered or unexported fields
}
func NewTermVectorsConsumer ¶
func NewTermVectorsConsumer(intBlockAllocator ints.IntsAllocator, byteBlockAllocator bytesref.Allocator, directory store.Directory, info *SegmentInfo, codec index.Codec) *TermVectorsConsumer
func (*TermVectorsConsumer) AddField ¶
func (t *TermVectorsConsumer) AddField(invertState *index.FieldInvertState, fieldInfo *document.FieldInfo) (TermsHashPerField, error)
func (*TermVectorsConsumer) FinishDocument ¶
func (t *TermVectorsConsumer) FinishDocument(ctx context.Context, docID int) error
func (*TermVectorsConsumer) Flush ¶
func (t *TermVectorsConsumer) Flush(ctx context.Context, fieldsToFlush map[string]TermsHashPerField, state *index.SegmentWriteState, sortMap index.DocMap, norms index.NormsProducer) error
func (*TermVectorsConsumer) SetTermBytePool ¶
func (t *TermVectorsConsumer) SetTermBytePool(termBytePool *bytesref.BlockPool)
type TermVectorsConsumerPerField ¶
type TermVectorsConsumerPerField struct {
// contains filtered or unexported fields
}
func NewTermVectorsConsumerPerField ¶
func NewTermVectorsConsumerPerField(invertState *index.FieldInvertState, termsHash *TermVectorsConsumer, fieldInfo *document.FieldInfo) (*TermVectorsConsumerPerField, error)
func (TermVectorsConsumerPerField) Add ¶
func (t TermVectorsConsumerPerField) Add(termBytes []byte, docID int) error
Add Called once per inverted token. This is the primary entry point (for the first TermsHash); postings use this API.
func (TermVectorsConsumerPerField) Add2nd ¶
func (t TermVectorsConsumerPerField) Add2nd(textStart, docID int) error
Add2nd Secondary entry point (for the second and subsequent TermsHash): the token text has already been "interned" into textStart, so we hash by textStart. Term vectors use this API.
func (*TermVectorsConsumerPerField) AddTerm ¶
func (t *TermVectorsConsumerPerField) AddTerm(termID, docID int) error
func (*TermVectorsConsumerPerField) CreatePostingsArray ¶
func (t *TermVectorsConsumerPerField) CreatePostingsArray(size int) ParallelPostingsArray
func (*TermVectorsConsumerPerField) Finish ¶
func (t *TermVectorsConsumerPerField) Finish() error
func (*TermVectorsConsumerPerField) FinishDocument ¶
func (t *TermVectorsConsumerPerField) FinishDocument() error
func (TermVectorsConsumerPerField) GetNextPerField ¶
func (t TermVectorsConsumerPerField) GetNextPerField() TermsHashPerField
func (TermVectorsConsumerPerField) GetPostingsArray ¶
func (t TermVectorsConsumerPerField) GetPostingsArray() ParallelPostingsArray
func (*TermVectorsConsumerPerField) NewPostingsArray ¶
func (t *TermVectorsConsumerPerField) NewPostingsArray()
func (*TermVectorsConsumerPerField) NewTerm ¶
func (t *TermVectorsConsumerPerField) NewTerm(termID, docID int) error
func (*TermVectorsConsumerPerField) Reset ¶
func (t *TermVectorsConsumerPerField) Reset() error
func (TermVectorsConsumerPerField) SetPostingsArray ¶
func (t TermVectorsConsumerPerField) SetPostingsArray(v ParallelPostingsArray)
func (*TermVectorsConsumerPerField) Start ¶
func (t *TermVectorsConsumerPerField) Start(field document.IndexableField, first bool) bool
type TermVectorsConsumerPerFields ¶
type TermVectorsConsumerPerFields []*TermVectorsConsumerPerField
func (TermVectorsConsumerPerFields) Len ¶
func (p TermVectorsConsumerPerFields) Len() int
func (TermVectorsConsumerPerFields) Less ¶
func (p TermVectorsConsumerPerFields) Less(i, j int) bool
func (TermVectorsConsumerPerFields) Swap ¶
func (p TermVectorsConsumerPerFields) Swap(i, j int)
type TermVectorsPostingsArray ¶
type TermVectorsPostingsArray struct {
	*BaseParallelPostingsArray
	// contains filtered or unexported fields
}
func NewTermVectorsPostingsArray ¶
func NewTermVectorsPostingsArray() *TermVectorsPostingsArray
func (*TermVectorsPostingsArray) BytesPerPosting ¶
func (t *TermVectorsPostingsArray) BytesPerPosting() int
func (*TermVectorsPostingsArray) Grow ¶
func (t *TermVectorsPostingsArray) Grow()
func (*TermVectorsPostingsArray) NewInstance ¶
func (t *TermVectorsPostingsArray) NewInstance() ParallelPostingsArray
func (*TermVectorsPostingsArray) SetFreqs ¶
func (t *TermVectorsPostingsArray) SetFreqs(termID, v int)
func (*TermVectorsPostingsArray) SetLastOffsets ¶
func (t *TermVectorsPostingsArray) SetLastOffsets(termID, v int)
func (*TermVectorsPostingsArray) SetLastPositions ¶
func (t *TermVectorsPostingsArray) SetLastPositions(termID, v int)
type TermsEnumIndex ¶
type TermsEnumIndex struct {
// contains filtered or unexported fields
}
func NewTermsEnumIndex ¶
func NewTermsEnumIndex(termsEnum index.TermsEnum, subIndex int) *TermsEnumIndex
type TermsHash ¶
type TermsHash interface {
	Flush(ctx context.Context, fieldsToFlush map[string]TermsHashPerField, state *index.SegmentWriteState, sortMap index.DocMap, norms index.NormsProducer) error
	AddField(fieldInvertState *index.FieldInvertState, fieldInfo *document.FieldInfo) (TermsHashPerField, error)
	SetTermBytePool(termBytePool *bytesref.BlockPool)
	FinishDocument(ctx context.Context, docID int) error
	Abort() error
	Reset() error
	StartDocument() error
	GetIntPool() *ints.BlockPool
	GetBytePool() *bytesref.BlockPool
	GetTermBytePool() *bytesref.BlockPool
}
TermsHash This class is passed each token produced by the analyzer on each field during indexing. It stores these tokens in a hash table and allocates separate byte streams per token. Consumers of this class, e.g. FreqProxTermsWriter and TermVectorsConsumer, write their own byte streams under each term.
type TermsHashPerField ¶
type TermsHashPerField interface {
	// Start adding a new field instance; first is true if this is the first time this field name was seen in the document.
	Start(field document.IndexableField, first bool) bool
	Add(termBytes []byte, docID int) error
	Add2nd(textStart, docID int) error
	GetNextPerField() TermsHashPerField
	Finish() error
	Reset() error
	// NewTerm Called when a term is seen for the first time.
	NewTerm(termID, docID int) error
	// AddTerm Called when a previously seen term is seen again.
	AddTerm(termID, docID int) error
	// NewPostingsArray Called when the postings array is initialized or resized.
	NewPostingsArray()
	// CreatePostingsArray Creates a new postings array of the specified size.
	CreatePostingsArray(size int) ParallelPostingsArray
	GetPostingsArray() ParallelPostingsArray
	SetPostingsArray(v ParallelPostingsArray)
}
TermsHashPerField This class stores streams of information per term without knowing the size of the stream ahead of time. Each stream typically encodes one level of information, such as term frequency per document or term proximity. Internally, this class allocates a linked list of slices that can be read by a ByteSliceReader for each term. Terms are first deduplicated in a BytesRefHash; once that is done, internal data structures point to the current offset of each stream that can be written to.
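A conceptual toy, not this package's actual layout: the idea of a per-term stream built from linked slices, so it can grow without knowing its size ahead of time (real slices are graded in size and pooled, and are read back by a ByteSliceReader):

    // sliceStream chains fixed-size byte slices instead of one growing buffer.
    type sliceStream struct {
        chunks [][]byte
    }

    func (s *sliceStream) WriteByte(b byte) {
        if n := len(s.chunks); n == 0 || len(s.chunks[n-1]) == cap(s.chunks[n-1]) {
            s.chunks = append(s.chunks, make([]byte, 0, 16)) // toy fixed size
        }
        n := len(s.chunks)
        s.chunks[n-1] = append(s.chunks[n-1], b)
    }

    // Bytes stitches the chunks back together, the way a reader walks the
    // linked slices of a real stream.
    func (s *sliceStream) Bytes() []byte {
        var out []byte
        for _, c := range s.chunks {
            out = append(out, c...)
        }
        return out
    }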
Source Files ¶
- automatontermsenum.go
- binarydocvalues.go
- bitsetiterator.go
- bufferedupdatesstream.go
- byteslicereader.go
- checkindex.go
- codec.go
- codecspointswriter.go
- competitiveimpactaccumulator.go
- compositereader.go
- datafields.go
- defaultindexingchain.go
- directoryreader.go
- docswithfieldset.go
- documentswriter.go
- documentswriterdeletequeue.go
- documentswriterflushcontrol.go
- documentswriterflushqueue.go
- documentswriterperthread.go
- documentswriterperthreadpool.go
- docvalues.go
- docvaluesfieldupdates.go
- docvaluesleafreader.go
- emptydocvaluesproducer.go
- eventqueue.go
- fielddata.go
- fieldinfos.go
- fields.go
- fieldtermiterator.go
- filteredtermsenum.go
- flushpolicy.go
- freqproxfields.go
- freqproxpostingsarray.go
- freqproxtermswriter.go
- freqproxtermswriterperfield.go
- frozenbufferedupdates.go
- impact.go
- indexcommit.go
- indexdeletionpolicy.go
- indexfiledeleter.go
- indexfilenames.go
- indexreader.go
- indexreadercontext.go
- indexsorter.go
- indexwriter.go
- indexwriterutils.go
- keeponlylastcommitdeletionpolicy.go
- leafmetadata.go
- leafreader.go
- liveindexwriterconfig.go
- mergepolicy.go
- mergescheduler.go
- mergestate.go
- mergetrigger.go
- multilevelskiplistreader.go
- multilevelskiplistwriter.go
- multisorter.go
- mutablepointsreaderutils.go
- node.go
- nomergescheduler.go
- norms.go
- numericdocvalues.go
- ordinalmap.go
- pagedbytes.go
- parallelpostingsarray.go
- pendingdeletes.go
- pendingsoftdeletes.go
- pointvalueswriter.go
- postingsenum.go
- prefixcodedterms.go
- readerpool.go
- readersandupdates.go
- readerutil.go
- segmentcorereaders.go
- segmentdocvalues.go
- segmentinfo.go
- segmentinfos.go
- segmentmerger.go
- segmentreader.go
- similarity.go
- singletermsenum.go
- slowimpactsenum.go
- sort.go
- sorteddocvalues.go
- sorteddocvaluestermsenum.go
- sortednumericdocvalues.go
- sortedsetdocvalues.go
- sortedsetdocvalueswriter.go
- sortedsetselector.go
- sortedsetsortfield.go
- sorter.go
- sortfield.go
- sortfieldprovider.go
- standarddirectoryreader.go
- storedfieldsconsumer.go
- term.go
- terms.go
- termsenum.go
- termshash.go
- termshashperfield.go
- termstates.go
- termvectorsconsumer.go
- termvectorsconsumerperfield.go
- termvectorspostingsarray.go