Documentation
¶
Index ¶
- Constants
- func ConsumeCStatusIntoError(status *C.CStatus) error
- func DeleteCStorageConfig(cStorageConfig C.CStorageConfig)
- func GetCStorageConfig(storageConfig *indexpb.StorageConfig) C.CStorageConfig
- func GetFileSize(path string, storageConfig *indexpb.StorageConfig) (int64, error)
- type CArrowArray
- type CArrowSchema
- type FFIPackedReader
- type PackedReader
- type PackedWriter
Constants ¶
View Source
const (
	// ColumnGroupSizeThreshold is the threshold of column group size per row.
	ColumnGroupSizeThreshold = 1024 // 1KB
	// DefaultWriteBufferSize is the default buffer size for writing data to storage.
	DefaultWriteBufferSize = 32 * 1024 * 1024 // 32MB
	// DefaultReadBufferSize is the default buffer size for reading data from storage.
	DefaultReadBufferSize = 32 * 1024 * 1024 // 32MB
	// DefaultMultiPartUploadSize is the default size of each part of a multipart upload.
	DefaultMultiPartUploadSize = 10 * 1024 * 1024 // 10MB
	// ArrowFieldIdMetadataKey is the metadata key Arrow uses to carry field IDs;
	// Arrow will convert these field IDs to a metadata key named PARQUET:field_id on the appropriate field.
	ArrowFieldIdMetadataKey = "PARQUET:field_id"
)
Variables ¶
This section is empty.
Functions ¶
func ConsumeCStatusIntoError ¶
func ConsumeCStatusIntoError(status *C.CStatus) error
func DeleteCStorageConfig ¶
func DeleteCStorageConfig(cStorageConfig C.CStorageConfig)
func GetCStorageConfig ¶
func GetCStorageConfig(storageConfig *indexpb.StorageConfig) C.CStorageConfig
func GetFileSize ¶
func GetFileSize(path string, storageConfig *indexpb.StorageConfig) (int64, error)
Types ¶
type CArrowArray ¶
type CArrowArray = C.struct_ArrowArray
CArrowArray is the C Data Interface object for Arrow Arrays as defined in abi.h.
type CArrowSchema ¶
type CArrowSchema = C.struct_ArrowSchema
CArrowSchema is the C Data Interface for ArrowSchemas.
type FFIPackedReader ¶
type FFIPackedReader struct {
// contains filtered or unexported fields
}
func NewFFIPackedReader ¶
func NewFFIPackedReader(manifest string, schema *arrow.Schema, neededColumns []string, bufferSize int64, storageConfig *indexpb.StorageConfig, storagePluginContext *indexcgopb.StoragePluginContext) (*FFIPackedReader, error)
func (*FFIPackedReader) ReadNext ¶
func (r *FFIPackedReader) ReadNext() (arrow.Record, error)
ReadNext reads the next record batch from the reader.
func (*FFIPackedReader) Release ¶
func (r *FFIPackedReader) Release()
Release decreases the reference count.
func (*FFIPackedReader) Retain ¶
func (r *FFIPackedReader) Retain()
Retain increases the reference count.
func (*FFIPackedReader) Schema ¶
func (r *FFIPackedReader) Schema() *arrow.Schema
Schema returns the schema of the reader.
type PackedReader ¶
type PackedReader struct {
// contains filtered or unexported fields
}
func NewPackedReader ¶
func NewPackedReader(filePaths []string, schema *arrow.Schema, bufferSize int64, storageConfig *indexpb.StorageConfig, storagePluginContext *indexcgopb.StoragePluginContext) (*PackedReader, error)
func (*PackedReader) Close ¶
func (pr *PackedReader) Close() error
type PackedWriter ¶
type PackedWriter struct {
// contains filtered or unexported fields
}
func NewPackedWriter ¶
func NewPackedWriter(filePaths []string, schema *arrow.Schema, bufferSize int64, multiPartUploadSize int64, columnGroups []storagecommon.ColumnGroup, storageConfig *indexpb.StorageConfig, storagePluginContext *indexcgopb.StoragePluginContext) (*PackedWriter, error)
func (*PackedWriter) Close ¶
func (pw *PackedWriter) Close() error
func (*PackedWriter) WriteRecordBatch ¶
func (pw *PackedWriter) WriteRecordBatch(recordBatch arrow.Record) error
Click to show internal directories.
Click to hide internal directories.