Documentation
¶
Overview ¶
Package desync implements data structures, protocols and features of https://github.com/systemd/casync in order to allow support for additional platforms and improve performance by way of concurrency and caching.
Supports the following casync data structures: catar archives, caibx/caidx index files, castr stores (local or remote).
See desync/cmd for reference implementations of the available features.
Index ¶
- Constants
- Variables
- func AssembleFile(ctx context.Context, name string, idx Index, s Store, n int, progress func()) error
- func ChopFile(ctx context.Context, name string, chunks []IndexChunk, ws WriteStore, n int, ...) error
- func Compress(b []byte) ([]byte, error)
- func Copy(ctx context.Context, ids []ChunkID, src Store, dst WriteStore, n int, ...) error
- func Decompress(out, in []byte) ([]byte, error)
- func IndexFromFile(ctx context.Context, name string, n int, min, avg, max uint64, ...) (Index, ChunkingStats, error)
- func MountIndex(ctx context.Context, idx Index, path, name string, s Store, n int) error
- func NewHTTPHandler(s Store, writable bool) http.Handler
- func NewHTTPIndexHandler(s IndexStore, writable bool) http.Handler
- func SipHash(b []byte) uint64
- func Tar(ctx context.Context, w io.Writer, src string) error
- func UnTar(ctx context.Context, r io.Reader, dst string, opts UntarOptions) error
- func UnTarIndex(ctx context.Context, dst string, index Index, s Store, n int, ...) error
- func VerifyIndex(ctx context.Context, name string, idx Index, n int, progress func()) error
- type ArchiveDecoder
- type Cache
- type ChunkID
- type ChunkInvalid
- type ChunkMissing
- type ChunkStorage
- type Chunker
- type ChunkingStats
- type ConsoleIndexStore
- type FormatACLDefault
- type FormatACLGroup
- type FormatACLGroupObj
- type FormatACLUser
- type FormatDecoder
- type FormatDevice
- type FormatEncoder
- type FormatEntry
- type FormatFCaps
- type FormatFilename
- type FormatGoodbye
- type FormatGoodbyeItem
- type FormatGroup
- type FormatHeader
- type FormatIndex
- type FormatPayload
- type FormatSELinux
- type FormatSymlink
- type FormatTable
- type FormatTableItem
- type FormatUser
- type FormatXAttr
- type HTTPHandler
- type HTTPHandlerBase
- type HTTPIndexHandler
- type Hash
- type Index
- type IndexChunk
- type IndexMountFS
- func (me *IndexMountFS) GetAttr(name string, context *fuse.Context) (*fuse.Attr, fuse.Status)
- func (me *IndexMountFS) Open(name string, flags uint32, context *fuse.Context) (file nodefs.File, code fuse.Status)
- func (me *IndexMountFS) OpenDir(name string, context *fuse.Context) (c []fuse.DirEntry, code fuse.Status)
- type IndexMountFile
- type IndexPos
- type IndexStore
- type IndexWriteStore
- type Interrupted
- type InvalidFormat
- type LocalIndexStore
- type LocalStore
- func (s LocalStore) Close() error
- func (s LocalStore) GetChunk(id ChunkID) ([]byte, error)
- func (s LocalStore) HasChunk(id ChunkID) bool
- func (s LocalStore) Prune(ctx context.Context, ids map[ChunkID]struct{}) error
- func (s LocalStore) RemoveChunk(id ChunkID) error
- func (s LocalStore) StoreChunk(id ChunkID, b []byte) error
- func (s LocalStore) String() string
- func (s LocalStore) Verify(ctx context.Context, n int, repair bool) error
- type Message
- type NoSuchObject
- type NodeDevice
- type NodeDirectory
- type NodeFile
- type NodeSymlink
- type NullChunk
- type ProgressBar
- type Protocol
- func (p *Protocol) Initialize(flags uint64) (uint64, error)
- func (p *Protocol) ReadMessage() (Message, error)
- func (p *Protocol) RecvHello() (uint64, error)
- func (p *Protocol) RequestChunk(id ChunkID) ([]byte, error)
- func (p *Protocol) SendGoodbye() error
- func (p *Protocol) SendHello(flags uint64) error
- func (p *Protocol) SendMissing(id ChunkID) error
- func (p *Protocol) SendProtocolChunk(id ChunkID, flags uint64, chunk []byte) error
- func (p *Protocol) SendProtocolRequest(id ChunkID, flags uint64) error
- func (p *Protocol) WriteMessage(m Message) error
- type ProtocolServer
- type PruneStore
- type RemoteHTTP
- type RemoteHTTPBase
- func (r *RemoteHTTPBase) Close() error
- func (r *RemoteHTTPBase) GetObject(name string) ([]byte, error)
- func (r *RemoteHTTPBase) SetErrorRetry(n int)
- func (r *RemoteHTTPBase) SetTimeout(timeout time.Duration)
- func (r *RemoteHTTPBase) StoreObject(name string, rdr io.Reader) error
- func (r *RemoteHTTPBase) String() string
- type RemoteHTTPIndex
- type RemoteSSH
- type S3IndexStore
- type S3Store
- func (s S3Store) GetChunk(id ChunkID) ([]byte, error)
- func (s S3Store) HasChunk(id ChunkID) bool
- func (s S3Store) Prune(ctx context.Context, ids map[ChunkID]struct{}) error
- func (s S3Store) RemoveChunk(id ChunkID) error
- func (s S3Store) StoreChunk(id ChunkID, b []byte) error
- func (s S3Store) Upgrade(ctx context.Context) error
- type S3StoreBase
- type SFTPIndexStore
- type SFTPStore
- type SFTPStoreBase
- type Store
- type StoreRouter
- type UntarOptions
- type WriteStore
Constants ¶
const ( // Format identifiers used in archive files CaFormatEntry = 0x1396fabcea5bbb51 CaFormatUser = 0xf453131aaeeaccb3 CaFormatGroup = 0x25eb6ac969396a52 CaFormatXAttr = 0xb8157091f80bc486 CaFormatACLUser = 0x297dc88b2ef12faf CaFormatACLGroup = 0x36f2acb56cb3dd0b CaFormatACLGroupObj = 0x23047110441f38f3 CaFormatACLDefault = 0xfe3eeda6823c8cd0 CaFormatACLDefaultUser = 0xbdf03df9bd010a91 CaFormatACLDefaultGroup = 0xa0cb1168782d1f51 CaFormatFCaps = 0xf7267db0afed0629 CaFormatSELinux = 0x46faf0602fd26c59 CaFormatSymlink = 0x664a6fb6830e0d6c CaFormatDevice = 0xac3dace369dfe643 CaFormatPayload = 0x8b9e1d93d6dcffc9 CaFormatFilename = 0x6dbb6ebcb3161f0b CaFormatGoodbye = 0xdfd35c5e8327c403 CaFormatGoodbyeTailMarker = 0x57446fa533702943 CaFormatIndex = 0x96824d9c7b129ff9 CaFormatTable = 0xe75b9e112f17417d CaFormatTableTailMarker = 0x4b4f050e5549ecd1 // SipHash key used in Goodbye elements to hash the filename. It's 16 bytes, // split into 2x64bit values, upper and lower part of the key CaFormatGoodbyeHashKey0 = 0x8574442b0f1d84b3 CaFormatGoodbyeHashKey1 = 0x2736ed30d1c22ec1 // Format feature flags CaFormatWith16BitUIDs = 0x1 CaFormatWith32BitUIDs = 0x2 CaFormatWithUserNames = 0x4 CaFormatWithSecTime = 0x8 CaFormatWithUSecTime = 0x10 CaFormatWithNSecTime = 0x20 CaFormatWith2SecTime = 0x40 CaFormatWithReadOnly = 0x80 CaFormatWithPermissions = 0x100 CaFormatWithSymlinks = 0x200 CaFormatWithDeviceNodes = 0x400 CaFormatWithFIFOs = 0x800 CaFormatWithSockets = 0x1000 /* DOS file flags */ CaFormatWithFlagHidden = 0x2000 CaFormatWithFlagSystem = 0x4000 CaFormatWithFlagArchive = 0x8000 /* chattr() flags */ CaFormatWithFlagAppend = 0x10000 CaFormatWithFlagNoAtime = 0x20000 CaFormatWithFlagCompr = 0x40000 CaFormatWithFlagNoCow = 0x80000 CaFormatWithFlagNoDump = 0x100000 CaFormatWithFlagDirSync = 0x200000 CaFormatWithFlagImmutable = 0x400000 CaFormatWithFlagSync = 0x800000 CaFormatWithFlagNoComp = 0x1000000 CaFormatWithFlagProjectInherit = 0x2000000 /* btrfs magic */ 
CaFormatWithSubvolume = 0x4000000 CaFormatWithSubvolumeRO = 0x8000000 /* Extended Attribute metadata */ CaFormatWithXattrs = 0x10000000 CaFormatWithACL = 0x20000000 CaFormatWithSELinux = 0x40000000 CaFormatWithFcaps = 0x80000000 CaFormatSHA512256 = 0x2000000000000000 CaFormatExcludeSubmounts = 0x4000000000000000 CaFormatExcludeNoDump = 0x8000000000000000 // Protocol message types CaProtocolHello = 0x3c71d0948ca5fbee CaProtocolIndex = 0xb32a91dd2b3e27f8 CaProtocolIndexEOF = 0x4f0932f1043718f5 CaProtocolArchive = 0x95d6428a69eddcc5 CaProtocolArchiveEOF = 0x450bef663f24cbad CaProtocolRequest = 0x8ab427e0f89d9210 CaProtocolChunk = 0x5213dd180a84bc8c CaProtocolMissing = 0xd010f9fac82b7b6c CaProtocolGoodbye = 0xad205dbf1a3686c3 CaProtocolAbort = 0xe7d9136b7efea352 // Provided services CaProtocolReadableStore = 0x1 CaProtocolWritableStore = 0x2 CaProtocolReadableIndex = 0x4 CaProtocolWritableIndex = 0x8 CaProtocolReadableArchive = 0x10 CaProtocolWritableArchive = 0x20 // Wanted services CaProtocolPullChunks = 0x40 CaProtocolPullIndex = 0x80 CaProtocolPullArchive = 0x100 CaProtocolPushChunks = 0x200 CaProtocolPushIndex = 0x400 CaProtocolPushIndexChunks = 0x800 CaProtocolPushArchive = 0x1000 // Protocol request flags CaProtocolRequestHighPriority = 1 // Chunk properties CaProtocolChunkCompressed = 1 )
const ChunkerWindowSize = 48
ChunkerWindowSize is the number of bytes in the rolling hash window
Variables ¶
var ( FormatString = map[uint64]string{ CaFormatEntry: "CaFormatEntry", CaFormatUser: "CaFormatUser", CaFormatGroup: "CaFormatGroup", CaFormatXAttr: "CaFormatXAttr", CaFormatACLUser: "CaFormatACLUser", CaFormatACLGroup: "CaFormatACLGroup", CaFormatACLGroupObj: "CaFormatACLGroupObj", CaFormatACLDefault: "CaFormatACLDefault", CaFormatACLDefaultUser: "CaFormatACLDefaultUser", CaFormatACLDefaultGroup: "CaFormatACLDefaultGroup", CaFormatFCaps: "CaFormatFCaps", CaFormatSELinux: "CaFormatSELinux", CaFormatSymlink: "CaFormatSymlink", CaFormatDevice: "CaFormatDevice", CaFormatPayload: "CaFormatPayload", CaFormatFilename: "CaFormatFilename", CaFormatGoodbye: "CaFormatGoodbye", CaFormatGoodbyeTailMarker: "CaFormatGoodbyeTailMarker", CaFormatIndex: "CaFormatIndex", CaFormatTable: "CaFormatTable", CaFormatTableTailMarker: "CaFormatTableTailMarker", } )
var TrustInsecure bool
TrustInsecure determines if invalid certs presented by HTTP stores should be accepted.
Functions ¶
func AssembleFile ¶ added in v0.2.0
func AssembleFile(ctx context.Context, name string, idx Index, s Store, n int, progress func()) error
AssembleFile re-assembles a file based on a list of index chunks. It runs n goroutines, creating one filehandle for the file "name" per goroutine and writes to the file simultaneously. If progress is provided, it'll be called when a chunk has been processed. If the input file exists and is not empty, the algorithm will first confirm if the data matches what is expected and only populate areas that differ from the expected content. This can be used to complete partly written files.
func ChopFile ¶ added in v0.2.0
func ChopFile(ctx context.Context, name string, chunks []IndexChunk, ws WriteStore, n int, pb ProgressBar) error
ChopFile splits a file according to a list of chunks obtained from an Index and stores them in the provided store
func Copy ¶ added in v0.2.0
func Copy(ctx context.Context, ids []ChunkID, src Store, dst WriteStore, n int, progress func()) error
Copy reads a list of chunks from the provided src store, and copies the ones not already present in the dst store. The goal is to load chunks from remote store to populate a cache. If progress is provided, it'll be called when a chunk has been processed. Used to draw a progress bar, can be nil.
func Decompress ¶ added in v0.2.0
Decompress a block using the only supported algorithm. If you already have a buffer it can be passed into out and will be used. If out=nil, a buffer will be allocated.
func IndexFromFile ¶ added in v0.2.0
func IndexFromFile(ctx context.Context, name string, n int, min, avg, max uint64, progress func(uint64), ) (Index, ChunkingStats, error)
IndexFromFile chunks a file in parallel and returns an index. It does not store chunks! Each concurrent chunker starts filesize/n bytes apart and splits independently. Each chunk worker tries to sync with its next neighbor and if successful stops processing, letting the next one continue. The main routine reads and assembles a list of (confirmed) chunks from the workers, starting with the first worker. This algorithm wastes some CPU and I/O if the data doesn't contain chunk boundaries, for example if the whole file contains nil bytes. If progress is not nil, it'll be updated with the confirmed chunk position in the file.
func MountIndex ¶ added in v0.2.0
func NewHTTPIndexHandler ¶ added in v0.3.0
func NewHTTPIndexHandler(s IndexStore, writable bool) http.Handler
func SipHash ¶ added in v0.2.0
SipHash is used to calculate the hash in Goodbye element items, hashing the filename.
func Tar ¶ added in v0.2.0
Tar implements the tar command which recursively parses a directory tree, and produces a stream of encoded casync format elements (catar file).
func UnTar ¶ added in v0.2.0
UnTar implements the untar command, decoding a catar file and writing the contained tree to a target directory.
func UnTarIndex ¶ added in v0.2.0
func UnTarIndex(ctx context.Context, dst string, index Index, s Store, n int, opts UntarOptions) error
UnTarIndex takes an index file (of a chunked catar), re-assembles the catar and decodes it on-the-fly into the target directory 'dst'. Uses n goroutines to retrieve and decompress the chunks.
Types ¶
type ArchiveDecoder ¶ added in v0.2.0
type ArchiveDecoder struct {
// contains filtered or unexported fields
}
func NewArchiveDecoder ¶ added in v0.2.0
func NewArchiveDecoder(r io.Reader) ArchiveDecoder
func (*ArchiveDecoder) Next ¶ added in v0.2.0
func (a *ArchiveDecoder) Next() (interface{}, error)
Next returns a node from an archive, or nil if the end is reached. If NodeFile is returned, the caller should read the file body before calling Next() again as that invalidates the reader.
type Cache ¶
type Cache struct {
// contains filtered or unexported fields
}
Cache is used to connect a (typically remote) store with a local store which functions as disk cache. Any request to the cache for a chunk will first be routed to the local store, and if that fails to the slower remote store. Any chunks retrieved from the remote store will be stored in the local one.
func NewCache ¶
func NewCache(s Store, l WriteStore) Cache
NewCache returns a cache router that uses a local store as cache before accessing a (supposedly slower) remote one.
func (Cache) GetChunk ¶
GetChunk first asks the local store for the chunk and then the remote one. If we get a chunk from the remote, it's stored locally too.
type ChunkID ¶
type ChunkID [32]byte
ChunkID is the SHA512/256 in binary encoding
func ChunkIDFromSlice ¶
ChunkIDFromSlice converts a SHA512/256 encoded as byte slice into a ChunkID. It's expected the slice is of the correct length
func ChunkIDFromString ¶
ChunkIDFromString converts a SHA512/256 encoded as string into a ChunkID
type ChunkInvalid ¶ added in v0.2.0
ChunkInvalid means the hash of the chunk content doesn't match its ID
func (ChunkInvalid) Error ¶ added in v0.2.0
func (e ChunkInvalid) Error() string
type ChunkMissing ¶
type ChunkMissing struct {
ID ChunkID
}
ChunkMissing is returned by a store that can't find a requested chunk
func (ChunkMissing) Error ¶
func (e ChunkMissing) Error() string
type ChunkStorage ¶ added in v0.2.0
func NewChunkStorage ¶ added in v0.2.0
func NewChunkStorage(ws WriteStore) *ChunkStorage
NewChunkStorage stores chunks passed in the input channel asynchronously. Wait() will wait until the input channel is closed or until there's an error, in which case it will return it.
func (*ChunkStorage) StoreChunk ¶ added in v0.2.0
func (s *ChunkStorage) StoreChunk(id ChunkID, b []byte) (err error)
Stores a single chunk in a synchronous manner.
type Chunker ¶ added in v0.2.0
type Chunker struct {
// contains filtered or unexported fields
}
func NewChunker ¶ added in v0.2.0
type ChunkingStats ¶ added in v0.2.0
type ConsoleIndexStore ¶ added in v0.3.0
type ConsoleIndexStore struct{}
ConsoleIndexStore is used for writing/reading indexes from STDOUT/STDIN
func NewConsoleIndexStore ¶ added in v0.3.0
func NewConsoleIndexStore() (ConsoleIndexStore, error)
NewConsoleIndexStore creates an instance of an index store that reads/writes to and from console
func (ConsoleIndexStore) Close ¶ added in v0.3.0
func (s ConsoleIndexStore) Close() error
func (ConsoleIndexStore) GetIndex ¶ added in v0.3.0
func (s ConsoleIndexStore) GetIndex(string) (i Index, e error)
GetIndex reads an index from STDIN and returns it.
func (ConsoleIndexStore) GetIndexReader ¶ added in v0.3.0
func (s ConsoleIndexStore) GetIndexReader(string) (io.ReadCloser, error)
GetIndexReader returns a reader from STDIN
func (ConsoleIndexStore) StoreIndex ¶ added in v0.3.0
func (s ConsoleIndexStore) StoreIndex(name string, idx Index) error
StoreIndex writes the provided index to STDOUT. The name is ignored.
func (ConsoleIndexStore) String ¶ added in v0.3.0
func (r ConsoleIndexStore) String() string
type FormatACLDefault ¶ added in v0.2.0
type FormatACLDefault struct { FormatHeader UserObjPermissions uint64 GroupObjPermissions uint64 OtherPermissions uint64 MaskPermissions uint64 }
type FormatACLGroup ¶ added in v0.2.0
type FormatACLGroup struct { FormatHeader GID uint64 Permissions uint64 Name string }
type FormatACLGroupObj ¶ added in v0.2.0
type FormatACLGroupObj struct { FormatHeader Permissions uint64 }
type FormatACLUser ¶ added in v0.2.0
type FormatACLUser struct { FormatHeader UID uint64 Permissions uint64 Name string }
type FormatDecoder ¶ added in v0.2.0
type FormatDecoder struct {
// contains filtered or unexported fields
}
FormatDecoder is used to parse and break up a stream of casync format elements found in archives or index files.
func NewFormatDecoder ¶ added in v0.2.0
func NewFormatDecoder(r io.Reader) FormatDecoder
func (*FormatDecoder) Next ¶ added in v0.2.0
func (d *FormatDecoder) Next() (interface{}, error)
Next returns the next format element from the stream. If an element contains a reader, that reader should be used before any subsequent calls as it'll be invalidated then. Returns nil when the end is reached.
type FormatDevice ¶ added in v0.2.0
type FormatDevice struct { FormatHeader Major uint64 Minor uint64 }
type FormatEncoder ¶ added in v0.2.0
type FormatEncoder struct {
// contains filtered or unexported fields
}
FormatEncoder takes casync format elements and encodes them into a stream.
func NewFormatEncoder ¶ added in v0.2.0
func NewFormatEncoder(w io.Writer) FormatEncoder
func (*FormatEncoder) Encode ¶ added in v0.2.0
func (e *FormatEncoder) Encode(v interface{}) (int64, error)
type FormatEntry ¶ added in v0.2.0
type FormatFCaps ¶ added in v0.2.0
type FormatFCaps struct { FormatHeader Data []byte }
type FormatFilename ¶ added in v0.2.0
type FormatFilename struct { FormatHeader Name string }
type FormatGoodbye ¶ added in v0.2.0
type FormatGoodbye struct { FormatHeader Items []FormatGoodbyeItem }
type FormatGoodbyeItem ¶ added in v0.2.0
type FormatGroup ¶ added in v0.2.0
type FormatGroup struct { FormatHeader Name string }
type FormatHeader ¶ added in v0.2.0
type FormatIndex ¶ added in v0.2.0
type FormatIndex struct { FormatHeader FeatureFlags uint64 ChunkSizeMin uint64 ChunkSizeAvg uint64 ChunkSizeMax uint64 }
type FormatPayload ¶ added in v0.2.0
type FormatPayload struct { FormatHeader Data io.Reader }
type FormatSELinux ¶ added in v0.2.0
type FormatSELinux struct { FormatHeader Label string }
type FormatSymlink ¶ added in v0.2.0
type FormatSymlink struct { FormatHeader Target string }
type FormatTable ¶ added in v0.2.0
type FormatTable struct { FormatHeader Items []FormatTableItem }
type FormatTableItem ¶ added in v0.2.0
type FormatUser ¶ added in v0.2.0
type FormatUser struct { FormatHeader Name string }
type FormatXAttr ¶ added in v0.2.0
type FormatXAttr struct { FormatHeader NameAndValue string }
type HTTPHandler ¶ added in v0.2.0
type HTTPHandler struct { HTTPHandlerBase // contains filtered or unexported fields }
func (HTTPHandler) ServeHTTP ¶ added in v0.2.0
func (h HTTPHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
type HTTPHandlerBase ¶ added in v0.3.0
type HTTPHandlerBase struct {
// contains filtered or unexported fields
}
type HTTPIndexHandler ¶ added in v0.3.0
type HTTPIndexHandler struct { HTTPHandlerBase // contains filtered or unexported fields }
func (HTTPIndexHandler) ServeHTTP ¶ added in v0.3.0
func (h HTTPIndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
type Hash ¶ added in v0.2.0
type Hash struct {
// contains filtered or unexported fields
}
Hash implements the rolling hash algorithm used to find chunk boundaries in a stream of bytes.
func NewHash ¶ added in v0.2.0
NewHash returns a new instance of a hash. size determines the length of the hash window used and the discriminator is used to find the boundary.
func (*Hash) Initialize ¶ added in v0.2.0
Initialize the window used for the rolling hash calculation. The size of the slice must match the window size
func (*Hash) IsBoundary ¶ added in v0.2.0
IsBoundary returns true if the discriminator and hash match to signal a chunk boundary has been reached
type Index ¶
type Index struct { Index FormatIndex Chunks []IndexChunk }
Index represents the content of an index file
func ChunkStream ¶ added in v0.2.0
ChunkStream splits up a blob into chunks using the provided chunker (single stream), populates a store with the chunks and returns an index. Hashing and compression is performed in n goroutines while the hashing algorithm is performed serially.
func IndexFromReader ¶ added in v0.2.0
IndexFromReader parses a caibx structure (from a reader) and returns a populated Caibx object
type IndexChunk ¶ added in v0.2.0
IndexChunk is a table entry in an index file containing the chunk ID (SHA512/256). Similar to a FormatTableItem but with Start and Size instead of just offset to make it easier to use throughout the application.
type IndexMountFS ¶ added in v0.2.0
type IndexMountFS struct { FName string // File name in the mountpoint Idx Index // Index of the blob Store Store pathfs.FileSystem }
IndexMountFS is used to FUSE mount an index file (as a blob, not an archive). It presents a single file underneath the mountpoint.
func NewIndexMountFS ¶ added in v0.2.0
func NewIndexMountFS(idx Index, name string, s Store) *IndexMountFS
type IndexMountFile ¶ added in v0.2.0
IndexMountFile represents a (read-only) file handle on a blob in a FUSE mounted filesystem
func NewIndexMountFile ¶ added in v0.2.0
func NewIndexMountFile(idx Index, s Store) *IndexMountFile
func (*IndexMountFile) GetAttr ¶ added in v0.2.0
func (f *IndexMountFile) GetAttr(out *fuse.Attr) fuse.Status
func (*IndexMountFile) Read ¶ added in v0.2.0
func (f *IndexMountFile) Read(dest []byte, off int64) (fuse.ReadResult, fuse.Status)
type IndexPos ¶ added in v0.2.0
type IndexPos struct { Store Store Index Index Length int64 // total length of file // contains filtered or unexported fields }
IndexPos represents a position inside an index file, to permit a seeking reader
func NewIndexReadSeeker ¶ added in v0.2.0
type IndexStore ¶ added in v0.3.0
type IndexWriteStore ¶ added in v0.3.0
type IndexWriteStore interface { IndexStore StoreIndex(name string, idx Index) error }
type Interrupted ¶ added in v0.2.0
type Interrupted struct{}
Interrupted is returned when a user interrupted a long-running operation, for example by pressing Ctrl+C
func (Interrupted) Error ¶ added in v0.2.0
func (e Interrupted) Error() string
type InvalidFormat ¶ added in v0.2.0
type InvalidFormat struct {
Msg string
}
InvalidFormat is returned when an error occurred when parsing an archive file
func (InvalidFormat) Error ¶ added in v0.2.0
func (e InvalidFormat) Error() string
type LocalIndexStore ¶ added in v0.3.0
type LocalIndexStore struct {
Path string
}
LocalIndexStore is used to read/write index files on local disk
func NewLocaIndexStore ¶ added in v0.3.0
func NewLocaIndexStore(path string) (LocalIndexStore, error)
NewLocaIndexStore creates an instance of a local index store, it only checks presence of the store
func (LocalIndexStore) Close ¶ added in v0.3.0
func (s LocalIndexStore) Close() error
func (LocalIndexStore) GetIndex ¶ added in v0.3.0
func (s LocalIndexStore) GetIndex(name string) (i Index, e error)
GetIndex returns an Index structure from the store
func (LocalIndexStore) GetIndexReader ¶ added in v0.3.0
func (s LocalIndexStore) GetIndexReader(name string) (rdr io.ReadCloser, e error)
GetIndexReader returns a reader of an index file in the store or an error if the specified index file does not exist.
func (LocalIndexStore) StoreIndex ¶ added in v0.3.0
func (s LocalIndexStore) StoreIndex(name string, idx Index) error
StoreIndex stores an index in the index store with the given name.
func (LocalIndexStore) String ¶ added in v0.3.0
func (r LocalIndexStore) String() string
type LocalStore ¶
type LocalStore struct { Base string // When accessing chunks, should mtime be updated? Useful when this is // a cache. Old chunks can be identified and removed from the store that way UpdateTimes bool }
LocalStore casync store
func NewLocalStore ¶
func NewLocalStore(dir string) (LocalStore, error)
NewLocalStore creates an instance of a local castore, it only checks presence of the store
func (LocalStore) Close ¶ added in v0.2.0
func (s LocalStore) Close() error
func (LocalStore) GetChunk ¶
func (s LocalStore) GetChunk(id ChunkID) ([]byte, error)
GetChunk reads and returns one (compressed!) chunk from the store
func (LocalStore) HasChunk ¶ added in v0.2.0
func (s LocalStore) HasChunk(id ChunkID) bool
HasChunk returns true if the chunk is in the store
func (LocalStore) Prune ¶ added in v0.2.0
func (s LocalStore) Prune(ctx context.Context, ids map[ChunkID]struct{}) error
Prune removes any chunks from the store that are not contained in a list of chunks
func (LocalStore) RemoveChunk ¶ added in v0.2.0
func (s LocalStore) RemoveChunk(id ChunkID) error
RemoveChunk deletes a chunk, typically an invalid one, from the filesystem. Used when verifying and repairing caches.
func (LocalStore) StoreChunk ¶
func (s LocalStore) StoreChunk(id ChunkID, b []byte) error
StoreChunk adds a new chunk to the store
func (LocalStore) String ¶ added in v0.2.0
func (s LocalStore) String() string
type NoSuchObject ¶ added in v0.3.0
type NoSuchObject struct {
// contains filtered or unexported fields
}
NoSuchObject is returned by a store that can't find a requested object
func (NoSuchObject) Error ¶ added in v0.3.0
func (e NoSuchObject) Error() string
type NodeDevice ¶ added in v0.2.0
type NodeDirectory ¶ added in v0.2.0
type NodeSymlink ¶ added in v0.2.0
type NullChunk ¶ added in v0.2.0
NullChunk is used in places where it's common to see requests for chunks containing only 0-bytes. When a chunked file has large areas of 0-bytes, the chunking algorithm does not produce split boundaries, which results in many chunks of 0-bytes of size MAX (max chunk size). The NullChunk can be used to make requesting this kind of chunk more efficient by serving it from memory, rather that request it from disk or network and decompress it repeatedly.
func NewNullChunk ¶ added in v0.2.0
NewNullChunk returns an initialized chunk consisting of 0-bytes of 'size' which must match the max size used in the index to be effective
type ProgressBar ¶ added in v0.2.0
type Protocol ¶ added in v0.2.0
type Protocol struct {
// contains filtered or unexported fields
}
Protocol handles the casync protocol when using remote stores via SSH
func NewProtocol ¶ added in v0.2.0
NewProtocol creates a new casync protocol handler
func StartProtocol ¶ added in v0.2.0
StartProtocol initiates a connection to the remote store server using the value in CASYNC_SSH_PATH (default "ssh"), and executes the command in CASYNC_REMOTE_PATH (default "casync"). It then performs the HELLO handshake to initialize the connection
func (*Protocol) Initialize ¶ added in v0.2.0
Initialize exchanges HELLOs with the other side to start a protocol session. Returns the (capability) flags provided by the other party.
func (*Protocol) ReadMessage ¶ added in v0.2.0
ReadMessage reads a generic message from the other end, verifies the length, extracts the type and returns the message body as byte slice
func (*Protocol) RecvHello ¶ added in v0.2.0
RecvHello waits for the server to send a HELLO, fails if anything else is received. Returns the flags provided by the server.
func (*Protocol) RequestChunk ¶ added in v0.2.0
RequestChunk sends a request for a specific chunk to the server, waits for the response and returns the bytes in the chunk. Returns an error if the server reports the chunk as missing
func (*Protocol) SendGoodbye ¶ added in v0.2.0
SendGoodbye tells the other side to terminate gracefully
func (*Protocol) SendHello ¶ added in v0.2.0
SendHello sends a HELLO message to the server, with the flags signaling which service is being requested from it.
func (*Protocol) SendMissing ¶ added in v0.2.0
SendMissing tells the client that the requested chunk is not available
func (*Protocol) SendProtocolChunk ¶ added in v0.2.0
SendProtocolChunk responds to a request with the content of a chunk
func (*Protocol) SendProtocolRequest ¶ added in v0.2.0
SendProtocolRequest requests a chunk from a server
func (*Protocol) WriteMessage ¶ added in v0.2.0
WriteMessage sends a generic message to the server
type ProtocolServer ¶ added in v0.2.0
type ProtocolServer struct {
// contains filtered or unexported fields
}
ProtocolServer serves up chunks from a local store using the casync protocol
func NewProtocolServer ¶ added in v0.2.0
NewProtocolServer returns an initialized server that can serve chunks from a chunk store via the casync protocol
type PruneStore ¶ added in v0.2.0
type PruneStore interface { WriteStore Prune(ctx context.Context, ids map[ChunkID]struct{}) error }
PruneStore is a store that supports pruning of chunks
type RemoteHTTP ¶ added in v0.2.0
type RemoteHTTP struct {
*RemoteHTTPBase
}
RemoteHTTP is a remote casync store accessed via HTTP.
func NewRemoteHTTPStore ¶ added in v0.2.0
NewRemoteHTTPStore initializes a new store that pulls chunks via HTTP(S) from a remote web server. n defines the size of idle connections allowed.
func (*RemoteHTTP) GetChunk ¶ added in v0.2.0
func (r *RemoteHTTP) GetChunk(id ChunkID) ([]byte, error)
GetChunk reads and returns one (compressed!) chunk from the store
func (*RemoteHTTP) HasChunk ¶ added in v0.2.0
func (r *RemoteHTTP) HasChunk(id ChunkID) bool
HasChunk returns true if the chunk is in the store
func (*RemoteHTTP) StoreChunk ¶ added in v0.2.0
func (r *RemoteHTTP) StoreChunk(id ChunkID, b []byte) error
StoreChunk adds a new chunk to the store
type RemoteHTTPBase ¶ added in v0.3.0
type RemoteHTTPBase struct {
// contains filtered or unexported fields
}
RemoteHTTPBase is the base type for a remote store accessed via HTTP.
func NewRemoteHTTPStoreBase ¶ added in v0.3.0
func (*RemoteHTTPBase) Close ¶ added in v0.3.0
func (r *RemoteHTTPBase) Close() error
func (*RemoteHTTPBase) GetObject ¶ added in v0.3.0
func (r *RemoteHTTPBase) GetObject(name string) ([]byte, error)
GetObject reads and returns an object in the form of []byte from the store
func (*RemoteHTTPBase) SetErrorRetry ¶ added in v0.3.0
func (r *RemoteHTTPBase) SetErrorRetry(n int)
SetErrorRetry defines how many HTTP errors are retried. This can be useful when dealing with unreliable networks that can timeout or where errors are transient.
func (*RemoteHTTPBase) SetTimeout ¶ added in v0.3.0
func (r *RemoteHTTPBase) SetTimeout(timeout time.Duration)
SetTimeout configures the timeout on the HTTP client for all requests
func (*RemoteHTTPBase) StoreObject ¶ added in v0.3.0
func (r *RemoteHTTPBase) StoreObject(name string, rdr io.Reader) error
StoreObject stores an object to the store.
func (*RemoteHTTPBase) String ¶ added in v0.3.0
func (r *RemoteHTTPBase) String() string
type RemoteHTTPIndex ¶ added in v0.3.0
type RemoteHTTPIndex struct {
*RemoteHTTPBase
}
RemoteHTTPIndex is a remote index store accessed via HTTP.
func NewRemoteHTTPIndexStore ¶ added in v0.3.0
func NewRemoteHTTPIndexStore(location *url.URL, n int, cert string, key string) (*RemoteHTTPIndex, error)
NewRemoteHTTPIndexStore initializes a new store that pulls the specified index file via HTTP(S) from a remote web server.
func (*RemoteHTTPIndex) GetIndex ¶ added in v0.3.0
func (r *RemoteHTTPIndex) GetIndex(name string) (i Index, e error)
GetIndex returns an Index structure from the store
func (RemoteHTTPIndex) GetIndexReader ¶ added in v0.3.0
func (r RemoteHTTPIndex) GetIndexReader(name string) (rdr io.ReadCloser, e error)
GetIndexReader returns an index reader from an HTTP store, or an error if the specified index file does not exist.
func (*RemoteHTTPIndex) StoreIndex ¶ added in v0.3.0
func (r *RemoteHTTPIndex) StoreIndex(name string, idx Index) error
StoreIndex stores an index in the index store with the given name.
type RemoteSSH ¶
type RemoteSSH struct {
// contains filtered or unexported fields
}
RemoteSSH is a remote casync store accessed via SSH. Supports running multiple sessions to improve throughput.
func NewRemoteSSHStore ¶
NewRemoteSSHStore establishes up to n connections with a casync chunk server
func (*RemoteSSH) GetChunk ¶
GetChunk requests a chunk from the server and returns a (compressed) one. It uses any of the n sessions this store maintains in its pool. Blocks until one session becomes available
type S3IndexStore ¶ added in v0.3.0
type S3IndexStore struct {
S3StoreBase
}
S3IndexStore is a read-write index store with S3 backing.
func NewS3IndexStore ¶ added in v0.3.0
func NewS3IndexStore(location *url.URL, s3Creds *credentials.Credentials, region string) (s S3IndexStore, e error)
NewS3IndexStore creates an index store with S3 backing. The URL should be provided like this: s3+http://host:port/bucket Credentials are passed in via the environment variables S3_ACCESS_KEY and S3_SECRET_KEY, or via the desync config file.
func (S3IndexStore) GetIndex ¶ added in v0.3.0
func (s S3IndexStore) GetIndex(name string) (i Index, e error)
GetIndex returns an Index structure from the store
func (S3IndexStore) GetIndexReader ¶ added in v0.3.0
func (s S3IndexStore) GetIndexReader(name string) (r io.ReadCloser, e error)
Get an Index Reader from an S3 store; returns an error if the specified index file does not exist.
func (S3IndexStore) StoreIndex ¶ added in v0.3.0
func (s S3IndexStore) StoreIndex(name string, idx Index) error
StoreIndex writes the index file to the S3 store
type S3Store ¶ added in v0.2.0
type S3Store struct {
S3StoreBase
}
S3Store is a read-write store with S3 backing
func NewS3Store ¶ added in v0.2.0
func NewS3Store(location *url.URL, s3Creds *credentials.Credentials, region string) (s S3Store, e error)
NewS3Store creates a chunk store with S3 backing. The URL should be provided like this: s3+http://host:port/bucket Credentials are passed in via the environment variables S3_ACCESS_KEY and S3_SECRET_KEY, or via the desync config file.
func (S3Store) GetChunk ¶ added in v0.2.0
GetChunk reads and returns one (compressed!) chunk from the store
func (S3Store) Prune ¶ added in v0.2.0
Prune removes any chunks from the store that are not contained in a list (map)
func (S3Store) RemoveChunk ¶ added in v0.2.0
RemoveChunk deletes a chunk, typically an invalid one, from the filesystem. Used when verifying and repairing caches.
func (S3Store) StoreChunk ¶ added in v0.2.0
StoreChunk adds a new chunk to the store
func (S3Store) Upgrade ¶ added in v0.2.0
Upgrade converts the storage layout in S3 from the old format (just a flat layout) to the current layout which prefixes every chunk with the first 4 characters of the checksum as well as a .cacnk extension. This aligns the layout with that of local stores and allows the use of sync tools outside of this tool; local stores could be copied into S3, for example.
type S3StoreBase ¶ added in v0.3.0
type S3StoreBase struct { Location string // contains filtered or unexported fields }
S3StoreBase is the base object for S3-backed chunk and index stores.
func NewS3StoreBase ¶ added in v0.3.0
func NewS3StoreBase(u *url.URL, s3Creds *credentials.Credentials, region string) (S3StoreBase, error)
func (S3StoreBase) Close ¶ added in v0.3.0
func (s S3StoreBase) Close() error
func (S3StoreBase) String ¶ added in v0.3.0
func (s S3StoreBase) String() string
type SFTPIndexStore ¶ added in v0.3.0
type SFTPIndexStore struct {
*SFTPStoreBase
}
func NewSFTPIndexStore ¶ added in v0.3.0
func NewSFTPIndexStore(location *url.URL) (*SFTPIndexStore, error)
NewSFTPIndexStore establishes up to n connections with a casync index server
func (*SFTPIndexStore) GetIndex ¶ added in v0.3.0
func (s *SFTPIndexStore) GetIndex(name string) (i Index, e error)
Get an Index from an SFTP store; returns an error if the specified index file does not exist.
func (*SFTPIndexStore) GetIndexReader ¶ added in v0.3.0
func (s *SFTPIndexStore) GetIndexReader(name string) (r io.ReadCloser, e error)
Get an Index Reader from an SFTP store; returns an error if the specified index file does not exist.
func (*SFTPIndexStore) StoreIndex ¶ added in v0.3.0
func (s *SFTPIndexStore) StoreIndex(name string, idx Index) error
StoreIndex stores an index in the store.
type SFTPStore ¶ added in v0.2.0
type SFTPStore struct {
*SFTPStoreBase
}
func NewSFTPStore ¶ added in v0.2.0
NewSFTPStore establishes up to n connections with a casync chunk server.
func (*SFTPStore) GetChunk ¶ added in v0.2.0
Get a chunk from an SFTP store, returns ChunkMissing if the file does not exist
func (*SFTPStore) Prune ¶ added in v0.2.0
Prune removes any chunks from the store that are not contained in a list of chunks
func (*SFTPStore) RemoveChunk ¶ added in v0.2.0
RemoveChunk deletes a chunk, typically an invalid one, from the filesystem. Used when verifying and repairing caches.
type SFTPStoreBase ¶ added in v0.3.0
type SFTPStoreBase struct {
// contains filtered or unexported fields
}
SFTPStoreBase is the base for remote stores that use SFTP over SSH to access chunks.
func (*SFTPStoreBase) Close ¶ added in v0.3.0
func (s *SFTPStoreBase) Close() error
Close terminates all client connections
func (*SFTPStoreBase) StoreObject ¶ added in v0.3.0
func (s *SFTPStoreBase) StoreObject(name string, r io.Reader) error
StoreObject stores an object in the store.
func (*SFTPStoreBase) String ¶ added in v0.3.0
func (s *SFTPStoreBase) String() string
type Store ¶
type Store interface { GetChunk(id ChunkID) ([]byte, error) HasChunk(id ChunkID) bool io.Closer fmt.Stringer }
Store is a generic interface implemented by read-only stores, like SSH or HTTP remote stores currently.
type StoreRouter ¶ added in v0.2.0
type StoreRouter struct {
Stores []Store
}
StoreRouter is used to route requests to multiple stores. When a chunk is requested from the router, it'll query the first store and if that returns ChunkMissing, it'll move on to the next.
func NewStoreRouter ¶ added in v0.2.0
func NewStoreRouter(stores ...Store) StoreRouter
NewStoreRouter returns an initialized router
func (StoreRouter) Close ¶ added in v0.2.0
func (r StoreRouter) Close() error
Close calls the Close() method on every store in the router. Returns only the first error encountered.
func (StoreRouter) GetChunk ¶ added in v0.2.0
func (r StoreRouter) GetChunk(id ChunkID) ([]byte, error)
GetChunk queries the available stores in order and moves to the next if it gets a ChunkMissing. Fails if any store returns a different error.
func (StoreRouter) HasChunk ¶ added in v0.2.0
func (r StoreRouter) HasChunk(id ChunkID) bool
HasChunk returns true if one of the containing stores has the chunk. It goes through the stores in order and returns as soon as the chunk is found.
func (StoreRouter) String ¶ added in v0.2.0
func (r StoreRouter) String() string
type UntarOptions ¶ added in v0.2.0
UntarOptions are used to influence the behaviour of untar
Source Files
¶
- archive.go
- assemble.go
- cache.go
- chop.go
- chunker.go
- chunkstorage.go
- compress.go
- consoleindex.go
- const.go
- copy.go
- doc.go
- errors.go
- format.go
- httphandler.go
- httphandlerbase.go
- httpindexhandler.go
- index.go
- local.go
- localindex.go
- make.go
- mount-index.go
- nullchunk.go
- progressbar.go
- protocol.go
- protocolserver.go
- reader.go
- readseeker.go
- remotehttp.go
- remotehttpindex.go
- remotessh.go
- s3.go
- s3index.go
- sftp.go
- sftpindex.go
- sip.go
- store.go
- storerouter.go
- tar.go
- types.go
- untar.go
- verifyindex.go
- writer.go