loadtest

package
v0.0.0-...-10e72c1 Latest Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Apr 28, 2025 License: Apache-2.0 Imports: 28 Imported by: 0

Documentation

Index

Constants

This section is empty.

Variables

View Source
var ErrRetry = errors.New("retry")

Functions

func MonotonicallyIncreasingNextLeaf

func MonotonicallyIncreasingNextLeaf() func(uint64) uint64

MonotonicallyIncreasingNextLeaf returns a function that always wants the next available leaf after the one it previously fetched. It starts at leaf 0.

func NewController

func NewController(h *Hammer, a *HammerAnalyser) *tuiController

func NewLogClients

func NewLogClients(readLogURLs, writeLogURLs []string, opts ClientOpts) (LogReader, LeafWriter, error)

NewLogClients returns a fetcher and a writer that will read and write leaves to all logs in the `log_url` flag set.

func RandomNextLeaf

func RandomNextLeaf() func(uint64) uint64

RandomNextLeaf returns a function that fetches a random leaf available in the tree.

Types

type ClientOpts

type ClientOpts struct {
	BearerToken      string
	BearerTokenWrite string

	Client *http.Client
}

type Hammer

type Hammer struct {
	// contains filtered or unexported fields
}

Hammer is responsible for coordinating the operations against the log in the form of write and read operations. The work of analysing the results of hammering should live outside of this class.

func NewHammer

func NewHammer(tracker *client.LogStateTracker, f client.EntryBundleFetcherFunc, w LeafWriter, gen func() []byte, seqLeafChan chan<- LeafTime, errChan chan<- error, opts HammerOpts) *Hammer

func (*Hammer) Run

func (h *Hammer) Run(ctx context.Context)

type HammerAnalyser

type HammerAnalyser struct {
	SeqLeafChan chan LeafTime
	ErrChan     chan error

	QueueTime       *movingaverage.ConcurrentMovingAverage
	IntegrationTime *movingaverage.ConcurrentMovingAverage
	// contains filtered or unexported fields
}

HammerAnalyser is responsible for measuring and interpreting the result of hammering.

func NewHammerAnalyser

func NewHammerAnalyser(treeSizeFn func() uint64) *HammerAnalyser

func (*HammerAnalyser) Run

func (a *HammerAnalyser) Run(ctx context.Context)

type HammerOpts

type HammerOpts struct {
	MaxReadOpsPerSecond  int
	MaxWriteOpsPerSecond int

	NumReadersRandom int
	NumReadersFull   int
	NumWriters       int
	NumMMDVerifiers  int
	MMDDuration      time.Duration
}

type LeafMMD

type LeafMMD struct {
	// contains filtered or unexported fields
}

LeafMMD records the generated leaf in the request and the timestamp in the response.

This is used to verify an MMD violation by performing the inclusion proof.

type LeafReader

type LeafReader struct {
	// contains filtered or unexported fields
}

LeafReader reads leaves from the tree. This class is not thread safe.

func NewLeafReader

func NewLeafReader(tracker *client.LogStateTracker, f client.EntryBundleFetcherFunc, next func(uint64) uint64, throttle <-chan bool, errChan chan<- error) *LeafReader

NewLeafReader creates a LeafReader. The next function provides a strategy for which leaves will be read. Custom implementations can be passed, or use RandomNextLeaf or MonotonicallyIncreasingNextLeaf.

func (*LeafReader) Kill

func (r *LeafReader) Kill()

Kills this leaf reader at the next opportune moment. This function may return before the reader is dead.

func (*LeafReader) Run

func (r *LeafReader) Run(ctx context.Context)

Run runs the log reader. This should be called in a goroutine.

type LeafTime

type LeafTime struct {
	Index      uint64
	QueuedAt   time.Time
	AssignedAt time.Time
}

LeafTime records the time at which a leaf was assigned the given index.

This is used when sampling leaves which are added in order to later calculate how long it took for them to become integrated.

type LeafWriter

type LeafWriter func(ctx context.Context, data []byte) (index uint64, timestamp uint64, err error)

LeafWriter is the signature of a function which can write arbitrary data to a log. The data to be written is provided, and the implementation must return the sequence number at which this data will be found in the log and the timestamp of the SCT issued for the data, or an error.

type LogReader

type LogReader interface {
	ReadCheckpoint(ctx context.Context) ([]byte, error)

	ReadTile(ctx context.Context, l, i uint64, p uint8) ([]byte, error)

	ReadEntryBundle(ctx context.Context, i uint64, p uint8) ([]byte, error)
}

type LogWriter

type LogWriter struct {
	// contains filtered or unexported fields
}

LogWriter writes new leaves to the log that are generated by `gen`.

func NewLogWriter

func NewLogWriter(writer LeafWriter, gen func() []byte, throttle <-chan bool, errChan chan<- error, leafSampleChan chan<- LeafTime, leafMMDChan chan<- LeafMMD) *LogWriter

NewLogWriter creates a LogWriter. writer is the LeafWriter used to write new leaves to the log. gen is a function that generates new leaves to add.

func (*LogWriter) Kill

func (w *LogWriter) Kill()

Kills this writer at the next opportune moment. This function may return before the writer is dead.

func (*LogWriter) Run

func (w *LogWriter) Run(ctx context.Context)

Run runs the log writer. This should be called in a goroutine.

type MMDVerifier

type MMDVerifier struct {
	// contains filtered or unexported fields
}

MMDVerifier verifies the signed timestamp against the MMD policy for newly added entries by performing an inclusion proof.

func NewMMDVerifier

func NewMMDVerifier(tracker *client.LogStateTracker, mmdDuration time.Duration, errChan chan<- error, leafMMDChan <-chan LeafMMD) *MMDVerifier

NewMMDVerifier creates an MMDVerifier.

func (*MMDVerifier) Kill

func (v *MMDVerifier) Kill()

Kills this verifier at the next opportune moment. This function may return before the verifier is dead.

func (*MMDVerifier) Run

func (v *MMDVerifier) Run(ctx context.Context)

Run runs the MMD verifier. This should be called in a goroutine.

type Throttle

type Throttle struct {
	TokenChan chan bool
	// contains filtered or unexported fields
}

func NewThrottle

func NewThrottle(opsPerSecond int) *Throttle

func (*Throttle) Decrease

func (t *Throttle) Decrease()

func (*Throttle) Increase

func (t *Throttle) Increase()

func (*Throttle) Run

func (t *Throttle) Run(ctx context.Context)

func (*Throttle) String

func (t *Throttle) String() string

type Worker

type Worker interface {
	Run(ctx context.Context)
	Kill()
}

type WorkerPool

type WorkerPool struct {
	// contains filtered or unexported fields
}

WorkerPool contains a collection of _running_ workers.

func NewWorkerPool

func NewWorkerPool(factory func() Worker) WorkerPool

NewWorkerPool creates a simple pool of workers.

This works well enough for the simple task we ask of it at the moment. If we find ourselves adding more features to this, consider swapping it for a library such as https://github.com/alitto/pond.

func (*WorkerPool) Grow

func (p *WorkerPool) Grow(ctx context.Context)

func (*WorkerPool) Shrink

func (p *WorkerPool) Shrink(ctx context.Context)

func (*WorkerPool) Size

func (p *WorkerPool) Size() int

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL