Documentation

Index
- Variables
- func CheckFlinkOperatorRelations(operator []*flinkpb.FlinkOperator) (err error)
- func CreateRandomString(len int) string
- func FlinkJobFree(req *request.JobFree) (resp response.JobFree, err error)
- func GetSqlColumnDefine(sqlColumns []*flinkpb.SqlColumnType, timeColumns []*flinkpb.SqlTimeColumnType) (define string)
- func In(haystack interface{}, needle interface{}) bool
- type EngineClient
- type JobdeveloperExecutor
- func (ex *JobdeveloperExecutor) FlinkNodeRelations(ctx context.Context) (resp response.NodeRelations, err error)
- func (ex *JobdeveloperExecutor) JobFree(ctx context.Context, req *request.JobFree) (resp response.JobFree, err error)
- func (ex *JobdeveloperExecutor) JobParser(ctx context.Context, req *request.JobParser) (resp response.JobParser, err error)
- type OperatorRelation
- type ResourceClient
- type SourceClient
- func (s *SourceClient) DescribeSourceManager(ctx context.Context, ID string) (sourceType model.DataSource_Type, url *datasourcepb.DataSourceURL, err error)
- func (s *SourceClient) DescribeSourceTable(ctx context.Context, ID string) (sourceID string, tableName string, tableSchema *flinkpb.TableSchema, err error)
- type SqlStack
- type UdfClient
Constants
This section is empty.
Variables
Functions
func CheckFlinkOperatorRelations
func CheckFlinkOperatorRelations(operator []*flinkpb.FlinkOperator) (err error)
func CreateRandomString
func CreateRandomString(len int) string
func GetSqlColumnDefine
func GetSqlColumnDefine(sqlColumns []*flinkpb.SqlColumnType, timeColumns []*flinkpb.SqlTimeColumnType) (define string)
Types
type EngineClient
type EngineClient struct {
// contains filtered or unexported fields
}
func NewEngineClient
func NewEngineClient(conn *grpcwrap.ClientConn) (c EngineClient, err error)
type JobdeveloperExecutor
type JobdeveloperExecutor struct { FlinkHome string HadoopConf string FlinkExecuteJars string // contains filtered or unexported fields }
func NewJobDeveloperExecutor
func NewJobDeveloperExecutor(l *glog.Logger, eClient EngineClient, sClient SourceClient, uClient UdfClient, rClient ResourceClient, flinkHome string, hadoopConf string, flinkExecuteJars string) *JobdeveloperExecutor
func (*JobdeveloperExecutor) FlinkNodeRelations
func (ex *JobdeveloperExecutor) FlinkNodeRelations(ctx context.Context) (resp response.NodeRelations, err error)
type OperatorRelation
type OperatorRelation struct { OperatorType flinkpb.FlinkOperator_Type `json:"nodetype"` AllowUpstream []flinkpb.FlinkOperator_Type `json:"allowupstream"` AllowDownStream []flinkpb.FlinkOperator_Type `json:"allowdownstream"` }
func GetOperatorRelation
func GetOperatorRelation(nodeType flinkpb.FlinkOperator_Type) (nodeRelation OperatorRelation, jsonRelation string, err error)
type ResourceClient
type ResourceClient struct {
// contains filtered or unexported fields
}
func NewFileClient
func NewFileClient(conn *grpcwrap.ClientConn) (c ResourceClient, err error)
func (*ResourceClient) GetFileById
type SourceClient
type SourceClient struct {
// contains filtered or unexported fields
}
func NewSourceClient
func NewSourceClient(conn *grpcwrap.ClientConn) (c SourceClient, err error)
func (*SourceClient) DescribeSourceManager
func (s *SourceClient) DescribeSourceManager(ctx context.Context, ID string) (sourceType model.DataSource_Type, url *datasourcepb.DataSourceURL, err error)
func (*SourceClient) DescribeSourceTable
func (s *SourceClient) DescribeSourceTable(ctx context.Context, ID string) (sourceID string, tableName string, tableSchema *flinkpb.TableSchema, err error)
type UdfClient
type UdfClient struct {
// contains filtered or unexported fields
}
func NewUdfClient
func NewUdfClient(conn *grpcwrap.ClientConn) (c UdfClient, err error)
Click to show internal directories.
Click to hide internal directories.