Documentation
¶
Index ¶
- Constants
- Variables
- func CallID() string
- func ChatCompletionID() string
- func ConvertEmbeddingsRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error)
- func ConvertImageRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error)
- func ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error)
- func ConvertRerankRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error)
- func ConvertSTTRequest(meta *meta.Meta, request *http.Request) (string, http.Header, io.Reader, error)
- func ConvertTTSRequest(meta *meta.Meta, req *http.Request, defaultVoice string) (string, http.Header, io.Reader, error)
- func ConvertTextRequest(meta *meta.Meta, req *http.Request, doNotPatchStreamOptionsIncludeUsage bool) (string, http.Header, io.Reader, error)
- func CountTokenInput(input any, model string) int64
- func CountTokenMessages(messages []*model.Message, model string) int64
- func CountTokenText(text string, model string) int64
- func DoResponse(meta *meta.Meta, c *gin.Context, resp *http.Response) (usage *model.Usage, err *relaymodel.ErrorWithStatusCode)
- func ErrorHanlder(resp *http.Response) *model.ErrorWithStatusCode
- func ErrorWrapper(err error, code any, statusCode int) *model.ErrorWithStatusCode
- func ErrorWrapperWithMessage(message string, code any, statusCode int) *model.ErrorWithStatusCode
- func GetBalance(channel *model.Channel) (float64, error)
- func GetFullRequestURL(baseURL string, requestURL string) string
- func GetScannerBuffer() *[]byte
- func GetUsageOrChatChoicesResponseFromNode(node *ast.Node) (*relaymodel.Usage, []*relaymodel.ChatCompletionsStreamResponseChoice, error)
- func GetUsageOrChoicesResponseFromNode(node *ast.Node) (*relaymodel.Usage, []*relaymodel.TextResponseChoice, error)
- func Handler(meta *meta.Meta, c *gin.Context, resp *http.Response, preHandler PreHandler) (*model.Usage, *relaymodel.ErrorWithStatusCode)
- func ImageHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *relaymodel.ErrorWithStatusCode)
- func ModerationsHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *relaymodel.ErrorWithStatusCode)
- func PutScannerBuffer(buf *[]byte)
- func RerankHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *relaymodel.ErrorWithStatusCode)
- func ResponseText2Usage(responseText string, modeName string, promptTokens int64) *model.Usage
- func STTHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *relaymodel.ErrorWithStatusCode)
- func SplitThink(data map[string]any)
- func SplitThinkModeld(data *relaymodel.TextResponse)
- func StreamHandler(meta *meta.Meta, c *gin.Context, resp *http.Response, preHandler PreHandler) (*model.Usage, *relaymodel.ErrorWithStatusCode)
- func StreamSplitThink(data map[string]any, thinkSplitter *splitter.Splitter, ...)
- func StreamSplitThinkModeld(data *relaymodel.ChatCompletionsStreamResponse, ...)
- func TTSHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *relaymodel.ErrorWithStatusCode)
- type Adaptor
- func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error)
- func (a *Adaptor) DoRequest(_ *meta.Meta, _ *gin.Context, req *http.Request) (*http.Response, error)
- func (a *Adaptor) DoResponse(meta *meta.Meta, c *gin.Context, resp *http.Response) (usage *model.Usage, err *relaymodel.ErrorWithStatusCode)
- func (a *Adaptor) GetBalance(channel *model.Channel) (float64, error)
- func (a *Adaptor) GetBaseURL() string
- func (a *Adaptor) GetChannelName() string
- func (a *Adaptor) GetModelList() []*model.ModelConfig
- func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error)
- func (a *Adaptor) SetupRequestHeader(meta *meta.Meta, _ *gin.Context, req *http.Request) error
- type GeneralErrorResponse
- type PreHandler
- type SubscriptionResponse
- type UsageResponse
Constants ¶
View Source
const ( ErrorTypeAIProxy = middleware.ErrorTypeAIPROXY ErrorTypeUpstream = "upstream_error" ErrorCodeBadResponse = "bad_response" )
View Source
const ( DataPrefix = "data:" Done = "[DONE]" DataPrefixLength = len(DataPrefix) )
View Source
const MetaEmbeddingsPatchInputToSlices = "embeddings_input_to_slices"
View Source
const MetaResponseFormat = "response_format"
Variables ¶
View Source
var ( DataPrefixBytes = conv.StringToBytes(DataPrefix) DoneBytes = conv.StringToBytes(Done) )
View Source
var ModelList = []*model.ModelConfig{ { Model: "gpt-3.5-turbo", Type: mode.ChatCompletions, Owner: model.ModelOwnerOpenAI, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(4096), model.WithModelConfigToolChoice(true), ), }, { Model: "gpt-3.5-turbo-16k", Type: mode.ChatCompletions, Owner: model.ModelOwnerOpenAI, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(16384), model.WithModelConfigToolChoice(true), ), }, { Model: "gpt-3.5-turbo-instruct", Type: mode.ChatCompletions, Owner: model.ModelOwnerOpenAI, }, { Model: "gpt-4", Type: mode.ChatCompletions, Owner: model.ModelOwnerOpenAI, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(8192), model.WithModelConfigToolChoice(true), ), }, { Model: "gpt-4-32k", Type: mode.ChatCompletions, Owner: model.ModelOwnerOpenAI, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(32768), model.WithModelConfigToolChoice(true), ), }, { Model: "gpt-4-turbo", Type: mode.ChatCompletions, Owner: model.ModelOwnerOpenAI, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(131072), model.WithModelConfigToolChoice(true), ), }, { Model: "gpt-4o", Type: mode.ChatCompletions, Owner: model.ModelOwnerOpenAI, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(131072), model.WithModelConfigVision(true), model.WithModelConfigToolChoice(true), ), }, { Model: "chatgpt-4o-latest", Type: mode.ChatCompletions, Owner: model.ModelOwnerOpenAI, }, { Model: "gpt-4o-mini", Type: mode.ChatCompletions, Owner: model.ModelOwnerOpenAI, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(131072), model.WithModelConfigToolChoice(true), ), }, { Model: "gpt-4-vision-preview", Type: mode.ChatCompletions, Owner: model.ModelOwnerOpenAI, }, { Model: "o1-mini", Type: mode.ChatCompletions, Owner: model.ModelOwnerOpenAI, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(131072), ), }, { Model: "o1-preview", Type: mode.ChatCompletions, 
Owner: model.ModelOwnerOpenAI, Config: model.NewModelConfig( model.WithModelConfigMaxContextTokens(131072), ), }, { Model: "text-embedding-ada-002", Type: mode.Embeddings, Owner: model.ModelOwnerOpenAI, }, { Model: "text-embedding-3-small", Type: mode.Embeddings, Owner: model.ModelOwnerOpenAI, }, { Model: "text-embedding-3-large", Type: mode.Embeddings, Owner: model.ModelOwnerOpenAI, }, { Model: "text-curie-001", Type: mode.Completions, Owner: model.ModelOwnerOpenAI, }, { Model: "text-babbage-001", Type: mode.Completions, Owner: model.ModelOwnerOpenAI, }, { Model: "text-ada-001", Type: mode.Completions, Owner: model.ModelOwnerOpenAI, }, { Model: "text-davinci-002", Type: mode.Completions, Owner: model.ModelOwnerOpenAI, }, { Model: "text-davinci-003", Type: mode.Completions, Owner: model.ModelOwnerOpenAI, }, { Model: "text-moderation-latest", Type: mode.Moderations, Owner: model.ModelOwnerOpenAI, }, { Model: "text-moderation-stable", Type: mode.Moderations, Owner: model.ModelOwnerOpenAI, }, { Model: "text-davinci-edit-001", Type: mode.Edits, Owner: model.ModelOwnerOpenAI, }, { Model: "davinci-002", Type: mode.Completions, Owner: model.ModelOwnerOpenAI, }, { Model: "babbage-002", Type: mode.Completions, Owner: model.ModelOwnerOpenAI, }, { Model: "dall-e-2", Type: mode.ImagesGenerations, Owner: model.ModelOwnerOpenAI, }, { Model: "dall-e-3", Type: mode.ImagesGenerations, Owner: model.ModelOwnerOpenAI, }, { Model: "whisper-1", Type: mode.AudioTranscription, Owner: model.ModelOwnerOpenAI, }, { Model: "tts-1", Type: mode.AudioSpeech, Owner: model.ModelOwnerOpenAI, }, { Model: "tts-1-1106", Type: mode.AudioSpeech, Owner: model.ModelOwnerOpenAI, }, { Model: "tts-1-hd", Type: mode.AudioSpeech, Owner: model.ModelOwnerOpenAI, }, { Model: "tts-1-hd-1106", Type: mode.AudioSpeech, Owner: model.ModelOwnerOpenAI, }, }
Functions ¶
func ChatCompletionID ¶ added in v0.1.4
func ChatCompletionID() string
func ConvertImageRequest ¶
func ConvertRequest ¶
func ConvertRerankRequest ¶
func ConvertSTTRequest ¶
func ConvertTTSRequest ¶
func ConvertTextRequest ¶
func CountTokenInput ¶
func CountTokenText ¶
func DoResponse ¶
func DoResponse(meta *meta.Meta, c *gin.Context, resp *http.Response) (usage *model.Usage, err *relaymodel.ErrorWithStatusCode)
func ErrorHanlder ¶
func ErrorHanlder(resp *http.Response) *model.ErrorWithStatusCode
func ErrorWrapper ¶
func ErrorWrapper(err error, code any, statusCode int) *model.ErrorWithStatusCode
func ErrorWrapperWithMessage ¶
func ErrorWrapperWithMessage(message string, code any, statusCode int) *model.ErrorWithStatusCode
func GetFullRequestURL ¶
func GetScannerBuffer ¶ added in v0.1.4
func GetScannerBuffer() *[]byte
func GetUsageOrChatChoicesResponseFromNode ¶ added in v0.1.4
func GetUsageOrChatChoicesResponseFromNode(node *ast.Node) (*relaymodel.Usage, []*relaymodel.ChatCompletionsStreamResponseChoice, error)
func GetUsageOrChoicesResponseFromNode ¶ added in v0.1.4
func GetUsageOrChoicesResponseFromNode(node *ast.Node) (*relaymodel.Usage, []*relaymodel.TextResponseChoice, error)
func Handler ¶
func Handler(meta *meta.Meta, c *gin.Context, resp *http.Response, preHandler PreHandler) (*model.Usage, *relaymodel.ErrorWithStatusCode)
func ImageHandler ¶
func ImageHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *relaymodel.ErrorWithStatusCode)
func ModerationsHandler ¶
func ModerationsHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *relaymodel.ErrorWithStatusCode)
func PutScannerBuffer ¶ added in v0.1.4
func PutScannerBuffer(buf *[]byte)
func RerankHandler ¶
func RerankHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *relaymodel.ErrorWithStatusCode)
func ResponseText2Usage ¶
func STTHandler ¶
func STTHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *relaymodel.ErrorWithStatusCode)
func SplitThink ¶
func SplitThinkModeld ¶
func SplitThinkModeld(data *relaymodel.TextResponse)
func StreamHandler ¶
func StreamHandler(meta *meta.Meta, c *gin.Context, resp *http.Response, preHandler PreHandler) (*model.Usage, *relaymodel.ErrorWithStatusCode)
func StreamSplitThink ¶
func StreamSplitThink(data map[string]any, thinkSplitter *splitter.Splitter, renderCallback func(data map[string]any))
renderCallback may reuse data internally, so do not modify data after passing it in.
func StreamSplitThinkModeld ¶
func StreamSplitThinkModeld(data *relaymodel.ChatCompletionsStreamResponse, thinkSplitter *splitter.Splitter, renderCallback func(data *relaymodel.ChatCompletionsStreamResponse))
func TTSHandler ¶
func TTSHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *relaymodel.ErrorWithStatusCode)
Types ¶
type Adaptor ¶
type Adaptor struct{}
func (*Adaptor) ConvertRequest ¶
func (*Adaptor) DoResponse ¶
func (*Adaptor) GetBaseURL ¶
func (*Adaptor) GetChannelName ¶
func (*Adaptor) GetModelList ¶
func (a *Adaptor) GetModelList() []*model.ModelConfig
type GeneralErrorResponse ¶
type GeneralErrorResponse struct { Error model.Error `json:"error"` Message string `json:"message"` Msg string `json:"msg"` Err string `json:"err"` ErrorMsg string `json:"error_msg"` Header struct { Message string `json:"message"` } `json:"header"` Response struct { Error struct { Message string `json:"message"` } `json:"error"` } `json:"response"` }
func (GeneralErrorResponse) ToMessage ¶
func (e GeneralErrorResponse) ToMessage() string
type SubscriptionResponse ¶
type SubscriptionResponse struct { Object string `json:"object"` HasPaymentMethod bool `json:"has_payment_method"` SoftLimitUSD float64 `json:"soft_limit_usd"` HardLimitUSD float64 `json:"hard_limit_usd"` SystemHardLimitUSD float64 `json:"system_hard_limit_usd"` AccessUntil int64 `json:"access_until"` }
type UsageResponse ¶
Click to show internal directories.
Click to hide internal directories.