openai

package
v0.1.7 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Apr 11, 2025 License: MIT Imports: 35 Imported by: 0

Documentation

Index

Constants

View Source
// Error classification constants used when building error responses.
const (
	ErrorTypeAIProxy     = middleware.ErrorTypeAIPROXY // error originated in the proxy itself (aliased from middleware)
	ErrorTypeUpstream    = "upstream_error"            // error reported by the upstream provider
	ErrorCodeBadResponse = "bad_response"              // upstream returned a response that could not be handled
)
View Source
// Server-sent events (SSE) stream framing constants for OpenAI-style
// streaming responses.
const (
	DataPrefix       = "data:"  // prefix of each SSE data line
	Done             = "[DONE]" // sentinel payload marking the end of an OpenAI-style stream
	DataPrefixLength = len(DataPrefix)
)
View Source
const MetaEmbeddingsPatchInputToSlices = "embeddings_input_to_slices"
View Source
const MetaResponseFormat = "response_format"

Variables

View Source
// Precomputed byte views of the SSE constants so stream scanning can compare
// bytes without converting on every line (conv.StringToBytes presumably avoids
// a copy — confirm in the conv package).
var (
	DataPrefixBytes = conv.StringToBytes(DataPrefix)
	DoneBytes       = conv.StringToBytes(Done)
)
View Source
// ModelList enumerates the OpenAI models this adapter exposes, keyed by
// relay mode (chat completions, embeddings, completions, moderations, edits,
// image generation, audio transcription, and speech). Entries without a
// Config carry no context-window or capability metadata.
var ModelList = []*model.ModelConfig{
	// Chat completion models.
	{
		Model: "gpt-3.5-turbo",
		Type:  mode.ChatCompletions,
		Owner: model.ModelOwnerOpenAI,
		Config: model.NewModelConfig(
			model.WithModelConfigMaxContextTokens(4096),
			model.WithModelConfigToolChoice(true),
		),
	},
	{
		Model: "gpt-3.5-turbo-16k",
		Type:  mode.ChatCompletions,
		Owner: model.ModelOwnerOpenAI,
		Config: model.NewModelConfig(
			model.WithModelConfigMaxContextTokens(16384),
			model.WithModelConfigToolChoice(true),
		),
	},
	{
		Model: "gpt-3.5-turbo-instruct",
		Type:  mode.ChatCompletions,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model: "gpt-4",
		Type:  mode.ChatCompletions,
		Owner: model.ModelOwnerOpenAI,
		Config: model.NewModelConfig(
			model.WithModelConfigMaxContextTokens(8192),
			model.WithModelConfigToolChoice(true),
		),
	},
	{
		Model: "gpt-4-32k",
		Type:  mode.ChatCompletions,
		Owner: model.ModelOwnerOpenAI,
		Config: model.NewModelConfig(
			model.WithModelConfigMaxContextTokens(32768),
			model.WithModelConfigToolChoice(true),
		),
	},
	{
		Model: "gpt-4-turbo",
		Type:  mode.ChatCompletions,
		Owner: model.ModelOwnerOpenAI,
		Config: model.NewModelConfig(
			model.WithModelConfigMaxContextTokens(131072),
			model.WithModelConfigToolChoice(true),
		),
	},
	{
		// gpt-4o additionally supports image (vision) inputs.
		Model: "gpt-4o",
		Type:  mode.ChatCompletions,
		Owner: model.ModelOwnerOpenAI,
		Config: model.NewModelConfig(
			model.WithModelConfigMaxContextTokens(131072),
			model.WithModelConfigVision(true),
			model.WithModelConfigToolChoice(true),
		),
	},
	{
		Model: "chatgpt-4o-latest",
		Type:  mode.ChatCompletions,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model: "gpt-4o-mini",
		Type:  mode.ChatCompletions,
		Owner: model.ModelOwnerOpenAI,
		Config: model.NewModelConfig(
			model.WithModelConfigMaxContextTokens(131072),
			model.WithModelConfigToolChoice(true),
		),
	},
	{
		Model: "gpt-4-vision-preview",
		Type:  mode.ChatCompletions,
		Owner: model.ModelOwnerOpenAI,
	},
	// Reasoning (o1) models: no tool-choice capability is declared here.
	{
		Model: "o1-mini",
		Type:  mode.ChatCompletions,
		Owner: model.ModelOwnerOpenAI,
		Config: model.NewModelConfig(
			model.WithModelConfigMaxContextTokens(131072),
		),
	},
	{
		Model: "o1-preview",
		Type:  mode.ChatCompletions,
		Owner: model.ModelOwnerOpenAI,
		Config: model.NewModelConfig(
			model.WithModelConfigMaxContextTokens(131072),
		),
	},

	// Embedding models.
	{
		Model: "text-embedding-ada-002",
		Type:  mode.Embeddings,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model: "text-embedding-3-small",
		Type:  mode.Embeddings,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model: "text-embedding-3-large",
		Type:  mode.Embeddings,
		Owner: model.ModelOwnerOpenAI,
	},
	// Legacy text-completion models.
	{
		Model: "text-curie-001",
		Type:  mode.Completions,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model: "text-babbage-001",
		Type:  mode.Completions,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model: "text-ada-001",
		Type:  mode.Completions,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model: "text-davinci-002",
		Type:  mode.Completions,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model: "text-davinci-003",
		Type:  mode.Completions,
		Owner: model.ModelOwnerOpenAI,
	},
	// Moderation models.
	{
		Model: "text-moderation-latest",
		Type:  mode.Moderations,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model: "text-moderation-stable",
		Type:  mode.Moderations,
		Owner: model.ModelOwnerOpenAI,
	},
	// Edits model.
	{
		Model: "text-davinci-edit-001",
		Type:  mode.Edits,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model: "davinci-002",
		Type:  mode.Completions,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model: "babbage-002",
		Type:  mode.Completions,
		Owner: model.ModelOwnerOpenAI,
	},
	// Image generation models.
	{
		Model: "dall-e-2",
		Type:  mode.ImagesGenerations,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model: "dall-e-3",
		Type:  mode.ImagesGenerations,
		Owner: model.ModelOwnerOpenAI,
	},
	// Audio transcription model.
	{
		Model: "whisper-1",
		Type:  mode.AudioTranscription,
		Owner: model.ModelOwnerOpenAI,
	},
	// Text-to-speech models.
	{
		Model: "tts-1",
		Type:  mode.AudioSpeech,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model: "tts-1-1106",
		Type:  mode.AudioSpeech,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model: "tts-1-hd",
		Type:  mode.AudioSpeech,
		Owner: model.ModelOwnerOpenAI,
	},
	{
		Model: "tts-1-hd-1106",
		Type:  mode.AudioSpeech,
		Owner: model.ModelOwnerOpenAI,
	},
}

Functions

func CallID added in v0.1.4

func CallID() string

func ChatCompletionID added in v0.1.4

func ChatCompletionID() string

func ConvertEmbeddingsRequest

func ConvertEmbeddingsRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error)

func ConvertImageRequest

func ConvertImageRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error)

func ConvertRequest

func ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error)

func ConvertRerankRequest

func ConvertRerankRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error)

func ConvertSTTRequest

func ConvertSTTRequest(meta *meta.Meta, request *http.Request) (string, http.Header, io.Reader, error)

func ConvertTTSRequest

func ConvertTTSRequest(meta *meta.Meta, req *http.Request, defaultVoice string) (string, http.Header, io.Reader, error)

func ConvertTextRequest

func ConvertTextRequest(meta *meta.Meta, req *http.Request, doNotPatchStreamOptionsIncludeUsage bool) (string, http.Header, io.Reader, error)

func CountTokenInput

func CountTokenInput(input any, model string) int64

func CountTokenMessages

func CountTokenMessages(messages []*model.Message, model string) int64

func CountTokenText

func CountTokenText(text string, model string) int64

func DoResponse

func DoResponse(meta *meta.Meta, c *gin.Context, resp *http.Response) (usage *model.Usage, err *relaymodel.ErrorWithStatusCode)

func ErrorHanlder

func ErrorHanlder(resp *http.Response) *model.ErrorWithStatusCode

func ErrorWrapper

func ErrorWrapper(err error, code any, statusCode int) *model.ErrorWithStatusCode

func ErrorWrapperWithMessage

func ErrorWrapperWithMessage(message string, code any, statusCode int) *model.ErrorWithStatusCode

func GetBalance

func GetBalance(channel *model.Channel) (float64, error)

func GetFullRequestURL

func GetFullRequestURL(baseURL string, requestURL string) string

func GetScannerBuffer added in v0.1.4

func GetScannerBuffer() *[]byte

func GetUsageOrChatChoicesResponseFromNode added in v0.1.4

func GetUsageOrChatChoicesResponseFromNode(node *ast.Node) (*relaymodel.Usage, []*relaymodel.ChatCompletionsStreamResponseChoice, error)

func GetUsageOrChoicesResponseFromNode added in v0.1.4

func GetUsageOrChoicesResponseFromNode(node *ast.Node) (*relaymodel.Usage, []*relaymodel.TextResponseChoice, error)

func Handler

func Handler(meta *meta.Meta, c *gin.Context, resp *http.Response, preHandler PreHandler) (*model.Usage, *relaymodel.ErrorWithStatusCode)

func ImageHandler

func ImageHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *relaymodel.ErrorWithStatusCode)

func ModerationsHandler

func ModerationsHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *relaymodel.ErrorWithStatusCode)

func PutScannerBuffer added in v0.1.4

func PutScannerBuffer(buf *[]byte)

func RerankHandler

func RerankHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *relaymodel.ErrorWithStatusCode)

func ResponseText2Usage

func ResponseText2Usage(responseText string, modeName string, promptTokens int64) *model.Usage

func STTHandler

func STTHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *relaymodel.ErrorWithStatusCode)

func SplitThink

func SplitThink(data map[string]any)

func SplitThinkModeld

func SplitThinkModeld(data *relaymodel.TextResponse)

func StreamHandler

func StreamHandler(meta *meta.Meta, c *gin.Context, resp *http.Response, preHandler PreHandler) (*model.Usage, *relaymodel.ErrorWithStatusCode)

func StreamSplitThink

func StreamSplitThink(data map[string]any, thinkSplitter *splitter.Splitter, renderCallback func(data map[string]any))

renderCallback may reuse data, so don't modify it.

func StreamSplitThinkModeld

func StreamSplitThinkModeld(data *relaymodel.ChatCompletionsStreamResponse, thinkSplitter *splitter.Splitter, renderCallback func(data *relaymodel.ChatCompletionsStreamResponse))

func TTSHandler

func TTSHandler(meta *meta.Meta, c *gin.Context, resp *http.Response) (*model.Usage, *relaymodel.ErrorWithStatusCode)

Types

type Adaptor

type Adaptor struct{}

func (*Adaptor) ConvertRequest

func (a *Adaptor) ConvertRequest(meta *meta.Meta, req *http.Request) (string, http.Header, io.Reader, error)

func (*Adaptor) DoRequest

func (a *Adaptor) DoRequest(_ *meta.Meta, _ *gin.Context, req *http.Request) (*http.Response, error)

func (*Adaptor) DoResponse

func (a *Adaptor) DoResponse(meta *meta.Meta, c *gin.Context, resp *http.Response) (usage *model.Usage, err *relaymodel.ErrorWithStatusCode)

func (*Adaptor) GetBalance

func (a *Adaptor) GetBalance(channel *model.Channel) (float64, error)

func (*Adaptor) GetBaseURL

func (a *Adaptor) GetBaseURL() string

func (*Adaptor) GetChannelName

func (a *Adaptor) GetChannelName() string

func (*Adaptor) GetModelList

func (a *Adaptor) GetModelList() []*model.ModelConfig

func (*Adaptor) GetRequestURL

func (a *Adaptor) GetRequestURL(meta *meta.Meta) (string, error)

func (*Adaptor) SetupRequestHeader

func (a *Adaptor) SetupRequestHeader(meta *meta.Meta, _ *gin.Context, req *http.Request) error

type GeneralErrorResponse

// GeneralErrorResponse is a permissive error envelope that captures the many
// field names under which OpenAI-compatible providers report an error message
// (error, message, msg, err, error_msg, header.message, response.error.message).
// ToMessage extracts the first usable message from these fields — confirm
// precedence in its implementation.
type GeneralErrorResponse struct {
	Error    model.Error `json:"error"`     // standard OpenAI-style error object
	Message  string      `json:"message"`   // top-level message variant
	Msg      string      `json:"msg"`       // abbreviated variant used by some providers
	Err      string      `json:"err"`       // "err" variant
	ErrorMsg string      `json:"error_msg"` // "error_msg" variant
	Header   struct {
		Message string `json:"message"`
	} `json:"header"` // nested header.message variant
	Response struct {
		Error struct {
			Message string `json:"message"`
		} `json:"error"`
	} `json:"response"` // nested response.error.message variant
}

func (GeneralErrorResponse) ToMessage

func (e GeneralErrorResponse) ToMessage() string

type PreHandler added in v0.1.4

type PreHandler func(meta *meta.Meta, node *ast.Node) error

type SubscriptionResponse

// SubscriptionResponse models the billing subscription payload used by
// GetBalance to query an account's spending limits.
// NOTE(review): field set matches the legacy OpenAI dashboard
// /dashboard/billing/subscription endpoint — confirm against the caller.
type SubscriptionResponse struct {
	Object             string  `json:"object"`
	HasPaymentMethod   bool    `json:"has_payment_method"`
	SoftLimitUSD       float64 `json:"soft_limit_usd"`
	HardLimitUSD       float64 `json:"hard_limit_usd"`
	SystemHardLimitUSD float64 `json:"system_hard_limit_usd"`
	AccessUntil        int64   `json:"access_until"` // presumably a Unix timestamp — confirm
}

type UsageResponse

// UsageResponse models the billing usage payload used by GetBalance to
// compute how much of the account's quota has been consumed.
type UsageResponse struct {
	Object string `json:"object"`
	// DailyCosts []OpenAIUsageDailyCost `json:"daily_costs"`
	TotalUsage float64 `json:"total_usage"` // unit: 0.01 dollar
}

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL