diff --git a/internal/api/chat/create_conversation_message_stream_v2.go b/internal/api/chat/create_conversation_message_stream_v2.go
index d1dfb3e6..3537ec79 100644
--- a/internal/api/chat/create_conversation_message_stream_v2.go
+++ b/internal/api/chat/create_conversation_message_stream_v2.go
@@ -321,7 +321,7 @@ func (s *ChatServerV2) CreateConversationMessageStream(
 		}
 	}
 
-	openaiChatHistory, inappChatHistory, err := s.aiClientV2.ChatCompletionStreamV2(ctx, stream, conversation.ID.Hex(), modelSlug, conversation.OpenaiChatHistoryCompletion, llmProvider)
+	openaiChatHistory, inappChatHistory, err := s.aiClientV2.ChatCompletionStreamV2(ctx, stream, conversation.ID.Hex(), modelSlug, conversation.OpenaiChatHistoryCompletion, llmProvider, customModel)
 	if err != nil {
 		return s.sendStreamError(stream, err)
 	}
@@ -347,7 +347,7 @@ func (s *ChatServerV2) CreateConversationMessageStream(
 		for i, bsonMsg := range conversation.InappChatHistory {
 			protoMessages[i] = mapper.BSONToChatMessageV2(bsonMsg)
 		}
-		title, err := s.aiClientV2.GetConversationTitleV2(ctx, protoMessages, llmProvider, modelSlug)
+		title, err := s.aiClientV2.GetConversationTitleV2(ctx, protoMessages, llmProvider, modelSlug, customModel)
 		if err != nil {
 			s.logger.Error("Failed to get conversation title", "error", err, "conversationID", conversation.ID.Hex())
 			return
diff --git a/internal/api/mapper/user.go b/internal/api/mapper/user.go
index ea9a4f7b..09d31aa7 100644
--- a/internal/api/mapper/user.go
+++ b/internal/api/mapper/user.go
@@ -19,15 +19,18 @@ func MapProtoSettingsToModel(settings *userv1.Settings) *models.Settings {
 		}
 
 		customModels[i] = models.CustomModel{
-			Id:            id,
-			Slug:          m.Slug,
-			Name:          m.Name,
-			BaseUrl:       m.BaseUrl,
-			APIKey:        m.ApiKey,
-			ContextWindow: m.ContextWindow,
-			MaxOutput:     m.MaxOutput,
-			InputPrice:    m.InputPrice,
-			OutputPrice:   m.OutputPrice,
+			Id:                id,
+			Slug:              m.Slug,
+			Name:              m.Name,
+			BaseUrl:           m.BaseUrl,
+			APIKey:            m.ApiKey,
+			ContextWindow:     m.ContextWindow,
+			MaxOutput:         m.MaxOutput,
+			InputPrice:        m.InputPrice,
+			OutputPrice:       m.OutputPrice,
+			Temperature:       m.Temperature,
+			ParallelToolCalls: m.ParallelToolCalls,
+			Store:             m.Store,
 		}
 	}
 
@@ -47,15 +50,18 @@ func MapModelSettingsToProto(settings *models.Settings) *userv1.Settings {
 	customModels := make([]*userv1.CustomModel, len(settings.CustomModels))
 	for i, m := range settings.CustomModels {
 		customModels[i] = &userv1.CustomModel{
-			Id:            m.Id.Hex(),
-			Slug:          m.Slug,
-			Name:          m.Name,
-			BaseUrl:       m.BaseUrl,
-			ApiKey:        m.APIKey,
-			ContextWindow: m.ContextWindow,
-			MaxOutput:     m.MaxOutput,
-			InputPrice:    m.InputPrice,
-			OutputPrice:   m.OutputPrice,
+			Id:                m.Id.Hex(),
+			Slug:              m.Slug,
+			Name:              m.Name,
+			BaseUrl:           m.BaseUrl,
+			ApiKey:            m.APIKey,
+			ContextWindow:     m.ContextWindow,
+			MaxOutput:         m.MaxOutput,
+			InputPrice:        m.InputPrice,
+			OutputPrice:       m.OutputPrice,
+			Temperature:       m.Temperature,
+			ParallelToolCalls: m.ParallelToolCalls,
+			Store:             m.Store,
 		}
 	}
 
diff --git a/internal/models/user.go b/internal/models/user.go
index 413be929..a0350301 100644
--- a/internal/models/user.go
+++ b/internal/models/user.go
@@ -3,15 +3,18 @@ package models
 import "go.mongodb.org/mongo-driver/v2/bson"
 
 type CustomModel struct {
-	Id            bson.ObjectID `bson:"_id"`
-	Slug          string        `bson:"slug"`
-	Name          string        `bson:"name"`
-	BaseUrl       string        `bson:"base_url"`
-	APIKey        string        `bson:"api_key"`
-	ContextWindow int32         `bson:"context_window"`
-	MaxOutput     int32         `bson:"max_output"`
-	InputPrice    int32         `bson:"input_price"`
-	OutputPrice   int32         `bson:"output_price"`
+	Id                bson.ObjectID `bson:"_id"`
+	Slug              string        `bson:"slug"`
+	Name              string        `bson:"name"`
+	BaseUrl           string        `bson:"base_url"`
+	APIKey            string        `bson:"api_key"`
+	ContextWindow     int32         `bson:"context_window"`
+	MaxOutput         int32         `bson:"max_output"`
+	InputPrice        int32         `bson:"input_price"`
+	OutputPrice       int32         `bson:"output_price"`
+	Temperature       float32       `bson:"temperature"`
+	ParallelToolCalls bool          `bson:"parallel_tool_calls"`
+	Store             bool          `bson:"store"`
 }
 
 type Settings struct {
diff --git a/internal/services/toolkit/client/completion_v2.go b/internal/services/toolkit/client/completion_v2.go
index 47caaad4..7266d669 100644
--- a/internal/services/toolkit/client/completion_v2.go
+++ b/internal/services/toolkit/client/completion_v2.go
@@ -25,8 +25,8 @@ import (
 // 1. The full chat history sent to the language model (including any tool call results).
 // 2. The incremental chat history visible to the user (including tool call results and assistant responses).
 // 3. An error, if any occurred during the process.
-func (a *AIClientV2) ChatCompletionV2(ctx context.Context, modelSlug string, messages OpenAIChatHistory, llmProvider *models.LLMProviderConfig) (OpenAIChatHistory, AppChatHistory, error) {
-	openaiChatHistory, inappChatHistory, err := a.ChatCompletionStreamV2(ctx, nil, "", modelSlug, messages, llmProvider)
+func (a *AIClientV2) ChatCompletionV2(ctx context.Context, modelSlug string, messages OpenAIChatHistory, llmProvider *models.LLMProviderConfig, customModel *models.CustomModel) (OpenAIChatHistory, AppChatHistory, error) {
+	openaiChatHistory, inappChatHistory, err := a.ChatCompletionStreamV2(ctx, nil, "", modelSlug, messages, llmProvider, customModel)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -54,7 +54,7 @@ func (a *AIClientV2) ChatCompletionV2(ctx context.Context, modelSlug string, mes
 // - If tool calls are required, it handles them and appends the results to the chat history, then continues the loop.
 // - If no tool calls are needed, it appends the assistant's response and exits the loop.
 // - Finally, it returns the updated chat histories and any error encountered.
-func (a *AIClientV2) ChatCompletionStreamV2(ctx context.Context, callbackStream chatv2.ChatService_CreateConversationMessageStreamServer, conversationId string, modelSlug string, messages OpenAIChatHistory, llmProvider *models.LLMProviderConfig) (OpenAIChatHistory, AppChatHistory, error) {
+func (a *AIClientV2) ChatCompletionStreamV2(ctx context.Context, callbackStream chatv2.ChatService_CreateConversationMessageStreamServer, conversationId string, modelSlug string, messages OpenAIChatHistory, llmProvider *models.LLMProviderConfig, customModel *models.CustomModel) (OpenAIChatHistory, AppChatHistory, error) {
 	openaiChatHistory := messages
 	inappChatHistory := AppChatHistory{}
 
@@ -66,7 +66,7 @@ func (a *AIClientV2) ChatCompletionStreamV2(ctx context.Context, callbackStream
 	}()
 
 	oaiClient := a.GetOpenAIClient(llmProvider)
-	params := getDefaultParamsV2(modelSlug, a.toolCallHandler.Registry, llmProvider.IsCustomModel)
+	params := getDefaultParamsV2(modelSlug, a.toolCallHandler.Registry, customModel)
 
 	for {
 		params.Messages = openaiChatHistory
diff --git a/internal/services/toolkit/client/get_citation_keys.go b/internal/services/toolkit/client/get_citation_keys.go
index 1995d590..2344d49d 100644
--- a/internal/services/toolkit/client/get_citation_keys.go
+++ b/internal/services/toolkit/client/get_citation_keys.go
@@ -244,7 +244,7 @@ func (a *AIClientV2) GetCitationKeys(ctx context.Context, sentence string, userI
 	_, resp, err := a.ChatCompletionV2(ctx, "gpt-5.2", OpenAIChatHistory{
 		openai.SystemMessage("You are a helpful assistant that suggests relevant citation keys."),
 		openai.UserMessage(message),
-	}, llmProvider)
+	}, llmProvider, nil)
 
 	if err != nil {
 		return nil, err
diff --git a/internal/services/toolkit/client/get_conversation_title_v2.go b/internal/services/toolkit/client/get_conversation_title_v2.go
index a58617d7..27840c7c 100644
--- a/internal/services/toolkit/client/get_conversation_title_v2.go
+++ b/internal/services/toolkit/client/get_conversation_title_v2.go
@@ -13,7 +13,7 @@ import (
 	"github.com/samber/lo"
 )
 
-func (a *AIClientV2) GetConversationTitleV2(ctx context.Context, inappChatHistory []*chatv2.Message, llmProvider *models.LLMProviderConfig, modelSlug string) (string, error) {
+func (a *AIClientV2) GetConversationTitleV2(ctx context.Context, inappChatHistory []*chatv2.Message, llmProvider *models.LLMProviderConfig, modelSlug string, customModel *models.CustomModel) (string, error) {
 	messages := lo.Map(inappChatHistory, func(message *chatv2.Message, _ int) string {
 		if _, ok := message.Payload.MessageType.(*chatv2.MessagePayload_Assistant); ok {
 			return fmt.Sprintf("Assistant: %s", message.Payload.GetAssistant().GetContent())
@@ -38,7 +38,7 @@ func (a *AIClientV2) GetConversationTitleV2(ctx context.Context, inappChatHistor
 	_, resp, err := a.ChatCompletionV2(ctx, modelToUse, OpenAIChatHistory{
 		openai.SystemMessage("You are a helpful assistant that generates a title for a conversation."),
 		openai.UserMessage(message),
-	}, llmProvider)
+	}, llmProvider, customModel)
 	if err != nil {
 		return "", err
 	}
diff --git a/internal/services/toolkit/client/utils_v2.go b/internal/services/toolkit/client/utils_v2.go
index 7890c34c..884b91eb 100644
--- a/internal/services/toolkit/client/utils_v2.go
+++ b/internal/services/toolkit/client/utils_v2.go
@@ -10,6 +10,7 @@ import (
 	"paperdebugger/internal/libs/cfg"
 	"paperdebugger/internal/libs/db"
 	"paperdebugger/internal/libs/logger"
+	"paperdebugger/internal/models"
 	"paperdebugger/internal/services"
 	"paperdebugger/internal/services/toolkit/registry"
filetools "paperdebugger/internal/services/toolkit/tools/files" @@ -53,7 +54,7 @@ func appendAssistantTextResponseV2(openaiChatHistory *OpenAIChatHistory, inappCh }) } -func getDefaultParamsV2(modelSlug string, toolRegistry *registry.ToolRegistryV2, isCustomModel bool) openaiv3.ChatCompletionNewParams { +func getDefaultParamsV2(modelSlug string, toolRegistry *registry.ToolRegistryV2, customModel *models.CustomModel) openaiv3.ChatCompletionNewParams { var reasoningModels = []string{ "gpt-5", "gpt-5-mini", @@ -67,15 +68,22 @@ func getDefaultParamsV2(modelSlug string, toolRegistry *registry.ToolRegistryV2, "codex-mini-latest", } - // Other model providers generally do not support the Store param - if isCustomModel { - return openaiv3.ChatCompletionNewParams{ - Model: modelSlug, - Temperature: openaiv3.Float(0.7), - MaxCompletionTokens: openaiv3.Int(4000), + if customModel != nil { + params := openaiv3.ChatCompletionNewParams{ + Model: customModel.Slug, + Temperature: openaiv3.Float(float64(customModel.Temperature)), + MaxCompletionTokens: openaiv3.Int(int64(customModel.MaxOutput)), Tools: toolRegistry.GetTools(), - ParallelToolCalls: openaiv3.Bool(true), + ParallelToolCalls: openaiv3.Bool(customModel.ParallelToolCalls), } + + // Store param should only be included if it is true + // Some providers like Gemini might not support the param at all even if false + if customModel.Store { + params.Store = openaiv3.Bool(customModel.Store) + } + + return params } for _, model := range reasoningModels { diff --git a/pkg/gen/api/user/v1/user.pb.go b/pkg/gen/api/user/v1/user.pb.go index 1f727599..ef08a101 100644 --- a/pkg/gen/api/user/v1/user.pb.go +++ b/pkg/gen/api/user/v1/user.pb.go @@ -616,18 +616,21 @@ func (*DeletePromptResponse) Descriptor() ([]byte, []int) { } type CustomModel struct { - state protoimpl.MessageState `protogen:"open.v1"` - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Slug string `protobuf:"bytes,2,opt,name=slug,proto3" json:"slug,omitempty"` - Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` - BaseUrl string `protobuf:"bytes,4,opt,name=base_url,json=baseUrl,proto3" json:"base_url,omitempty"` - ApiKey string `protobuf:"bytes,5,opt,name=api_key,json=apiKey,proto3" json:"api_key,omitempty"` - ContextWindow int32 `protobuf:"varint,6,opt,name=context_window,json=contextWindow,proto3" json:"context_window,omitempty"` - MaxOutput int32 `protobuf:"varint,7,opt,name=max_output,json=maxOutput,proto3" json:"max_output,omitempty"` - InputPrice int32 `protobuf:"varint,8,opt,name=input_price,json=inputPrice,proto3" json:"input_price,omitempty"` - OutputPrice int32 `protobuf:"varint,9,opt,name=output_price,json=outputPrice,proto3" json:"output_price,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Slug string `protobuf:"bytes,2,opt,name=slug,proto3" json:"slug,omitempty"` + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + BaseUrl string `protobuf:"bytes,4,opt,name=base_url,json=baseUrl,proto3" json:"base_url,omitempty"` + ApiKey string `protobuf:"bytes,5,opt,name=api_key,json=apiKey,proto3" json:"api_key,omitempty"` + ContextWindow int32 `protobuf:"varint,6,opt,name=context_window,json=contextWindow,proto3" json:"context_window,omitempty"` + MaxOutput int32 `protobuf:"varint,7,opt,name=max_output,json=maxOutput,proto3" json:"max_output,omitempty"` + 
InputPrice int32 `protobuf:"varint,8,opt,name=input_price,json=inputPrice,proto3" json:"input_price,omitempty"` + OutputPrice int32 `protobuf:"varint,9,opt,name=output_price,json=outputPrice,proto3" json:"output_price,omitempty"` + Temperature float32 `protobuf:"fixed32,10,opt,name=temperature,proto3" json:"temperature,omitempty"` + ParallelToolCalls bool `protobuf:"varint,11,opt,name=parallel_tool_calls,json=parallelToolCalls,proto3" json:"parallel_tool_calls,omitempty"` + Store bool `protobuf:"varint,12,opt,name=store,proto3" json:"store,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *CustomModel) Reset() { @@ -723,6 +726,27 @@ func (x *CustomModel) GetOutputPrice() int32 { return 0 } +func (x *CustomModel) GetTemperature() float32 { + if x != nil { + return x.Temperature + } + return 0 +} + +func (x *CustomModel) GetParallelToolCalls() bool { + if x != nil { + return x.ParallelToolCalls + } + return false +} + +func (x *CustomModel) GetStore() bool { + if x != nil { + return x.Store + } + return false +} + type Settings struct { state protoimpl.MessageState `protogen:"open.v1"` ShowShortcutsAfterSelection bool `protobuf:"varint,1,opt,name=show_shortcuts_after_selection,json=showShortcutsAfterSelection,proto3" json:"show_shortcuts_after_selection,omitempty"` @@ -1269,7 +1293,7 @@ const file_user_v1_user_proto_rawDesc = "" + "\x06prompt\x18\x01 \x01(\v2\x0f.user.v1.PromptR\x06prompt\"2\n" + "\x13DeletePromptRequest\x12\x1b\n" + "\tprompt_id\x18\x01 \x01(\tR\bpromptId\"\x16\n" + - "\x14DeletePromptResponse\"\x83\x02\n" + + "\x14DeletePromptResponse\"\xeb\x02\n" + "\vCustomModel\x12\x0e\n" + "\x02id\x18\x01 \x01(\tR\x02id\x12\x12\n" + "\x04slug\x18\x02 \x01(\tR\x04slug\x12\x12\n" + @@ -1281,7 +1305,11 @@ const file_user_v1_user_proto_rawDesc = "" + "max_output\x18\a \x01(\x05R\tmaxOutput\x12\x1f\n" + "\vinput_price\x18\b \x01(\x05R\n" + "inputPrice\x12!\n" + - "\foutput_price\x18\t \x01(\x05R\voutputPrice\"\x8f\x03\n" + + "\foutput_price\x18\t \x01(\x05R\voutputPrice\x12 \n" + + "\vtemperature\x18\n" + + " \x01(\x02R\vtemperature\x12.\n" + + "\x13parallel_tool_calls\x18\v \x01(\bR\x11parallelToolCalls\x12\x14\n" + + "\x05store\x18\f \x01(\bR\x05store\"\x8f\x03\n" + "\bSettings\x12C\n" + "\x1eshow_shortcuts_after_selection\x18\x01 \x01(\bR\x1bshowShortcutsAfterSelection\x12F\n" + " full_width_paper_debugger_button\x18\x02 \x01(\bR\x1cfullWidthPaperDebuggerButton\x12<\n" + diff --git a/proto/user/v1/user.proto b/proto/user/v1/user.proto index 9f45503a..fa5606fa 100644 --- a/proto/user/v1/user.proto +++ b/proto/user/v1/user.proto @@ -124,6 +124,9 @@ message CustomModel { int32 max_output = 7; int32 input_price = 8; int32 output_price = 9; + float temperature = 10; + bool parallel_tool_calls = 11; + bool store = 12; } message Settings { diff --git a/webapp/_webapp/src/pkg/gen/apiclient/user/v1/user_pb.ts b/webapp/_webapp/src/pkg/gen/apiclient/user/v1/user_pb.ts index 5ff1e27a..38ffdd34 100644 --- a/webapp/_webapp/src/pkg/gen/apiclient/user/v1/user_pb.ts +++ b/webapp/_webapp/src/pkg/gen/apiclient/user/v1/user_pb.ts @@ -13,7 +13,7 @@ import type { Message } from "@bufbuild/protobuf"; * Describes the file user/v1/user.proto. 
*/ export const file_user_v1_user: GenFile = /*@__PURE__*/ - fileDesc("ChJ1c2VyL3YxL3VzZXIucHJvdG8SB3VzZXIudjEiQAoEVXNlchIKCgJpZBgBIAEoCRINCgVlbWFpbBgCIAEoCRIMCgRuYW1lGAMgASgJEg8KB3BpY3R1cmUYBCABKAkiEAoOR2V0VXNlclJlcXVlc3QiLgoPR2V0VXNlclJlc3BvbnNlEhsKBHVzZXIYASABKAsyDS51c2VyLnYxLlVzZXIirAEKBlByb21wdBIKCgJpZBgBIAEoCRIuCgpjcmVhdGVkX2F0GAIgASgLMhouZ29vZ2xlLnByb3RvYnVmLlRpbWVzdGFtcBIuCgp1cGRhdGVkX2F0GAMgASgLMhouZ29vZ2xlLnByb3RvYnVmLlRpbWVzdGFtcBINCgV0aXRsZRgEIAEoCRIPCgdjb250ZW50GAUgASgJEhYKDmlzX3VzZXJfcHJvbXB0GAYgASgIIhQKEkxpc3RQcm9tcHRzUmVxdWVzdCI3ChNMaXN0UHJvbXB0c1Jlc3BvbnNlEiAKB3Byb21wdHMYASADKAsyDy51c2VyLnYxLlByb21wdCI1ChNDcmVhdGVQcm9tcHRSZXF1ZXN0Eg0KBXRpdGxlGAEgASgJEg8KB2NvbnRlbnQYAiABKAkiNwoUQ3JlYXRlUHJvbXB0UmVzcG9uc2USHwoGcHJvbXB0GAEgASgLMg8udXNlci52MS5Qcm9tcHQiSAoTVXBkYXRlUHJvbXB0UmVxdWVzdBIRCglwcm9tcHRfaWQYASABKAkSDQoFdGl0bGUYAiABKAkSDwoHY29udGVudBgDIAEoCSI3ChRVcGRhdGVQcm9tcHRSZXNwb25zZRIfCgZwcm9tcHQYASABKAsyDy51c2VyLnYxLlByb21wdCIoChNEZWxldGVQcm9tcHRSZXF1ZXN0EhEKCXByb21wdF9pZBgBIAEoCSIWChREZWxldGVQcm9tcHRSZXNwb25zZSKvAQoLQ3VzdG9tTW9kZWwSCgoCaWQYASABKAkSDAoEc2x1ZxgCIAEoCRIMCgRuYW1lGAMgASgJEhAKCGJhc2VfdXJsGAQgASgJEg8KB2FwaV9rZXkYBSABKAkSFgoOY29udGV4dF93aW5kb3cYBiABKAUSEgoKbWF4X291dHB1dBgHIAEoBRITCgtpbnB1dF9wcmljZRgIIAEoBRIUCgxvdXRwdXRfcHJpY2UYCSABKAUi+wEKCFNldHRpbmdzEiYKHnNob3dfc2hvcnRjdXRzX2FmdGVyX3NlbGVjdGlvbhgBIAEoCBIoCiBmdWxsX3dpZHRoX3BhcGVyX2RlYnVnZ2VyX2J1dHRvbhgCIAEoCBIiChplbmFibGVfY2l0YXRpb25fc3VnZ2VzdGlvbhgDIAEoCBIZChFmdWxsX2RvY3VtZW50X3JhZxgEIAEoCBIZChFzaG93ZWRfb25ib2FyZGluZxgFIAEoCBIWCg5vcGVuYWlfYXBpX2tleRgGIAEoCRIrCg1jdXN0b21fbW9kZWxzGAcgAygLMhQudXNlci52MS5DdXN0b21Nb2RlbCIUChJHZXRTZXR0aW5nc1JlcXVlc3QiOgoTR2V0U2V0dGluZ3NSZXNwb25zZRIjCghzZXR0aW5ncxgBIAEoCzIRLnVzZXIudjEuU2V0dGluZ3MiPAoVVXBkYXRlU2V0dGluZ3NSZXF1ZXN0EiMKCHNldHRpbmdzGAEgASgLMhEudXNlci52MS5TZXR0aW5ncyI9ChZVcGRhdGVTZXR0aW5nc1Jlc3BvbnNlEiMKCHNldHRpbmdzGAEgASgLMhEudXNlci52MS5TZXR0aW5ncyIWChRSZXNldFNldHRpbmdzUmVxdWVzdCI8ChVSZXNldFNldHRpbmdzUmVzcG9uc2USIwoIc2V0dGluZ3MYASABKAsyES51c2VyLnYxLlNldHRpbmdzIhwKGkdldFVzZXJJbnN0cnVjdGlvbnNSZXF1ZXN0IjMKG0dldFVzZXJJbnN0cnVjdGlvbnNSZXNwb25zZRIUCgxpbnN0cnVjdGlvbnMYASABKAkiNQodVXBzZXJ0VXNlckluc3RydWN0aW9uc1JlcXVlc3QSFAoMaW5zdHJ1Y3Rpb25zGAEgASgJIjYKHlVwc2VydFVzZXJJbnN0cnVjdGlvbnNSZXNwb25zZRIUCgxpbnN0cnVjdGlvbnMYASABKAkygwoKC1VzZXJTZXJ2aWNlEl0KB0dldFVzZXISFy51c2VyLnYxLkdldFVzZXJSZXF1ZXN0GhgudXNlci52MS5HZXRVc2VyUmVzcG9uc2UiH4LT5JMCGRIXL19wZC9hcGkvdjEvdXNlcnMvQHNlbGYScQoLTGlzdFByb21wdHMSGy51c2VyLnYxLkxpc3RQcm9tcHRzUmVxdWVzdBocLnVzZXIudjEuTGlzdFByb21wdHNSZXNwb25zZSIngtPkkwIhEh8vX3BkL2FwaS92MS91c2Vycy9Ac2VsZi9wcm9tcHRzEncKDENyZWF0ZVByb21wdBIcLnVzZXIudjEuQ3JlYXRlUHJvbXB0UmVxdWVzdBodLnVzZXIudjEuQ3JlYXRlUHJvbXB0UmVzcG9uc2UiKoLT5JMCJDoBKiIfL19wZC9hcGkvdjEvdXNlcnMvQHNlbGYvcHJvbXB0cxKDAQoMVXBkYXRlUHJvbXB0EhwudXNlci52MS5VcGRhdGVQcm9tcHRSZXF1ZXN0Gh0udXNlci52MS5VcGRhdGVQcm9tcHRSZXNwb25zZSI2gtPkkwIwOgEqGisvX3BkL2FwaS92MS91c2Vycy9Ac2VsZi9wcm9tcHRzL3twcm9tcHRfaWR9Eo4BChNHZXRVc2VySW5zdHJ1Y3Rpb25zEiMudXNlci52MS5HZXRVc2VySW5zdHJ1Y3Rpb25zUmVxdWVzdBokLnVzZXIudjEuR2V0VXNlckluc3RydWN0aW9uc1Jlc3BvbnNlIiyC0+STAiYSJC9fcGQvYXBpL3YxL3VzZXJzL0BzZWxmL2luc3RydWN0aW9ucxKaAQoWVXBzZXJ0VXNlckluc3RydWN0aW9ucxImLnVzZXIudjEuVXBzZXJ0VXNlckluc3RydWN0aW9uc1JlcXVlc3QaJy51c2VyLnYxLlVwc2VydFVzZXJJbnN0cnVjdGlvbnNSZXNwb25zZSIvgtPkkwIpOgEqIiQvX3BkL2FwaS92MS91c2Vycy9Ac2VsZi9pbnN0cnVjdGlvbnMSgAEKDERlbGV0ZVByb21wdBIcLnVzZXIudjEuRGVsZXRlUHJvbXB0UmVxdWVzdBodLnVzZXIudjEuRGVsZXRlUHJvbXB0UmVzcG9uc2UiM4LT5JMCLSorL19wZC9hcGkvdjEvdXNlcnMvQHNlbGYvcHJvbXB0cy97cHJvbXB0X2lkfRJyCgtHZXRTZXR0aW5ncxIbLnVzZXIudjEuR2V0U2V0dGluZ3NSZXF1ZXN0GhwudXNlci52MS5H
ZXRTZXR0aW5nc1Jlc3BvbnNlIiiC0+STAiISIC9fcGQvYXBpL3YxL3VzZXJzL0BzZWxmL3NldHRpbmdzEn4KDlVwZGF0ZVNldHRpbmdzEh4udXNlci52MS5VcGRhdGVTZXR0aW5nc1JlcXVlc3QaHy51c2VyLnYxLlVwZGF0ZVNldHRpbmdzUmVzcG9uc2UiK4LT5JMCJToBKhogL19wZC9hcGkvdjEvdXNlcnMvQHNlbGYvc2V0dGluZ3MSfgoNUmVzZXRTZXR0aW5ncxIdLnVzZXIudjEuUmVzZXRTZXR0aW5nc1JlcXVlc3QaHi51c2VyLnYxLlJlc2V0U2V0dGluZ3NSZXNwb25zZSIugtPkkwIoIiYvX3BkL2FwaS92MS91c2Vycy9Ac2VsZi9zZXR0aW5ncy9yZXNldEJ/Cgtjb20udXNlci52MUIJVXNlclByb3RvUAFaKHBhcGVyZGVidWdnZXIvcGtnL2dlbi9hcGkvdXNlci92MTt1c2VydjGiAgNVWFiqAgdVc2VyLlYxygIHVXNlclxWMeICE1VzZXJcVjFcR1BCTWV0YWRhdGHqAghVc2VyOjpWMWIGcHJvdG8z", [file_google_api_annotations, file_google_protobuf_timestamp]); + fileDesc("ChJ1c2VyL3YxL3VzZXIucHJvdG8SB3VzZXIudjEiQAoEVXNlchIKCgJpZBgBIAEoCRINCgVlbWFpbBgCIAEoCRIMCgRuYW1lGAMgASgJEg8KB3BpY3R1cmUYBCABKAkiEAoOR2V0VXNlclJlcXVlc3QiLgoPR2V0VXNlclJlc3BvbnNlEhsKBHVzZXIYASABKAsyDS51c2VyLnYxLlVzZXIirAEKBlByb21wdBIKCgJpZBgBIAEoCRIuCgpjcmVhdGVkX2F0GAIgASgLMhouZ29vZ2xlLnByb3RvYnVmLlRpbWVzdGFtcBIuCgp1cGRhdGVkX2F0GAMgASgLMhouZ29vZ2xlLnByb3RvYnVmLlRpbWVzdGFtcBINCgV0aXRsZRgEIAEoCRIPCgdjb250ZW50GAUgASgJEhYKDmlzX3VzZXJfcHJvbXB0GAYgASgIIhQKEkxpc3RQcm9tcHRzUmVxdWVzdCI3ChNMaXN0UHJvbXB0c1Jlc3BvbnNlEiAKB3Byb21wdHMYASADKAsyDy51c2VyLnYxLlByb21wdCI1ChNDcmVhdGVQcm9tcHRSZXF1ZXN0Eg0KBXRpdGxlGAEgASgJEg8KB2NvbnRlbnQYAiABKAkiNwoUQ3JlYXRlUHJvbXB0UmVzcG9uc2USHwoGcHJvbXB0GAEgASgLMg8udXNlci52MS5Qcm9tcHQiSAoTVXBkYXRlUHJvbXB0UmVxdWVzdBIRCglwcm9tcHRfaWQYASABKAkSDQoFdGl0bGUYAiABKAkSDwoHY29udGVudBgDIAEoCSI3ChRVcGRhdGVQcm9tcHRSZXNwb25zZRIfCgZwcm9tcHQYASABKAsyDy51c2VyLnYxLlByb21wdCIoChNEZWxldGVQcm9tcHRSZXF1ZXN0EhEKCXByb21wdF9pZBgBIAEoCSIWChREZWxldGVQcm9tcHRSZXNwb25zZSLwAQoLQ3VzdG9tTW9kZWwSCgoCaWQYASABKAkSDAoEc2x1ZxgCIAEoCRIMCgRuYW1lGAMgASgJEhAKCGJhc2VfdXJsGAQgASgJEg8KB2FwaV9rZXkYBSABKAkSFgoOY29udGV4dF93aW5kb3cYBiABKAUSEgoKbWF4X291dHB1dBgHIAEoBRITCgtpbnB1dF9wcmljZRgIIAEoBRIUCgxvdXRwdXRfcHJpY2UYCSABKAUSEwoLdGVtcGVyYXR1cmUYCiABKAISGwoTcGFyYWxsZWxfdG9vbF9jYWxscxgLIAEoCBINCgVzdG9yZRgMIAEoCCL7AQoIU2V0dGluZ3MSJgoec2hvd19zaG9ydGN1dHNfYWZ0ZXJfc2VsZWN0aW9uGAEgASgIEigKIGZ1bGxfd2lkdGhfcGFwZXJfZGVidWdnZXJfYnV0dG9uGAIgASgIEiIKGmVuYWJsZV9jaXRhdGlvbl9zdWdnZXN0aW9uGAMgASgIEhkKEWZ1bGxfZG9jdW1lbnRfcmFnGAQgASgIEhkKEXNob3dlZF9vbmJvYXJkaW5nGAUgASgIEhYKDm9wZW5haV9hcGlfa2V5GAYgASgJEisKDWN1c3RvbV9tb2RlbHMYByADKAsyFC51c2VyLnYxLkN1c3RvbU1vZGVsIhQKEkdldFNldHRpbmdzUmVxdWVzdCI6ChNHZXRTZXR0aW5nc1Jlc3BvbnNlEiMKCHNldHRpbmdzGAEgASgLMhEudXNlci52MS5TZXR0aW5ncyI8ChVVcGRhdGVTZXR0aW5nc1JlcXVlc3QSIwoIc2V0dGluZ3MYASABKAsyES51c2VyLnYxLlNldHRpbmdzIj0KFlVwZGF0ZVNldHRpbmdzUmVzcG9uc2USIwoIc2V0dGluZ3MYASABKAsyES51c2VyLnYxLlNldHRpbmdzIhYKFFJlc2V0U2V0dGluZ3NSZXF1ZXN0IjwKFVJlc2V0U2V0dGluZ3NSZXNwb25zZRIjCghzZXR0aW5ncxgBIAEoCzIRLnVzZXIudjEuU2V0dGluZ3MiHAoaR2V0VXNlckluc3RydWN0aW9uc1JlcXVlc3QiMwobR2V0VXNlckluc3RydWN0aW9uc1Jlc3BvbnNlEhQKDGluc3RydWN0aW9ucxgBIAEoCSI1Ch1VcHNlcnRVc2VySW5zdHJ1Y3Rpb25zUmVxdWVzdBIUCgxpbnN0cnVjdGlvbnMYASABKAkiNgoeVXBzZXJ0VXNlckluc3RydWN0aW9uc1Jlc3BvbnNlEhQKDGluc3RydWN0aW9ucxgBIAEoCTKDCgoLVXNlclNlcnZpY2USXQoHR2V0VXNlchIXLnVzZXIudjEuR2V0VXNlclJlcXVlc3QaGC51c2VyLnYxLkdldFVzZXJSZXNwb25zZSIfgtPkkwIZEhcvX3BkL2FwaS92MS91c2Vycy9Ac2VsZhJxCgtMaXN0UHJvbXB0cxIbLnVzZXIudjEuTGlzdFByb21wdHNSZXF1ZXN0GhwudXNlci52MS5MaXN0UHJvbXB0c1Jlc3BvbnNlIieC0+STAiESHy9fcGQvYXBpL3YxL3VzZXJzL0BzZWxmL3Byb21wdHMSdwoMQ3JlYXRlUHJvbXB0EhwudXNlci52MS5DcmVhdGVQcm9tcHRSZXF1ZXN0Gh0udXNlci52MS5DcmVhdGVQcm9tcHRSZXNwb25zZSIqgtPkkwIkOgEqIh8vX3BkL2FwaS92MS91c2Vycy9Ac2VsZi9wcm9tcHRzEoMBCgxVcGRhdGVQcm9tcHQSHC51c2VyLnYxLlVwZGF0ZVByb21wdFJlcXVlc3QaHS51c2VyLnYxLlVwZGF0ZVByb21wdFJlc3BvbnNlIjaC0+STAjA6ASoaKy9fcGQvYXBp
L3YxL3VzZXJzL0BzZWxmL3Byb21wdHMve3Byb21wdF9pZH0SjgEKE0dldFVzZXJJbnN0cnVjdGlvbnMSIy51c2VyLnYxLkdldFVzZXJJbnN0cnVjdGlvbnNSZXF1ZXN0GiQudXNlci52MS5HZXRVc2VySW5zdHJ1Y3Rpb25zUmVzcG9uc2UiLILT5JMCJhIkL19wZC9hcGkvdjEvdXNlcnMvQHNlbGYvaW5zdHJ1Y3Rpb25zEpoBChZVcHNlcnRVc2VySW5zdHJ1Y3Rpb25zEiYudXNlci52MS5VcHNlcnRVc2VySW5zdHJ1Y3Rpb25zUmVxdWVzdBonLnVzZXIudjEuVXBzZXJ0VXNlckluc3RydWN0aW9uc1Jlc3BvbnNlIi+C0+STAik6ASoiJC9fcGQvYXBpL3YxL3VzZXJzL0BzZWxmL2luc3RydWN0aW9ucxKAAQoMRGVsZXRlUHJvbXB0EhwudXNlci52MS5EZWxldGVQcm9tcHRSZXF1ZXN0Gh0udXNlci52MS5EZWxldGVQcm9tcHRSZXNwb25zZSIzgtPkkwItKisvX3BkL2FwaS92MS91c2Vycy9Ac2VsZi9wcm9tcHRzL3twcm9tcHRfaWR9EnIKC0dldFNldHRpbmdzEhsudXNlci52MS5HZXRTZXR0aW5nc1JlcXVlc3QaHC51c2VyLnYxLkdldFNldHRpbmdzUmVzcG9uc2UiKILT5JMCIhIgL19wZC9hcGkvdjEvdXNlcnMvQHNlbGYvc2V0dGluZ3MSfgoOVXBkYXRlU2V0dGluZ3MSHi51c2VyLnYxLlVwZGF0ZVNldHRpbmdzUmVxdWVzdBofLnVzZXIudjEuVXBkYXRlU2V0dGluZ3NSZXNwb25zZSIrgtPkkwIlOgEqGiAvX3BkL2FwaS92MS91c2Vycy9Ac2VsZi9zZXR0aW5ncxJ+Cg1SZXNldFNldHRpbmdzEh0udXNlci52MS5SZXNldFNldHRpbmdzUmVxdWVzdBoeLnVzZXIudjEuUmVzZXRTZXR0aW5nc1Jlc3BvbnNlIi6C0+STAigiJi9fcGQvYXBpL3YxL3VzZXJzL0BzZWxmL3NldHRpbmdzL3Jlc2V0Qn8KC2NvbS51c2VyLnYxQglVc2VyUHJvdG9QAVoocGFwZXJkZWJ1Z2dlci9wa2cvZ2VuL2FwaS91c2VyL3YxO3VzZXJ2MaICA1VYWKoCB1VzZXIuVjHKAgdVc2VyXFYx4gITVXNlclxWMVxHUEJNZXRhZGF0YeoCCFVzZXI6OlYxYgZwcm90bzM", [file_google_api_annotations, file_google_protobuf_timestamp]); /** * @generated from message user.v1.User @@ -310,6 +310,21 @@ export type CustomModel = Message<"user.v1.CustomModel"> & { * @generated from field: int32 output_price = 9; */ outputPrice: number; + + /** + * @generated from field: float temperature = 10; + */ + temperature: number; + + /** + * @generated from field: bool parallel_tool_calls = 11; + */ + parallelToolCalls: boolean; + + /** + * @generated from field: bool store = 12; + */ + store: boolean; }; /** diff --git a/webapp/_webapp/src/views/settings/sections/api-key-settings.tsx b/webapp/_webapp/src/views/settings/sections/api-key-settings.tsx index 126f0d5d..c58d6dce 100644 --- a/webapp/_webapp/src/views/settings/sections/api-key-settings.tsx +++ b/webapp/_webapp/src/views/settings/sections/api-key-settings.tsx @@ -1,4 +1,4 @@ -import { Fragment, useState } from "react"; +import { Fragment, useEffect, useState } from "react"; import { Icon } from "@iconify/react"; import { Modal } from "../../../components/modal"; import { SettingsSectionContainer, SettingsSectionTitle } from "./components"; @@ -18,6 +18,16 @@ export const ApiKeySettings = () => { customModels: otherCustomModels, }); } else { + const hasDuplicate = otherCustomModels.some( + (model) => + model.name.trim().toLowerCase() === newModel.name.trim().toLowerCase() && + model.slug.trim().toLowerCase() === newModel.slug.trim().toLowerCase(), + ); + + if (hasDuplicate) { + throw new Error("A model with the same name and slug already exists."); + } + await updateSettings({ customModels: [ ...otherCustomModels, @@ -31,6 +41,9 @@ export const ApiKeySettings = () => { maxOutput: newModel.maxOutput, inputPrice: newModel.inputPrice, outputPrice: newModel.outputPrice, + temperature: newModel.temperature, + parallelToolCalls: newModel.parallelToolCalls, + store: newModel.store, }, ], }); @@ -49,26 +62,31 @@ export const ApiKeySettings = () => { content={
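
Reviewer sketch (not part of the patch): the new branch in getDefaultParamsV2 derives per-request parameters from the user-defined CustomModel instead of hard-coded defaults, and only sets Store when it is true because some OpenAI-compatible providers are said to reject the field even when it is false. The Go program below illustrates that mapping under simplified assumptions; CustomModel and ChatParams here are stand-ins for models.CustomModel and openaiv3.ChatCompletionNewParams, and the field values are made up for the example.

// Reviewer sketch, not part of the patch. Stand-in types approximate
// models.CustomModel and the OpenAI request params used in utils_v2.go.
package main

import "fmt"

type CustomModel struct {
	Slug              string
	Temperature       float32
	MaxOutput         int32
	ParallelToolCalls bool
	Store             bool
}

type ChatParams struct {
	Model               string
	Temperature         float64
	MaxCompletionTokens int64
	ParallelToolCalls   bool
	Store               *bool // nil means the field is omitted from the request
}

// paramsForCustomModel mirrors the custom-model branch: copy the user's
// settings into the request and only set Store when it is explicitly true.
func paramsForCustomModel(m CustomModel) ChatParams {
	p := ChatParams{
		Model:               m.Slug,
		Temperature:         float64(m.Temperature),
		MaxCompletionTokens: int64(m.MaxOutput),
		ParallelToolCalls:   m.ParallelToolCalls,
	}
	if m.Store {
		v := true
		p.Store = &v
	}
	return p
}

func main() {
	m := CustomModel{Slug: "my-model", Temperature: 0.5, MaxOutput: 8192}
	fmt.Printf("%+v\n", paramsForCustomModel(m))
	// Prints: {Model:my-model Temperature:0.5 MaxCompletionTokens:8192 ParallelToolCalls:false Store:<nil>}
}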