Try to resolve import cycle by following existing enterprise resolvers pattern

I hope this fixes your import cycle (didn't run it, need to grab dinner now) by making this code use the indirection via enterpriseServices.
Erik Seliger 2024-07-15 22:31:56 +02:00
parent a8b8645495
commit 87d956abe2
7 changed files with 191 additions and 136 deletions
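In other words, graphqlbackend declares only an interface plus a registration slot, and the enterprise modelconfig package injects its implementation at startup, so the import edge points from the enterprise code to graphqlbackend and never back. A minimal single-file sketch of that shape, with simplified, hypothetical names rather than the actual Sourcegraph types:

```go
package main

import "fmt"

// Declared in graphqlbackend in the real change: the low-level package
// only knows the interface, never the implementing package.
type ModelconfigResolver interface {
	Provider() string
}

// The registration slot, analogous to EnterpriseResolvers.modelconfigResolver
// in graphql.go below.
var EnterpriseResolvers = struct {
	ModelconfigResolver ModelconfigResolver
}{}

// Lives in the enterprise package in the real change; it can import the
// interface package freely because nothing imports it back.
type modelconfigResolver struct{}

func (modelconfigResolver) Provider() string { return "example-provider" }

func main() {
	// Done by Init/NewSchema in the real change.
	EnterpriseResolvers.ModelconfigResolver = modelconfigResolver{}
	fmt.Println(EnterpriseResolvers.ModelconfigResolver.Provider())
}
```

In the change itself, Init sets enterpriseServices.ModelconfigResolver, NewSchema copies it into EnterpriseResolvers.modelconfigResolver, and siteResolver.CodyLLMConfiguration calls through it.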

View File

@@ -99,6 +99,7 @@ go_library(
"license.go",
"location.go",
"markdown.go",
"modelconfig.go",
"namespaces.go",
"node.go",
"notebooks.go",

View File

@@ -626,6 +626,11 @@ func NewSchema(
resolver.TelemetryRootResolver = telemetryResolver
schemas = append(schemas, telemetrySchema)
}
if modelconfigResolver := optional.ModelconfigResolver; modelconfigResolver != nil {
EnterpriseResolvers.modelconfigResolver = modelconfigResolver
resolver.ModelconfigResolver = modelconfigResolver
}
}
opts := []graphql.SchemaOpt{
@@ -679,6 +684,7 @@ type OptionalResolver struct {
SearchContextsResolver
WebhooksResolver
ContentLibraryResolver
ModelconfigResolver
*TelemetryRootResolver
}
@@ -801,6 +807,7 @@ var EnterpriseResolvers = struct {
webhooksResolver WebhooksResolver
contentLibraryResolver ContentLibraryResolver
telemetryResolver *TelemetryRootResolver
modelconfigResolver ModelconfigResolver
}{}
// Root returns a new schemaResolver.

View File

@@ -0,0 +1,19 @@
package graphqlbackend

import "context"

type ModelconfigResolver interface {
	CodyLLMConfiguration(ctx context.Context) (CodyLLMConfigurationResolver, error)
}

type CodyLLMConfigurationResolver interface {
	ChatModel() (string, error)
	ChatModelMaxTokens() (*int32, error)
	SmartContextWindow() string
	DisableClientConfigAPI() bool
	FastChatModel() (string, error)
	FastChatModelMaxTokens() (*int32, error)
	Provider() string
	CompletionModel() (string, error)
	CompletionModelMaxTokens() (*int32, error)
}

View File

@@ -3,7 +3,6 @@ package graphqlbackend
import (
"bytes"
"context"
"fmt"
"os"
"strconv"
"strings"
@@ -18,7 +17,6 @@ import (
"github.com/sourcegraph/sourcegraph/cmd/frontend/graphqlbackend/graphqlutil"
"github.com/sourcegraph/sourcegraph/cmd/frontend/internal/cody"
"github.com/sourcegraph/sourcegraph/cmd/frontend/internal/modelconfig"
"github.com/sourcegraph/sourcegraph/internal/actor"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/auth"
@@ -50,8 +48,6 @@ import (
"github.com/sourcegraph/sourcegraph/lib/pointers"
"github.com/sourcegraph/sourcegraph/cmd/frontend/globals"
modelconfigSDK "github.com/sourcegraph/sourcegraph/internal/modelconfig/types"
)
const singletonSiteGQLID = "site"
@@ -626,21 +622,8 @@ func (r *siteResolver) IsCodyEnabled(ctx context.Context) bool {
return enabled
}
func (r *siteResolver) CodyLLMConfiguration(ctx context.Context) (*codyLLMConfigurationResolver, error) {
siteConfig := conf.Get().SiteConfig()
modelCfgSvc := modelconfig.Get()
modelconfig, err := modelCfgSvc.Get()
if err != nil {
r.logger.Warn("error obtaining model configuration data", log.Error(err))
return nil, errors.New("error fetching model configuration data")
}
resolver := &codyLLMConfigurationResolver{
modelconfig: modelconfig,
doNotUseCompletionsConfig: siteConfig.Completions,
}
return resolver, nil
func (r *siteResolver) CodyLLMConfiguration(ctx context.Context) (CodyLLMConfigurationResolver, error) {
return EnterpriseResolvers.modelconfigResolver.CodyLLMConfiguration(ctx)
}
func (r *siteResolver) CodyConfigFeatures(ctx context.Context) *codyConfigFeaturesResolver {
@@ -660,123 +643,6 @@ func (c *codyConfigFeaturesResolver) AutoComplete() bool { return c.config.AutoC
func (c *codyConfigFeaturesResolver) Commands() bool { return c.config.Commands }
func (c *codyConfigFeaturesResolver) Attribution() bool { return c.config.Attribution }
type codyLLMConfigurationResolver struct {
// modelconfig is the LLM model configuration data for this Sourcegraph instance.
// This is the source of truth and accurately reflects the site configuration.
modelconfig *modelconfigSDK.ModelConfiguration
// doNotUseCompletionsConfig is the older-style configuration data for Cody
// Enterprise, and is only passed along for backwards compatibility.
//
// DO NOT USE IT.
//
// The information it returns is only looking at the "completions" site config
// data, which may not even be provided. Only read from this value if you really
// know what you are doing.
doNotUseCompletionsConfig *schema.Completions
}
// toLegacyModelIdentifier converts the "new style" model identity into the old style
// expected by Cody Clients.
//
// This is dangerous, as it will only work if this Sourcegraph backend AND Cody Gateway
// can correctly map the legacy identifier into the correct ModelRef.
//
// Once Cody Clients are capable of natively using the modelref format, we should remove
// this function and have all of our GraphQL APIs only refer to models using a ModelRef.
func toLegacyModelIdentifier(mref modelconfigSDK.ModelRef) string {
return fmt.Sprintf("%s/%s", mref.ProviderID(), mref.ModelID())
}
func (c *codyLLMConfigurationResolver) ChatModel() (string, error) {
defaultChatModelRef := c.modelconfig.DefaultModels.Chat
model := c.modelconfig.GetModelByMRef(defaultChatModelRef)
if model == nil {
return "", errors.Errorf("default chat model %q not found", defaultChatModelRef)
}
return toLegacyModelIdentifier(model.ModelRef), nil
}
func (c *codyLLMConfigurationResolver) ChatModelMaxTokens() (*int32, error) {
defaultChatModelRef := c.modelconfig.DefaultModels.Chat
model := c.modelconfig.GetModelByMRef(defaultChatModelRef)
if model == nil {
return nil, errors.Errorf("default chat model %q not found", defaultChatModelRef)
}
maxTokens := int32(model.ContextWindow.MaxInputTokens)
return &maxTokens, nil
}
func (c *codyLLMConfigurationResolver) SmartContextWindow() string {
if c.doNotUseCompletionsConfig != nil {
if c.doNotUseCompletionsConfig.SmartContextWindow == "disabled" {
return "disabled"
} else {
return "enabled"
}
}
// If the admin has explicitly provided the newer "modelConfiguration" site config
// data, disable SmartContextWindow.
//
// BUG: This probably should be "enabled", but it isn't clear what this actually
// means relative to LLM model configuration.
return "disabled"
}
func (c *codyLLMConfigurationResolver) DisableClientConfigAPI() bool {
if c.doNotUseCompletionsConfig != nil {
if val := c.doNotUseCompletionsConfig.DisableClientConfigAPI; val != nil {
return *val
}
}
return false
}
func (c *codyLLMConfigurationResolver) FastChatModel() (string, error) {
defaultFastChatModelRef := c.modelconfig.DefaultModels.FastChat
model := c.modelconfig.GetModelByMRef(defaultFastChatModelRef)
if model == nil {
return "", errors.Errorf("default fast chat model %q not found", defaultFastChatModelRef)
}
return toLegacyModelIdentifier(model.ModelRef), nil
}
func (c *codyLLMConfigurationResolver) FastChatModelMaxTokens() (*int32, error) {
defaultFastChatModelRef := c.modelconfig.DefaultModels.FastChat
model := c.modelconfig.GetModelByMRef(defaultFastChatModelRef)
if model == nil {
return nil, errors.Errorf("default fast chat model %q not found", defaultFastChatModelRef)
}
maxTokens := int32(model.ContextWindow.MaxInputTokens)
return &maxTokens, nil
}
func (c *codyLLMConfigurationResolver) Provider() string {
if len(c.modelconfig.Providers) != 1 {
return "various"
}
return c.modelconfig.Providers[0].DisplayName
}
func (c *codyLLMConfigurationResolver) CompletionModel() (string, error) {
defaultCompletionModel := c.modelconfig.DefaultModels.CodeCompletion
model := c.modelconfig.GetModelByMRef(defaultCompletionModel)
if model == nil {
return "", errors.Errorf("default code completion model %q not found", defaultCompletionModel)
}
return toLegacyModelIdentifier(model.ModelRef), nil
}
func (c *codyLLMConfigurationResolver) CompletionModelMaxTokens() (*int32, error) {
defaultCompletionModel := c.modelconfig.DefaultModels.CodeCompletion
model := c.modelconfig.GetModelByMRef(defaultCompletionModel)
if model == nil {
return nil, errors.Errorf("default code completion model %q not found", defaultCompletionModel)
}
maxTokens := int32(model.ContextWindow.MaxInputTokens)
return &maxTokens, nil
}
type CodyContextFiltersArgs struct {
Version string
}

View File

@@ -7,6 +7,7 @@ go_library(
"builder.go",
"httpapi.go",
"init.go",
"resolvers.go",
"service.go",
"siteconfig.go",
"siteconfig_completions.go",
@@ -16,6 +17,7 @@ go_library(
visibility = ["//cmd/frontend:__subpackages__"],
deps = [
"//cmd/frontend/enterprise",
"//cmd/frontend/graphqlbackend",
"//cmd/frontend/internal/auth",
"//cmd/frontend/internal/registry",
"//internal/actor",

View File

@@ -104,6 +104,8 @@ func Init(
singletonConfigService.set(updatedConfig)
})
enterpriseServices.ModelconfigResolver = newResolver(logger)
// TODO(chrsmith): When the enhanced model configuration data is available, if configured to do so
// we will spawn a background job that will poll Cody Gateway for any updated model information. This
// will be tricky, because we want to honor any dynamic changes to the site config. e.g. the `conf.Watch`
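For illustration, a rough shape that poller might take: a sketch only, assuming conf.Watch runs its callback once up front and again after every site-config change (as its use earlier in Init suggests); pollCodyGateway, the restart-on-config-change strategy, and the 10-minute interval are all hypothetical, not part of this commit.

```go
package modelconfig

import (
	"context"
	"time"

	"github.com/sourcegraph/sourcegraph/internal/conf"
)

// pollCodyGateway is a hypothetical stand-in for the real fetch of updated
// model metadata from Cody Gateway.
func pollCodyGateway(ctx context.Context) { _ = conf.Get().SiteConfig() }

// startGatewayPoller restarts a background poller whenever the site
// configuration changes, so dynamic config edits are honored.
func startGatewayPoller(ctx context.Context) {
	cancel := func() {}
	conf.Watch(func() {
		cancel() // stop the poller built from the previous config
		var pollCtx context.Context
		pollCtx, cancel = context.WithCancel(ctx)
		go func() {
			ticker := time.NewTicker(10 * time.Minute)
			defer ticker.Stop()
			for {
				select {
				case <-pollCtx.Done():
					return
				case <-ticker.C:
					pollCodyGateway(pollCtx)
				}
			}
		}()
	})
}
```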

View File

@@ -0,0 +1,158 @@
package modelconfig

import (
	"context"
	"fmt"

	"github.com/sourcegraph/log"

	"github.com/sourcegraph/sourcegraph/cmd/frontend/graphqlbackend"
	"github.com/sourcegraph/sourcegraph/internal/conf"
	modelconfigSDK "github.com/sourcegraph/sourcegraph/internal/modelconfig/types"
	"github.com/sourcegraph/sourcegraph/lib/errors"
	"github.com/sourcegraph/sourcegraph/schema"
)

func newResolver(logger log.Logger) graphqlbackend.ModelconfigResolver {
	return &modelconfigResolver{logger: logger}
}

type modelconfigResolver struct {
	logger log.Logger
}

func (r *modelconfigResolver) CodyLLMConfiguration(ctx context.Context) (graphqlbackend.CodyLLMConfigurationResolver, error) {
	siteConfig := conf.Get().SiteConfig()
	modelCfgSvc := Get()
	modelconfig, err := modelCfgSvc.Get()
	if err != nil {
		r.logger.Warn("error obtaining model configuration data", log.Error(err))
		return nil, errors.New("error fetching model configuration data")
	}
	resolver := &codyLLMConfigurationResolver{
		modelconfig:               modelconfig,
		doNotUseCompletionsConfig: siteConfig.Completions,
	}
	return resolver, nil
}

type codyLLMConfigurationResolver struct {
	// modelconfig is the LLM model configuration data for this Sourcegraph instance.
	// This is the source of truth and accurately reflects the site configuration.
	modelconfig *modelconfigSDK.ModelConfiguration

	// doNotUseCompletionsConfig is the older-style configuration data for Cody
	// Enterprise, and is only passed along for backwards compatibility.
	//
	// DO NOT USE IT.
	//
	// The information it returns is only looking at the "completions" site config
	// data, which may not even be provided. Only read from this value if you really
	// know what you are doing.
	doNotUseCompletionsConfig *schema.Completions
}

// toLegacyModelIdentifier converts the "new style" model identity into the old style
// expected by Cody Clients.
//
// This is dangerous, as it will only work if this Sourcegraph backend AND Cody Gateway
// can correctly map the legacy identifier into the correct ModelRef.
//
// Once Cody Clients are capable of natively using the modelref format, we should remove
// this function and have all of our GraphQL APIs only refer to models using a ModelRef.
func toLegacyModelIdentifier(mref modelconfigSDK.ModelRef) string {
	return fmt.Sprintf("%s/%s", mref.ProviderID(), mref.ModelID())
}
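
// For example (illustrative; assuming the "provider::api-version::model"
// ModelRef syntax, which this commit does not pin down), a ModelRef
// "anthropic::2023-06-01::claude-3-sonnet" would map to the legacy
// identifier "anthropic/claude-3-sonnet".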

func (c *codyLLMConfigurationResolver) ChatModel() (string, error) {
	defaultChatModelRef := c.modelconfig.DefaultModels.Chat
	model := c.modelconfig.GetModelByMRef(defaultChatModelRef)
	if model == nil {
		return "", errors.Errorf("default chat model %q not found", defaultChatModelRef)
	}
	return toLegacyModelIdentifier(model.ModelRef), nil
}

func (c *codyLLMConfigurationResolver) ChatModelMaxTokens() (*int32, error) {
	defaultChatModelRef := c.modelconfig.DefaultModels.Chat
	model := c.modelconfig.GetModelByMRef(defaultChatModelRef)
	if model == nil {
		return nil, errors.Errorf("default chat model %q not found", defaultChatModelRef)
	}
	maxTokens := int32(model.ContextWindow.MaxInputTokens)
	return &maxTokens, nil
}

func (c *codyLLMConfigurationResolver) SmartContextWindow() string {
	if c.doNotUseCompletionsConfig != nil {
		if c.doNotUseCompletionsConfig.SmartContextWindow == "disabled" {
			return "disabled"
		} else {
			return "enabled"
		}
	}

	// If the admin has explicitly provided the newer "modelConfiguration" site config
	// data, disable SmartContextWindow.
	//
	// BUG: This probably should be "enabled", but it isn't clear what this actually
	// means relative to LLM model configuration.
	return "disabled"
}

func (c *codyLLMConfigurationResolver) DisableClientConfigAPI() bool {
	if c.doNotUseCompletionsConfig != nil {
		if val := c.doNotUseCompletionsConfig.DisableClientConfigAPI; val != nil {
			return *val
		}
	}
	return false
}

func (c *codyLLMConfigurationResolver) FastChatModel() (string, error) {
	defaultFastChatModelRef := c.modelconfig.DefaultModels.FastChat
	model := c.modelconfig.GetModelByMRef(defaultFastChatModelRef)
	if model == nil {
		return "", errors.Errorf("default fast chat model %q not found", defaultFastChatModelRef)
	}
	return toLegacyModelIdentifier(model.ModelRef), nil
}

func (c *codyLLMConfigurationResolver) FastChatModelMaxTokens() (*int32, error) {
	defaultFastChatModelRef := c.modelconfig.DefaultModels.FastChat
	model := c.modelconfig.GetModelByMRef(defaultFastChatModelRef)
	if model == nil {
		return nil, errors.Errorf("default fast chat model %q not found", defaultFastChatModelRef)
	}
	maxTokens := int32(model.ContextWindow.MaxInputTokens)
	return &maxTokens, nil
}

func (c *codyLLMConfigurationResolver) Provider() string {
	if len(c.modelconfig.Providers) != 1 {
		return "various"
	}
	return c.modelconfig.Providers[0].DisplayName
}

func (c *codyLLMConfigurationResolver) CompletionModel() (string, error) {
	defaultCompletionModel := c.modelconfig.DefaultModels.CodeCompletion
	model := c.modelconfig.GetModelByMRef(defaultCompletionModel)
	if model == nil {
		return "", errors.Errorf("default code completion model %q not found", defaultCompletionModel)
	}
	return toLegacyModelIdentifier(model.ModelRef), nil
}

func (c *codyLLMConfigurationResolver) CompletionModelMaxTokens() (*int32, error) {
	defaultCompletionModel := c.modelconfig.DefaultModels.CodeCompletion
	model := c.modelconfig.GetModelByMRef(defaultCompletionModel)
	if model == nil {
		return nil, errors.Errorf("default code completion model %q not found", defaultCompletionModel)
	}
	maxTokens := int32(model.ContextWindow.MaxInputTokens)
	return &maxTokens, nil
}