remove cody code that was moved to separate cody repo (#54729)

Cody code is being moved to https://github.com/sourcegraph/cody. This PR
removes the moved code and configures this repository to use the
`@sourcegraph/cody-{shared,ui}` packages published by the new separate
Cody repository.

## Test plan

Ensure that the new separate Cody repository tests pass. Ensure that
this repository's tests pass now that it uses 2 npm packages published
by the new separate Cody repository.
This commit is contained in:
Quinn Slack 2023-07-10 00:31:47 -10:00 committed by GitHub
parent 84d0a612da
commit 006bd2fea3
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
438 changed files with 209 additions and 33496 deletions

View File

@ -6,14 +6,6 @@ client/browser/node_modules
client/build-config/node_modules
client/client-api/node_modules
client/codeintellify/node_modules
client/cody/node_modules
client/cody-cli/node_modules
client/cody-icons-font/node_modules
client/cody-shared/node_modules
client/cody-slack/node_modules
client/cody-agent/node_modules
client/cody-ui/node_modules
client/cody-web/node_modules
client/common/node_modules
client/eslint-plugin-wildcard/node_modules
client/extension-api/node_modules

18
.vscode/launch.json vendored
View File

@ -7,24 +7,6 @@
"request": "attach",
"mode": "local"
},
{
"name": "Launch Cody Extension",
"type": "extensionHost",
"request": "launch",
"runtimeExecutable": "${execPath}",
"preLaunchTask": "Build cody",
"args": [
"--extensionDevelopmentPath=${workspaceRoot}/client/cody",
"--disable-extension=hpargecruos.kodj",
"--disable-extension=sourcegraph.cody-ai"
],
"sourceMaps": true,
"outFiles": ["${workspaceRoot}/client/cody/dist/*.js"],
"env": {
"NODE_ENV": "development",
"CODY_FOCUS_ON_STARTUP": "1"
}
},
{
"name": "Launch sg rfc list",
"type": "go",

9
.vscode/tasks.json vendored
View File

@ -16,15 +16,6 @@
"command": ["node_modules/.bin/tsc"],
"args": ["--build", "tsconfig.all.json", "--watch", "--incremental"]
},
{
"label": "Build cody",
"detail": "Build cody.",
"type": "npm",
"script": "build:dev",
"problemMatcher": "$tsc-watch",
"options": { "cwd": "client/cody" },
"isBackground": true
},
{
"label": "Watch web app",
"detail": "Watch files and build the JavaScript bundle (no development server).",

View File

@ -21,6 +21,7 @@ All notable changes to Sourcegraph are documented in this file.
### Changed
- Cody source code (for the VS Code extension, CLI, and client shared libraries) has been moved to the [sourcegraph/cody repository](https://github.com/sourcegraph/cody).
- `golang.org/x/net/trace` instrumentation, previously available under `/debug/requests` and `/debug/events`, has been removed entirely from core Sourcegraph services. It remains available for Zoekt. [#53795](https://github.com/sourcegraph/sourcegraph/pull/53795)
### Fixed

View File

@ -1,2 +0,0 @@
/dist/
/out/

View File

@ -1,21 +0,0 @@
// @ts-check
const baseConfig = require('../../.eslintrc')
module.exports = {
extends: '../../.eslintrc.js',
parserOptions: {
...baseConfig.parserOptions,
project: [__dirname + '/tsconfig.json'],
},
overrides: baseConfig.overrides,
rules: {
'no-console': 'off',
'id-length': 'off',
'no-restricted-imports': [
'error',
{
paths: ['!highlight.js'],
},
],
},
}

View File

@ -1 +0,0 @@
/dist

View File

@ -1,50 +0,0 @@
load("@aspect_rules_ts//ts:defs.bzl", "ts_config")
load("@npm//:defs.bzl", "npm_link_all_packages")
load("//dev:defs.bzl", "ts_project")
load("//dev:eslint.bzl", "eslint_config_and_lint_root")
npm_link_all_packages(name = "node_modules")
eslint_config_and_lint_root()
ts_config(
name = "tsconfig",
src = "tsconfig.json",
visibility = ["//client:__subpackages__"],
deps = [
"//:tsconfig",
"//client/cody-shared:tsconfig",
"//client/common:tsconfig",
],
)
ts_project(
name = "cody-agent",
srcs = [
"src/agent.ts",
"src/editor.ts",
"src/index.ts",
"src/jsonrpc.ts",
"src/offsets.ts",
"src/protocol.ts",
],
tsconfig = ":tsconfig",
deps = [
":node_modules/@sourcegraph/cody-shared",
"//:node_modules/@types/node",
],
)
ts_project(
name = "cody-agent_tests",
testonly = True,
srcs = [
"src/index.test.ts",
],
tsconfig = ":tsconfig",
deps = [
":cody-agent",
":node_modules/@sourcegraph/cody-shared",
"//:node_modules/@types/node",
],
)

View File

@ -1,3 +0,0 @@
# See https://github.com/sourcegraph/codenotify for documentation.
src/protocol.ts @olafurpg

View File

@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2022 Sourcegraph, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -1,54 +0,0 @@
# Cody agent
The `@sourcegraph/cody-agent` package implements a JSON-RPC server to interact
with Cody via stdout/stdin. This package is intended to be used by
non-ECMAScript clients such as the JetBrains and NeoVim plugins.
## Protocol
The protocol is defined in the file [`src/protocol.ts`](src/protocol.ts). The
TypeScript code is the single source of truth of what JSON-RPC methods are
supported in the protocol.
## Updating the protocol
Directly edit the TypeScript source code to add new JSON-RPC methods or add
properties to existing data structures.
The agent is a new project that is being actively worked on at the time of this
writing. The protocol is subject to breaking changes without notice. Please
let us know if you are implementing an agent client.
## Client bindings
There's no tool to automatically generate bindings for the Cody agent protocol.
Currently, clients have to manually write bindings for the JSON-RPC methods.
## Useful commands
- The command `pnpm run build-agent-binaries` builds standalone binaries for
macOS, Linux, and Windows. By default, the binaries get written to the `dist/`
directory. The destination directory can be configured with the environment
variable `AGENT_EXECUTABLE_TARGET_DIRECTORY`.
- The command `pnpm run test` runs the agent against a minimized testing client.
The tests are disabled in CI because they run against an actual Sourcegraph
instance. Set the environment variables `SRC_ENDPOINT` and `SRC_ACCESS_TOKEN`
to run the tests against an actual Sourcegraph instance.
See the file [`src/index.test.ts`](src/index.test.ts) for a detailed but minimized example
interaction between an agent client and agent server.
## Client implementations
- The Sourcegraph JetBrains plugin is defined in the sibling directory
[`client/jetbrains`](../jetbrains/README.md). The file
[`CodyAgentClient.java`](../jetbrains/src/main/java/com/sourcegraph/agent/CodyAgentClient.java)
implements the client-side of the protocol.
## Miscellaneous notes
- By the nature of using JSON-RPC via stdin/stdout, both the agent server and
client run on the same computer and there can only be one client per server.
It's normal for both the client and server to be stateful processes. For
example, the `connectionConfiguration/didChange` notification is sent from the
client to the server to notify that subsequent requests should use the new
connection configuration.

View File

@ -1,11 +0,0 @@
// @ts-check
/** @type {import('@jest/types').Config.InitialOptions} */
const config = require('../../jest.config.base')
/** @type {import('@jest/types').Config.InitialOptions} */
module.exports = {
...config,
displayName: 'cody-agent',
rootDir: __dirname,
}

View File

@ -1,29 +0,0 @@
{
"private": true,
"name": "@sourcegraph/cody-agent",
"version": "0.0.1",
"description": "Cody JSON-RPC agent for consistent cross-editor support",
"license": "Apache-2.0",
"repository": {
"type": "git",
"url": "https://github.com/sourcegraph/sourcegraph.git",
"directory": "client/cody-agent"
},
"main": "src/index.ts",
"sideEffects": false,
"scripts": {
"build": "esbuild ./src/index.ts --bundle --outfile=dist/agent.js --format=cjs --platform=node",
"build-ts": "tsc -b",
"build-agent-binaries": "pnpm run build && pkg -t node16-linux-arm64,node16-linux-x64,node16-macos-arm64,node16-macos-x64,node16-win-x64 dist/agent.js --out-path ${AGENT_EXECUTABLE_TARGET_DIRECTORY:-dist}",
"lint": "pnpm run lint:js",
"lint:js": "eslint --cache '**/*.[tj]s?(x)'",
"test": "pnpm run build && jest"
},
"dependencies": {
"@sourcegraph/cody-shared": "workspace:*",
"@sourcegraph/common": "workspace:*"
},
"devDependencies": {
"pkg": "^5.8.1"
}
}

View File

@ -1,100 +0,0 @@
import { Client, createClient } from '@sourcegraph/cody-shared/src/chat/client'
import { registeredRecipes } from '@sourcegraph/cody-shared/src/chat/recipes/agent-recipes'
import { SourcegraphNodeCompletionsClient } from '@sourcegraph/cody-shared/src/sourcegraph-api/completions/nodeClient'
import { AgentEditor } from './editor'
import { MessageHandler } from './jsonrpc'
import { ConnectionConfiguration, TextDocument } from './protocol'
/**
 * The Cody agent: a JSON-RPC server (spoken over stdin/stdout) that lets
 * non-ECMAScript clients such as the JetBrains and Neovim plugins talk to
 * Cody. Tracks the client's workspace root, the active document, and the set
 * of open documents, and forwards recipe execution to the shared Cody client.
 */
export class Agent extends MessageHandler {
    // Lazily (re)created whenever the connection configuration changes.
    private client?: Promise<Client>

    // Workspace root reported by the client in 'initialize'; null until then.
    public workspaceRootPath: string | null = null

    // File path of the most recently focused/opened/edited document.
    public activeDocumentFilePath: string | null = null

    // All currently open documents, keyed by file path.
    public documents: Map<string, TextDocument> = new Map()

    constructor() {
        super()
        // Bootstrap a client from environment variables so the agent is usable
        // before 'initialize' delivers a real connection configuration.
        this.setClient({
            customHeaders: {},
            accessToken: process.env.SRC_ACCESS_TOKEN || '',
            serverEndpoint: process.env.SRC_ENDPOINT || 'https://sourcegraph.com',
        })

        this.registerRequest('initialize', client => {
            // Log to stderr: stdout is reserved for JSON-RPC messages.
            process.stderr.write(
                `Cody Agent: handshake with client '${client.name}' (version '${client.version}') at workspace root path '${client.workspaceRootPath}'\n`
            )
            this.workspaceRootPath = client.workspaceRootPath
            if (client.connectionConfiguration) {
                this.setClient(client.connectionConfiguration)
            }
            return Promise.resolve({
                name: 'cody-agent',
            })
        })
        this.registerNotification('initialized', () => {})

        this.registerRequest('shutdown', () => Promise.resolve(null))
        this.registerNotification('exit', () => {
            process.exit(0)
        })

        // Document lifecycle notifications keep this.documents in sync with
        // the client's editor state.
        this.registerNotification('textDocument/didFocus', document => {
            this.activeDocumentFilePath = document.filePath
        })
        this.registerNotification('textDocument/didOpen', document => {
            this.documents.set(document.filePath, document)
            this.activeDocumentFilePath = document.filePath
        })
        this.registerNotification('textDocument/didChange', document => {
            // A change notification may omit the content; fall back to the
            // last known content for that file.
            if (document.content === undefined) {
                document.content = this.documents.get(document.filePath)?.content
            }
            this.documents.set(document.filePath, document)
            this.activeDocumentFilePath = document.filePath
        })
        this.registerNotification('textDocument/didClose', document => {
            this.documents.delete(document.filePath)
        })

        this.registerNotification('connectionConfiguration/didChange', config => {
            this.setClient(config)
        })

        this.registerRequest('recipes/list', () =>
            Promise.resolve(
                Object.values(registeredRecipes).map(({ id }) => ({
                    id,
                    title: id, // TODO: human-readable titles will be added in a follow-up PR
                }))
            )
        )
        this.registerRequest('recipes/execute', async data => {
            const client = await this.client
            if (!client) {
                return null
            }
            await client.executeRecipe(data.id, {
                humanChatInput: data.humanChatInput,
            })
            return null
        })
    }

    /** Replace the underlying Cody client using the given connection configuration. */
    private setClient(config: ConnectionConfiguration): void {
        this.client = createClient({
            editor: new AgentEditor(this),
            config: { ...config, useContext: 'none' },
            setMessageInProgress: messageInProgress => {
                // Stream partial chat responses back to the client.
                this.notify('chat/updateMessageInProgress', messageInProgress)
            },
            setTranscript: () => {
                // Not supported yet by agent.
            },
            createCompletionsClient: config => new SourcegraphNodeCompletionsClient(config),
        })
    }
}

View File

@ -1,99 +0,0 @@
import {
ActiveTextEditor,
ActiveTextEditorSelection,
ActiveTextEditorViewControllers,
ActiveTextEditorVisibleContent,
Editor,
} from '@sourcegraph/cody-shared/src/editor'
import { Agent } from './agent'
import { DocumentOffsets } from './offsets'
import { TextDocument } from './protocol'
/**
 * An Editor implementation backed by the agent's document state instead of a
 * real editor UI. Interactive operations (fixups, quick picks, input boxes,
 * warnings) are unsupported and throw.
 */
export class AgentEditor implements Editor {
    public controllers?: ActiveTextEditorViewControllers | undefined

    constructor(private agent: Agent) {}

    public didReceiveFixupText(): Promise<void> {
        throw new Error('Method not implemented.')
    }

    public getWorkspaceRootPath(): string | null {
        return this.agent.workspaceRootPath
    }

    /** The document that currently has focus, if any. */
    private activeDocument(): TextDocument | undefined {
        const filePath = this.agent.activeDocumentFilePath
        return filePath === null ? undefined : this.agent.documents.get(filePath)
    }

    public getActiveTextEditor(): ActiveTextEditor | null {
        const doc = this.activeDocument()
        return doc === undefined
            ? null
            : {
                  filePath: doc.filePath,
                  content: doc.content || '',
              }
    }

    public getActiveTextEditorSelection(): ActiveTextEditorSelection | null {
        const doc = this.activeDocument()
        if (doc === undefined || doc.content === undefined || doc.selection === undefined) {
            return null
        }
        // Convert line/character positions into absolute offsets.
        const offsets = new DocumentOffsets(doc)
        const start = offsets.offset(doc.selection.start)
        const end = offsets.offset(doc.selection.end)
        const text = doc.content
        return {
            fileName: doc.filePath || '',
            precedingText: text.slice(0, start),
            selectedText: text.slice(start, end),
            followingText: text.slice(end, text.length),
        }
    }

    public getActiveTextEditorSelectionOrEntireFile(): ActiveTextEditorSelection | null {
        const doc = this.activeDocument()
        // With no selection, treat the entire file as selected.
        if (doc !== undefined && doc.selection === undefined) {
            return {
                fileName: doc.filePath || '',
                precedingText: '',
                selectedText: doc.content || '',
                followingText: '',
            }
        }
        return this.getActiveTextEditorSelection()
    }

    public getActiveTextEditorVisibleContent(): ActiveTextEditorVisibleContent | null {
        const doc = this.activeDocument()
        if (doc === undefined) {
            return null
        }
        return {
            content: doc.content || '',
            fileName: doc.filePath,
        }
    }

    public replaceSelection(): Promise<void> {
        throw new Error('Not implemented')
    }

    public showQuickPick(): Promise<string | undefined> {
        throw new Error('Not implemented')
    }

    public showWarningMessage(): Promise<void> {
        throw new Error('Not implemented')
    }

    public showInputBox(): Promise<string | undefined> {
        throw new Error('Not implemented')
    }
}

View File

@ -1,80 +0,0 @@
import assert from 'assert'
import { spawn } from 'child_process'
import path from 'path'
import { RecipeID } from '@sourcegraph/cody-shared/src/chat/recipes/recipe'
import { MessageHandler } from './jsonrpc'
/**
 * Minimal agent client used by the tests: wraps the raw JSON-RPC
 * MessageHandler with typed helpers for the handshake, recipe listing,
 * recipe execution, and shutdown.
 */
export class TestClient extends MessageHandler {
    /** Perform the 'initialize'/'initialized' handshake and return the server info. */
    public handshake() {
        return this.request('initialize', {
            name: 'test-client',
            version: 'v1',
            workspaceRootPath: '/path/to/foo',
        }).then(info => {
            this.notify('initialized', null)
            return info
        })
    }

    /** Ask the server which recipes it supports. */
    public listRecipes() {
        return this.request('recipes/list', null)
    }

    /** Run one recipe with the given human chat input. */
    public executeRecipe(id: RecipeID, humanChatInput: string) {
        return this.request('recipes/execute', { id, humanChatInput })
    }

    /** Request an orderly shutdown, then tell the server process to exit. */
    public shutdownAndExit() {
        return this.request('shutdown', null).then(() => {
            this.notify('exit', null)
        })
    }
}
// End-to-end test of the agent over real stdio JSON-RPC: spawns the bundled
// dist/agent.js as a child process and drives it with TestClient.
// NOTE(review): requires a prior build to have produced ../dist/agent.js.
describe('StandardAgent', () => {
    // These tests talk to a live Sourcegraph instance, so they are skipped
    // unless credentials are supplied via the environment.
    if (process.env.SRC_ACCESS_TOKEN === undefined || process.env.SRC_ENDPOINT === undefined) {
        it('no-op test because SRC_ACCESS_TOKEN is not set. To actually run the Cody Agent tests, set the environment variables SRC_ENDPOINT and SRC_ACCESS_TOKEN', () => {})
        return
    }
    const client = new TestClient()
    const agentProcess = spawn('node', [path.join(__dirname, '../dist/agent.js')], {
        stdio: 'pipe',
    })
    // Wire the agent's stdout into our decoder and our encoder into its stdin.
    agentProcess.stdout.pipe(client.messageDecoder)
    client.messageEncoder.pipe(agentProcess.stdin)
    // Mirror the agent's stderr (its log channel) into the test output.
    agentProcess.stderr.on('data', msg => {
        // eslint-disable-next-line @typescript-eslint/no-unsafe-member-access, @typescript-eslint/no-unsafe-call
        console.log(msg.toString())
    })
    it('initializes properly', async () => {
        assert.deepStrictEqual(await client.handshake(), { name: 'cody-agent' }, 'Agent should be cody-agent')
    })
    it('lists recipes correctly', async () => {
        const recipes = await client.listRecipes()
        // NOTE(review): hard-coded count — update when recipes are added or removed.
        assert(recipes.length === 8)
    })
    // Resolves once the server signals the end of a streamed chat message
    // (a null 'chat/updateMessageInProgress' notification).
    const streamingChatMessages = new Promise<void>(resolve => {
        client.registerNotification('chat/updateMessageInProgress', msg => {
            if (msg === null) {
                resolve()
            }
        })
    })
    it('allows us to execute recipes properly', async () => {
        await client.executeRecipe('chat-question', "What's 2+2?")
    })
    // Returning the promise makes jest await it; 20s timeout for the stream.
    it('sends back transcript updates and makes sense', () => streamingChatMessages, 20_000)
    afterAll(async () => {
        await client.shutdownAndExit()
    })
})

View File

@ -1,10 +0,0 @@
import { Agent } from './agent'

// Entry point for the Cody agent process: speaks JSON-RPC over stdio.
process.stderr.write('Starting Cody Agent...\n')

const agent = new Agent()

// stdout carries JSON-RPC messages only; redirect console.log to stderr so
// stray logging cannot corrupt the protocol stream.
console.log = console.error

// Wire the JSON-RPC decoder/encoder to this process's stdio.
process.stdin.pipe(agent.messageDecoder)
agent.messageEncoder.pipe(process.stdout)

View File

@ -1,285 +0,0 @@
/* eslint-disable @typescript-eslint/no-explicit-any */
import assert from 'assert'
import { Readable, Writable } from 'stream'
import { Notifications, Requests } from './protocol'
// This file is a standalone implementation of JSON-RPC for Node.js
// ReadStream/WriteStream, which conventionally map to stdin/stdout.
// The code assumes familiarity with the JSON-RPC specification as documented
// here https://www.jsonrpc.org/specification
// To learn more about how JSON-RPC protocols work, the LSP specification is
// also a good read
// https://microsoft.github.io/language-server-protocol/specifications/lsp/3.17/specification/
// String literal types for the names of the Cody Agent protocol methods.
type RequestMethodName = keyof Requests
type NotificationMethodName = keyof Notifications
type MethodName = RequestMethodName | NotificationMethodName

// Parameter type of a request or notification. Note: JSON-RPC methods can only
// accept one parameter. Multiple parameters must be encoded as an array or an
// object.
type ParamsOf<K extends MethodName> = (Requests & Notifications)[K][0]
// Request result types. Note: notifications don't return values.
type ResultOf<K extends RequestMethodName> = Requests[K][1]

// JSON-RPC message id; per the spec this is either a string or a number.
type Id = string | number

// Error codes as defined by the JSON-RPC spec.
enum ErrorCode {
    ParseError = -32700,
    InvalidRequest = -32600,
    MethodNotFound = -32601,
    InvalidParams = -32602,
    InternalError = -32603,
}

// Result of an erroneous request, which populates the `error` property instead
// of `result` for successful results.
interface ErrorInfo<T> {
    code: ErrorCode
    message: string
    data: T
}

// The three different kinds of toplevel JSON objects that get written to the
// wire: requests, request responses, and notifications.
interface RequestMessage<M extends RequestMethodName> {
    jsonrpc: '2.0'
    id: Id
    method: M
    params?: ParamsOf<M>
}
interface ResponseMessage<M extends RequestMethodName> {
    jsonrpc: '2.0'
    id: Id
    result?: ResultOf<M>
    error?: ErrorInfo<any>
}
interface NotificationMessage<M extends NotificationMethodName> {
    jsonrpc: '2.0'
    method: M
    params?: ParamsOf<M>
}

// NOTE(review): an intersection (&) rather than a union — this works because
// the dispatcher only probes `id`/`method`/`params` optionally, but a
// discriminated union would model the wire format more precisely.
type Message = RequestMessage<any> & ResponseMessage<any> & NotificationMessage<any>

// Callback invoked once per decoded message, or with a decode error.
type MessageHandlerCallback = (err: Error | null, msg: Message | null) => void
/**
 * Writable stream that parses a Content-Length-framed JSON-RPC byte stream
 * (conventionally piped from stdin, LSP-style framing) and invokes `callback`
 * once per decoded message, or with an error when a frame body is not valid
 * JSON.
 *
 * Bug fix: the original body-reading branch looped forever when a message
 * body was split across `_write` chunks — with an empty input buffer the
 * slice produced zero bytes, `contentLengthRemaining` never decreased, and
 * `while (true)` never exited. We now break out to await the next chunk when
 * no body bytes are available.
 */
class MessageDecoder extends Writable {
    // Raw bytes received but not yet consumed.
    private buffer: Buffer = Buffer.alloc(0)
    // Bytes of the current message body still to read; null while parsing headers.
    private contentLengthRemaining: number | null = null
    // Accumulated bytes of the current message body.
    private contentBuffer: Buffer = Buffer.alloc(0)

    constructor(public callback: MessageHandlerCallback) {
        super()
    }

    public _write(chunk: Buffer, encoding: string, callback: (error?: Error | null) => void): void {
        this.buffer = Buffer.concat([this.buffer, chunk])
        // We loop because a single chunk may contain several complete messages.
        read: while (true) {
            if (this.contentLengthRemaining === null) {
                // Header-parsing state.
                const headerString = this.buffer.toString()
                let startIndex = 0
                let endIndex
                // Staged here so the content length only takes effect once the
                // whole header section (terminated by a blank line) arrives.
                let newContentLength = 0
                const LINE_TERMINATOR = '\r\n'
                while ((endIndex = headerString.indexOf(LINE_TERMINATOR, startIndex)) !== -1) {
                    const entry = headerString.slice(startIndex, endIndex)
                    const [headerName, headerValue] = entry.split(':').map(_ => _.trim())
                    if (headerValue === undefined) {
                        // A line without ':' is the blank line terminating the
                        // header section: drop the header bytes and switch to
                        // body-reading state.
                        this.buffer = this.buffer.slice(endIndex + LINE_TERMINATOR.length)
                        this.contentLengthRemaining = newContentLength
                        // A missing/garbled Content-Length is irrecoverable
                        // because the stream is polluted from here on.
                        assert(
                            isFinite(this.contentLengthRemaining),
                            `parsed Content-Length ${this.contentLengthRemaining} is not a finite number`
                        )
                        continue read
                    }
                    switch (headerName) {
                        case 'Content-Length':
                            newContentLength = parseInt(headerValue, 10)
                            break
                        default:
                            // Unknown headers are tolerated (and logged), like in LSP.
                            console.error(`Unknown header '${headerName}': ignoring!`)
                            break
                    }
                    startIndex = endIndex + LINE_TERMINATOR.length
                }
                // No complete header section yet; wait for more bytes.
                break
            } else {
                if (this.contentLengthRemaining === 0) {
                    // Full body received: decode it, emit it, reset to header state.
                    try {
                        // eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
                        const data = JSON.parse(this.contentBuffer.toString())
                        this.contentBuffer = Buffer.alloc(0)
                        this.contentLengthRemaining = null
                        this.callback(null, data)
                    } catch (error) {
                        this.callback(error, null)
                    }
                    continue
                }
                // Move up to contentLengthRemaining body bytes out of the
                // input buffer. slice() is clamped, so a short read leaves
                // contentLengthRemaining > 0 for the next chunk.
                const data = this.buffer.slice(0, this.contentLengthRemaining)
                if (data.byteLength === 0) {
                    // Body incomplete and no more input buffered: break out
                    // and wait for the next _write call. (Without this the
                    // loop spun forever on bodies split across chunks.)
                    break
                }
                this.contentBuffer = Buffer.concat([this.contentBuffer, data])
                this.buffer = this.buffer.slice(this.contentLengthRemaining)
                this.contentLengthRemaining -= data.byteLength
            }
        }
        callback()
    }
}
/**
 * Readable stream that frames JSON-RPC messages with a Content-Length header
 * (LSP-style framing), ready to be piped to stdout or any writable.
 */
class MessageEncoder extends Readable {
    // Framed bytes not yet consumed by the reader.
    private pending: Buffer = Buffer.alloc(0)

    /** Serialize `data` and append one Content-Length-framed message. */
    public send(data: any): void {
        this.pause()
        const body = Buffer.from(JSON.stringify(data), 'utf-8')
        const header = Buffer.from(`Content-Length: ${body.byteLength}\r\n\r\n`, 'utf-8')
        this.pending = Buffer.concat([this.pending, header, body])
        this.resume()
    }

    public _read(size: number): void {
        // Hand out at most `size` bytes and keep the remainder pending.
        this.push(this.pending.slice(0, size))
        this.pending = this.pending.slice(size)
    }
}
type RequestCallback<M extends RequestMethodName> = (params: ParamsOf<M>) => Promise<ResultOf<M>>
type NotificationCallback<M extends NotificationMethodName> = (params: ParamsOf<M>) => void
/**
* Only exported API in this file. MessageHandler exposes a public `messageDecoder` property
* that can be piped with ReadStream/WriteStream.
*/
export class MessageHandler {
    // Monotonically increasing id for outgoing requests; bumped past any id
    // observed from the peer so ids never collide.
    private id = 0
    private requestHandlers: Map<RequestMethodName, RequestCallback<any>> = new Map()
    private notificationHandlers: Map<NotificationMethodName, NotificationCallback<any>> = new Map()
    // Pending outgoing requests keyed by id. Each entry settles the promise
    // returned by request() when the matching response arrives: `resolve` on a
    // result, `reject` on an error response (previously a TODO — error
    // responses used to resolve the promise with `undefined`).
    private responseHandlers: Map<Id, { resolve: (params: any) => void; reject: (error: Error) => void }> = new Map()

    // Dispatches every decoded incoming message to the appropriate handler.
    public messageDecoder: MessageDecoder = new MessageDecoder((err: Error | null, msg: Message | null) => {
        if (err) {
            console.error(`Error: ${err}`)
        }
        if (!msg) {
            return
        }
        if (msg.id !== undefined && msg.method) {
            // Requests have both an id and a method.
            if (typeof msg.id === 'number' && msg.id > this.id) {
                this.id = msg.id + 1
            }
            const handler = this.requestHandlers.get(msg.method)
            if (handler) {
                handler(msg.params).then(
                    result => {
                        const data: ResponseMessage<any> = {
                            jsonrpc: '2.0',
                            id: msg.id,
                            // eslint-disable-next-line @typescript-eslint/no-unsafe-assignment
                            result,
                        }
                        this.messageEncoder.send(data)
                    },
                    error => {
                        // Report handler failures to the peer as a JSON-RPC
                        // error response rather than letting them vanish.
                        const message = error instanceof Error ? error.message : `${error}`
                        const data: ResponseMessage<any> = {
                            jsonrpc: '2.0',
                            id: msg.id,
                            error: {
                                code: ErrorCode.InternalError,
                                message,
                                data: JSON.stringify(error),
                            },
                        }
                        this.messageEncoder.send(data)
                    }
                )
            } else {
                console.error(`No handler for request with method ${msg.method}`)
            }
        } else if (msg.id !== undefined) {
            // Responses have an id but no method.
            const handler = this.responseHandlers.get(msg.id)
            if (handler) {
                if (msg.error) {
                    // Error response: reject the pending promise so callers of
                    // request() can observe the failure.
                    handler.reject(new Error(`${msg.error.message} (error code ${msg.error.code})`))
                } else {
                    handler.resolve(msg.result)
                }
                this.responseHandlers.delete(msg.id)
            } else {
                console.error(`No handler for response with id ${msg.id}`)
            }
        } else if (msg.method) {
            // Notifications have a method but no id.
            const notificationHandler = this.notificationHandlers.get(msg.method)
            if (notificationHandler) {
                notificationHandler(msg.params)
            } else {
                console.error(`No handler for notification with method ${msg.method}`)
            }
        }
    })

    public messageEncoder: MessageEncoder = new MessageEncoder()

    // Registers the handler invoked for incoming requests with `method`.
    public registerRequest<M extends RequestMethodName>(method: M, callback: RequestCallback<M>): void {
        this.requestHandlers.set(method, callback)
    }

    // Registers the handler invoked for incoming notifications with `method`.
    public registerNotification<M extends NotificationMethodName>(method: M, callback: NotificationCallback<M>): void {
        this.notificationHandlers.set(method, callback)
    }

    /**
     * Sends a request to the peer. The returned promise resolves with the
     * result of the matching response, or rejects if the peer answers with a
     * JSON-RPC error response.
     */
    public request<M extends RequestMethodName>(method: M, params: ParamsOf<M>): Promise<ResultOf<M>> {
        const id = this.id++
        const data: RequestMessage<M> = {
            jsonrpc: '2.0',
            id,
            method,
            params,
        }
        this.messageEncoder.send(data)
        return new Promise((resolve, reject) => {
            this.responseHandlers.set(id, { resolve, reject })
        })
    }

    // Sends a fire-and-forget notification to the peer.
    public notify<M extends NotificationMethodName>(method: M, params: ParamsOf<M>): void {
        const data: NotificationMessage<M> = {
            jsonrpc: '2.0',
            method,
            params,
        }
        this.messageEncoder.send(data)
    }
}

View File

@ -1,27 +0,0 @@
import { Position, TextDocument } from './protocol'
/**
* Utility class to convert line/character positions into offsets.
*/
/**
 * Utility class to convert line/character positions into offsets.
 */
export class DocumentOffsets {
    // Start offset of each line, plus a final sentinel equal to the content
    // length so lookups on the last line stay in bounds.
    private lines: number[] = []
    constructor(public readonly document: TextDocument) {
        // Always record line 0 so offset() is well-defined even for empty or
        // undefined content (previously `lines` stayed empty in that case and
        // offset() produced NaN).
        this.lines.push(0)
        const content = document.content
        if (content) {
            // Scan from index 0: the original started at index 1 and therefore
            // never recorded a newline at the very first character, shifting
            // every subsequent line's offset.
            for (let index = 0; index < content.length; index++) {
                if (content[index] === '\n') {
                    this.lines.push(index + 1)
                }
            }
            if (content.length !== this.lines[this.lines.length - 1]) {
                this.lines.push(content.length) // sentinel value
            }
        }
    }
    /** Returns the character offset of `position` within the document. */
    public offset(position: Position): number {
        const lineStartOffset = this.lines[position.line]
        return lineStartOffset + position.character
    }
}

View File

@ -1,134 +0,0 @@
/* eslint-disable @typescript-eslint/consistent-type-definitions */
import { RecipeID } from '@sourcegraph/cody-shared/src/chat/recipes/recipe'
import { ChatMessage } from '@sourcegraph/cody-shared/src/chat/transcript/messages'
// This file documents the Cody Agent JSON-RPC protocol. Consult the JSON-RPC
// specification to learn about how JSON-RPC works https://www.jsonrpc.org/specification
// The Cody Agent server only supports transport via stdout/stdin.
// The JSON-RPC requests of the Cody Agent protocol. Requests are async
// functions that return some (possibly null) value.
export type Requests = {
    // ================
    // Client -> Server
    // ================
    // The 'initialize' request must be sent at the start of the connection
    // before any other request/notification is sent.
    initialize: [ClientInfo, ServerInfo]
    // The 'shutdown' request must be sent before terminating the agent process.
    shutdown: [null, null]
    // Client requests the agent server to list all recipes that are supported
    // by the agent.
    'recipes/list': [null, RecipeInfo[]]
    // Client requests the agent server to execute an individual recipe.
    // The response is null because the AI/Assistant messages are streamed through
    // the chat/updateMessageInProgress notification. The flow to trigger a recipe
    // is like this:
    // client --- recipes/execute --> server
    // client <-- chat/updateMessageInProgress --- server
    // ....
    // client <-- chat/updateMessageInProgress --- server
    'recipes/execute': [ExecuteRecipeParams, null]
    // ================
    // Server -> Client
    // ================
    // (No server-initiated requests are defined yet.)
}
// The JSON-RPC notifications of the Cody Agent protocol. Notifications are
// synchronous fire-and-forget messages that have no return value. Notifications are
// conventionally used to represent streams of values.
export type Notifications = {
    // ================
    // Client -> Server
    // ================
    // The 'initialized' notification must be sent after receiving the 'initialize' response.
    initialized: [null]
    // The 'exit' notification must be sent after the client receives the 'shutdown' response.
    exit: [null]
    // The server should use the provided connection configuration for all
    // subsequent requests/notifications. The previous connection configuration
    // should no longer be used.
    'connectionConfiguration/didChange': [ConnectionConfiguration]
    // Lifecycle notifications for the client to notify the server about text
    // contents of documents and to notify which document is currently focused.
    'textDocument/didOpen': [TextDocument]
    // The 'textDocument/didChange' notification should be sent on almost every
    // keystroke, whether the text contents changed or the cursor/selection
    // changed. Leave the `content` property undefined when the document's
    // content is unchanged.
    'textDocument/didChange': [TextDocument]
    // The user focused on a document without changing the document's content.
    // Only the 'uri' property is required, other properties are ignored.
    // NOTE(review): TextDocument declares `filePath`, not `uri` — confirm
    // which property name these comments should reference.
    'textDocument/didFocus': [TextDocument]
    // The user closed the editor tab for the given document.
    // Only the 'uri' property is required, other properties are ignored.
    'textDocument/didClose': [TextDocument]
    // ================
    // Server -> Client
    // ================
    // The server received new messages for the ongoing 'chat/executeRecipe'
    // request. The server should never send this notification outside of a
    // 'chat/executeRecipe' request.
    // NOTE(review): the request is declared as 'recipes/execute' above, not
    // 'chat/executeRecipe' — confirm the intended method name.
    'chat/updateMessageInProgress': [ChatMessage | null]
}
/** Client metadata sent as the params of the 'initialize' request. */
export interface ClientInfo {
    name: string
    version: string
    workspaceRootPath: string
    connectionConfiguration?: ConnectionConfiguration
    capabilities?: ClientCapabilities
}
/** Features the client supports, so the server can tailor its behavior. */
export interface ClientCapabilities {
    completions?: 'none'
    // When 'streaming', handles 'chat/updateMessageInProgress' streaming notifications.
    chat?: 'none' | 'streaming'
}
/** Server metadata returned as the result of the 'initialize' request. */
export interface ServerInfo {
    name: string
    capabilities?: ServerCapabilities
}
/** Currently empty; reserved for future server capability flags. */
export interface ServerCapabilities {}
/** Connection details for the Sourcegraph instance the agent talks to. */
export interface ConnectionConfiguration {
    serverEndpoint: string
    accessToken: string
    customHeaders: Record<string, string>
}
export interface Position {
    // 0-indexed
    line: number
    // 0-indexed
    character: number
}
/** A span of text between two positions. */
export interface Range {
    start: Position
    end: Position
}
/** A text document whose state is synced via the textDocument/* notifications. */
export interface TextDocument {
    filePath: string
    content?: string
    selection?: Range
}
/** A recipe descriptor as returned by the 'recipes/list' request. */
export interface RecipeInfo {
    id: RecipeID
    title: string
}
/** Params of the 'recipes/execute' request. */
export interface ExecuteRecipeParams {
    id: RecipeID
    humanChatInput: string
}

View File

@ -1,12 +0,0 @@
{
"extends": "../../tsconfig.base.json",
"compilerOptions": {
"module": "commonjs",
"rootDir": ".",
"sourceRoot": "src",
"outDir": "out",
},
"include": ["**/*", ".*", "package.json"],
"exclude": ["out"],
"references": [{ "path": "../common" }, { "path": "../cody-shared" }],
}

View File

@ -1,2 +0,0 @@
/dist/
/out/

View File

@ -1,24 +0,0 @@
// @ts-check
const baseConfig = require('../../.eslintrc')
// ESLint config for the cody-cli package: inherit the repo-wide rules, point
// type-aware linting at this package's tsconfig, and relax a few rules for
// CLI-style code (console output is the program's purpose here).
module.exports = {
    extends: '../../.eslintrc.js',
    parserOptions: {
        ...baseConfig.parserOptions,
        // Type-aware rules need this package's own tsconfig.
        project: [__dirname + '/tsconfig.json'],
    },
    overrides: baseConfig.overrides,
    rules: {
        'id-length': 'off',
        'no-console': 'off',
        'no-restricted-imports': [
            'error',
            {
                patterns: ['!@sourcegraph/cody-shared/*'], // allow any imports from the @sourcegraph/cody-shared package
            },
        ],
        'unicorn/filename-case': 'off',
        'arrow-body-style': 'off',
        '@typescript-eslint/explicit-function-return-type': 'off',
    },
}

View File

@ -1,3 +0,0 @@
node_modules/
out/
dist/

View File

@ -1,39 +0,0 @@
load("@aspect_rules_ts//ts:defs.bzl", "ts_config")
load("@npm//:defs.bzl", "npm_link_all_packages")
load("//dev:defs.bzl", "ts_project")
load("//dev:eslint.bzl", "eslint_config_and_lint_root")
npm_link_all_packages(name = "node_modules")
eslint_config_and_lint_root()
ts_config(
name = "tsconfig",
src = "tsconfig.json",
visibility = ["//client:__subpackages__"],
deps = [
"//:tsconfig",
"//client/cody-shared:tsconfig",
"//client/common:tsconfig",
],
)
ts_project(
name = "cody-cli",
srcs = [
"src/app.ts",
"src/completions.ts",
"src/config.ts",
"src/context.ts",
"src/interactions.ts",
"src/preamble.ts",
],
tsconfig = ":tsconfig",
deps = [
":node_modules/@sourcegraph/cody-shared",
":node_modules/@types/prompts",
":node_modules/commander",
":node_modules/prompts",
"//:node_modules/envalid",
],
)

View File

@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2022 Sourcegraph, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -1,21 +0,0 @@
# Cody CLI (experimental)
Cody CLI is an experimental command-line interface for Cody.
## Install
In the root of the repository, run this:
```
pnpm --filter @sourcegraph/cody-cli run build
cd client/cody-cli
npm install -g .
```
## Local Development
In the root of the repository, run this:
```
pnpm --filter @sourcegraph/cody-cli run start
```

View File

@ -1,11 +0,0 @@
// @ts-check
/** @type {import('@jest/types').Config.InitialOptions} */
const config = require('../../jest.config.base')
/** @type {import('@jest/types').Config.InitialOptions} */
// Jest config for the cody-cli package: reuse the repo-wide base config,
// overriding only the display name and the package root.
module.exports = {
    ...config,
    displayName: 'cody-cli',
    rootDir: __dirname,
}

View File

@ -1,26 +0,0 @@
{
"name": "@sourcegraph/cody-cli",
"private": true,
"displayName": "Sourcegraph Cody CLI",
"version": "0.0.1",
"license": "Apache-2.0",
"description": "Cody CLI",
"scripts": {
"start": "ts-node-transpile-only ./src/app.ts",
"lint": "pnpm run lint:js",
"lint:js": "eslint --cache '**/*.[tj]s?(x)'",
"build": "esbuild ./src/app.ts --bundle --outfile=dist/app.js --format=cjs --platform=node",
"build-ts": "tsc -b --emitDeclarationOnly"
},
"dependencies": {
"@sourcegraph/cody-shared": "workspace:*",
"@sourcegraph/common": "workspace:*",
"@types/prompts": "^2.4.4",
"commander": "^10.0.1",
"prompts": "^2.4.2"
},
"main": "dist/app.js",
"bin": {
"cody": "./dist/app.js"
}
}

View File

@ -1,136 +0,0 @@
#! /usr/bin/env node
import { Command } from 'commander'
import prompts from 'prompts'
import { Transcript } from '@sourcegraph/cody-shared/src/chat/transcript'
import { ConfigurationUseContext } from '@sourcegraph/cody-shared/src/configuration'
import { SourcegraphIntentDetectorClient } from '@sourcegraph/cody-shared/src/intent-detector/client'
import { SourcegraphNodeCompletionsClient } from '@sourcegraph/cody-shared/src/sourcegraph-api/completions/nodeClient'
import { Message } from '@sourcegraph/cody-shared/src/sourcegraph-api/completions/types'
import { SourcegraphGraphQLAPIClient } from '@sourcegraph/cody-shared/src/sourcegraph-api/graphql'
import { isRepoNotFoundError } from '@sourcegraph/cody-shared/src/sourcegraph-api/graphql/client'
import { streamCompletions } from './completions'
import { DEFAULTS, ENVIRONMENT_CONFIG } from './config'
import { createCodebaseContext } from './context'
import { interactionFromMessage } from './interactions'
import { getPreamble } from './preamble'
/**
 * Entry point of the Cody CLI: parses flags, connects to the configured
 * Sourcegraph instance, builds a one-shot transcript for the prompt, and
 * streams the completion to stdout.
 *
 * Exits the process with code 1 when no access token is configured or when
 * the codebase context cannot be created.
 */
async function startCLI() {
    const program = new Command()
    program
        .version('0.0.1')
        .description('Cody CLI')
        .option('-p, --prompt <value>', 'Give Cody a prompt')
        .option('-c, --codebase <value>', `Codebase to use for context fetching. Default: ${DEFAULTS.codebase}`)
        .option('-e, --endpoint <value>', `Sourcegraph instance to connect to. Default: ${DEFAULTS.serverEndpoint}`)
        .option(
            '--context [embeddings,keyword,none,blended]',
            `How Cody fetches context for query. Default: ${DEFAULTS.contextType}`
        )
        .option('--lsp', 'Start LSP')
        .parse(process.argv)
    // NOTE(review): the --lsp flag is declared above but never read in this
    // function — confirm whether it is handled elsewhere or is dead.
    const options = program.opts()
    const codebase: string = (options.codebase as string) || DEFAULTS.codebase
    const endpoint: string = (options.endpoint as string) || DEFAULTS.serverEndpoint
    // BUG FIX: commander stores the value of `--context` under `options.context`
    // (derived from the flag name), not `options.contextType`, so the flag was
    // previously ignored and the default was always used.
    const contextType: ConfigurationUseContext =
        (options.context as ConfigurationUseContext) || DEFAULTS.contextType
    const accessToken: string | undefined = ENVIRONMENT_CONFIG.SRC_ACCESS_TOKEN
    if (accessToken === undefined || accessToken === '') {
        console.error(
            'No access token found. Set SRC_ACCESS_TOKEN to an access token created on the Sourcegraph instance.'
        )
        process.exit(1)
    }
    const sourcegraphClient = new SourcegraphGraphQLAPIClient({
        serverEndpoint: endpoint,
        accessToken,
        customHeaders: {},
    })
    let codebaseContext
    try {
        codebaseContext = await createCodebaseContext(sourcegraphClient, codebase, contextType, endpoint)
    } catch (error) {
        // Distinguish "repo not found" from connectivity/auth problems so the
        // user gets an actionable message.
        let errorMessage = ''
        if (isRepoNotFoundError(error)) {
            errorMessage =
                `Cody could not find the '${codebase}' repository on your Sourcegraph instance.\n` +
                'Please check that the repository exists and is entered correctly in the cody.codebase setting.'
        } else {
            errorMessage =
                `Cody could not connect to your Sourcegraph instance: ${error}\n` +
                'Make sure that cody.serverEndpoint is set to a running Sourcegraph instance and that an access token is configured.'
        }
        console.error(errorMessage)
        process.exit(1)
    }
    const intentDetector = new SourcegraphIntentDetectorClient(sourcegraphClient)
    const completionsClient = new SourcegraphNodeCompletionsClient({
        serverEndpoint: endpoint,
        accessToken: ENVIRONMENT_CONFIG.SRC_ACCESS_TOKEN,
        debugEnable: DEFAULTS.debug === 'development',
        customHeaders: {},
    })
    // Prompt interactively when -p/--prompt was not given.
    let prompt = options.prompt as string
    if (prompt === undefined || prompt === '') {
        const response = await prompts({
            type: 'text',
            name: 'value',
            message: 'What do you want to ask Cody?',
        })
        prompt = response.value as string
    }
    const transcript = new Transcript()
    // TODO: Keep track of all user input if we add REPL mode
    const initialMessage: Message = { speaker: 'human', text: prompt }
    const messages: { human: Message; assistant?: Message }[] = [{ human: initialMessage }]
    for (const [index, message] of messages.entries()) {
        const interaction = await interactionFromMessage(
            message.human,
            intentDetector,
            // Fetch codebase context only for the last message
            index === messages.length - 1 ? codebaseContext : null
        )
        // NOTE(review): interactionFromMessage can return null for empty
        // messages; addInteraction is called unconditionally — confirm it
        // tolerates null.
        transcript.addInteraction(interaction)
        if (message.assistant?.text) {
            transcript.addAssistantResponse(message.assistant?.text)
        }
    }
    const { prompt: finalPrompt, contextFiles } = await transcript.getPromptForLastInteraction(getPreamble(codebase))
    transcript.setUsedContextFilesForLastInteraction(contextFiles)
    // Buffer the streamed chunks and print the final text once complete.
    let text = ''
    streamCompletions(completionsClient, finalPrompt, {
        onChange: chunk => {
            text = chunk
        },
        onComplete: () => {
            console.log(text)
        },
        onError: err => {
            console.error(err)
        },
    })
}
// Kick off the CLI; any startup failure is reported and turns into a
// non-zero exit code.
startCLI().catch(error => {
    console.error('Error starting the bot:', error)
    process.exit(1)
})

View File

@ -1,22 +0,0 @@
import { ANSWER_TOKENS } from '@sourcegraph/cody-shared/src/prompt/constants'
import { Message } from '@sourcegraph/cody-shared/src/sourcegraph-api'
import { SourcegraphNodeCompletionsClient } from '@sourcegraph/cody-shared/src/sourcegraph-api/completions/nodeClient'
import {
CompletionParameters,
CompletionCallbacks,
} from '@sourcegraph/cody-shared/src/sourcegraph-api/completions/types'
// Sampling parameters shared by every chat completion; `messages` is supplied
// per call by streamCompletions.
const DEFAULT_CHAT_COMPLETION_PARAMETERS: Omit<CompletionParameters, 'messages'> = {
    temperature: 0.2,
    maxTokensToSample: ANSWER_TOKENS,
    topK: -1,
    topP: -1,
}

/**
 * Starts streaming a chat completion for `messages`, reporting progress and
 * completion through `cb`. Returns whatever `client.stream` returns.
 */
export function streamCompletions(
    client: SourcegraphNodeCompletionsClient,
    messages: Message[],
    cb: CompletionCallbacks
) {
    const parameters = { messages, ...DEFAULT_CHAT_COMPLETION_PARAMETERS }
    return client.stream(parameters, cb)
}

View File

@ -1,12 +0,0 @@
import { cleanEnv, str } from 'envalid'
// Environment variables required by the CLI, validated at module load by
// envalid (cleanEnv throws/exits on missing values).
export const ENVIRONMENT_CONFIG = cleanEnv(process.env, {
    // Access token created on the Sourcegraph instance; used for API auth.
    SRC_ACCESS_TOKEN: str(),
})
// Fallback values used when the corresponding CLI flags are not provided.
export const DEFAULTS = {
    codebase: 'github.com/sourcegraph/sourcegraph',
    serverEndpoint: 'https://sourcegraph.sourcegraph.com',
    contextType: 'blended',
    debug: 'development',
} as const

View File

@ -1,43 +0,0 @@
import { CodebaseContext } from '@sourcegraph/cody-shared/src/codebase-context'
import { SourcegraphEmbeddingsSearchClient } from '@sourcegraph/cody-shared/src/embeddings/client'
import { KeywordContextFetcher } from '@sourcegraph/cody-shared/src/local-context'
import { SourcegraphGraphQLAPIClient } from '@sourcegraph/cody-shared/src/sourcegraph-api/graphql'
import { isError } from '@sourcegraph/cody-shared/src/utils'
// Resolves the repository id for `codebase` on the instance; null when no
// codebase name was provided.
const getRepoId = async (client: SourcegraphGraphQLAPIClient, codebase: string) => {
    if (!codebase) {
        return null
    }
    return client.getRepoId(codebase)
}
/**
 * Builds the CodebaseContext used to fetch context snippets for prompts.
 * Embeddings search is only constructed when the repository id lookup
 * succeeded; keyword search is stubbed out (no local search in the CLI).
 *
 * Throws the error value returned by the repo-id lookup so the caller can
 * present a friendly message.
 */
export async function createCodebaseContext(
    client: SourcegraphGraphQLAPIClient,
    codebase: string,
    contextType: 'embeddings' | 'keyword' | 'none' | 'blended' | 'unified',
    serverEndpoint: string
) {
    const repoId = await getRepoId(client, codebase)
    if (isError(repoId)) {
        throw repoId
    }
    // NOTE(review): the `!isError(repoId)` re-check is redundant after the
    // throw above — presumably kept for type narrowing; confirm before removing.
    const embeddingsSearch = repoId && !isError(repoId) ? new SourcegraphEmbeddingsSearchClient(client, repoId) : null
    const codebaseContext = new CodebaseContext(
        { useContext: contextType, serverEndpoint },
        codebase,
        embeddingsSearch,
        new LocalKeywordContextFetcherMock(),
        null
    )
    return codebaseContext
}
// No-op keyword fetcher: the CLI has no local keyword search, so both lookups
// resolve to empty result lists.
class LocalKeywordContextFetcherMock implements KeywordContextFetcher {
    public async getContext() {
        return []
    }
    public async getSearchContext() {
        return []
    }
}

View File

@ -1,52 +0,0 @@
import { Interaction } from '@sourcegraph/cody-shared/src/chat/transcript/interaction'
import { CodebaseContext } from '@sourcegraph/cody-shared/src/codebase-context'
import { ContextMessage } from '@sourcegraph/cody-shared/src/codebase-context/messages'
import { IntentDetector } from '@sourcegraph/cody-shared/src/intent-detector'
import { MAX_HUMAN_INPUT_TOKENS } from '@sourcegraph/cody-shared/src/prompt/constants'
import { truncateText } from '@sourcegraph/cody-shared/src/prompt/truncation'
import { Message } from '@sourcegraph/cody-shared/src/sourcegraph-api'
// Fetches codebase context messages for `text`, but only when the intent
// detector decides the question actually needs codebase context.
async function getContextMessages(
    text: string,
    intentDetector: IntentDetector,
    codebaseContext: CodebaseContext
): Promise<ContextMessage[]> {
    if (!(await intentDetector.isCodebaseContextRequired(text))) {
        return []
    }
    return codebaseContext.getContextMessages(text, {
        numCodeResults: 8,
        numTextResults: 2,
    })
}
/**
 * Builds a transcript Interaction for a human message: the message text is
 * truncated to the prompt budget, and context fetching is kicked off (as a
 * promise handed to the Interaction) when a codebase context is provided.
 * Returns null for messages with no text.
 */
export async function interactionFromMessage(
    message: Message,
    intentDetector: IntentDetector,
    codebaseContext: CodebaseContext | null
): Promise<Interaction | null> {
    if (!message.text) {
        return null
    }
    const text = truncateText(message.text, MAX_HUMAN_INPUT_TOKENS)
    const contextMessages =
        codebaseContext !== null ? getContextMessages(text, intentDetector, codebaseContext) : Promise.resolve([])
    return new Interaction(
        { speaker: 'human', text, displayText: text },
        { speaker: 'assistant', text: '', displayText: '' },
        contextMessages,
        []
    )
}

View File

@ -1,49 +0,0 @@
import { Message } from '@sourcegraph/cody-shared/src/sourcegraph-api'
// Static preamble text: what Cody is and what it can do.
const actions = `You are Cody, an AI-powered coding assistant created by Sourcegraph. You work inside a Unix command line. You perform the following actions:
- Answer general programming questions.
- Answer questions about the code that I have provided to you.
- Generate code that matches a written description.
- Explain what a section of code does.`

// Static preamble text: rules the assistant must follow when responding.
const rules = `In your responses, obey the following rules:
- Be as brief and concise as possible without losing clarity.
- All code snippets have to be markdown-formatted without that language specifier, and placed in-between triple backticks like this \`\`\`.
- Answer questions only if you know the answer or can make a well-informed guess. Otherwise, tell me you don't know and what context I need to provide you for you to answer the question.
- Only reference file names or URLs if you are sure they exist.`

// The assistant's canned acknowledgement of the preamble.
const answer = `Understood. I am Cody, an AI assistant made by Sourcegraph to help with programming tasks.
I will answer questions, explain code, and generate code as concisely and clearly as possible.
My responses will be formatted using Markdown syntax for code blocks without language specifiers.
I will acknowledge when I don't know an answer or need more context.`

/**
 * Creates and returns an array of two messages: one from a human, and the supposed response from the AI assistant.
 * Both messages contain an optional note about the current codebase if it's not null.
 */
export function getPreamble(codebase: string): Message[] {
    const humanParts = [actions, rules]
    const assistantParts = [answer]
    if (codebase) {
        humanParts.push(
            `You have access to the \`${codebase}\` repository. You are able to answer questions about the \`${codebase}\` repository. ` +
                `I will provide the relevant code snippets from the \`${codebase}\` repository when necessary to answer my questions.`
        )
        assistantParts.push(
            `I have access to the \`${codebase}\` repository and can answer questions about its files.`
        )
    }
    return [
        { speaker: 'human', text: humanParts.join('\n\n') },
        { speaker: 'assistant', text: assistantParts.join('\n') },
    ]
}

View File

@ -1,20 +0,0 @@
{
"extends": "../../tsconfig.base.json",
"compilerOptions": {
"module": "commonjs",
"sourceRoot": "src",
"rootDir": ".",
"outDir": "./out",
"baseUrl": "./src",
},
"include": ["src", "package.json", ".eslintrc.js", "jest.config.js"],
"exclude": ["out", "dist"],
"references": [
{
"path": "../cody-shared",
},
{
"path": "../common",
},
],
}

View File

@ -1,2 +0,0 @@
node_modules
pnpm-lock.yaml

View File

@ -1,8 +0,0 @@
{
"fontName": "cody-icons",
"emptyDist": true,
"css": false,
"outSVGPath": false,
"outSVGReact": false,
"useNameAsUnicode": true
}

View File

@ -1,9 +0,0 @@
load("@npm//:defs.bzl", "npm_link_all_packages")
load("//dev:defs.bzl", "npm_package")
npm_link_all_packages(name = "node_modules")
npm_package(
name = "cody-icons-font_pkg",
srcs = ["package.json"],
)

View File

@ -1,16 +0,0 @@
# cody-icons-font
Contains the [cody-icons](font) font, used by the VS Code extension to show Cody-related icons.
## Character Table
| Character | Icon |
| --------- | -------------- |
| `A` | ![](svg/A.svg) |
| `B` | ![](svg/B.svg) |
## Regenerating
```sh
pnpm run font
```

View File

@ -1,11 +0,0 @@
{
"name": "cody-icons-font",
"private": true,
"version": "0.0.0",
"scripts": {
"font": "svgtofont --sources ./svg-outlined --output ./font && rm font/*.{eot,svg}"
},
"dependencies": {
"svgtofont": "^3.23.1"
}
}

View File

@ -1,11 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 27.5.0, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
viewBox="0 0 16 16" style="enable-background:new 0 0 16 16;" xml:space="preserve">
<style type="text/css">
.st0{fill:none;stroke:#231F20;stroke-width:1.5;stroke-linecap:round;stroke-miterlimit:10;}
</style>
<path class="st0" d="M4,5.6h2.5"/>
<path class="st0" d="M11,3.8v2.5"/>
<path class="st0" d="M3.2,9.7c0,0,1.1,2.5,4.8,2.5s4.8-2.5,4.8-2.5"/>
</svg>

Before

Width:  |  Height:  |  Size: 617 B

View File

@ -1,13 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 27.5.0, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
viewBox="0 0 16 16" style="enable-background:new 0 0 16 16;" xml:space="preserve">
<style type="text/css">
.st0{fill:none;stroke:#231F20;stroke-width:2;stroke-linecap:round;stroke-miterlimit:10;}
</style>
<g>
<path class="st0" d="M3.9,5.4h2.8"/>
<path class="st0" d="M11,3.6v2.8"/>
<path class="st0" d="M3.2,9.9c0,0,1.1,2.5,4.8,2.5s4.8-2.5,4.8-2.5"/>
</g>
</svg>

Before

Width:  |  Height:  |  Size: 629 B

View File

@ -1 +0,0 @@
<svg enable-background="new 0 0 16 16" viewBox="0 0 16 16" xmlns="http://www.w3.org/2000/svg"><g fill="#231f20"><path d="m6.5 6.4h-2.5c-.4 0-.7-.4-.7-.8s.3-.7.7-.7h2.5c.4 0 .8.3.8.8s-.3.7-.8.7z"/><path d="m11 7c-.4 0-.8-.3-.8-.8v-2.4c0-.4.4-.8.8-.8s.8.3.8.8v2.5c0 .4-.4.7-.8.7z"/><path d="m8 13c-4.1 0-5.4-2.8-5.5-2.9-.2-.4 0-.8.4-1s.8 0 1 .4c0 .1 1 2.1 4.1 2.1s4.1-2 4.1-2.1c.2-.4.6-.5 1-.4.4.2.5.6.4 1-.1 0-1.4 2.9-5.5 2.9z"/></g></svg>

Before

Width:  |  Height:  |  Size: 438 B

View File

@ -1 +0,0 @@
<svg enable-background="new 0 0 16 16" viewBox="0 0 16 16" xmlns="http://www.w3.org/2000/svg"><g fill="#231f20"><path d="m6.7 6.4h-2.8c-.6 0-1-.4-1-1s.4-1 1-1h2.8c.6 0 1 .4 1 1s-.4 1-1 1z"/><path d="m11 7.4c-.6 0-1-.4-1-1v-2.8c0-.6.4-1 1-1s1 .4 1 1v2.8c0 .6-.4 1-1 1z"/><path d="m8 13.4c-4.3 0-5.7-3-5.7-3.1-.2-.5 0-1.1.5-1.3s1.1 0 1.3.5c0 .1 1 1.9 3.9 1.9 3 0 3.9-1.8 3.9-1.9.2-.5.8-.7 1.3-.5s.7.8.5 1.3c0 .1-1.4 3.1-5.7 3.1z"/></g></svg>

Before

Width:  |  Height:  |  Size: 439 B

View File

@ -1,2 +0,0 @@
/dist/
/out/

View File

@ -1,21 +0,0 @@
// @ts-check
const baseConfig = require('../../.eslintrc')
module.exports = {
extends: '../../.eslintrc.js',
parserOptions: {
...baseConfig.parserOptions,
project: [__dirname + '/tsconfig.json'],
},
overrides: baseConfig.overrides,
rules: {
'no-console': 'off',
'id-length': 'off',
'no-restricted-imports': [
'error',
{
paths: ['!highlight.js'],
},
],
},
}

View File

@ -1,2 +0,0 @@
node_modules/

View File

@ -1,136 +0,0 @@
load("@aspect_rules_ts//ts:defs.bzl", "ts_config")
load("@npm//:defs.bzl", "npm_link_all_packages")
load("//dev:defs.bzl", "npm_package", "ts_project")
load("//dev:eslint.bzl", "eslint_config_and_lint_root")
# gazelle:js_resolve vscode //:node_modules/@vscode
npm_link_all_packages(name = "node_modules")
eslint_config_and_lint_root()
ts_config(
name = "tsconfig",
src = "tsconfig.json",
visibility = ["//client:__subpackages__"],
deps = [
"//:tsconfig",
"//client/common:tsconfig",
"//client/http-client:tsconfig",
],
)
ts_project(
name = "cody-shared_lib",
srcs = [
"src/chat/bot-response-multiplexer.ts",
"src/chat/chat.ts",
"src/chat/client.ts",
"src/chat/context.ts",
"src/chat/markdown.ts",
"src/chat/preamble.ts",
"src/chat/recipes/agent-recipes.ts",
"src/chat/recipes/browser-recipes.ts",
"src/chat/recipes/chat-question.ts",
"src/chat/recipes/context-search.ts",
"src/chat/recipes/explain-code-detailed.ts",
"src/chat/recipes/explain-code-high-level.ts",
"src/chat/recipes/find-code-smells.ts",
"src/chat/recipes/fixup.ts",
"src/chat/recipes/generate-docstring.ts",
"src/chat/recipes/generate-pr-description.ts",
"src/chat/recipes/generate-release-notes.ts",
"src/chat/recipes/generate-test.ts",
"src/chat/recipes/git-log.ts",
"src/chat/recipes/helpers.ts",
"src/chat/recipes/improve-variable-names.ts",
"src/chat/recipes/inline-chat.ts",
"src/chat/recipes/inline-touch.ts",
"src/chat/recipes/langs.ts",
"src/chat/recipes/next-questions.ts",
"src/chat/recipes/non-stop.ts",
"src/chat/recipes/recipe.ts",
"src/chat/recipes/translate.ts",
"src/chat/transcript/index.ts",
"src/chat/transcript/interaction.ts",
"src/chat/transcript/messages.ts",
"src/chat/typewriter.ts",
"src/chat/useClient.ts",
"src/chat/viewHelpers.ts",
"src/codebase-context/index.ts",
"src/codebase-context/messages.ts",
"src/codebase-context/rerank.ts",
"src/configuration.ts",
"src/editor/index.ts",
"src/editor/withPreselectedOptions.ts",
"src/embeddings/client.ts",
"src/embeddings/index.ts",
"src/guardrails/client.ts",
"src/guardrails/index.ts",
"src/hallucinations-detector/index.ts",
"src/intent-detector/client.ts",
"src/intent-detector/index.ts",
"src/local-context/index.ts",
"src/prompt/constants.ts",
"src/prompt/prompt-mixin.ts",
"src/prompt/templates.ts",
"src/prompt/truncation.ts",
"src/sourcegraph-api/completions/browserClient.ts",
"src/sourcegraph-api/completions/client.ts",
"src/sourcegraph-api/completions/nodeClient.ts",
"src/sourcegraph-api/completions/parse.ts",
"src/sourcegraph-api/completions/types.ts",
"src/sourcegraph-api/graphql/client.ts",
"src/sourcegraph-api/graphql/index.ts",
"src/sourcegraph-api/graphql/queries.ts",
"src/sourcegraph-api/index.ts",
"src/sourcegraph-api/utils.ts",
"src/telemetry/EventLogger.ts",
"src/unified-context/client.ts",
"src/unified-context/index.ts",
"src/utils.ts",
],
tsconfig = ":tsconfig",
deps = [
":node_modules/@sourcegraph/common",
":node_modules/@sourcegraph/http-client",
":node_modules/@types/xml2js",
":node_modules/xml2js",
"//:node_modules/@microsoft/fetch-event-source",
"//:node_modules/@types/isomorphic-fetch",
"//:node_modules/@types/marked",
"//:node_modules/@types/node",
"//:node_modules/@types/react",
"//:node_modules/@types/vscode", #keep
"//:node_modules/@vscode",
"//:node_modules/isomorphic-fetch",
"//:node_modules/marked",
"//:node_modules/react",
],
)
npm_package(
name = "cody-shared_pkg",
srcs = [
"package.json",
":cody-shared_lib",
],
)
ts_project(
name = "cody-shared_tests",
testonly = True,
srcs = [
"src/chat/bot-response-multiplexer.test.ts",
"src/chat/transcript/transcript.test.ts",
"src/guardrails/index.test.ts",
"src/hallucinations-detector/index.test.ts",
"src/sourcegraph-api/utils.test.ts",
"src/test/mocks.ts",
],
tsconfig = ":tsconfig",
deps = [
":cody-shared_lib",
"//:node_modules/@types/node",
],
)

View File

@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2022 Sourcegraph, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -1,3 +0,0 @@
# Cody shared library
The `@sourcegraph/cody-shared` package contains code that is shared among Cody clients.

View File

@ -1,11 +0,0 @@
// @ts-check
/** @type {import('@jest/types').Config.InitialOptions} */
const config = require('../../jest.config.base')
/** @type {import('@jest/types').Config.InitialOptions} */
module.exports = {
...config,
displayName: 'cody-shared',
rootDir: __dirname,
}

View File

@ -1,28 +0,0 @@
{
"private": true,
"name": "@sourcegraph/cody-shared",
"version": "0.0.1",
"description": "Cody shared library",
"license": "Apache-2.0",
"repository": {
"type": "git",
"url": "https://github.com/sourcegraph/sourcegraph.git",
"directory": "client/cody-shared"
},
"main": "src/index.ts",
"sideEffects": false,
"scripts": {
"build": "tsc -b",
"lint": "pnpm run lint:js",
"lint:js": "eslint --cache '**/*.[tj]s?(x)'",
"test": "jest"
},
"dependencies": {
"@sourcegraph/common": "workspace:*",
"@sourcegraph/http-client": "workspace:*",
"xml2js": "^0.6.0"
},
"devDependencies": {
"@types/xml2js": "^0.4.11"
}
}

View File

@ -1,198 +0,0 @@
import assert from 'assert'
import { BotResponseMultiplexer, BufferedBotResponseSubscriber } from './bot-response-multiplexer'
/** Returns a resolver function paired with the promise it settles. */
function promise<T>(): [(value: T) => void, Promise<T>] {
    let resolveFn: ((value: T) => void) | undefined
    const pending = new Promise<T>(resolve => {
        resolveFn = resolve
    })
    if (!resolveFn) {
        // The Promise executor runs synchronously, so this cannot happen.
        throw new Error('unreachable')
    }
    return [resolveFn, pending]
}
// Unit tests for BotResponseMultiplexer's topic routing, tag parsing, and
// turn-completion behavior.
describe('BotResponseMultiplexer', () => {
    it('routes messages with no prefix to the default topic', async () => {
        const multiplexer = new BotResponseMultiplexer()
        const [published, publishedResult] = promise<void>()
        multiplexer.sub(BotResponseMultiplexer.DEFAULT_TOPIC, {
            onResponse(content): Promise<void> {
                assert.strictEqual(content, 'hello, world')
                published(undefined)
                return Promise.resolve()
            },
            onTurnComplete() {
                return Promise.resolve()
            },
        })
        await multiplexer.publish('hello, world')
        await multiplexer.notifyTurnComplete()
        await publishedResult
    })
    it('discards messages when there is no subscriber', async () => {
        const multiplexer = new BotResponseMultiplexer()
        await multiplexer.publish('is this thing on?')
    })
    it('treats unknown tags as content', async () => {
        const multiplexer = new BotResponseMultiplexer()
        const [published, publishedResult] = promise<void>()
        multiplexer.sub(
            BotResponseMultiplexer.DEFAULT_TOPIC,
            new BufferedBotResponseSubscriber(content => {
                assert.strictEqual(content, "<orator>I'm speechless</orator>")
                published(undefined)
                return Promise.resolve()
            })
        )
        await multiplexer.publish("<orator>I'm speechless</orator>")
        await multiplexer.notifyTurnComplete()
        await publishedResult
    })
    // Fixed test title: was the garbled 'things which lookl like tags as content'.
    it('treats things which look like tags as content', async () => {
        const multiplexer = new BotResponseMultiplexer()
        const [published, publishedResult] = promise<void>()
        multiplexer.sub(
            BotResponseMultiplexer.DEFAULT_TOPIC,
            new BufferedBotResponseSubscriber(content => {
                assert.strictEqual(content, '[] <--insert coin </wow> party hat emoji O:>')
                published(undefined)
                return Promise.resolve()
            })
        )
        await multiplexer.publish('[] <--insert coin </wow> party hat emoji O:>')
        await multiplexer.notifyTurnComplete()
        await publishedResult
    })
    it('routes messages to subscribers', async () => {
        const multiplexer = new BotResponseMultiplexer()
        multiplexer.sub(
            'cashier',
            new BufferedBotResponseSubscriber(content => {
                assert.deepStrictEqual(content, 'one double tall latte please\nand a donut\n')
                return Promise.resolve()
            })
        )
        multiplexer.sub(
            'barista',
            new BufferedBotResponseSubscriber(content => {
                assert.deepStrictEqual(content, ' can I get that to go?')
                return Promise.resolve()
            })
        )
        await multiplexer.publish(`<cashier>one double tall latte please
and a donut
</cashier><barista> can I get that to go?</barista>`)
    })
    it('can route nested topics', async () => {
        const multiplexer = new BotResponseMultiplexer()
        const conspiracyTopic: string[] = []
        multiplexer.sub('conspiracy', {
            onResponse(content) {
                conspiracyTopic.push(content)
                return Promise.resolve()
            },
            onTurnComplete() {
                return Promise.resolve()
            },
        })
        const deeperConspiracyTopic: string[] = []
        multiplexer.sub('deeper-conspiracy', {
            onResponse(content) {
                deeperConspiracyTopic.push(content)
                return Promise.resolve()
            },
            onTurnComplete() {
                return Promise.resolve()
            },
        })
        // Note, no default topic this time.
        await multiplexer.publish(`everything is a-ok
<conspiracy>birds are not <deeper-conspiracy><--they are a government plot!!1!--></deeper-conspiracy>real</conspiracy>`)
        await multiplexer.notifyTurnComplete()
        assert.deepStrictEqual(conspiracyTopic, ['birds are not ', 'real'])
        assert.deepStrictEqual(deeperConspiracyTopic, ['<--they', ' are a government plot!!1!-->'])
    })
    it('can route to specific subscribers and the default subscriber', async () => {
        const multiplexer = new BotResponseMultiplexer()
        const conspiracyTopic: string[] = []
        multiplexer.sub('deep-state', {
            onResponse(content) {
                conspiracyTopic.push(content)
                return Promise.resolve()
            },
            onTurnComplete() {
                return Promise.resolve()
            },
        })
        const defaultTopic: string[] = []
        multiplexer.sub(BotResponseMultiplexer.DEFAULT_TOPIC, {
            onResponse(content) {
                defaultTopic.push(content)
                return Promise.resolve()
            },
            onTurnComplete() {
                return Promise.resolve()
            },
        })
        await multiplexer.publish(`everything is a-ok
<deep-state>birds are not real</deep-state>
<deep-state> they are a government plot</deep-state>`)
        await multiplexer.notifyTurnComplete()
        assert.deepStrictEqual(defaultTopic, ['everything is a-ok\n', '\n'])
        assert.deepStrictEqual(conspiracyTopic, ['birds are not real', ' they are a government plot'])
    })
    it('can handle sloppily closed tags, or unclosed tags', async () => {
        const multiplexer = new BotResponseMultiplexer()
        const defaultTopic: string[] = []
        multiplexer.sub(BotResponseMultiplexer.DEFAULT_TOPIC, {
            onResponse(content) {
                defaultTopic.push(content)
                return Promise.resolve()
            },
            onTurnComplete() {
                return Promise.resolve()
            },
        })
        const rowTopic: string[] = []
        multiplexer.sub('row', {
            onResponse(content) {
                rowTopic.push(content)
                return Promise.resolve()
            },
            onTurnComplete() {
                return Promise.resolve()
            },
        })
        const cellTopic: string[] = []
        multiplexer.sub('cell', {
            onResponse(content) {
                cellTopic.push(content)
                return Promise.resolve()
            },
            onTurnComplete() {
                return Promise.resolve()
            },
        })
        await multiplexer.publish('<row>S, V F X<cell>variety</cell><cell>limburger</row>F U N E X')
        await multiplexer.notifyTurnComplete()
        assert.deepStrictEqual(defaultTopic, ['F U N E X'])
        assert.deepStrictEqual(rowTopic, ['S, V F X'])
        assert.deepStrictEqual(cellTopic, ['variety', 'limburger'])
    })
})

View File

@ -1,230 +0,0 @@
/**
 * Processes the part of a response from Cody addressed to a specific topic.
 * Implementations receive streamed content for their topic and are told when
 * a conversational turn ends.
 */
export interface BotResponseSubscriber {
    /**
     * Processes incremental content from the bot. This may be called multiple times during a turn.
     *
     * @param content the incremental text from the bot that was addressed to the subscriber
     */
    onResponse(content: string): Promise<void>

    /**
     * Notifies the subscriber that a turn has completed.
     */
    onTurnComplete(): Promise<void>
}
/**
 * A bot response subscriber that accumulates a turn's content and delivers the
 * whole bot response to a callback in one shot, instead of incrementally.
 */
export class BufferedBotResponseSubscriber implements BotResponseSubscriber {
    private chunks: string[] = []

    /**
     * Creates a BufferedBotResponseSubscriber. `callback` is called once per
     * turn with the bot's entire output for the topic provided in one shot.
     * If the topic was never mentioned during the turn, `callback` is called
     * with `undefined`, signifying the end of a turn.
     *
     * @param callback the callback to handle content from the bot, if any.
     */
    constructor(private callback: (content: string | undefined) => Promise<void>) {}

    // BotResponseSubscriber implementation

    public onResponse(content: string): Promise<void> {
        this.chunks.push(content)
        return Promise.resolve()
    }

    public async onTurnComplete(): Promise<void> {
        // Deliver first, then reset — the buffer is only cleared once the
        // callback has run (same ordering as before the rewrite).
        await this.callback(this.chunks.length > 0 ? this.chunks.join('') : undefined)
        this.chunks = []
    }
}
/**
 * Splits a string in one or two places.
 *
 * For example, `splitAt('banana!', 2) => ['ba', 'nana!']`
 * but `splitAt('banana!', 2, 4) => ['ba', 'na!']`
 *
 * @param str the string to split.
 * @param startIndex the index where the left piece ends.
 * @param endIndex optional index where the right piece begins, allowing the
 * middle of `str` over `[startIndex..endIndex)` to be skipped.
 * @returns an array with the two substring pieces.
 */
function splitAt(str: string, startIndex: number, endIndex?: number): [string, string] {
    const left = str.slice(0, startIndex)
    const right = endIndex === undefined ? str.slice(startIndex) : str.slice(endIndex)
    return [left, right]
}
/**
 * Extracts the tag name from something that looks like a simple XML tag — the
 * mechanism BotResponseMultiplexer uses to let the LLM address specific topics.
 *
 * @param tag the tag, including angle brackets, to extract the topic name from.
 * @returns the topic name.
 * @throws when `tag` is not a well-formed open or close tag.
 */
function topicName(tag: string): string {
    // TODO(dpc): Consider allowing the LLM to put junk in tags like attributes, space, etc.
    const match = /^<\/?([A-Za-z-]+)>$/.exec(tag)
    if (match === null) {
        throw new Error(`topic tag "${tag}" is malformed`)
    }
    return match[1]
}
/**
 * Incrementally consumes a response from the bot, breaking out parts addressing
 * different topics and forwarding those parts to a registered subscriber, if any.
 */
export class BotResponseMultiplexer {
    /**
     * The default topic. Messages without a specific topic are sent to the default
     * topic subscriber, if any.
     */
    public static readonly DEFAULT_TOPIC = 'Assistant'

    // Matches topic open or close tags. Also matches incomplete tag prefixes at
    // the end of the buffer (for example "<" or "</ab"), which may become a
    // complete tag once more streamed content arrives.
    private static readonly TOPIC_RE = /<$|<\/?([A-Za-z-]?$|[A-Za-z-]+>?)/m

    private subs_ = new Map<string, BotResponseSubscriber>()

    // The topic currently being addressed by the bot. A stack.
    private topics_: string[] = []

    // Gets the topic on the top of the topic stack.
    private get currentTopic(): string {
        return this.topics_.at(-1) || BotResponseMultiplexer.DEFAULT_TOPIC
    }

    // Buffers responses until topics can be parsed
    private buffer_ = ''

    /**
     * Subscribes to a topic in the bot response. Each topic can have only one subscriber at a time. New subscribers overwrite old ones.
     *
     * @param topic the string prefix to subscribe to.
     * @param subscriber the handler for the content produced by the bot.
     */
    public sub(topic: string, subscriber: BotResponseSubscriber): void {
        // This test needs to be kept in sync with `TOPIC_RE`
        if (!/^[A-Za-z-]+$/.test(topic)) {
            // Fixed: the closing quote around the topic was missing from this message.
            throw new Error(`topics must be A-Za-z-, was "${topic}"`)
        }
        this.subs_.set(topic, subscriber)
    }

    /**
     * Notifies all subscribers that the bot response is complete.
     */
    public async notifyTurnComplete(): Promise<void> {
        // Flush buffered content, if any
        if (this.buffer_) {
            const content = this.buffer_
            this.buffer_ = ''
            await this.publishInTopic(this.currentTopic, content)
        }
        // Reset to the default topic, ready for another turn
        this.topics_ = []
        // Let subscribers react to the end of the turn.
        await Promise.all([...this.subs_.values()].map(subscriber => subscriber.onTurnComplete()))
    }

    /**
     * Parses part of a compound response from the bot and forwards as much as possible to
     * subscribers.
     *
     * @param response the text of the next incremental response from the bot.
     */
    public async publish(response: string): Promise<void> {
        // This is basically a loose parser of an XML-like language which forwards
        // incremental content to subscribers which handle specific tags. The parser
        // is forgiving if tags are not closed in the right order.
        this.buffer_ += response
        let last
        while (this.buffer_) {
            // Guard against parser bugs: every iteration must consume input.
            if (typeof last !== 'undefined' && last === this.buffer_.length) {
                throw new Error(`did not make progress parsing: ${this.buffer_}`)
            }
            last = this.buffer_.length
            // Look for something that could be a topic.
            const match = this.buffer_.match(BotResponseMultiplexer.TOPIC_RE)
            if (!match) {
                // No topic change is forming, so publish the in-progress content to the current topic
                await this.publishBufferUpTo(this.buffer_.length)
                return
            }
            if (typeof match.index === 'undefined') {
                throw new TypeError('unreachable')
            }
            if (match.index) {
                // Flush the content before the start (end) topic tag
                await this.publishBufferUpTo(match.index)
                continue // spin again to get a match with resynced indices
            }
            const matchEnd = match.index + match[0].length
            const tagIsOpenTag = match[0].length >= 2 && match[0].at(1) !== '/'
            const tagIsComplete = match[0].at(-1) === '>'
            if (!tagIsComplete) {
                if (matchEnd === this.buffer_.length) {
                    // We must wait for more content to see how this plays out.
                    return
                }
                // The tag is incomplete, but there's content after it, for
                // example: "<--insert coin", match will be "<--insert". Treat
                // it as content.
                await this.publishBufferUpTo(matchEnd)
                continue
            }
            // The tag is complete.
            const topic = topicName(match[0])
            if (!this.subs_.has(topic)) {
                // There are no subscribers for this topic, so treat it as content.
                await this.publishBufferUpTo(matchEnd)
                continue
            }
            this.buffer_ = this.buffer_.slice(matchEnd) // Consume the close tag
            if (tagIsOpenTag) {
                // Handle a new topic
                this.topics_.push(topic)
            } else {
                // Handle the end of a topic: Pop the topic stack until we find a match.
                while (this.topics_.length) {
                    if (this.topics_.pop() === topic) {
                        break
                    }
                }
            }
        }
    }

    // Publishes the content of `buffer_` up to `index` in the current topic. Discards the published content.
    private publishBufferUpTo(index: number): Promise<void> {
        let content
        ;[content, this.buffer_] = splitAt(this.buffer_, index)
        return this.publishInTopic(this.currentTopic, content)
    }

    // Publishes one specific topic to its subscriber, if any.
    private async publishInTopic(topic: string, content: string): Promise<void> {
        const sub = this.subs_.get(topic)
        if (!sub) {
            return
        }
        return sub.onResponse(content)
    }

    /** Produces a prompt to describe the response format to the bot. */
    public prompt(): string {
        return `Enclose each part of the response in one of the relevant tags: ${[...this.subs_.keys()]
            .map(topic => `<${topic}>`)
            .join(', ')}:\n\n`
    }
}

View File

@ -1,43 +0,0 @@
import { ANSWER_TOKENS } from '../prompt/constants'
import { Message } from '../sourcegraph-api'
import type { SourcegraphCompletionsClient } from '../sourcegraph-api/completions/client'
import type { CompletionParameters, CompletionCallbacks } from '../sourcegraph-api/completions/types'
import { createTypewriter } from './typewriter'
// Completion parameters minus `messages`, which the ChatClient supplies itself.
type ChatParameters = Omit<CompletionParameters, 'messages'>

// Defaults applied to every chat completion request unless overridden by the caller.
const DEFAULT_CHAT_COMPLETION_PARAMETERS: ChatParameters = {
    temperature: 0.2,
    maxTokensToSample: ANSWER_TOKENS,
    topK: -1, // presumably -1 disables top-k sampling — confirm against the completions API
    topP: -1, // presumably -1 disables top-p (nucleus) sampling — confirm against the completions API
}
/**
 * Thin wrapper around a SourcegraphCompletionsClient that streams chat
 * completions with sensible defaults and typewriter-style output buffering.
 */
export class ChatClient {
    constructor(private completions: SourcegraphCompletionsClient) {}

    /**
     * Streams a chat completion for the given transcript.
     *
     * @param messages the transcript so far
     * @param cb callbacks invoked as the response streams in
     * @param params optional overrides for the default completion parameters
     * @returns a function that aborts the in-flight request
     */
    public chat(messages: Message[], cb: CompletionCallbacks, params?: Partial<ChatParameters>): () => void {
        const endsWithHumanMessage = messages.length > 0 && messages[messages.length - 1].speaker === 'human'
        // The completions API expects the transcript to end with an (empty) assistant message.
        const augmentedMessages = endsWithHumanMessage ? [...messages, { speaker: 'assistant' as const }] : messages

        // Buffer raw streaming updates so the UI sees smooth, incremental output.
        const typewriter = createTypewriter({ emit: cb.onChange })

        return this.completions.stream(
            {
                ...DEFAULT_CHAT_COMPLETION_PARAMETERS,
                ...params,
                messages: augmentedMessages,
            },
            {
                ...cb,
                onChange: typewriter.write,
                onComplete: () => {
                    typewriter.stop()
                    cb.onComplete()
                },
            }
        )
    }
}

View File

@ -1,173 +0,0 @@
import { CodebaseContext } from '../codebase-context'
import { ConfigurationWithAccessToken } from '../configuration'
import { Editor } from '../editor'
import { PrefilledOptions, withPreselectedOptions } from '../editor/withPreselectedOptions'
import { SourcegraphEmbeddingsSearchClient } from '../embeddings/client'
import { SourcegraphIntentDetectorClient } from '../intent-detector/client'
import { SourcegraphBrowserCompletionsClient } from '../sourcegraph-api/completions/browserClient'
import { CompletionsClientConfig, SourcegraphCompletionsClient } from '../sourcegraph-api/completions/client'
import { SourcegraphGraphQLAPIClient } from '../sourcegraph-api/graphql'
import { isError } from '../utils'
import { BotResponseMultiplexer } from './bot-response-multiplexer'
import { ChatClient } from './chat'
import { getPreamble } from './preamble'
import { getRecipe } from './recipes/browser-recipes'
import { RecipeID } from './recipes/recipe'
import { Transcript, TranscriptJSON } from './transcript'
import { ChatMessage } from './transcript/messages'
import { reformatBotMessage } from './viewHelpers'
export type { TranscriptJSON }
export { Transcript }

/** Subset of the full configuration needed to create a {@link Client}. */
export type ClientInitConfig = Pick<
    ConfigurationWithAccessToken,
    'serverEndpoint' | 'codebase' | 'useContext' | 'accessToken' | 'customHeaders'
>

/** Inputs for {@link createClient}. */
export interface ClientInit {
    config: ClientInitConfig
    // Called whenever the in-progress assistant message changes; null when no message is streaming.
    setMessageInProgress: (messageInProgress: ChatMessage | null) => void
    // Called whenever the transcript changes.
    setTranscript: (transcript: Transcript) => void
    editor: Editor
    // Optional transcript to resume from instead of starting empty.
    initialTranscript?: Transcript
    // Factory for the completions client; defaults to the browser implementation.
    createCompletionsClient?: (config: CompletionsClientConfig) => SourcegraphCompletionsClient
}

/** A Cody chat client: submits messages, runs recipes, and exposes chat state. */
export interface Client {
    readonly transcript: Transcript
    readonly isMessageInProgress: boolean
    submitMessage: (text: string) => Promise<void>
    executeRecipe: (
        recipeId: RecipeID,
        options?: {
            prefilledOptions?: PrefilledOptions
            humanChatInput?: string
        }
    ) => Promise<void>
    reset: () => void
    codebaseContext: CodebaseContext
}
/**
 * Creates a {@link Client} wired to the configured Sourcegraph instance.
 *
 * Builds the completions, GraphQL, intent-detection, and (when the configured
 * codebase has embeddings) embeddings-search clients, then returns an object
 * for submitting chat messages and executing recipes against a shared transcript.
 *
 * @throws Error when the configured codebase cannot be accessed on the instance.
 */
export async function createClient({
    config,
    setMessageInProgress,
    setTranscript,
    editor,
    initialTranscript,
    createCompletionsClient = config => new SourcegraphBrowserCompletionsClient(config),
}: ClientInit): Promise<Client> {
    const fullConfig = { debugEnable: false, ...config }
    const completionsClient = createCompletionsClient(fullConfig)
    const chatClient = new ChatClient(completionsClient)
    const graphqlClient = new SourcegraphGraphQLAPIClient(fullConfig)
    // Embeddings search is only available when the repo exists and has embeddings indexed.
    const repoId = config.codebase ? await graphqlClient.getRepoIdIfEmbeddingExists(config.codebase) : null
    if (isError(repoId)) {
        throw new Error(
            `Cody could not access the '${config.codebase}' repository on your Sourcegraph instance. Details: ${repoId.message}`
        )
    }
    const embeddingsSearch = repoId ? new SourcegraphEmbeddingsSearchClient(graphqlClient, repoId, true) : null
    const codebaseContext = new CodebaseContext(config, config.codebase, embeddingsSearch, null, null)
    const intentDetector = new SourcegraphIntentDetectorClient(graphqlClient)
    const transcript = initialTranscript || new Transcript()
    let isMessageInProgress = false
    // Pushes the current transcript (and in-progress assistant message, if any) to the UI callbacks.
    const sendTranscript = (): void => {
        if (isMessageInProgress) {
            // The last chat message is the one currently being streamed.
            const messages = transcript.toChat()
            setTranscript(transcript)
            setMessageInProgress(messages[messages.length - 1])
        } else {
            setTranscript(transcript)
            setMessageInProgress(null)
        }
    }
    // Runs a recipe: builds the interaction, streams the completion, and keeps the UI in sync.
    async function executeRecipe(
        recipeId: RecipeID,
        options?: {
            prefilledOptions?: PrefilledOptions
            humanChatInput?: string
        }
    ): Promise<void> {
        const humanChatInput = options?.humanChatInput ?? ''
        const recipe = getRecipe(recipeId)
        if (!recipe) {
            // Unknown recipe ID: nothing to do.
            return
        }
        const interaction = await recipe.getInteraction(humanChatInput, {
            editor: options?.prefilledOptions ? withPreselectedOptions(editor, options.prefilledOptions) : editor,
            intentDetector,
            codebaseContext,
            responseMultiplexer: new BotResponseMultiplexer(),
            firstInteraction: transcript.isEmpty,
        })
        if (!interaction) {
            // The recipe declined to produce an interaction (e.g. no selection).
            return
        }
        isMessageInProgress = true
        transcript.addInteraction(interaction)
        sendTranscript()
        const { prompt, contextFiles } = await transcript.getPromptForLastInteraction(getPreamble(config.codebase))
        transcript.setUsedContextFilesForLastInteraction(contextFiles)
        const responsePrefix = interaction.getAssistantMessage().prefix ?? ''
        let rawText = ''
        chatClient.chat(prompt, {
            onChange(_rawText) {
                rawText = _rawText
                const text = reformatBotMessage(rawText, responsePrefix)
                transcript.addAssistantResponse(text)
                sendTranscript()
            },
            onComplete() {
                isMessageInProgress = false
                const text = reformatBotMessage(rawText, responsePrefix)
                transcript.addAssistantResponse(text)
                sendTranscript()
            },
            onError(error) {
                // Display error message as assistant response
                transcript.addErrorAsAssistantResponse(error)
                isMessageInProgress = false
                sendTranscript()
                console.error(`Completion request failed: ${error}`)
            },
        })
    }
    return {
        get transcript() {
            return transcript
        },
        get isMessageInProgress() {
            return isMessageInProgress
        },
        submitMessage(text: string) {
            return executeRecipe('chat-question', { humanChatInput: text })
        },
        executeRecipe,
        reset() {
            isMessageInProgress = false
            transcript.reset()
            sendTranscript()
        },
        codebaseContext,
    }
}

View File

@ -1,11 +0,0 @@
import { ConfigurationUseContext } from '../configuration'
import { ActiveTextEditorSelectionRange } from '../editor'
/** Snapshot of the chat's current context configuration. All fields are optional. */
export interface ChatContextStatus {
    // Which context mode is in use — see ConfigurationUseContext.
    mode?: ConfigurationUseContext
    // Whether a connection to the instance is established — NOTE(review): confirm semantics with callers.
    connection?: boolean
    // Name of the codebase providing context, if any.
    codebase?: string
    // Path of the file currently providing editor context.
    filePath?: string
    // The active editor selection range, if any.
    selection?: ActiveTextEditorSelectionRange
    // Whether keyword-based context is supported.
    supportsKeyword?: boolean
}

View File

@ -1,74 +0,0 @@
import { marked } from 'marked'
import { registerHighlightContributions, renderMarkdown as renderMarkdownCommon } from '@sourcegraph/common'
/**
 * Supported URIs to render as links in outputted markdown.
 * - https?: Web
 * - vscode: VS Code URL scheme (open in editor)
 * - command:cody.welcome: VS Code command scheme exception we add to support directly linking to the welcome guide from within the chat.
 */
const ALLOWED_URI_REGEXP = /^((https?|vscode):\/\/[^\s#$./?].\S*|command:cody.welcome)$/i

// DOMPurify configuration: the whitelist of HTML tags allowed in rendered chat
// markdown, plus the URI restriction above. Anything else is stripped.
const DOMPURIFY_CONFIG = {
    ALLOWED_TAGS: [
        'p',
        'div',
        'span',
        'pre',
        'h1',
        'h2',
        'h3',
        'h4',
        'h5',
        'h6',
        'i',
        'em',
        'b',
        'strong',
        'code',
        'pre',
        'blockquote',
        'ul',
        'li',
        'ol',
        'a',
        'table',
        'tr',
        'th',
        'td',
        'thead',
        'tbody',
        'tfoot',
        's',
        'u',
    ],
    ALLOWED_URI_REGEXP,
}
/**
 * Render Markdown to safe HTML.
 *
 * NOTE: This only works when called in an environment with the DOM. In the VS
 * Code extension, it only works in the webview context, not in the extension
 * host context, because the latter lacks a DOM. We could use
 * isomorphic-dompurify for that, but that adds needless complexity for now. If
 * that becomes necessary, we can add that.
 */
export function renderCodyMarkdown(markdown: string): string {
    // Make sure the syntax-highlighting contributions are registered before rendering.
    registerHighlightContributions()

    // Add Cody-specific Markdown rendering if needed.
    const renderOptions = {
        breaks: true,
        dompurifyConfig: DOMPURIFY_CONFIG,
        addTargetBlankToAllLinks: true,
    }
    return renderMarkdownCommon(markdown, renderOptions)
}
/**
 * Returns the parsed markdown at block level.
 *
 * Lexes with GitHub Flavored Markdown enabled (`gfm: true`).
 */
export function parseMarkdown(text: string): marked.Token[] {
    return marked.Lexer.lex(text, { gfm: true })
}

View File

@ -1,99 +0,0 @@
import { Message } from '../sourcegraph-api'
// Human-side preamble: establishes Cody's identity and the actions it performs.
const actions = `You are Cody, an AI-powered coding assistant created by Sourcegraph. You work inside a text editor. You have access to my currently open files. You perform the following actions:
- Answer general programming questions.
- Answer questions about the code that I have provided to you.
- Generate code that matches a written description.
- Explain what a section of code does.`

// Response rules for single-repository chats.
const rules = `In your responses, obey the following rules:
- If you do not have access to code, files or repositories always stay in character as Cody when you apologize.
- Be as brief and concise as possible without losing clarity.
- All code snippets have to be markdown-formatted, and placed in-between triple backticks like this \`\`\`.
- Answer questions only if you know the answer or can make a well-informed guess. Otherwise, tell me you don't know and what context I need to provide you for you to answer the question.
- Only reference file names, repository names or URLs if you are sure they exist.`

// Response rules for multi-repository chats; adds guidance to ask the user to
// add repositories via the repository selector.
const multiRepoRules = `In your responses, obey the following rules:
- If you do not have access to code, files or repositories always stay in character as Cody when you apologize.
- Be as brief and concise as possible without losing clarity.
- All code snippets have to be markdown-formatted, and placed in-between triple backticks like this \`\`\`.
- Answer questions only if you know the answer or can make a well-informed guess. Otherwise, tell me you don't know and what context I need to provide you for you to answer the question.
- If you do not have access to a repository, tell me to add additional repositories to the chat context using repositories selector below the input box to help you answer the question.
- Only reference file names, repository names or URLs if you are sure they exist.`

// The canned assistant acknowledgment that completes the preamble exchange.
const answer = `Understood. I am Cody, an AI assistant made by Sourcegraph to help with programming tasks.
I work inside a text editor. I have access to your currently open files in the editor.
I will answer questions, explain code, and generate code as concisely and clearly as possible.
My responses will be formatted using Markdown syntax for code blocks.
I will acknowledge when I don't know an answer or need more context.`
/**
 * Creates and returns an array of two messages: one from a human, and the supposed response from the AI assistant.
 * Both messages contain an optional note about the current codebase if it's not null.
 */
export function getPreamble(codebase: string | undefined): Message[] {
    const humanParts = [actions, rules]
    const assistantParts = [answer]
    if (codebase) {
        // Tell the model which repository it can answer questions about.
        humanParts.push(
            `You have access to the \`${codebase}\` repository. You are able to answer questions about the \`${codebase}\` repository. ` +
                `I will provide the relevant code snippets from the \`${codebase}\` repository when necessary to answer my questions. ` +
                `If I ask you a question about a repository other than \`${codebase}\`, tell me to add additional repositories to the chat context using the repositories selector below the input box to help you answer the question.`
        )
        assistantParts.push(
            `I have access to the \`${codebase}\` repository and can answer questions about its files.`
        )
    }
    return [
        { speaker: 'human', text: humanParts.join('\n\n') },
        { speaker: 'assistant', text: assistantParts.join('\n') },
    ]
}
/**
 * Creates the preamble message pair for a chat with access to multiple repositories.
 *
 * Falls back to {@link getPreamble} when exactly one codebase is given; when no
 * codebases are given, returns the generic multi-repo preamble without a
 * repository list.
 */
export function getMultiRepoPreamble(codebases: string[]): Message[] {
    const preamble = [actions, multiRepoRules]
    const preambleResponse = [answer]
    if (codebases.length === 1) {
        return getPreamble(codebases[0])
    }
    if (codebases.length) {
        preamble.push(
            `You have access to ${codebases.length} repositories:\n` +
                codebases.map((name, index) => `${index + 1}. ${name}`).join('\n') +
                '\n You are able to answer questions about all the above repositories. ' +
                'I will provide the relevant code snippets from the files present in the above repositories when necessary to answer my questions. ' +
                'If I ask you a question about a repository which is not listed above, please tell me to add additional repositories to the chat context using the repositories selector below the input box to help you answer the question.' +
                // Fixed typo: "quesstion" -> "question".
                '\n If the repository is listed above but you do not know the answer to the question, tell me you do not know and what context I need to provide you for you to answer the question.'
        )
        preambleResponse.push(
            'I have access to files present in the following repositories:\n' +
                codebases.map((name, index) => `${index + 1}. ${name}`).join('\n') +
                // Was '\\n' — a literal backslash-n leaked into the prompt text; use a real
                // newline to match the human-side preamble above.
                '\n I can answer questions about code and files present in all the above repositories. ' +
                'If you ask a question about a repository which I do not have access to, I will ask you to add additional repositories to the chat context using the repositories selector below the input box to help me answer the question. ' +
                'If I have access to the repository but do not know the answer to the question, I will tell you I do not know and what context you need to provide me for me to answer the question.'
        )
    }
    return [
        {
            speaker: 'human',
            text: preamble.join('\n\n'),
        },
        {
            speaker: 'assistant',
            text: preambleResponse.join('\n'),
        },
    ]
}

View File

@ -1,50 +0,0 @@
import { ChatQuestion } from './chat-question'
import { ExplainCodeDetailed } from './explain-code-detailed'
import { ExplainCodeHighLevel } from './explain-code-high-level'
import { FindCodeSmells } from './find-code-smells'
import { GenerateDocstring } from './generate-docstring'
import { GenerateTest } from './generate-test'
import { ImproveVariableNames } from './improve-variable-names'
import { Recipe, RecipeID } from './recipe'
import { TranslateToLanguage } from './translate'
// No-op logger passed to recipes that require a debug callback.
function nullLog(filterLabel: string, text: string, ...args: unknown[]): void {
    // Do nothing
}

// Registry of all recipes available in the browser build, keyed by recipe ID.
export const registeredRecipes: { [id in RecipeID]?: Recipe } = {}

// Looks up a recipe by ID; undefined when no recipe with that ID is registered.
export function getRecipe(id: RecipeID): Recipe | undefined {
    return registeredRecipes[id]
}

function registerRecipe(id: RecipeID, recipe: Recipe): void {
    registeredRecipes[id] = recipe
}

// Populates the registry once at module load; subsequent calls are no-ops.
function init(): void {
    if (Object.keys(registeredRecipes).length > 0) {
        return
    }
    const recipes: Recipe[] = [
        new ChatQuestion(nullLog),
        new ExplainCodeDetailed(),
        new ExplainCodeHighLevel(),
        new GenerateDocstring(),
        new GenerateTest(),
        new ImproveVariableNames(),
        new TranslateToLanguage(),
        new FindCodeSmells(),
    ]
    for (const recipe of recipes) {
        const existingRecipe = getRecipe(recipe.id)
        if (existingRecipe) {
            // Guard against two recipes claiming the same ID.
            throw new Error(`Duplicate recipe with ID ${recipe.id}`)
        }
        registerRecipe(recipe.id, recipe)
    }
}
init()

View File

@ -1,50 +0,0 @@
import { ChatQuestion } from './chat-question'
import { ExplainCodeDetailed } from './explain-code-detailed'
import { ExplainCodeHighLevel } from './explain-code-high-level'
import { FindCodeSmells } from './find-code-smells'
import { GenerateDocstring } from './generate-docstring'
import { GenerateTest } from './generate-test'
import { ImproveVariableNames } from './improve-variable-names'
import { Recipe, RecipeID } from './recipe'
import { TranslateToLanguage } from './translate'
// Registry of all recipes available in this build, keyed by recipe ID.
const registeredRecipes: { [id in RecipeID]?: Recipe } = {}

// Registers (or replaces) the recipe for the given ID.
export function registerRecipe(id: RecipeID, recipe: Recipe): void {
    registeredRecipes[id] = recipe
}

// Looks up a recipe by ID; undefined when no recipe with that ID is registered.
export function getRecipe(id: RecipeID): Recipe | undefined {
    return registeredRecipes[id]
}

// No-op logger passed to recipes that require a debug callback.
function nullLog(filterLabel: string, text: string, ...args: unknown[]): void {
    // Do nothing
}

// Populates the registry once at module load; subsequent calls are no-ops.
function init(): void {
    if (Object.keys(registeredRecipes).length > 0) {
        return
    }
    const recipes: Recipe[] = [
        new ChatQuestion(nullLog),
        new ExplainCodeDetailed(),
        new ExplainCodeHighLevel(),
        new GenerateDocstring(),
        new GenerateTest(),
        new ImproveVariableNames(),
        new TranslateToLanguage(),
        new FindCodeSmells(),
    ]
    for (const recipe of recipes) {
        const existingRecipe = getRecipe(recipe.id)
        if (existingRecipe) {
            // Guard against two recipes claiming the same ID.
            throw new Error(`Duplicate recipe with ID ${recipe.id}`)
        }
        registerRecipe(recipe.id, recipe)
    }
}
init()

View File

@ -1,94 +0,0 @@
import { CodebaseContext } from '../../codebase-context'
import { ContextMessage, getContextMessageWithResponse } from '../../codebase-context/messages'
import { ActiveTextEditorSelection, Editor } from '../../editor'
import { IntentDetector } from '../../intent-detector'
import { MAX_CURRENT_FILE_TOKENS, MAX_HUMAN_INPUT_TOKENS } from '../../prompt/constants'
import {
populateCurrentEditorContextTemplate,
populateCurrentEditorSelectedContextTemplate,
} from '../../prompt/templates'
import { truncateText } from '../../prompt/truncation'
import { Interaction } from '../transcript/interaction'
import { Recipe, RecipeContext, RecipeID } from './recipe'
/**
 * The default chat recipe: sends the user's question along with context
 * gathered from the codebase and/or the active editor.
 */
export class ChatQuestion implements Recipe {
    public id: RecipeID = 'chat-question'

    constructor(private debug: (filterLabel: string, text: string, ...args: unknown[]) => void) {}

    // Builds the interaction for a free-form question. Context gathering is
    // passed as a (lazy) promise so it resolves alongside the completion request.
    public async getInteraction(humanChatInput: string, context: RecipeContext): Promise<Interaction | null> {
        const truncatedText = truncateText(humanChatInput, MAX_HUMAN_INPUT_TOKENS)
        return Promise.resolve(
            new Interaction(
                { speaker: 'human', text: truncatedText, displayText: humanChatInput },
                { speaker: 'assistant' },
                this.getContextMessages(
                    truncatedText,
                    context.editor,
                    context.firstInteraction,
                    context.intentDetector,
                    context.codebaseContext,
                    context.editor.getActiveTextEditorSelection() || null
                ),
                []
            )
        )
    }

    // Assembles context messages from the codebase, the visible editor content,
    // and the current selection, depending on what the intent detector decides.
    private async getContextMessages(
        text: string,
        editor: Editor,
        firstInteraction: boolean,
        intentDetector: IntentDetector,
        codebaseContext: CodebaseContext,
        selection: ActiveTextEditorSelection | null
    ): Promise<ContextMessage[]> {
        const contextMessages: ContextMessage[] = []
        // The first interaction always pulls codebase context.
        const isCodebaseContextRequired = firstInteraction || (await intentDetector.isCodebaseContextRequired(text))
        this.debug('ChatQuestion:getContextMessages', 'isCodebaseContextRequired', isCodebaseContextRequired)
        if (isCodebaseContextRequired) {
            const codebaseContextMessages = await codebaseContext.getContextMessages(text, {
                numCodeResults: 12,
                numTextResults: 3,
            })
            contextMessages.push(...codebaseContextMessages)
        }

        const isEditorContextRequired = intentDetector.isEditorContextRequired(text)
        this.debug('ChatQuestion:getContextMessages', 'isEditorContextRequired', isEditorContextRequired)
        if (isCodebaseContextRequired || isEditorContextRequired) {
            contextMessages.push(...ChatQuestion.getEditorContext(editor))
        }

        // Add selected text as context when available
        if (selection?.selectedText) {
            contextMessages.push(...ChatQuestion.getEditorSelectionContext(selection))
        }

        return contextMessages
    }

    // Context from the visible content of the active editor, truncated to fit.
    public static getEditorContext(editor: Editor): ContextMessage[] {
        const visibleContent = editor.getActiveTextEditorVisibleContent()
        if (!visibleContent) {
            return []
        }
        const truncatedContent = truncateText(visibleContent.content, MAX_CURRENT_FILE_TOKENS)
        return getContextMessageWithResponse(
            populateCurrentEditorContextTemplate(truncatedContent, visibleContent.fileName, visibleContent.repoName),
            visibleContent
        )
    }

    // Context from the current selection, truncated to fit.
    public static getEditorSelectionContext(selection: ActiveTextEditorSelection): ContextMessage[] {
        const truncatedContent = truncateText(selection.selectedText, MAX_CURRENT_FILE_TOKENS)
        return getContextMessageWithResponse(
            populateCurrentEditorSelectedContextTemplate(truncatedContent, selection.fileName, selection.repoName),
            selection
        )
    }
}

View File

@ -1,92 +0,0 @@
import * as vscode from 'vscode'
import { CodebaseContext } from '../../codebase-context'
import { MAX_HUMAN_INPUT_TOKENS } from '../../prompt/constants'
import { truncateText } from '../../prompt/truncation'
import { Interaction } from '../transcript/interaction'
import { getFileExtension } from './helpers'
import { Recipe, RecipeContext, RecipeID } from './recipe'
/*
This class implements the context-search recipe.
Parameters:
- humanChatInput: The input from the human. If empty, a prompt will be shown to enter a search query.
- context: The recipe context.
Functionality:
- Gets a search query from the human input or a prompt.
- Truncates the query to MAX_HUMAN_INPUT_TOKENS.
- Searches the vector database for code and text results matching the query.
- If codebase is not embedded or if keyword context is selected, get local keyword context instead
- Returns up to 12 code results and 3 text results.
- Generates a markdown string displaying the results with file names linking to the search page for that file.
- Sanitizes the content by removing newlines, tabs and backticks before displaying.
*/
/**
 * Recipe that searches the codebase context for snippets matching a query and
 * renders the results as markdown (no completion request is made).
 */
export class ContextSearch implements Recipe {
    public id: RecipeID = 'context-search'

    public async getInteraction(humanChatInput: string, context: RecipeContext): Promise<Interaction | null> {
        // Take the query from the chat input, or prompt for one if the input was empty.
        const query = humanChatInput || (await context.editor.showInputBox('Enter your search query here...')) || ''
        if (!query) {
            return null
        }
        // Strip the slash-command prefix and cap the query length.
        const truncatedText = truncateText(query.replace('/search ', '').replace('/s ', ''), MAX_HUMAN_INPUT_TOKENS)
        const wsRootPath = context.editor.getWorkspaceRootPath()
        return new Interaction(
            {
                speaker: 'human',
                text: '',
                displayText: query,
            },
            {
                speaker: 'assistant',
                text: '',
                displayText: await this.displaySearchResults(truncatedText, context.codebaseContext, wsRootPath),
            },
            new Promise(resolve => resolve([])),
            []
        )
    }

    /**
     * Renders search results as a markdown list of code snippets, with each
     * file name linking either to the local file (when a workspace root is
     * known) or to the Sourcegraph search page for that file.
     */
    private async displaySearchResults(
        text: string,
        codebaseContext: CodebaseContext,
        wsRootPath: string | null
    ): Promise<string> {
        const resultContext = await codebaseContext.getSearchResults(text, {
            numCodeResults: 12,
            numTextResults: 3,
        })
        const endpointUri = resultContext.endpoint
        // Skip plain documentation files; only code snippets are shown.
        // (Hoisted out of the loop: the pattern is loop-invariant.)
        const ignoreFilesExtension = /^(md|txt)$/
        let snippets = `Here are the code snippets for: ${text}\n\n`
        for (const file of resultContext.results) {
            const fileContent = this.sanitizeContent(file.content)
            const extension = getFileExtension(file.fileName)
            if (extension.match(ignoreFilesExtension)) {
                continue
            }
            let uri = new URL(`/search?q=context:global+file:${file.fileName}`, endpointUri).href
            if (wsRootPath) {
                const fileUri = vscode.Uri.joinPath(vscode.Uri.file(wsRootPath), file.fileName)
                uri = vscode.Uri.parse(`vscode://file${fileUri.path}`).toString()
            }
            snippets +=
                fileContent && fileContent.length > 5
                    ? `File Name: [_${file.fileName}_](${uri})\n\`\`\`${extension}\n${fileContent}\n\`\`\`\n\n`
                    : ''
        }
        return snippets
    }

    // Removes ALL newlines, tabs, and backticks from snippet content before it
    // is embedded in a markdown code fence.
    private sanitizeContent(content: string): string {
        // Bug fix: String.replace with a string pattern only replaces the FIRST
        // occurrence; use global regexes so every occurrence is removed.
        return content.replace(/\n/g, '').replace(/\t/g, '').replace(/`/g, '').trim()
    }
}

View File

@ -1,39 +0,0 @@
import { MAX_RECIPE_INPUT_TOKENS, MAX_RECIPE_SURROUNDING_TOKENS } from '../../prompt/constants'
import { truncateText, truncateTextStart } from '../../prompt/truncation'
import { Interaction } from '../transcript/interaction'
import { getContextMessagesFromSelection, getNormalizedLanguageName, MARKDOWN_FORMAT_PROMPT } from './helpers'
import { Recipe, RecipeContext, RecipeID } from './recipe'
/**
 * Recipe that asks Cody for a detailed, step-by-step explanation of the
 * selected code (or the entire file when nothing is selected).
 */
export class ExplainCodeDetailed implements Recipe {
    public id: RecipeID = 'explain-code-detailed'

    public async getInteraction(_humanChatInput: string, context: RecipeContext): Promise<Interaction | null> {
        const selection = context.editor.getActiveTextEditorSelectionOrEntireFile()
        if (!selection) {
            await context.editor.showWarningMessage('No code selected. Please select some code and try again.')
            return null
        }

        // Truncate the selection and its surroundings to fit the prompt budget.
        const selectedText = truncateText(selection.selectedText, MAX_RECIPE_INPUT_TOKENS)
        const precedingText = truncateTextStart(selection.precedingText, MAX_RECIPE_SURROUNDING_TOKENS)
        const followingText = truncateText(selection.followingText, MAX_RECIPE_SURROUNDING_TOKENS)
        const languageName = getNormalizedLanguageName(selection.fileName)

        const promptMessage =
            `Please explain the following ${languageName} code. Be very detailed and specific, and indicate when it is not clear to you what is going on. Format your response as an ordered list.` +
            `\n\`\`\`\n${selectedText}\n\`\`\`\n${MARKDOWN_FORMAT_PROMPT}`
        const displayText = `Explain the following code:\n\`\`\`\n${selection.selectedText}\n\`\`\``

        return new Interaction(
            { speaker: 'human', text: promptMessage, displayText },
            { speaker: 'assistant' },
            getContextMessagesFromSelection(
                selectedText,
                precedingText,
                followingText,
                selection,
                context.codebaseContext
            ),
            []
        )
    }
}

View File

@ -1,39 +0,0 @@
import { MAX_RECIPE_INPUT_TOKENS, MAX_RECIPE_SURROUNDING_TOKENS } from '../../prompt/constants'
import { truncateText, truncateTextStart } from '../../prompt/truncation'
import { Interaction } from '../transcript/interaction'
import { getContextMessagesFromSelection, getNormalizedLanguageName, MARKDOWN_FORMAT_PROMPT } from './helpers'
import { Recipe, RecipeContext, RecipeID } from './recipe'
/**
 * Recipe that asks Cody for a high-level explanation of the selected code
 * (or the entire file when nothing is selected).
 */
export class ExplainCodeHighLevel implements Recipe {
    public id: RecipeID = 'explain-code-high-level'

    public async getInteraction(_humanChatInput: string, context: RecipeContext): Promise<Interaction | null> {
        const selection = context.editor.getActiveTextEditorSelectionOrEntireFile()
        if (!selection) {
            await context.editor.showWarningMessage('No code selected. Please select some code and try again.')
            return Promise.resolve(null)
        }

        // Truncate the selection and its surroundings to fit the prompt budget.
        const truncatedSelectedText = truncateText(selection.selectedText, MAX_RECIPE_INPUT_TOKENS)
        const truncatedPrecedingText = truncateTextStart(selection.precedingText, MAX_RECIPE_SURROUNDING_TOKENS)
        const truncatedFollowingText = truncateText(selection.followingText, MAX_RECIPE_SURROUNDING_TOKENS)
        const languageName = getNormalizedLanguageName(selection.fileName)

        // Fixed typo in the prompt text: "overal" -> "overall".
        const promptMessage = `Explain the following ${languageName} code at a high level. Only include details that are essential to an overall understanding of what's happening in the code.\n\`\`\`\n${truncatedSelectedText}\n\`\`\`\n${MARKDOWN_FORMAT_PROMPT}`
        const displayText = `Explain the following code at a high level:\n\`\`\`\n${selection.selectedText}\n\`\`\``

        return new Interaction(
            { speaker: 'human', text: promptMessage, displayText },
            { speaker: 'assistant' },
            getContextMessagesFromSelection(
                truncatedSelectedText,
                truncatedPrecedingText,
                truncatedFollowingText,
                selection,
                context.codebaseContext
            ),
            []
        )
    }
}

View File

@ -1,46 +0,0 @@
import { CHARS_PER_TOKEN, MAX_AVAILABLE_PROMPT_LENGTH, MAX_RECIPE_INPUT_TOKENS } from '../../prompt/constants'
import { truncateText } from '../../prompt/truncation'
import { Interaction } from '../transcript/interaction'
import { getNormalizedLanguageName } from './helpers'
import { Recipe, RecipeContext, RecipeID } from './recipe'
/**
 * Recipe that asks Cody to list up to five code smells, potential bugs, and
 * unhandled errors in the selected code (or the entire file).
 */
export class FindCodeSmells implements Recipe {
    public id: RecipeID = 'find-code-smells'

    public async getInteraction(_humanChatInput: string, context: RecipeContext): Promise<Interaction | null> {
        const selection = context.editor.getActiveTextEditorSelectionOrEntireFile()
        if (!selection) {
            await context.editor.showWarningMessage('No code selected. Please select some code and try again.')
            return Promise.resolve(null)
        }

        const languageName = getNormalizedLanguageName(selection.fileName)
        const promptPrefix = `Find code smells, potential bugs, and unhandled errors in my ${languageName} code:`
        const promptSuffix = `List maximum five of them as a list (if you have more in mind, mention that these are the top five), with a short context, reasoning, and suggestion on each.
If you have no ideas because the code looks fine, feel free to say that it already looks fine.`

        // Use the whole context window for the prompt because we're attaching no files
        const promptOverheadTokens = (promptPrefix.length + promptSuffix.length) / CHARS_PER_TOKEN
        const maxTokenCount = MAX_AVAILABLE_PROMPT_LENGTH - promptOverheadTokens
        const truncatedSelectedText = truncateText(
            selection.selectedText,
            Math.min(maxTokenCount, MAX_RECIPE_INPUT_TOKENS)
        )

        const promptMessage = `${promptPrefix}\n\n\`\`\`\n${truncatedSelectedText}\n\`\`\`\n\n${promptSuffix}`
        const displayText = `Find code smells in the following code: \n\`\`\`\n${selection.selectedText}\n\`\`\``
        const assistantResponsePrefix = ''

        return new Interaction(
            { speaker: 'human', text: promptMessage, displayText },
            {
                speaker: 'assistant',
                prefix: assistantResponsePrefix,
                text: assistantResponsePrefix,
            },
            new Promise(resolve => resolve([])),
            []
        )
    }
}

View File

@ -1,105 +0,0 @@
import { CodebaseContext } from '../../codebase-context'
import { ContextMessage } from '../../codebase-context/messages'
import { MAX_CURRENT_FILE_TOKENS, MAX_HUMAN_INPUT_TOKENS } from '../../prompt/constants'
import { truncateText, truncateTextStart } from '../../prompt/truncation'
import { BufferedBotResponseSubscriber } from '../bot-response-multiplexer'
import { Interaction } from '../transcript/interaction'
import { contentSanitizer } from './helpers'
import { Recipe, RecipeContext, RecipeID } from './recipe'
/**
 * Recipe that rewrites the selected code in place according to the user's
 * instruction: the model's `<selection>` response replaces the editor selection.
 */
export class Fixup implements Recipe {
    public id: RecipeID = 'fixup'

    public async getInteraction(humanChatInput: string, context: RecipeContext): Promise<Interaction | null> {
        // TODO: Prompt the user for additional direction.
        const selection = context.editor.getActiveTextEditorSelection() || context.editor.controllers?.inline.selection
        if (!selection) {
            await context.editor.controllers?.inline.error()
            await context.editor.showWarningMessage('Select some code to fixup.')
            return null
        }
        // Budget a quarter of the file-token limit each for the preceding and
        // following context; the selection itself may use up to two quarters.
        const quarterFileContext = Math.floor(MAX_CURRENT_FILE_TOKENS / 4)
        if (truncateText(selection.selectedText, quarterFileContext * 2) !== selection.selectedText) {
            const msg = "The amount of text selected exceeds Cody's current capacity."
            await context.editor.controllers?.inline.error()
            await context.editor.showWarningMessage(msg)
            return null
        }

        // Reconstruct Cody's prompt using user's context
        // Replace placeholders in reverse order to avoid collisions if a placeholder occurs in the input
        // TODO: Move prompt suffix from recipe to chat view. It has other subscribers.
        const promptText = Fixup.prompt
            .replace('{humanInput}', truncateText(humanChatInput, MAX_HUMAN_INPUT_TOKENS))
            .replace('{responseMultiplexerPrompt}', context.responseMultiplexer.prompt())
            .replace('{truncateFollowingText}', truncateText(selection.followingText, quarterFileContext))
            .replace('{selectedText}', selection.selectedText)
            .replace('{truncateTextStart}', truncateTextStart(selection.precedingText, quarterFileContext))
            .replace('{fileName}', selection.fileName)

        // When the bot responds inside <selection> tags, apply the replacement
        // to the editor; otherwise surface a warning.
        context.responseMultiplexer.sub(
            'selection',
            new BufferedBotResponseSubscriber(async content => {
                if (!content) {
                    await context.editor.controllers?.inline.error()
                    await context.editor.showWarningMessage(
                        'Cody did not suggest any replacement.\nTry starting a new conversation with Cody.'
                    )
                    return
                }
                await context.editor.replaceSelection(
                    selection.fileName,
                    selection.selectedText,
                    contentSanitizer(content)
                )
            })
        )

        return Promise.resolve(
            new Interaction(
                {
                    speaker: 'human',
                    text: promptText,
                    displayText: '**✨Fixup✨** ' + humanChatInput,
                },
                {
                    speaker: 'assistant',
                    prefix: 'Check your document for updates from Cody.\n',
                },
                this.getContextMessages(selection.selectedText, context.codebaseContext),
                []
            )
        )
    }

    // Get context from editor
    private async getContextMessages(text: string, codebaseContext: CodebaseContext): Promise<ContextMessage[]> {
        const contextMessages: ContextMessage[] = await codebaseContext.getContextMessages(text, {
            numCodeResults: 12,
            numTextResults: 3,
        })
        return contextMessages
    }

    // Prompt Templates
    public static readonly prompt = `
This is part of the file {fileName}. The part of the file I have selected is highlighted with <selection> tags. You are helping me to work on that part as my coding assistant.
Follow the instructions in the selected part plus the additional instructions to produce a rewritten replacement for only the selected part.
Put the rewritten replacement inside <selection> tags. I only want to see the code within <selection>.
Do not move code from outside the selection into the selection in your reply.
Do not remove code inside the <selection> tags that might be being used by the code outside the <selection> tags.
It is OK to provide some commentary within the replacement <selection>.
It is not acceptable to enclose the rewritten replacement with markdowns.
Only provide me with the replacement <selection> and nothing else.
If it doesn't make sense, you do not need to provide <selection>. Instead, tell me how I can help you to understand my request.
\`\`\`
{truncateTextStart}<selection>{selectedText}</selection>{truncateFollowingText}
\`\`\`
Additional Instruction:
- {humanInput}
- {responseMultiplexerPrompt}
`
}

View File

@ -1,67 +0,0 @@
import { MAX_RECIPE_INPUT_TOKENS, MAX_RECIPE_SURROUNDING_TOKENS } from '../../prompt/constants'
import { truncateText, truncateTextStart } from '../../prompt/truncation'
import { Interaction } from '../transcript/interaction'
import {
MARKDOWN_FORMAT_PROMPT,
getNormalizedLanguageName,
getContextMessagesFromSelection,
getFileExtension,
} from './helpers'
import { Recipe, RecipeContext, RecipeID } from './recipe'
/**
 * Recipe that asks Cody to write a documentation comment (docstring, JavaDoc,
 * etc.) for the code the user currently has selected, or for the entire file
 * when nothing is selected.
 */
export class GenerateDocstring implements Recipe {
    public id: RecipeID = 'generate-docstring'

    public async getInteraction(_humanChatInput: string, context: RecipeContext): Promise<Interaction | null> {
        const selection = context.editor.getActiveTextEditorSelectionOrEntireFile()
        if (!selection) {
            await context.editor.showWarningMessage('No code selected. Please select some code and try again.')
            return Promise.resolve(null)
        }

        // Trim the selection and its neighborhood down to the recipe token budgets.
        const selectedText = truncateText(selection.selectedText, MAX_RECIPE_INPUT_TOKENS)
        const precedingText = truncateTextStart(selection.precedingText, MAX_RECIPE_SURROUNDING_TOKENS)
        const followingText = truncateText(selection.followingText, MAX_RECIPE_SURROUNDING_TOKENS)

        const extension = getFileExtension(selection.fileName)
        const languageName = getNormalizedLanguageName(selection.fileName)

        // Pick documentation-style guidance for the detected language.
        let additionalInstructions = `Use the ${languageName} documentation style to generate a ${languageName} comment.`
        if (extension === 'java') {
            additionalInstructions = 'Use the JavaDoc documentation style to generate a Java comment.'
        } else if (extension === 'py') {
            additionalInstructions = 'Use a Python docstring to generate a Python multi-line string.'
        }

        const promptPrefix = `Generate a comment documenting the parameters and functionality of the following ${languageName} code:`
        const promptMessage = `${promptPrefix}\n\`\`\`\n${selectedText}\n\`\`\`\nOnly generate the documentation, do not generate the code. ${additionalInstructions} ${MARKDOWN_FORMAT_PROMPT}`

        // Seed the assistant reply with the language's comment opener so the
        // model continues it with documentation.
        let docStart = ''
        if (extension === 'java' || extension.startsWith('js') || extension.startsWith('ts')) {
            docStart = '/*'
        } else if (extension === 'py') {
            docStart = '"""\n'
        } else if (extension === 'go') {
            docStart = '// '
        }

        const displayText = `Generate documentation for the following code:\n\`\`\`\n${selection.selectedText}\n\`\`\``
        const assistantResponsePrefix = `Here is the generated documentation:\n\`\`\`${extension}\n${docStart}`

        return new Interaction(
            { speaker: 'human', text: promptMessage, displayText },
            { speaker: 'assistant', prefix: assistantResponsePrefix, text: assistantResponsePrefix },
            getContextMessagesFromSelection(selectedText, precedingText, followingText, selection, context.codebaseContext),
            []
        )
    }
}

View File

@ -1,84 +0,0 @@
import { spawnSync } from 'child_process'
import { readFileSync } from 'fs'
import path from 'path'
import { MAX_RECIPE_INPUT_TOKENS } from '../../prompt/constants'
import { truncateText } from '../../prompt/truncation'
import { Interaction } from '../transcript/interaction'
import { Recipe, RecipeContext, RecipeID } from './recipe'
/**
 * Recipe that drafts a pull request description from the current branch's
 * commits (authored by the configured git user, not yet on origin/HEAD),
 * using the repository's PR template when one exists.
 */
export class PrDescription implements Recipe {
    public id: RecipeID = 'pr-description'

    public async getInteraction(_humanChatInput: string, context: RecipeContext): Promise<Interaction | null> {
        const dirPath = context.editor.getWorkspaceRootPath()
        if (!dirPath) {
            return Promise.resolve(null)
        }

        // One block per commit: author, subject, then body.
        const logFormat = '--pretty="Commit author: %an%nCommit message: %s%nChange description:%b%n"'
        const rawDisplayText = 'Generating the PR description'

        // Common locations for a PR template, resolved via `git ls-files`.
        const templateFormatArgs = [
            'pull_request_template.md',
            'PULL_REQUEST_TEMPLATE.md',
            'docs/PULL_REQUEST_TEMPLATE.md',
            'docs/pull_request_template.md',
            '.github/pull_request_template.md',
            '.github/PULL_REQUEST_TEMPLATE.md',
        ]
        const checkPrTemplate = spawnSync('git', ['ls-files', ...templateFormatArgs], { cwd: dirPath })
        const prTemplateOutput = checkPrTemplate.stdout.toString().trim()
        let prTemplateContent = ''
        if (prTemplateOutput) {
            const templatePath = path.join(dirPath.trim(), prTemplateOutput)
            prTemplateContent = readFileSync(templatePath).toString()
        }

        // Only commits by the current git user that are not yet on origin/HEAD.
        const userEmail = spawnSync('git', ['config', 'user.email'], { cwd: dirPath })
        const email = userEmail.stdout.toString().trim()
        const gitCommit = spawnSync('git', ['log', `--author=<${email}>`, 'origin/HEAD..HEAD', logFormat], {
            cwd: dirPath,
        })
        const gitCommitOutput = gitCommit.stdout.toString().trim()

        if (!gitCommitOutput) {
            const emptyGitCommitMessage = 'No commits history found in the current branch.'
            return new Interaction(
                { speaker: 'human', displayText: rawDisplayText },
                {
                    speaker: 'assistant',
                    prefix: emptyGitCommitMessage,
                    text: emptyGitCommitMessage,
                },
                Promise.resolve([]),
                []
            )
        }

        // Keep the prompt within the recipe input token budget and tell the
        // user when commit output was dropped.
        const truncatedGitCommitOutput = truncateText(gitCommitOutput, MAX_RECIPE_INPUT_TOKENS)
        let truncatedCommitMessage = ''
        if (truncatedGitCommitOutput.length < gitCommitOutput.length) {
            truncatedCommitMessage = 'Truncated extra long git log output, so PR description may be incomplete.'
        }

        // FIX: interpolate the truncated output (previously the untruncated
        // gitCommitOutput was sent, defeating the truncation above and
        // diverging from the sibling GitHistory/ReleaseNotes recipes).
        const promptMessage = `Summarise these changes:\n${truncatedGitCommitOutput}\n\n made while working in the current git branch.\nUse this pull request template to ${prTemplateContent} generate a pull request description based on the committed changes.\nIf the PR template mentions a requirement to check the contribution guidelines, then just summarise the changes in bulletin format.\n If it mentions a test plan for the changes use N/A\n.`
        const assistantResponsePrefix = `Here is the PR description for the work done in your current branch:\n${truncatedCommitMessage}`
        return new Interaction(
            { speaker: 'human', text: promptMessage, displayText: rawDisplayText },
            {
                speaker: 'assistant',
                prefix: assistantResponsePrefix,
                text: assistantResponsePrefix,
            },
            Promise.resolve([]),
            []
        )
    }
}

View File

@ -1,99 +0,0 @@
import { spawnSync } from 'child_process'
import { MAX_RECIPE_INPUT_TOKENS } from '../../prompt/constants'
import { truncateText } from '../../prompt/truncation'
import { Interaction } from '../transcript/interaction'
import { Recipe, RecipeContext, RecipeID } from './recipe'
/**
 * Recipe that generates release notes from recent git history. The user picks
 * either one of the three most recently created tags or, when the repository
 * has no tags, a time window; the matching `git log` output is summarized by
 * the assistant.
 */
export class ReleaseNotes implements Recipe {
    public id: RecipeID = 'release-notes'

    public async getInteraction(_humanChatInput: string, context: RecipeContext): Promise<Interaction | null> {
        const dirPath = context.editor.getWorkspaceRootPath()
        if (!dirPath) {
            return null
        }

        // Explicit element type: `let x = []` is implicitly any[] under strict mode.
        let quickPickItems: { label: string; args: string[] }[] = []
        // One block per commit: author, subject, then body.
        const logFormat = '--pretty="Commit author: %an%nCommit message: %s%nChange description:%b%n"'

        // Prefer tags: offer the three most recently created ones.
        const gitTagCommand = spawnSync('git', ['tag', '--sort=-creatordate'], { cwd: dirPath })
        const gitTagOutput = gitTagCommand.stdout.toString().trim()
        let tagsPromptText = ''
        if (gitTagOutput) {
            const tags = gitTagOutput.split(/\r?\n/)
            for (const tag of tags.slice(0, 3)) {
                quickPickItems.push({
                    label: tag,
                    args: ['log', tag, logFormat],
                })
            }
            tagsPromptText =
                'Do not include information about any other tags version number if any included in the commits.'
        } else {
            // No tags in the repository: fall back to time-window choices.
            quickPickItems = [
                {
                    label: 'Last week',
                    args: ['log', "--since='1 week'", logFormat],
                },
                {
                    label: 'Last 2 weeks',
                    args: ['log', "--since='2 week'", logFormat],
                },
                {
                    label: 'Last 4 weeks',
                    args: ['log', "--since='4 week'", logFormat],
                },
            ]
        }

        const selectedLabel = await context.editor.showQuickPick(quickPickItems.map(e => e.label))
        if (!selectedLabel) {
            // User dismissed the quick pick.
            return null
        }
        // Map the chosen label back to its git arguments.
        const selected = Object.fromEntries(quickPickItems.map(({ label, args }) => [label, { args }]))[selectedLabel]
        const { args: gitArgs } = selected

        const gitLogCommand = spawnSync('git', ['--no-pager', ...gitArgs], { cwd: dirPath })
        const gitLogOutput = gitLogCommand.stdout.toString().trim()
        const rawDisplayText = `Generate release notes for the changes made since ${selectedLabel}`

        if (!gitLogOutput) {
            const emptyGitLogMessage = 'No recent changes found to generate release notes.'
            return new Interaction(
                { speaker: 'human', displayText: rawDisplayText },
                {
                    speaker: 'assistant',
                    prefix: emptyGitLogMessage,
                    text: emptyGitLogMessage,
                },
                Promise.resolve([]),
                []
            )
        }

        // Keep the prompt within the recipe input token budget; tell the user
        // when output was dropped. (A leftover debug console.log of the full
        // truncated log output was removed here.)
        const truncatedGitLogOutput = truncateText(gitLogOutput, MAX_RECIPE_INPUT_TOKENS)
        let truncatedLogMessage = ''
        if (truncatedGitLogOutput.length < gitLogOutput.length) {
            truncatedLogMessage = 'Truncated extra long git log output, so release notes may miss some changes.'
        }

        const promptMessage = `Generate release notes by summarising these commits:\n${truncatedGitLogOutput}\n\nUse proper heading format for the release notes.\n\n${tagsPromptText}.Do not include other changes and dependency updates.`
        const assistantResponsePrefix = `Here is the generated release notes for ${selectedLabel}\n${truncatedLogMessage}`
        return new Interaction(
            { speaker: 'human', text: promptMessage, displayText: rawDisplayText },
            {
                speaker: 'assistant',
                prefix: assistantResponsePrefix,
                text: assistantResponsePrefix,
            },
            Promise.resolve([]),
            []
        )
    }
}

View File

@ -1,51 +0,0 @@
import { MAX_RECIPE_INPUT_TOKENS, MAX_RECIPE_SURROUNDING_TOKENS } from '../../prompt/constants'
import { truncateText, truncateTextStart } from '../../prompt/truncation'
import { Interaction } from '../transcript/interaction'
import {
MARKDOWN_FORMAT_PROMPT,
getNormalizedLanguageName,
getFileExtension,
getContextMessagesFromSelection,
} from './helpers'
import { Recipe, RecipeContext, RecipeID } from './recipe'
/**
 * Recipe that asks Cody to write a unit test for the code the user currently
 * has selected, or for the entire file when nothing is selected.
 */
export class GenerateTest implements Recipe {
    public id: RecipeID = 'generate-unit-test'

    public async getInteraction(_humanChatInput: string, context: RecipeContext): Promise<Interaction | null> {
        const selection = context.editor.getActiveTextEditorSelectionOrEntireFile()
        if (!selection) {
            await context.editor.showWarningMessage('No code selected. Please select some code and try again.')
            return Promise.resolve(null)
        }

        // Keep the selection and its surroundings within the recipe token budgets.
        const selectedText = truncateText(selection.selectedText, MAX_RECIPE_INPUT_TOKENS)
        const precedingText = truncateTextStart(selection.precedingText, MAX_RECIPE_SURROUNDING_TOKENS)
        const followingText = truncateText(selection.followingText, MAX_RECIPE_SURROUNDING_TOKENS)
        const extension = getFileExtension(selection.fileName)
        const languageName = getNormalizedLanguageName(selection.fileName)

        const promptMessage = `Generate a unit test in ${languageName} for the following code:\n\`\`\`${extension}\n${selectedText}\n\`\`\`\n${MARKDOWN_FORMAT_PROMPT}`
        const displayText = `Generate a unit test for the following code:\n\`\`\`${extension}\n${selection.selectedText}\n\`\`\``
        // Seed the assistant reply with a code fence so the model answers with code.
        const assistantResponsePrefix = `Here is the generated unit test:\n\`\`\`${extension}\n`

        return new Interaction(
            { speaker: 'human', text: promptMessage, displayText },
            { speaker: 'assistant', prefix: assistantResponsePrefix, text: assistantResponsePrefix },
            getContextMessagesFromSelection(selectedText, precedingText, followingText, selection, context.codebaseContext),
            []
        )
    }
}

View File

@ -1,92 +0,0 @@
import { spawnSync } from 'child_process'
import path from 'path'
import { MAX_RECIPE_INPUT_TOKENS } from '../../prompt/constants'
import { truncateText } from '../../prompt/truncation'
import { Interaction } from '../transcript/interaction'
import { Recipe, RecipeContext, RecipeID } from './recipe'
/**
 * Recipe that summarizes recent git history. The user picks a range (last 5
 * commits, last day, last week, or — when there is an active selection — the
 * last 5 commits touching that file) and the matching `git log` output is
 * summarized by the assistant as a bulleted list.
 */
export class GitHistory implements Recipe {
    public id: RecipeID = 'git-history'

    public async getInteraction(_humanChatInput: string, context: RecipeContext): Promise<Interaction | null> {
        const dirPath = context.editor.getWorkspaceRootPath()
        if (!dirPath) {
            return null
        }

        // One block per commit: author, subject, then body.
        const logFormat = '--pretty="Commit author: %an%nCommit message: %s%nChange description:%b%n"'
        const items = [
            {
                label: 'Last 5 items',
                args: ['log', '-n5', logFormat],
                rawDisplayText: 'What changed in my codebase in the last 5 commits?',
            },
            {
                label: 'Last day',
                args: ['log', '--since', '1 day', logFormat],
                rawDisplayText: 'What has changed in my codebase in the last day?',
            },
            {
                label: 'Last week',
                args: ['log', "--since='1 week'", logFormat],
                rawDisplayText: 'What changed in my codebase in the last week?',
            },
        ]

        // Offer a file-scoped option when the editor has an active file/selection.
        const selection = context.editor.getActiveTextEditorSelectionOrEntireFile()
        if (selection) {
            const name = path.basename(selection.fileName)
            items.push({
                label: `Last 5 items for ${name}`,
                args: ['log', '-n5', logFormat, '--', selection.fileName],
                rawDisplayText: `What changed in ${name} in the last 5 commits`,
            })
        }
        const selectedLabel = await context.editor.showQuickPick(items.map(e => e.label))
        if (!selectedLabel) {
            // User dismissed the quick pick.
            return null
        }
        // Map the chosen label back to its git arguments and display text.
        const selected = Object.fromEntries(
            items.map(({ label, args, rawDisplayText }) => [label, { args, rawDisplayText }])
        )[selectedLabel]
        const { args: gitArgs, rawDisplayText } = selected

        const gitLogCommand = spawnSync('git', ['--no-pager', ...gitArgs], { cwd: dirPath })
        const gitLogOutput = gitLogCommand.stdout.toString().trim()
        if (!gitLogOutput) {
            const emptyGitLogMessage = 'No recent changes found'
            return new Interaction(
                { speaker: 'human', displayText: rawDisplayText },
                {
                    speaker: 'assistant',
                    prefix: emptyGitLogMessage,
                    text: emptyGitLogMessage,
                },
                Promise.resolve([]),
                []
            )
        }

        // Keep the prompt within the recipe input token budget; tell the user
        // when output was dropped.
        const truncatedGitLogOutput = truncateText(gitLogOutput, MAX_RECIPE_INPUT_TOKENS)
        let truncatedLogMessage = ''
        if (truncatedGitLogOutput.length < gitLogOutput.length) {
            truncatedLogMessage = 'Truncated extra long git log output, so summary may be incomplete.'
        }
        const promptMessage = `Summarize these commits:\n${truncatedGitLogOutput}\n\nProvide your response in the form of a bulleted list. Do not mention the commit hashes.`
        const assistantResponsePrefix = `Here is a summary of recent changes:\n${truncatedLogMessage}`
        return new Interaction(
            { speaker: 'human', text: promptMessage, displayText: rawDisplayText },
            {
                speaker: 'assistant',
                prefix: assistantResponsePrefix,
                text: assistantResponsePrefix,
            },
            Promise.resolve([]),
            []
        )
    }
}

View File

@ -1,72 +0,0 @@
import path from 'path'
import { CodebaseContext } from '../../codebase-context'
import { ContextMessage, getContextMessageWithResponse } from '../../codebase-context/messages'
import { populateCodeContextTemplate } from '../../prompt/templates'
/** Instruction appended to prompts so the model fences code in backticks. */
export const MARKDOWN_FORMAT_PROMPT = 'Enclose code snippets with three backticks like so: ```.'

// Maps a lowercase file extension to a human-readable language name.
// Extensions not listed here fall back to a capitalized copy of the extension.
const EXTENSION_TO_LANGUAGE: { [key: string]: string } = {
    py: 'Python',
    rb: 'Ruby',
    md: 'Markdown',
    php: 'PHP',
    js: 'Javascript',
    ts: 'Typescript',
    jsx: 'JSX',
    tsx: 'TSX',
}

// Slash-command matchers for the inline chat input.
export const commandRegex = {
    chat: new RegExp(/^(?!.*\/n(ew)?\s|.*\/f(ix)?\s)/i), // For now, if the input does not start with /n or /f, it is a chat
    fix: new RegExp(/^\/f(ix)?\s/i),
    touch: new RegExp(/^\/t(ouch)?\s/i),
    touchNeedFileName: new RegExp(/^\/t(ouch)?\s(?!.*test(s)?\s)/i), // Has /touch or /t but no test or tests in the string
    noTest: new RegExp(/^(?!.*test)/i), // Matches only when the input does not mention "test"
    search: new RegExp(/^\/s(earch)?\s/i),
    test: new RegExp(/^\/n(ew)?\s|test(s)?\s/, 'i'),
}

/**
 * Returns a human-readable language name for a file extension or file name.
 * Accepts either a bare extension ('py') or a full file name ('main.py').
 * Returns '' for empty input, and a capitalized copy of the extension for
 * languages missing from the lookup table.
 */
export function getNormalizedLanguageName(extension: string): string {
    if (!extension) {
        return ''
    }
    // Fix: callers pass full file names (selection.fileName), which never
    // matched the extension-keyed table and fell through to the capitalization
    // fallback (e.g. 'Main.py' instead of 'Python'). Bare extensions behave
    // exactly as before.
    const ext = extension.includes('.') ? getFileExtension(extension) : extension
    return EXTENSION_TO_LANGUAGE[ext] ?? ext.charAt(0).toUpperCase() + ext.slice(1)
}

/**
 * Gathers context for a selection: up to 4 code search results for the
 * selected text, plus the preceding and following text wrapped as
 * code-context messages for the same file.
 */
export async function getContextMessagesFromSelection(
    selectedText: string,
    precedingText: string,
    followingText: string,
    { fileName, repoName, revision }: { fileName: string; repoName?: string; revision?: string },
    codebaseContext: CodebaseContext
): Promise<ContextMessage[]> {
    const selectedTextContext = await codebaseContext.getContextMessages(selectedText, {
        numCodeResults: 4,
        numTextResults: 0,
    })
    return selectedTextContext.concat(
        [precedingText, followingText].flatMap(text =>
            getContextMessageWithResponse(populateCodeContextTemplate(text, fileName, repoName), {
                fileName,
                repoName,
                revision,
            })
        )
    )
}

/** Returns the lowercase file extension without the leading dot ('' when none). */
export function getFileExtension(fileName: string): string {
    return path.extname(fileName).slice(1).toLowerCase()
}

// This cleans up the code returned by Cody based on current behavior
// ex. Remove `tags:` that Cody sometimes includes in the returned content.
// Also strips leading blank lines so the remaining indentation is preserved.
export function contentSanitizer(text: string): string {
    let output = text + '\n'
    const tagsIndex = text.indexOf('tags:')
    if (tagsIndex !== -1) {
        // NOTE: 6 is the length of `tags:` + 1 space
        output = output.slice(tagsIndex + 6)
    }
    return output.replace(/^\s*\n/, '')
}

View File

@ -1,51 +0,0 @@
import { MAX_RECIPE_INPUT_TOKENS, MAX_RECIPE_SURROUNDING_TOKENS } from '../../prompt/constants'
import { truncateText, truncateTextStart } from '../../prompt/truncation'
import { Interaction } from '../transcript/interaction'
import {
MARKDOWN_FORMAT_PROMPT,
getNormalizedLanguageName,
getContextMessagesFromSelection,
getFileExtension,
} from './helpers'
import { Recipe, RecipeContext, RecipeID } from './recipe'
/**
 * Recipe that asks Cody to rename local variables and parameters in the
 * selected code to clearer identifiers, without touching names bound outside
 * the selection.
 */
export class ImproveVariableNames implements Recipe {
    public id: RecipeID = 'improve-variable-names'

    public async getInteraction(_humanChatInput: string, context: RecipeContext): Promise<Interaction | null> {
        const selection = context.editor.getActiveTextEditorSelectionOrEntireFile()
        if (!selection) {
            await context.editor.showWarningMessage('No code selected. Please select some code and try again.')
            return Promise.resolve(null)
        }

        // Keep the selection and its surroundings within the recipe token budgets.
        const selectedText = truncateText(selection.selectedText, MAX_RECIPE_INPUT_TOKENS)
        const precedingText = truncateTextStart(selection.precedingText, MAX_RECIPE_SURROUNDING_TOKENS)
        const followingText = truncateText(selection.followingText, MAX_RECIPE_SURROUNDING_TOKENS)
        const extension = getFileExtension(selection.fileName)
        const languageName = getNormalizedLanguageName(selection.fileName)

        const displayText = `Improve the variable names in the following code:\n\`\`\`\n${selection.selectedText}\n\`\`\``
        const promptMessage = `Improve the variable names in this ${languageName} code by replacing the variable names with new identifiers which succinctly capture the purpose of the variable. We want the new code to be a drop-in replacement, so do not change names bound outside the scope of this code, like function names or members defined elsewhere. Only change the names of local variables and parameters:\n\n\`\`\`${extension}\n${selectedText}\n\`\`\`\n${MARKDOWN_FORMAT_PROMPT}`
        // Seed the assistant reply with a code fence so the model answers with code.
        const assistantResponsePrefix = `Here is the improved code:\n\`\`\`${extension}\n`

        return new Interaction(
            { speaker: 'human', text: promptMessage, displayText },
            { speaker: 'assistant', prefix: assistantResponsePrefix, text: assistantResponsePrefix },
            getContextMessagesFromSelection(selectedText, precedingText, followingText, selection, context.codebaseContext),
            []
        )
    }
}

View File

@ -1,107 +0,0 @@
import { CodebaseContext } from '../../codebase-context'
import { ContextMessage } from '../../codebase-context/messages'
import { ActiveTextEditorSelection, Editor } from '../../editor'
import { MAX_HUMAN_INPUT_TOKENS, MAX_RECIPE_INPUT_TOKENS, MAX_RECIPE_SURROUNDING_TOKENS } from '../../prompt/constants'
import { truncateText } from '../../prompt/truncation'
import { Interaction } from '../transcript/interaction'
import { ChatQuestion } from './chat-question'
import { Fixup } from './fixup'
import { commandRegex } from './helpers'
import { InlineTouch } from './inline-touch'
import { Recipe, RecipeContext, RecipeID } from './recipe'
/**
 * Recipe backing the inline chat UI. Dispatches `/touch` requests to
 * InlineTouch and `/fix` requests to Fixup; everything else is answered as a
 * question about the inline-selected code.
 */
export class InlineChat implements Recipe {
    public id: RecipeID = 'inline-chat'

    constructor(private debug: (filterLabel: string, text: string, ...args: unknown[]) => void) {}

    public async getInteraction(humanChatInput: string, context: RecipeContext): Promise<Interaction | null> {
        // Check if this is a touch request (the command prefix is stripped before delegating)
        if (commandRegex.touch.test(humanChatInput)) {
            return new InlineTouch(this.debug).getInteraction(humanChatInput.replace(commandRegex.touch, ''), context)
        }
        // Check if this is a fixup request
        if (commandRegex.fix.test(humanChatInput)) {
            return new Fixup().getInteraction(humanChatInput.replace(commandRegex.fix, ''), context)
        }

        const selection = context.editor.controllers?.inline.selection
        if (!humanChatInput || !selection) {
            await context.editor.showWarningMessage('Failed to start Inline Chat: empty input or selection.')
            return null
        }

        const truncatedText = truncateText(humanChatInput, MAX_HUMAN_INPUT_TOKENS)
        // Budget for the selected code: recipe input plus both surrounding windows.
        const MAX_RECIPE_CONTENT_TOKENS = MAX_RECIPE_INPUT_TOKENS + MAX_RECIPE_SURROUNDING_TOKENS * 2
        const truncatedSelectedText = truncateText(selection.selectedText, MAX_RECIPE_CONTENT_TOKENS)

        // Reconstruct Cody's prompt using user's context
        // Replace placeholders in reverse order to avoid collisions if a placeholder occurs in the input
        const promptText = InlineChat.prompt
            .replace('{humanInput}', truncatedText)
            .replace('{selectedText}', truncatedSelectedText)
            .replace('{fileName}', selection.fileName)

        // Text displayed in the UI for the human; includes the (untruncated) selected code
        const displayText = humanChatInput + InlineChat.displayPrompt.replace('{selectedText}', selection.selectedText)

        return Promise.resolve(
            new Interaction(
                {
                    speaker: 'human',
                    text: promptText,
                    displayText,
                },
                { speaker: 'assistant' },
                this.getContextMessages(truncatedText, context.codebaseContext, selection, context.editor),
                []
            )
        )
    }

    // Prompt Templates
    /** Prompt template; placeholders are filled in getInteraction above. */
    public static readonly prompt = `
I have questions about this part of the code from {fileName}:
\`\`\`
{selectedText}
\`\`\`
As my coding assistant, please help me with my questions:
{humanInput}
## Instruction
- Do not enclose your answer with tags.
- Do not remove code that might be being used by the other part of the code that was not shared.
- Your answers and suggestions should based on the provided context only.
- You may make references to other part of the shared code.
- Do not suggest code that are not related to any of the shared context.
- Do not suggest anything that would break the working code.
`

    // Prompt template for displaying the prompt to users in chat view
    public static readonly displayPrompt = `
\n\`\`\`\n{selectedText}\n\`\`\`\n`

    // Get context from editor
    /**
     * Gathers context: the inline selection, the active editor's file, and up
     * to 5 code / 3 text codebase search results for the question text.
     */
    private async getContextMessages(
        text: string,
        codebaseContext: CodebaseContext,
        selection: ActiveTextEditorSelection,
        editor: Editor
    ): Promise<ContextMessage[]> {
        const contextMessages: ContextMessage[] = []
        // Add selected text and current file as context
        contextMessages.push(...ChatQuestion.getEditorSelectionContext(selection))
        contextMessages.push(...ChatQuestion.getEditorContext(editor))
        const extraContext = await codebaseContext.getContextMessages(text, {
            numCodeResults: 5,
            numTextResults: 3,
        })
        contextMessages.push(...extraContext)
        return contextMessages
    }
}

View File

@ -1,277 +0,0 @@
import * as vscode from 'vscode'
import { ContextMessage, getContextMessageWithResponse } from '../../codebase-context/messages'
import { ActiveTextEditorSelection } from '../../editor'
import {
MAX_CURRENT_FILE_TOKENS,
MAX_HUMAN_INPUT_TOKENS,
MAX_RECIPE_INPUT_TOKENS,
MAX_RECIPE_SURROUNDING_TOKENS,
} from '../../prompt/constants'
import { populateCurrentEditorContextTemplate } from '../../prompt/templates'
import { truncateText } from '../../prompt/truncation'
import { BufferedBotResponseSubscriber } from '../bot-response-multiplexer'
import { Interaction } from '../transcript/interaction'
import { ChatQuestion } from './chat-question'
import { commandRegex, contentSanitizer } from './helpers'
import { Recipe, RecipeContext, RecipeID } from './recipe'
/** ======================================================
* Recipe for Generating a New File
====================================================== **/
export class InlineTouch implements Recipe {
public id: RecipeID = 'inline-touch'
// Root URI of the first workspace folder; undefined when no folder is open.
// The second `?.` guards an empty workspaceFolders array, which previously
// threw a TypeError when reading `.uri` of `undefined`.
private workspacePath = vscode.workspace.workspaceFolders?.[0]?.uri

constructor(private debug: (filterLabel: string, text: string, ...args: unknown[]) => void) {}
/**
 * Creates a new file next to the current one and asks Cody to fill it with
 * content derived from the current selection and the user's instruction.
 * Returns null (after surfacing a warning) when there is no selection, no
 * instruction, or no open workspace.
 */
public async getInteraction(humanChatInput: string, context: RecipeContext): Promise<Interaction | null> {
    const selection = context.editor.getActiveTextEditorSelection() || context.editor.controllers?.inline.selection
    if (!selection || !this.workspacePath) {
        await context.editor.controllers?.inline.error()
        await context.editor.showWarningMessage('Failed to start Inline Chat: empty selection.')
        return null
    }
    // Fall back to an input box when the instruction was not given inline.
    const humanInput = humanChatInput.trim() || (await this.getInstructionFromInput()).trim()
    if (!humanInput) {
        await context.editor.controllers?.inline.error()
        await context.editor.showWarningMessage('Failed to start Inline Chat: empty input.')
        return null
    }
    // Get the current directory of the file that the user is currently working on
    // Create file path from selection.fileName and workspacePath
    const currentFilePath = `${this.workspacePath.fsPath}/${selection.fileName}`
    const currentDir = currentFilePath.replace(/\/[^/]+$/, '')
    this.debug('InlineTouch:currentDir', 'currentDir', currentDir)
    // Derive the new file name: `foo.cody.ext` when the instruction does not
    // mention "test", otherwise `foo.test.ext`.
    const newFileName = commandRegex.noTest.test(humanInput)
        ? currentFilePath.replace(/(\.[^./]+)$/, '.cody$1')
        : currentFilePath.replace(/(\.[^./]+)$/, '.test$1')
    const newFsPath = newFileName || (await this.getNewFileNameFromInput(selection.fileName, currentDir))
    if (!newFsPath || !currentDir) {
        return null
    }
    // create vscode uri for the new file from the newFilePath which includes the workspacePath
    const fileUri = vscode.Uri.file(newFsPath)
    const workspaceEditor = new vscode.WorkspaceEdit()
    // Create file if it doesn't exist
    workspaceEditor.createFile(fileUri, { ignoreIfExists: true })
    await vscode.workspace.applyEdit(workspaceEditor)
    this.debug('InlineTouch:workspaceEditor', 'createFile', fileUri)

    const truncatedText = truncateText(humanInput, MAX_HUMAN_INPUT_TOKENS)
    // Budget for the selected code: recipe input plus both surrounding windows.
    const MAX_RECIPE_CONTENT_TOKENS = MAX_RECIPE_INPUT_TOKENS + MAX_RECIPE_SURROUNDING_TOKENS * 2
    const truncatedSelectedText = truncateText(selection.selectedText, MAX_RECIPE_CONTENT_TOKENS)
    // Reconstruct Cody's prompt using user's context
    // Replace placeholders in reverse order to avoid collisions if a placeholder occurs in the input
    const prompt = InlineTouch.newFilePrompt
    const promptText = prompt
        .replace('{newFileName}', newFsPath)
        .replace('{humanInput}', truncatedText)
        .replace('{selectedText}', truncatedSelectedText)
        .replace('{fileName}', selection.fileName)
    // Text displayed in the UI for the human; includes the target file name
    const displayText = this.getHumanDisplayText(humanInput, selection.fileName)

    // When Cody streams back a <selection> block, append it to the new file;
    // an empty block means Cody produced no usable code.
    context.responseMultiplexer.sub(
        'selection',
        new BufferedBotResponseSubscriber(async content => {
            if (!content) {
                await context.editor.controllers?.inline.error()
                await context.editor.showWarningMessage(
                    'Cody did not suggest any code updates. Please try again with a different question.'
                )
                return
            }
            await this.addContentToNewFile(workspaceEditor, fileUri, content)
            this.debug('InlineTouch:responseMultiplexer', 'BufferedBotResponseSubscriber', content)
        })
    )

    return Promise.resolve(
        new Interaction(
            {
                speaker: 'human',
                text: promptText,
                displayText,
            },
            {
                speaker: 'assistant',
                prefix: 'Working on it! I will show you the new file when it is ready.\n\n',
            },
            this.getContextMessages(selection, currentDir),
            []
        )
    )
}
/**
 * Appends Cody's generated content (run through contentSanitizer) to the new
 * file, saves it, and opens it in the editor.
 */
private async addContentToNewFile(
    workspaceEditor: vscode.WorkspaceEdit,
    filePath: vscode.Uri,
    content: string
): Promise<void> {
    const textDocument = await vscode.workspace.openTextDocument(filePath)
    // NOTE(review): lineCount + 1 is past the last line — this appears to rely
    // on VS Code clamping the position to the end of the document; confirm.
    workspaceEditor.insert(filePath, new vscode.Position(textDocument.lineCount + 1, 0), contentSanitizer(content))
    await vscode.workspace.applyEdit(workspaceEditor)
    await textDocument.save()
    await vscode.window.showTextDocument(filePath)
}
/** ======================================================
 * Prompt Template for New File
 * ====================================================== */
/**
 * Placeholders ({fileName}, {selectedText}, {humanInput}, {newFileName}) are
 * substituted in getInteraction; the generated code is expected back inside
 * <selection> tags so the response multiplexer's 'selection' subscriber can
 * extract it and write it to the new file.
 */
public static readonly newFilePrompt = `
I am currently looking at this selected code from {fileName}:
\`\`\`
{selectedText}
\`\`\`
Help me with creating content for a new file based on the selected code.
- {humanInput}
## Instruction
- Follow my instructions to produce new code for the new file called {newFileName}.
- Think carefully and use the shared context as reference before produce the new code
- Make sure the new code works with the shared context and the selected code.
- Use the same framework, language and style as the shared context that are also from current directory I am working on.
- Put all new content inside <selection> tags.
- I only want to see the new code enclosed with the <selection> tags only if you understand my instructions.
- Do not enclose any part of your answer with <selection> tags if you are not sure about the answer.
- Only provide me with the code inside <selection> and nothing else.
- Do not enclose your answer with markdowns.
## Guidelines for the new file
- Include all the import statements that are required for the new code to work.
- If there are already content in the file with the same name, the new code will be appended to the file.
- If my selected code is empty, it means I am working in an empty file.
- Do not remove code that is being used by the the shared files.
- Do not suggest code that are not related to any of the shared context.
- Do not make up code, including function names, that could break the selected code.
`

// Prompt template for displaying the prompt to users in chat view
public static readonly displayPrompt = `\n
File: `
// ======================================================== //
// GET CONTEXT //
// ======================================================== //

/**
 * Collects context for the new-file prompt: the current selection plus files
 * from the selection's directory, topped up with open editor tabs when fewer
 * than 10 messages were gathered. Returns at most the last 10 messages.
 */
private async getContextMessages(
    selection: ActiveTextEditorSelection,
    currentDir: string
): Promise<ContextMessage[]> {
    const dirContext = await InlineTouch.getEditorDirContext(currentDir)
    const messages: ContextMessage[] = [...ChatQuestion.getEditorSelectionContext(selection), ...dirContext]
    if (messages.length < 10) {
        messages.push(...InlineTouch.getEditorOpenTabsContext(currentDir))
    }
    return messages.slice(-10)
}
// Create Context from Current Directory of Active Document //
/**
 * Builds context messages from up to 10 plain files in the given directory.
 * Each file's content is truncated to the current-file token budget; files
 * that fail to open are logged and skipped.
 */
public static async getEditorDirContext(currentDir: string): Promise<ContextMessage[]> {
    // get a list of files from the current directory path
    const currentDirUri = vscode.Uri.file(currentDir)
    // Get the list of files in the current directory then filter out directories and get the first 10 files
    // (entry[1] === 1 corresponds to vscode.FileType.File)
    const filesInDir = (await vscode.workspace.fs.readDirectory(currentDirUri))
        .filter(file => file[1] === 1)
        .slice(0, 10)
    const contextMessages: ContextMessage[] = []
    for (const file of filesInDir) {
        // Get the context from each file
        const fileName = vscode.Uri.joinPath(currentDirUri, file[0]).fsPath
        const fileUri = vscode.Uri.joinPath(currentDirUri, file[0])
        try {
            const fileContent = await vscode.workspace.openTextDocument(fileUri)
            const truncatedContent = truncateText(fileContent.getText(), MAX_CURRENT_FILE_TOKENS)
            const contextMessage = getContextMessageWithResponse(
                populateCurrentEditorContextTemplate(truncatedContent, fileName),
                { fileName }
            )
            contextMessages.push(...contextMessage)
        } catch (error) {
            // Best effort: skip unreadable files rather than failing the recipe.
            console.error(error)
        }
    }
    return contextMessages
}
// Get context from current editor open tabs
/**
 * Builds context messages from the other visible editors. The active editor,
 * files under `currentDir`, and non-file documents are excluded.
 */
public static getEditorOpenTabsContext(currentDir: string): ContextMessage[] {
    const messages: ContextMessage[] = []
    for (const editor of vscode.window.visibleTextEditors) {
        // Skip the active tab (already included in the selection context),
        // files in currentDir (covered by the directory context), and any
        // document that is not a plain file on disk.
        const isActiveTab = editor === vscode.window.activeTextEditor
        const isInCurrentDir = editor.document.fileName.includes(currentDir)
        const isFileScheme = editor.document.uri.scheme === 'file'
        if (isActiveTab || isInCurrentDir || !isFileScheme) {
            continue
        }
        const fileName = editor.document.fileName
        const truncatedContent = truncateText(editor.document.getText(), MAX_CURRENT_FILE_TOKENS)
        messages.push(
            ...getContextMessageWithResponse(
                populateCurrentEditorContextTemplate(truncatedContent, fileName),
                {
                    fileName,
                }
            )
        )
    }
    return messages
}
// ======================================================== //
// HELPERS //
// ======================================================== //
// Get display text for human
/** Formats the human's chat input for display, tagging it with the Touch banner and target file. */
private getHumanDisplayText(humanChatInput: string, fileName: string): string {
    const banner = '**✨Touch✨** '
    return banner + humanChatInput + InlineTouch.displayPrompt + fileName
}
/**
 * Asks the user (via an input box) for the instruction Cody should follow
 * when creating the new file. Empty input is rejected by validation; a
 * cancelled dialog yields an empty string.
 */
private async getInstructionFromInput(): Promise<string> {
    const humanInput = await vscode.window.showInputBox({
        prompt: 'Enter your instructions for Cody to create a new file based on the selected code:',
        placeHolder: 'ex. create unit tests for the selected code',
        validateInput: (input: string) => (input ? null : 'Please enter instructions.'),
    })
    return humanInput || ''
}
/**
 * Asks the user for the new file's name (pre-filled with `fileName`) and
 * returns the full path of that file inside `currentDir`. Validation
 * requires a non-empty name containing an extension.
 */
private async getNewFileNameFromInput(fileName: string, currentDir: string): Promise<string> {
    const newFileName = await vscode.window.showInputBox({
        prompt: 'Enter a new file name (with extension):',
        value: fileName,
        validateInput: (input: string) => {
            if (!input) {
                return 'Please enter a file name.'
            }
            return input.includes('.') ? null : 'Please enter a file name with extension.'
        },
    })
    // The result is the fsPath of the new file that the user is creating.
    return `${currentDir}/${newFileName}`
}
}

View File

@ -1,33 +0,0 @@
/**
 * Maps a human-readable programming language name to the identifier used in
 * markdown fenced code blocks (```<id>).
 */
export const languageMarkdownID: { [name: string]: string } = {
    BASIC: 'basic',
    Bash: 'bash',
    C: 'c',
    'C#': 'csharp',
    'C++': 'cpp',
    COBOL: 'cobol',
    Elm: 'elm',
    Erlang: 'erlang',
    Fortran: 'fortran',
    Go: 'go',
    Groovy: 'groovy',
    Haskell: 'haskell',
    Java: 'java',
    JavaScript: 'javascript',
    Julia: 'julia',
    Kotlin: 'kotlin',
    Lisp: 'lisp',
    Matlab: 'matlab',
    'Objective-C': 'objectivec',
    PHP: 'php',
    Perl: 'perl',
    Python: 'python',
    R: 'r',
    Ruby: 'ruby',
    Rust: 'rust',
    Scala: 'scala',
    Swift: 'swift',
    TypeScript: 'typescript',
    Zig: 'zig',
}

/** All supported language names, in declaration order. */
// Object.keys already returns a fresh array; the previous Array.from wrapper was redundant.
export const languageNames = Object.keys(languageMarkdownID)

View File

@ -1,75 +0,0 @@
import { CodebaseContext } from '../../codebase-context'
import { ContextMessage, getContextMessageWithResponse } from '../../codebase-context/messages'
import { Editor } from '../../editor'
import { IntentDetector } from '../../intent-detector'
import { CHARS_PER_TOKEN, MAX_AVAILABLE_PROMPT_LENGTH, MAX_CURRENT_FILE_TOKENS } from '../../prompt/constants'
import { populateCurrentEditorContextTemplate } from '../../prompt/templates'
import { truncateText } from '../../prompt/truncation'
import { Interaction } from '../transcript/interaction'
import { Recipe, RecipeContext, RecipeID } from './recipe'
export class NextQuestions implements Recipe {
public id: RecipeID = 'next-questions'
public async getInteraction(humanChatInput: string, context: RecipeContext): Promise<Interaction | null> {
const promptPrefix = 'Assume I have an answer to the following request:'
const promptSuffix =
'Generate one to three follow-up discussion topics that the human can ask you to uphold the conversation. Keep the topics very concise (try not to exceed 5 words per topic) and phrase them as questions.'
const maxTokenCount =
MAX_AVAILABLE_PROMPT_LENGTH - (promptPrefix.length + promptSuffix.length) / CHARS_PER_TOKEN
const truncatedText = truncateText(humanChatInput, maxTokenCount)
const promptMessage = `${promptPrefix}\n\n\`\`\`\n${truncatedText}\n\`\`\`\n\n${promptSuffix}`
const assistantResponsePrefix = 'Sure, here are great follow-up discussion topics and learning ideas:\n\n - '
return Promise.resolve(
new Interaction(
{ speaker: 'human', text: promptMessage },
{
speaker: 'assistant',
prefix: assistantResponsePrefix,
text: assistantResponsePrefix,
},
this.getContextMessages(promptMessage, context.editor, context.intentDetector, context.codebaseContext),
[]
)
)
}
private async getContextMessages(
text: string,
editor: Editor,
intentDetector: IntentDetector,
codebaseContext: CodebaseContext
): Promise<ContextMessage[]> {
const contextMessages: ContextMessage[] = []
const isCodebaseContextRequired = await intentDetector.isCodebaseContextRequired(text)
if (isCodebaseContextRequired) {
const codebaseContextMessages = await codebaseContext.getContextMessages(text, {
numCodeResults: 12,
numTextResults: 3,
})
contextMessages.push(...codebaseContextMessages)
}
if (isCodebaseContextRequired || intentDetector.isEditorContextRequired(text)) {
contextMessages.push(...this.getEditorContext(editor))
}
return contextMessages
}
private getEditorContext(editor: Editor): ContextMessage[] {
const visibleContent = editor.getActiveTextEditorVisibleContent()
if (!visibleContent) {
return []
}
const truncatedContent = truncateText(visibleContent.content, MAX_CURRENT_FILE_TOKENS)
return getContextMessageWithResponse(
populateCurrentEditorContextTemplate(truncatedContent, visibleContent.fileName, visibleContent.repoName),
visibleContent
)
}
}

View File

@ -1,101 +0,0 @@
import { CodebaseContext } from '../../codebase-context'
import { ContextMessage } from '../../codebase-context/messages'
import { MAX_CURRENT_FILE_TOKENS, MAX_HUMAN_INPUT_TOKENS } from '../../prompt/constants'
import { truncateText, truncateTextStart } from '../../prompt/truncation'
import { Interaction } from '../transcript/interaction'
import { Recipe, RecipeContext, RecipeID } from './recipe'
// TODO: Disconnect recipe from chat
/**
 * Recipe backing "Cody Fixups": rewrites the user's selection according to a
 * fixup task's instruction, streaming the replacement text back to the editor
 * through the response multiplexer's `selection` channel.
 */
export class NonStop implements Recipe {
    public id: RecipeID = 'non-stop'

    /**
     * Builds the interaction for a fixup task.
     *
     * @param taskId ID of the fixup task, resolved via the editor's fixup controller.
     * @param context Recipe tools (editor, codebase context, response multiplexer).
     * @returns The interaction to run, or null when there are no controllers,
     *          the task is unknown, or the selection exceeds the token budget.
     */
    public async getInteraction(taskId: string, context: RecipeContext): Promise<Interaction | null> {
        const controllers = context.editor.controllers
        if (!controllers) {
            return null
        }
        const taskParameters = await controllers.fixups.getTaskRecipeData(taskId)
        if (!taskParameters) {
            // Nothing to do.
            return null
        }
        const { instruction, fileName, precedingText, selectedText, followingText } = taskParameters
        // Token budget split: the selection may use up to half the file budget;
        // the preceding and following context each get a quarter.
        const quarterFileContext = Math.floor(MAX_CURRENT_FILE_TOKENS / 4)
        if (truncateText(selectedText, quarterFileContext * 2) !== selectedText) {
            const msg = "The amount of text selected exceeds Cody's current capacity."
            await context.editor.showWarningMessage(msg)
            // TODO: Communicate this error back to the FixupController
            return null
        }
        // Reconstruct Cody's prompt using user's context
        // Replace placeholders in reverse order to avoid collisions if a placeholder occurs in the input
        const promptText = NonStop.prompt
            .replace('{humanInput}', truncateText(instruction, MAX_HUMAN_INPUT_TOKENS))
            .replace('{responseMultiplexerPrompt}', context.responseMultiplexer.prompt())
            .replace('{truncateFollowingText}', truncateText(followingText, quarterFileContext))
            .replace('{selectedText}', selectedText)
            .replace('{truncateTextStart}', truncateTextStart(precedingText, quarterFileContext))
            .replace('{fileName}', fileName)
        // Accumulate the <selection> channel's content, forwarding each chunk to
        // the editor as 'streaming' and the final text as 'complete'.
        let text = ''
        context.responseMultiplexer.sub('selection', {
            onResponse: async (content: string) => {
                text += content
                await context.editor.didReceiveFixupText(taskId, text, 'streaming')
            },
            onTurnComplete: async () => {
                await context.editor.didReceiveFixupText(taskId, text, 'complete')
            },
        })
        return Promise.resolve(
            new Interaction(
                {
                    speaker: 'human',
                    text: promptText,
                    displayText: 'Cody Fixups: ' + instruction,
                },
                {
                    speaker: 'assistant',
                    prefix: 'Check your document for updates from Cody.',
                },
                this.getContextMessages(selectedText, context.codebaseContext),
                []
            )
        )
    }

    // Get context from editor
    /** Fetches codebase context messages relevant to the selected text. */
    private async getContextMessages(text: string, codebaseContext: CodebaseContext): Promise<ContextMessage[]> {
        const contextMessages: ContextMessage[] = await codebaseContext.getContextMessages(text, {
            numCodeResults: 12,
            numTextResults: 3,
        })
        return contextMessages
    }

    // Prompt Templates
    public static readonly prompt = `
This is part of the file {fileName}. The part of the file I have selected is enclosed with the <selection> tags. You are helping me to work on that part as my coding assistant.
Follow the instructions in the selected part along with the additional instruction provide below to produce a rewritten replacement for only the selected part.
Put the rewritten replacement inside <selection> tags. I only want to see the code within <selection>.
Do not move code from outside the selection into the selection in your reply.
Do not remove code inside the <selection> tags that might be being used by the code outside the <selection> tags.
Do not enclose replacement code with tags other than the <selection> tags.
Do not enclose your answer with any markdown.
Only return provide me the replacement <selection> and nothing else.
If it doesn't make sense, you do not need to provide <selection>.
\`\`\`
{truncateTextStart}<selection>{selectedText}</selection>{truncateFollowingText}
\`\`\`
Additional Instruction:
- {humanInput}
- {responseMultiplexerPrompt}
`
}

View File

@ -1,38 +0,0 @@
import { CodebaseContext } from '../../codebase-context'
import { Editor } from '../../editor'
import { IntentDetector } from '../../intent-detector'
import { BotResponseMultiplexer } from '../bot-response-multiplexer'
import { Interaction } from '../transcript/interaction'
/** Tools and context recipes can use at the point they are invoked. */
export interface RecipeContext {
    /** Abstraction over the active editor (selections, visible content, UI prompts). */
    editor: Editor
    /** Decides whether a given input needs codebase and/or editor context. */
    intentDetector: IntentDetector
    /** Source of codebase-wide context messages (code and text search results). */
    codebaseContext: CodebaseContext
    /** Routes tagged portions of the bot's response to subscribers. */
    responseMultiplexer: BotResponseMultiplexer
    /** True when this is the first interaction of the conversation. */
    firstInteraction: boolean
}

/** Stable identifiers for the built-in recipes. */
export type RecipeID =
    | 'chat-question'
    | 'context-search'
    | 'explain-code-detailed'
    | 'explain-code-high-level'
    | 'inline-touch'
    | 'find-code-smells'
    | 'fixup'
    | 'generate-docstring'
    | 'generate-unit-test'
    | 'git-history'
    | 'improve-variable-names'
    | 'inline-chat'
    | 'next-questions'
    | 'non-stop'
    | 'pr-description'
    | 'release-notes'
    | 'translate-to-language'

/** A recipe turns the human's input plus context into a single chat interaction (or null to abort). */
export interface Recipe {
    id: RecipeID
    getInteraction(humanChatInput: string, context: RecipeContext): Promise<Interaction | null>
}

View File

@ -1,46 +0,0 @@
import { MAX_RECIPE_INPUT_TOKENS } from '../../prompt/constants'
import { truncateText } from '../../prompt/truncation'
import { Interaction } from '../transcript/interaction'
import { languageMarkdownID, languageNames } from './langs'
import { Recipe, RecipeContext, RecipeID } from './recipe'
/**
 * Recipe that translates the current selection (or the entire file when
 * nothing is selected) into a target language chosen from a quick pick.
 */
export class TranslateToLanguage implements Recipe {
    public id: RecipeID = 'translate-to-language'

    /** Languages offered in the quick pick. */
    public static options = languageNames

    public async getInteraction(_humanChatInput: string, context: RecipeContext): Promise<Interaction | null> {
        const selection = context.editor.getActiveTextEditorSelectionOrEntireFile()
        if (!selection) {
            await context.editor.showWarningMessage('No code selected. Please select some code and try again.')
            return null
        }
        const toLanguage = await context.editor.showQuickPick(languageNames)
        if (!toLanguage) {
            // TODO: Show the warning within the Chat UI.
            // editor.showWarningMessage('Must pick a language to translate to.')
            return null
        }
        const truncated = truncateText(selection.selectedText, MAX_RECIPE_INPUT_TOKENS)
        // The prompt uses the truncated code; the display text shows the full selection.
        const promptMessage = `Translate the following code into ${toLanguage}\n\`\`\`\n${truncated}\n\`\`\``
        const displayText = `Translate the following code into ${toLanguage}\n\`\`\`\n${selection.selectedText}\n\`\`\``
        const markdownID = languageMarkdownID[toLanguage] || ''
        const responsePrefix = `Here is the code translated to ${toLanguage}:\n\`\`\`${markdownID}\n`
        return new Interaction(
            { speaker: 'human', text: promptMessage, displayText },
            {
                speaker: 'assistant',
                prefix: responsePrefix,
                text: responsePrefix,
            },
            Promise.resolve([]),
            []
        )
    }
}

View File

@ -1,281 +0,0 @@
import { ContextFile, ContextMessage, OldContextMessage } from '../../codebase-context/messages'
import { CHARS_PER_TOKEN, MAX_AVAILABLE_PROMPT_LENGTH } from '../../prompt/constants'
import { PromptMixin } from '../../prompt/prompt-mixin'
import { Message } from '../../sourcegraph-api'
import { Interaction, InteractionJSON } from './interaction'
import { ChatMessage } from './messages'
/** Scope information persisted alongside a transcript. */
export interface TranscriptJSONScope {
    includeInferredRepository: boolean
    includeInferredFile: boolean
    repositories: string[]
}

/** Serialized form of a Transcript. */
export interface TranscriptJSON {
    // This is the timestamp of the first interaction.
    id: string
    interactions: InteractionJSON[]
    lastInteractionTimestamp: string
    scope?: TranscriptJSONScope
}
/**
 * The "model" class that tracks the call and response of the Cody chat box.
 * Any "controller" logic belongs outside of this class.
 */
export class Transcript {
    /**
     * Rehydrates a transcript from its serialized form, migrating two legacy
     * shapes: interactions whose context lives under the deprecated `context`
     * field, and context messages using the old top-level `fileName` field.
     */
    public static fromJSON(json: TranscriptJSON): Transcript {
        return new Transcript(
            json.interactions.map(
                ({ humanMessage, assistantMessage, context, fullContext, usedContextFiles, timestamp }) => {
                    if (!fullContext) {
                        // Legacy field: `context` was renamed to `fullContext`.
                        fullContext = context || []
                    }
                    return new Interaction(
                        humanMessage,
                        assistantMessage,
                        Promise.resolve(
                            fullContext.map(message => {
                                if (message.file) {
                                    return message
                                }
                                // Migrate the deprecated `fileName` field to `file`.
                                const { fileName } = message as any as OldContextMessage
                                if (fileName) {
                                    return { ...message, file: { fileName } }
                                }
                                return message
                            })
                        ),
                        usedContextFiles || [],
                        timestamp || new Date().toISOString()
                    )
                }
            ),
            json.id
        )
    }

    private interactions: Interaction[] = []

    // Defaults to the timestamp of the first interaction with a parseable
    // timestamp, or the construction time when there is none.
    private internalID: string

    constructor(interactions: Interaction[] = [], id?: string) {
        this.interactions = interactions
        this.internalID =
            id ||
            this.interactions.find(({ timestamp }) => !isNaN(new Date(timestamp) as any))?.timestamp ||
            new Date().toISOString()
    }

    public get id(): string {
        return this.internalID
    }

    public get isEmpty(): boolean {
        return this.interactions.length === 0
    }

    /** Timestamp of the newest interaction with a parseable timestamp, falling back to the transcript ID. */
    public get lastInteractionTimestamp(): string {
        for (let index = this.interactions.length - 1; index >= 0; index--) {
            const { timestamp } = this.interactions[index]
            if (!isNaN(new Date(timestamp) as any)) {
                return timestamp
            }
        }
        return this.internalID
    }

    /** Appends an interaction; null interactions (aborted recipes) are ignored. */
    public addInteraction(interaction: Interaction | null): void {
        if (!interaction) {
            return
        }
        this.interactions.push(interaction)
    }

    public getLastInteraction(): Interaction | null {
        return this.interactions.length > 0 ? this.interactions[this.interactions.length - 1] : null
    }

    public removeLastInteraction(): void {
        this.interactions.pop()
    }

    /** Removes the interaction whose timestamp equals `id` and everything after it. */
    public removeInteractionsSince(id: string): void {
        const index = this.interactions.findIndex(({ timestamp }) => timestamp === id)
        if (index >= 0) {
            this.interactions = this.interactions.slice(0, index)
        }
    }

    /** Sets the assistant's reply on the last interaction. */
    public addAssistantResponse(text: string, displayText?: string): void {
        this.getLastInteraction()?.setAssistantMessage({
            speaker: 'assistant',
            text,
            displayText: displayText ?? text,
        })
    }

    /**
     * Adds an error div to the assistant response. If the assistant has collected
     * some response before, we will add the error message after it.
     *
     * @param errorText The error TEXT to be displayed. Do not wrap it in HTML tags.
     */
    public addErrorAsAssistantResponse(errorText: string): void {
        const lastInteraction = this.getLastInteraction()
        if (!lastInteraction) {
            return
        }
        // If the assistant has responded before, append the error message after it.
        const lastAssistantMessage = lastInteraction.getAssistantMessage().displayText || ''
        lastInteraction.setAssistantMessage({
            speaker: 'assistant',
            text: 'Failed to generate a response due to server error.',
            displayText:
                lastAssistantMessage + `<div class="cody-chat-error"><span>Request failed: </span>${errorText}</div>`,
        })
    }

    /** Index of the newest interaction that has context messages, or -1 when none do. */
    private async getLastInteractionWithContextIndex(): Promise<number> {
        for (let index = this.interactions.length - 1; index >= 0; index--) {
            const hasContext = await this.interactions[index].hasContext()
            if (hasContext) {
                return index
            }
        }
        return -1
    }

    /**
     * Builds the prompt to send to the model for the last interaction.
     *
     * Context messages are included only for the newest interaction that has
     * any; the whole conversation is then truncated (oldest message pairs
     * dropped first) to fit `maxPromptLength` after reserving room for the
     * preamble.
     *
     * @returns The prompt messages plus the context files that survived truncation.
     */
    public async getPromptForLastInteraction(
        preamble: Message[] = [],
        maxPromptLength: number = MAX_AVAILABLE_PROMPT_LENGTH
    ): Promise<{ prompt: Message[]; contextFiles: ContextFile[] }> {
        if (this.interactions.length === 0) {
            return { prompt: [], contextFiles: [] }
        }
        const lastInteractionWithContextIndex = await this.getLastInteractionWithContextIndex()
        const messages: Message[] = []
        for (let index = 0; index < this.interactions.length; index++) {
            const interaction = this.interactions[index]
            const humanMessage = PromptMixin.mixInto(interaction.getHumanMessage())
            const assistantMessage = interaction.getAssistantMessage()
            const contextMessages = await interaction.getFullContext()
            if (index === lastInteractionWithContextIndex) {
                messages.push(...contextMessages, humanMessage, assistantMessage)
            } else {
                messages.push(humanMessage, assistantMessage)
            }
        }
        // Reserve tokens for the preamble before truncating the conversation.
        const preambleTokensUsage = preamble.reduce((acc, message) => acc + estimateTokensUsage(message), 0)
        let truncatedMessages = truncatePrompt(messages, maxPromptLength - preambleTokensUsage)
        // Return what context fits in the window
        const contextFiles: ContextFile[] = []
        for (const msg of truncatedMessages) {
            const contextFile = (msg as ContextMessage).file
            if (contextFile) {
                contextFiles.push(contextFile)
            }
        }
        // Filter out extraneous fields from ContextMessage instances
        truncatedMessages = truncatedMessages.map(({ speaker, text }) => ({ speaker, text }))
        return {
            prompt: [...preamble, ...truncatedMessages],
            contextFiles,
        }
    }

    /** Records which context files were actually used for the last interaction. */
    public setUsedContextFilesForLastInteraction(contextFiles: ContextFile[]): void {
        if (this.interactions.length === 0) {
            throw new Error('Cannot set context files for empty transcript')
        }
        this.interactions[this.interactions.length - 1].setUsedContext(contextFiles)
    }

    public toChat(): ChatMessage[] {
        return this.interactions.flatMap(interaction => interaction.toChat())
    }

    /** Like toChat, but waits for every interaction's context promise to settle first. */
    public async toChatPromise(): Promise<ChatMessage[]> {
        return [...(await Promise.all(this.interactions.map(interaction => interaction.toChatPromise())))].flat()
    }

    public async toJSON(scope?: TranscriptJSONScope): Promise<TranscriptJSON> {
        const interactions = await Promise.all(this.interactions.map(interaction => interaction.toJSON()))
        return {
            id: this.id,
            interactions,
            lastInteractionTimestamp: this.lastInteractionTimestamp,
            scope: scope
                ? {
                      repositories: scope.repositories,
                      includeInferredRepository: scope.includeInferredRepository,
                      includeInferredFile: scope.includeInferredFile,
                  }
                : undefined,
        }
    }

    /** Serialized form with the interactions omitted. */
    public toJSONEmpty(scope?: TranscriptJSONScope): TranscriptJSON {
        return {
            id: this.id,
            interactions: [],
            lastInteractionTimestamp: this.lastInteractionTimestamp,
            scope: scope
                ? {
                      repositories: scope.repositories,
                      includeInferredRepository: scope.includeInferredRepository,
                      includeInferredFile: scope.includeInferredFile,
                  }
                : undefined,
        }
    }

    /** Clears all interactions and assigns a fresh ID. */
    public reset(): void {
        this.interactions = []
        this.internalID = new Date().toISOString()
    }
}
/**
 * Truncates the given prompt messages to fit within the available tokens budget.
 * The truncation is done by removing the oldest pairs of messages first.
 * No individual message will be truncated. We just remove pairs of messages if they exceed the available tokens budget.
 */
function truncatePrompt(messages: Message[], maxTokens: number): Message[] {
    const kept: Message[] = []
    let remainingBudget = maxTokens
    // Walk the conversation backwards one (human, bot) pair at a time.
    for (let i = messages.length - 1; i >= 1; i -= 2) {
        const human = messages[i - 1]
        const bot = messages[i]
        const pairCost = estimateTokensUsage(human) + estimateTokensUsage(bot)
        // Stop at the first pair that no longer fits in the budget.
        if (pairCost > remainingBudget) {
            break
        }
        kept.push(bot, human)
        remainingBudget -= pairCost
    }
    // Restore chronological order (older -> newer).
    return kept.reverse()
}
/**
 * Gives a rough estimate for the number of tokens used by the message.
 */
function estimateTokensUsage(message: Message): number {
    const charCount = message.text ? message.text.length : 0
    return Math.round(charCount / CHARS_PER_TOKEN)
}

View File

@ -1,72 +0,0 @@
import { ContextMessage, ContextFile } from '../../codebase-context/messages'
import { ChatMessage, InteractionMessage } from './messages'
/** Serialized form of an Interaction. */
export interface InteractionJSON {
    humanMessage: InteractionMessage
    assistantMessage: InteractionMessage
    fullContext: ContextMessage[]
    usedContextFiles: ContextFile[]
    timestamp: string

    // DEPRECATED: Legacy field for backcompat, renamed to `fullContext`
    context?: ContextMessage[]
}
/**
 * One human/assistant exchange in a transcript, together with the context
 * messages supplied to the assistant and the context files actually used.
 */
export class Interaction {
    constructor(
        private readonly humanMessage: InteractionMessage,
        private assistantMessage: InteractionMessage,
        private fullContext: Promise<ContextMessage[]>,
        private usedContextFiles: ContextFile[],
        public readonly timestamp: string = new Date().toISOString()
    ) {}

    /** Returns a shallow copy of the assistant's message. */
    public getAssistantMessage(): InteractionMessage {
        return Object.assign({}, this.assistantMessage)
    }

    public setAssistantMessage(assistantMessage: InteractionMessage): void {
        this.assistantMessage = assistantMessage
    }

    /** Returns a shallow copy of the human's message. */
    public getHumanMessage(): InteractionMessage {
        return Object.assign({}, this.humanMessage)
    }

    /** Resolves the context promise and returns shallow copies of each message. */
    public async getFullContext(): Promise<ContextMessage[]> {
        const resolved = await this.fullContext
        return resolved.map(message => Object.assign({}, message))
    }

    public async hasContext(): Promise<boolean> {
        return (await this.fullContext).length > 0
    }

    public setUsedContext(usedContextFiles: ContextFile[]): void {
        this.usedContextFiles = usedContextFiles
    }

    /**
     * Converts the interaction to chat message pair: one message from a human, one from an assistant.
     */
    public toChat(): ChatMessage[] {
        const assistantWithContext = { ...this.assistantMessage, contextFiles: this.usedContextFiles }
        return [this.humanMessage, assistantWithContext]
    }

    /** Like toChat, but waits for the context promise to settle first. */
    public async toChatPromise(): Promise<ChatMessage[]> {
        await this.fullContext
        return this.toChat()
    }

    public async toJSON(): Promise<InteractionJSON> {
        const fullContext = await this.fullContext
        return {
            humanMessage: this.humanMessage,
            assistantMessage: this.assistantMessage,
            fullContext,
            usedContextFiles: this.usedContextFiles,
            timestamp: this.timestamp,
        }
    }
}

View File

@ -1,34 +0,0 @@
import { ContextFile } from '../../codebase-context/messages'
import { Message } from '../../sourcegraph-api'
import { TranscriptJSON } from '.'
/** A clickable button rendered alongside a chat message. */
export interface ChatButton {
    label: string
    action: string
    onClick: (action: string) => void
}

/** A message as displayed in the chat UI. */
export interface ChatMessage extends Message {
    /** Text shown to the user; may differ from the prompt text sent to the model. */
    displayText?: string
    /** Files whose content was used as context for this message. */
    contextFiles?: ContextFile[]
    buttons?: ChatButton[]
}

/** A message within an interaction. */
export interface InteractionMessage extends Message {
    displayText?: string
    /** Text prepended to the assistant's response (see the recipe implementations). */
    prefix?: string
}

/** Chat state persisted locally for a user. */
export interface UserLocalHistory {
    chat: ChatHistory
    input: string[]
}

/** Transcripts keyed by chat ID. */
export interface ChatHistory {
    [chatID: string]: TranscriptJSON
}

// Legacy history shape — presumably the pre-transcript format kept for migration; confirm against callers.
export interface OldChatHistory {
    [chatID: string]: ChatMessage[]
}

View File

@ -1,367 +0,0 @@
import assert from 'assert'
import { CodebaseContext } from '../../codebase-context'
import { MAX_AVAILABLE_PROMPT_LENGTH } from '../../prompt/constants'
import { Message } from '../../sourcegraph-api'
import {
defaultKeywordContextFetcher,
MockEditor,
MockEmbeddingsClient,
MockIntentDetector,
newRecipeContext,
} from '../../test/mocks'
import { ChatQuestion } from '../recipes/chat-question'
import { Transcript } from '.'
/**
 * Builds a transcript long enough to exceed the maximum prompt length so
 * truncation behavior can be exercised.
 */
async function generateLongTranscript(): Promise<{ transcript: Transcript; tokensPerInteraction: number }> {
    // Add enough interactions to exceed the maximum prompt length.
    const interactionCount = 100
    const transcript = new Transcript()
    for (let i = 0; i < interactionCount; i++) {
        // 'ABCD'.repeat(256) is ~256 tokens (1 token is ~4 chars).
        const interaction = await new ChatQuestion(() => {}).getInteraction('ABCD'.repeat(256), newRecipeContext())
        transcript.addInteraction(interaction)
        transcript.addAssistantResponse('EFGH'.repeat(256)) // another ~256 tokens
    }
    // 512 = 256 (question) + 256 (response) tokens per interaction.
    return { transcript, tokensPerInteraction: 512 }
}
describe('Transcript', () => {
it('generates an empty prompt with no interactions', async () => {
const transcript = new Transcript()
const { prompt } = await transcript.getPromptForLastInteraction()
assert.deepStrictEqual(prompt, [])
})
it('generates a prompt without context for a chat question', async () => {
const interaction = await new ChatQuestion(() => {}).getInteraction(
'how do access tokens work in sourcegraph',
newRecipeContext()
)
const transcript = new Transcript()
transcript.addInteraction(interaction)
const { prompt } = await transcript.getPromptForLastInteraction()
const expectedPrompt = [
{ speaker: 'human', text: 'how do access tokens work in sourcegraph' },
{ speaker: 'assistant', text: undefined },
]
assert.deepStrictEqual(prompt, expectedPrompt)
})
it('generates a prompt with context for a chat question', async () => {
const embeddings = new MockEmbeddingsClient({
search: async () =>
Promise.resolve({
codeResults: [{ fileName: 'src/main.go', startLine: 0, endLine: 1, content: 'package main' }],
textResults: [{ fileName: 'docs/README.md', startLine: 0, endLine: 1, content: '# Main' }],
}),
})
const interaction = await new ChatQuestion(() => {}).getInteraction(
'how do access tokens work in sourcegraph',
newRecipeContext({
intentDetector: new MockIntentDetector({
isCodebaseContextRequired: async () => Promise.resolve(true),
}),
codebaseContext: new CodebaseContext(
{ useContext: 'embeddings', serverEndpoint: 'https://example.com' },
'dummy-codebase',
embeddings,
defaultKeywordContextFetcher,
null
),
})
)
const transcript = new Transcript()
transcript.addInteraction(interaction)
const { prompt } = await transcript.getPromptForLastInteraction()
const expectedPrompt = [
{ speaker: 'human', text: 'Use the following text from file `docs/README.md`:\n# Main' },
{ speaker: 'assistant', text: 'Ok.' },
{ speaker: 'human', text: 'Use following code snippet from file `src/main.go`:\n```go\npackage main\n```' },
{ speaker: 'assistant', text: 'Ok.' },
{ speaker: 'human', text: 'how do access tokens work in sourcegraph' },
{ speaker: 'assistant', text: undefined },
]
assert.deepStrictEqual(prompt, expectedPrompt)
})
it('generates a prompt with context for a chat question for first interaction', async () => {
const embeddings = new MockEmbeddingsClient({
search: async () =>
Promise.resolve({
codeResults: [{ fileName: 'src/main.go', startLine: 0, endLine: 1, content: 'package main' }],
textResults: [{ fileName: 'docs/README.md', startLine: 0, endLine: 1, content: '# Main' }],
}),
})
const interaction = await new ChatQuestion(() => {}).getInteraction(
'how do access tokens work in sourcegraph',
newRecipeContext({
codebaseContext: new CodebaseContext(
{ useContext: 'embeddings', serverEndpoint: 'https://example.com' },
'dummy-codebase',
embeddings,
defaultKeywordContextFetcher,
null
),
firstInteraction: true,
})
)
const transcript = new Transcript()
transcript.addInteraction(interaction)
const { prompt } = await transcript.getPromptForLastInteraction()
const expectedPrompt = [
{ speaker: 'human', text: 'Use the following text from file `docs/README.md`:\n# Main' },
{ speaker: 'assistant', text: 'Ok.' },
{ speaker: 'human', text: 'Use following code snippet from file `src/main.go`:\n```go\npackage main\n```' },
{ speaker: 'assistant', text: 'Ok.' },
{ speaker: 'human', text: 'how do access tokens work in sourcegraph' },
{ speaker: 'assistant', text: undefined },
]
assert.deepStrictEqual(prompt, expectedPrompt)
})
it('generates a prompt for multiple chat questions, includes context for last question only', async () => {
const embeddings = new MockEmbeddingsClient({
search: async () =>
Promise.resolve({
codeResults: [{ fileName: 'src/main.go', startLine: 0, endLine: 1, content: 'package main' }],
textResults: [{ fileName: 'docs/README.md', startLine: 0, endLine: 1, content: '# Main' }],
}),
})
const intentDetector = new MockIntentDetector({ isCodebaseContextRequired: async () => Promise.resolve(true) })
const codebaseContext = new CodebaseContext(
{ useContext: 'embeddings', serverEndpoint: 'https://example.com' },
'dummy-codebase',
embeddings,
defaultKeywordContextFetcher,
null
)
const chatQuestionRecipe = new ChatQuestion(() => {})
const transcript = new Transcript()
const firstInteraction = await chatQuestionRecipe.getInteraction(
'how do access tokens work in sourcegraph',
newRecipeContext({
intentDetector,
codebaseContext,
})
)
transcript.addInteraction(firstInteraction)
const assistantResponse = 'By setting the Authorization header.'
transcript.addAssistantResponse(assistantResponse)
const secondInteraction = await chatQuestionRecipe.getInteraction(
'how to create a batch change',
newRecipeContext({
intentDetector,
codebaseContext,
})
)
transcript.addInteraction(secondInteraction)
const { prompt } = await transcript.getPromptForLastInteraction()
const expectedPrompt = [
{ speaker: 'human', text: 'how do access tokens work in sourcegraph' },
{ speaker: 'assistant', text: assistantResponse },
{ speaker: 'human', text: 'Use the following text from file `docs/README.md`:\n# Main' },
{ speaker: 'assistant', text: 'Ok.' },
{ speaker: 'human', text: 'Use following code snippet from file `src/main.go`:\n```go\npackage main\n```' },
{ speaker: 'assistant', text: 'Ok.' },
{ speaker: 'human', text: 'how to create a batch change' },
{ speaker: 'assistant', text: undefined },
]
assert.deepStrictEqual(prompt, expectedPrompt)
})
it('should limit prompts to a maximum number of tokens', async () => {
const { transcript, tokensPerInteraction } = await generateLongTranscript()
const numExpectedInteractions = Math.floor(MAX_AVAILABLE_PROMPT_LENGTH / tokensPerInteraction)
const numExpectedMessages = numExpectedInteractions * 2 // Each interaction has two messages.
const { prompt } = await transcript.getPromptForLastInteraction()
assert.deepStrictEqual(prompt.length, numExpectedMessages)
})
it('should limit prompts to a maximum number of tokens with preamble always included', async () => {
    const { transcript, tokensPerInteraction } = await generateLongTranscript()
    // A preamble two interactions (four messages) long, so it consumes a known
    // share of the available token budget.
    const preamble: Message[] = [
        { speaker: 'human', text: 'PREA'.repeat(tokensPerInteraction / 2) },
        { speaker: 'assistant', text: 'MBLE'.repeat(tokensPerInteraction / 2) },
        { speaker: 'human', text: 'PREA'.repeat(tokensPerInteraction / 2) },
        { speaker: 'assistant', text: 'MBLE'.repeat(tokensPerInteraction / 2) },
    ]
    const numExpectedInteractions = Math.floor(MAX_AVAILABLE_PROMPT_LENGTH / tokensPerInteraction)
    const numExpectedMessages = numExpectedInteractions * 2 // Each interaction has two messages.
    const { prompt } = await transcript.getPromptForLastInteraction(preamble)
    assert.deepStrictEqual(prompt.length, numExpectedMessages)
    // The preamble must survive truncation and always open the prompt.
    assert.deepStrictEqual(preamble, prompt.slice(0, 4))
})

it('includes currently visible content from the editor', async () => {
    const editor = new MockEditor({
        getActiveTextEditorVisibleContent: () => ({ fileName: 'internal/lib.go', content: 'package lib' }),
    })
    const embeddings = new MockEmbeddingsClient({
        search: async () =>
            Promise.resolve({
                codeResults: [{ fileName: 'src/main.go', startLine: 0, endLine: 1, content: 'package main' }],
                textResults: [{ fileName: 'docs/README.md', startLine: 0, endLine: 1, content: '# Main' }],
            }),
    })
    // Context is fetched only when the intent detector says the codebase is required.
    const intentDetector = new MockIntentDetector({ isCodebaseContextRequired: async () => Promise.resolve(true) })
    const codebaseContext = new CodebaseContext(
        { useContext: 'embeddings', serverEndpoint: 'https://example.com' },
        'dummy-codebase',
        embeddings,
        defaultKeywordContextFetcher,
        null
    )
    const chatQuestionRecipe = new ChatQuestion(() => {})
    const transcript = new Transcript()
    const interaction = await chatQuestionRecipe.getInteraction(
        'how do access tokens work in sourcegraph',
        newRecipeContext({
            editor,
            intentDetector,
            codebaseContext,
        })
    )
    transcript.addInteraction(interaction)
    const { prompt } = await transcript.getPromptForLastInteraction()
    // Expected ordering: embeddings text result, embeddings code result, the
    // editor's visible file, then the question — each context item paired with
    // an assistant acknowledgement.
    const expectedPrompt = [
        { speaker: 'human', text: 'Use the following text from file `docs/README.md`:\n# Main' },
        { speaker: 'assistant', text: 'Ok.' },
        {
            speaker: 'human',
            text: 'Use following code snippet from file `src/main.go`:\n```go\npackage main\n```',
        },
        { speaker: 'assistant', text: 'Ok.' },
        {
            speaker: 'human',
            text: 'I have the `internal/lib.go` file opened in my editor. Use following code snippet from file `internal/lib.go`:\n```go\npackage lib\n```',
        },
        {
            speaker: 'assistant',
            text: 'Ok.',
        },
        { speaker: 'human', text: 'how do access tokens work in sourcegraph' },
        { speaker: 'assistant', text: undefined },
    ]
    assert.deepStrictEqual(prompt, expectedPrompt)
})

it('does not include currently visible content from the editor if no codebase context is required', async () => {
    const editor = new MockEditor({
        getActiveTextEditorVisibleContent: () => ({ fileName: 'internal/lib.go', content: 'package lib' }),
    })
    // Intent detector says no codebase context is needed, so the editor's
    // visible file must be omitted from the prompt.
    const intentDetector = new MockIntentDetector({ isCodebaseContextRequired: async () => Promise.resolve(false) })
    const transcript = new Transcript()
    const interaction = await new ChatQuestion(() => {}).getInteraction(
        'how do access tokens work in sourcegraph',
        newRecipeContext({
            editor,
            intentDetector,
        })
    )
    transcript.addInteraction(interaction)
    const { prompt } = await transcript.getPromptForLastInteraction()
    const expectedPrompt = [
        { speaker: 'human', text: 'how do access tokens work in sourcegraph' },
        { speaker: 'assistant', text: undefined },
    ]
    assert.deepStrictEqual(prompt, expectedPrompt)
})

it('adds context for last interaction with non-empty context', async () => {
    const embeddings = new MockEmbeddingsClient({
        search: async () =>
            Promise.resolve({
                codeResults: [{ fileName: 'src/main.go', startLine: 0, endLine: 1, content: 'package main' }],
                textResults: [{ fileName: 'docs/README.md', startLine: 0, endLine: 1, content: '# Main' }],
            }),
    })
    const intentDetector = new MockIntentDetector({ isCodebaseContextRequired: async () => Promise.resolve(true) })
    const codebaseContext = new CodebaseContext(
        { useContext: 'embeddings', serverEndpoint: 'https://example.com' },
        'dummy-codebase',
        embeddings,
        defaultKeywordContextFetcher,
        null
    )
    const chatQuestionRecipe = new ChatQuestion(() => {})
    const transcript = new Transcript()
    const firstInteraction = await chatQuestionRecipe.getInteraction(
        'how do batch changes work in sourcegraph',
        newRecipeContext({
            intentDetector,
            codebaseContext,
        })
    )
    transcript.addInteraction(firstInteraction)
    transcript.addAssistantResponse('Smartly.')
    const secondInteraction = await chatQuestionRecipe.getInteraction(
        'how do access tokens work in sourcegraph',
        newRecipeContext({
            intentDetector,
            codebaseContext,
        })
    )
    transcript.addInteraction(secondInteraction)
    transcript.addAssistantResponse('By setting the Authorization header.')
    const thirdInteraction = await chatQuestionRecipe.getInteraction(
        'how do to delete them',
        newRecipeContext({
            // Here, we use the default intent detector to disable context fetching.
            codebaseContext,
        })
    )
    transcript.addInteraction(thirdInteraction)
    const { prompt } = await transcript.getPromptForLastInteraction()
    // Context messages attach to the *last* interaction that fetched context
    // (the second one); earlier interactions appear without context.
    const expectedPrompt = [
        { speaker: 'human', text: 'how do batch changes work in sourcegraph' },
        { speaker: 'assistant', text: 'Smartly.' },
        { speaker: 'human', text: 'Use the following text from file `docs/README.md`:\n# Main' },
        { speaker: 'assistant', text: 'Ok.' },
        { speaker: 'human', text: 'Use following code snippet from file `src/main.go`:\n```go\npackage main\n```' },
        { speaker: 'assistant', text: 'Ok.' },
        { speaker: 'human', text: 'how do access tokens work in sourcegraph' },
        { speaker: 'assistant', text: 'By setting the Authorization header.' },
        { speaker: 'human', text: 'how do to delete them' },
        { speaker: 'assistant', text: undefined },
    ]
    assert.deepStrictEqual(prompt, expectedPrompt)
})
})

View File

@ -1,85 +0,0 @@
interface Typewriter {
    /**
     * Feed the typewriter the latest full text to animate.
     * Text should be incremental but still include the previous text. E.g. "Hel" -> "Hello" -> "Hello, world!"
     */
    write: (incomingText: string) => void
    /** Halt the animation and immediately emit any remaining text. */
    stop: () => void
}

interface CreateTypewriterParams {
    /** Invoked with the accumulated text each time new characters are revealed. */
    emit: (text: string) => void
}

// Bounds on the pause between emitted character chunks.
const MAX_DELAY_MS = 200
const MIN_DELAY_MS = 5
const MIN_CHAR_CHUNK_SIZE = 1

export const createTypewriter = ({ emit }: CreateTypewriterParams): Typewriter => {
    // The newest full text passed to write(); flushed verbatim by stop().
    let latestText = ''
    // The portion of the text that has already been emitted.
    let emittedText = ''
    let timer: ReturnType<typeof setInterval> | undefined

    const cancelTimer = (): void => {
        if (timer) {
            clearInterval(timer)
            timer = undefined
        }
    }

    return {
        write: (updatedText: string) => {
            // Keep text in sync with the latest update, so consumers can choose to `stop` early.
            latestText = updatedText

            // Never run two animation timers at once.
            cancelTimer()

            // Speed up proportionally to how far behind we are, but never go
            // below the minimum delay so some typing effect always remains.
            const backlog = updatedText.length - emittedText.length
            const proportionalDelay = MAX_DELAY_MS / backlog
            const delay = Math.max(proportionalDelay, MIN_DELAY_MS)

            // When even the minimum delay cannot keep pace with the incoming
            // text, reveal more characters per tick instead of ticking faster.
            // For very large chunks this yields a chunk size far bigger than a
            // typical typing effect — an accepted trade-off to stay in sync.
            const chunkSize =
                proportionalDelay < MIN_DELAY_MS
                    ? Math.round(MIN_DELAY_MS / proportionalDelay)
                    : MIN_CHAR_CHUNK_SIZE

            timer = setInterval(() => {
                emittedText += updatedText.slice(emittedText.length, emittedText.length + chunkSize)
                // Clean up once we have caught up with the known text.
                if (emittedText.length === updatedText.length && timer) {
                    clearInterval(timer)
                    timer = undefined
                }
                return emit(emittedText)
            }, delay)
        },
        stop: () => {
            cancelTimer()
            return emit(latestText)
        },
    }
}

View File

@ -1,455 +0,0 @@
import { useState, useCallback, useMemo } from 'react'
import { isErrorLike } from '@sourcegraph/common'
import { CodebaseContext } from '../codebase-context'
import { ConfigurationWithAccessToken } from '../configuration'
import { Editor, NoopEditor } from '../editor'
import { PrefilledOptions, withPreselectedOptions } from '../editor/withPreselectedOptions'
import { SourcegraphIntentDetectorClient } from '../intent-detector/client'
import { SourcegraphBrowserCompletionsClient } from '../sourcegraph-api/completions/browserClient'
import { SourcegraphGraphQLAPIClient } from '../sourcegraph-api/graphql'
import { UnifiedContextFetcherClient } from '../unified-context/client'
import { isError } from '../utils'
import { BotResponseMultiplexer } from './bot-response-multiplexer'
import { ChatClient } from './chat'
import { getMultiRepoPreamble } from './preamble'
import { getRecipe } from './recipes/browser-recipes'
import { RecipeID } from './recipes/recipe'
import { Transcript } from './transcript'
import { ChatMessage } from './transcript/messages'
import { reformatBotMessage } from './viewHelpers'
/** The configuration subset the web Cody client needs, plus web-only flags. */
export type CodyClientConfig = Pick<
    ConfigurationWithAccessToken,
    'serverEndpoint' | 'useContext' | 'accessToken' | 'customHeaders'
> & { debugEnable: boolean; needsEmailVerification: boolean }

/** What the client is chatting about: explicit repositories plus editor-inferred scope. */
export interface CodyClientScope {
    // When true, the repository inferred from the active editor is included.
    includeInferredRepository: boolean
    // When true, the file open in the active editor is included.
    includeInferredFile: boolean
    repositories: string[]
    editor: Editor
}

/** Partial scope override accepted when executing a recipe. */
export interface CodyClientScopePartial {
    repositories?: string[]
    editor?: Editor
}

/** Lifecycle events surfaced through CodyClientProps.onEvent. */
export type CodyClientEvent = 'submit' | 'initializedNewChat' | 'error'

/** The public surface of the chat client returned by useClient. */
export interface CodyClient {
    readonly transcript: Transcript | null
    readonly chatMessages: ChatMessage[]
    readonly messageInProgress: ChatMessage | null
    readonly isMessageInProgress: boolean
    readonly scope: CodyClientScope
    readonly config: CodyClientConfig
    setTranscript: (transcript: Transcript) => Promise<void>
    setScope: (scope: CodyClientScope) => void
    setConfig: (config: CodyClientConfig) => void
    submitMessage: (humanChatInput: string, scope?: CodyClientScope) => Promise<Transcript | null>
    editMessage: (
        humanChatInput: string,
        messageId?: string | undefined,
        scope?: CodyClientScope
    ) => Promise<Transcript | null>
    initializeNewChat: () => Transcript | null
    executeRecipe: (
        recipeId: RecipeID,
        options?: {
            prefilledOptions?: PrefilledOptions
            humanChatInput?: string
            scope?: {
                editor?: Editor
            }
        }
    ) => Promise<Transcript | null>
    setEditorScope: (editor: Editor) => void
    toggleIncludeInferredRepository: () => void
    toggleIncludeInferredFile: () => void
    abortMessageInProgress: () => void
    fetchRepositoryNames: (count: number) => Promise<string[]>
}

/** Inputs to the useClient hook. */
interface CodyClientProps {
    config: CodyClientConfig
    scope?: CodyClientScope
    initialTranscript?: Transcript | null
    onEvent?: (event: CodyClientEvent) => void
}
/**
 * React hook that wires together the Cody chat state machine: the transcript,
 * in-progress (streaming) message tracking, the context scope (repositories +
 * editor), the connection config, and recipe execution against the
 * Sourcegraph API clients.
 *
 * @returns A memoized CodyClient whose callbacks are stable across renders
 * for unchanged inputs.
 */
export const useClient = ({
    config: initialConfig,
    initialTranscript = null,
    scope: initialScope = {
        includeInferredRepository: true,
        includeInferredFile: true,
        repositories: [],
        editor: new NoopEditor(),
    },
    onEvent,
}: CodyClientProps): CodyClient => {
    const [transcript, setTranscriptState] = useState<Transcript | null>(initialTranscript)
    const [chatMessages, setChatMessagesState] = useState<ChatMessage[]>([])
    const [isMessageInProgress, setIsMessageInProgressState] = useState<boolean>(false)
    // Stored as state-holding-a-function so the abort callback for the current
    // streaming completion request can be swapped in and out.
    const [abortMessageInProgressInternal, setAbortMessageInProgress] = useState<() => void>(() => () => undefined)

    // The last chat message counts as "in progress" only while the assistant is streaming.
    const messageInProgress: ChatMessage | null = useMemo(() => {
        if (isMessageInProgress) {
            const lastMessage = chatMessages[chatMessages.length - 1]
            if (lastMessage?.speaker === 'assistant') {
                return lastMessage
            }
        }
        return null
    }, [chatMessages, isMessageInProgress])

    // Abort the current streaming request, then resync messages from the transcript.
    const abortMessageInProgress = useCallback(() => {
        abortMessageInProgressInternal()
        transcript
            ?.toChatPromise()
            .then(messages => {
                setChatMessagesState(messages)
                setIsMessageInProgressState(false)
            })
            .catch(error => console.error(`aborting in progress message failed: ${error}`))
    }, [abortMessageInProgressInternal, transcript, setChatMessagesState, setIsMessageInProgressState])

    const setTranscript = useCallback(async (transcript: Transcript): Promise<void> => {
        const messages = await transcript.toChatPromise()
        setIsMessageInProgressState(false)
        setTranscriptState(transcript)
        setChatMessagesState(messages)
    }, [])

    const [config, setConfig] = useState<CodyClientConfig>(initialConfig)

    // Recreate the API clients whenever the connection config changes.
    const { graphqlClient, chatClient, intentDetector } = useMemo(() => {
        const completionsClient = new SourcegraphBrowserCompletionsClient(config)
        const chatClient = new ChatClient(completionsClient)
        const graphqlClient = new SourcegraphGraphQLAPIClient(config)
        const intentDetector = new SourcegraphIntentDetectorClient(graphqlClient)
        return { graphqlClient, chatClient, intentDetector }
    }, [config])

    const [scope, setScopeState] = useState<CodyClientScope>(initialScope)
    const setScope = useCallback((scope: CodyClientScope) => setScopeState(scope), [setScopeState])

    // Switching to an editor in a different repository re-enables the
    // inferred-scope toggles; staying in the same repository preserves them.
    const setEditorScope = useCallback(
        (editor: Editor) => {
            const newRepoName = editor.getActiveTextEditor()?.repoName
            return setScopeState(scope => {
                const oldRepoName = scope.editor.getActiveTextEditor()?.repoName
                const resetInferredScope = newRepoName !== oldRepoName
                return {
                    ...scope,
                    editor,
                    includeInferredRepository: resetInferredScope ? true : scope.includeInferredRepository,
                    includeInferredFile: resetInferredScope ? true : scope.includeInferredFile,
                }
            })
        },
        [setScopeState]
    )

    // Toggling the repository off also turns the inferred file on/off with it
    // (the file toggle is set to the repository's *previous* value).
    const toggleIncludeInferredRepository = useCallback(
        () =>
            setScopeState(scope => ({
                ...scope,
                includeInferredRepository: !scope.includeInferredRepository,
                includeInferredFile: !scope.includeInferredRepository,
            })),
        [setScopeState]
    )

    const toggleIncludeInferredFile = useCallback(
        () => setScopeState(scope => ({ ...scope, includeInferredFile: !scope.includeInferredFile })),
        [setScopeState]
    )

    const activeEditor = useMemo(() => scope.editor.getActiveTextEditor(), [scope.editor])

    // Repositories in scope = explicitly selected ones plus (optionally) the
    // one inferred from the active editor.
    const codebases: string[] = useMemo(() => {
        const repos = [...scope.repositories]
        if (scope.includeInferredRepository && activeEditor?.repoName) {
            repos.push(activeEditor.repoName)
        }
        return repos
    }, [scope, activeEditor])

    // Resolve repository names to IDs; errors degrade to an empty list.
    const codebaseIds: Promise<string[]> = useMemo(async () => {
        if (!codebases.length) {
            return []
        }
        const results = await graphqlClient.getRepoIds(codebases)
        if (isError(results)) {
            console.error(
                `Cody could not access the repositories on your Sourcegraph instance. Details: ${results.message}`
            )
            return []
        }
        return results.map(({ id }) => id)
    }, [codebases, graphqlClient])

    const fetchRepositoryNames = useCallback(
        async (count: number): Promise<string[]> =>
            graphqlClient
                .getRepoNames(count)
                .then(repositories => (isErrorLike(repositories) ? [] : repositories))
                .catch(error => {
                    console.error(
                        `Cody could not fetch the list of repositories on your Sourcegraph instance. Details: ${error}`
                    )
                    return []
                }),
        [graphqlClient]
    )

    // Start a fresh chat: new transcript, reset scope toggles, keep the editor.
    // No-op (returns the current transcript) while email verification is pending.
    const initializeNewChat = useCallback((): Transcript | null => {
        if (config.needsEmailVerification) {
            return transcript
        }
        const newTranscript = new Transcript()
        setIsMessageInProgressState(false)
        setTranscriptState(newTranscript)
        setChatMessagesState(newTranscript.toChat())
        setScopeState(scope => ({
            includeInferredRepository: true,
            includeInferredFile: true,
            repositories: [],
            editor: scope.editor,
        }))
        onEvent?.('initializedNewChat')
        return newTranscript
    }, [onEvent, config.needsEmailVerification, transcript])

    /**
     * Runs a recipe: gathers context, appends the interaction to the
     * transcript, streams the assistant's completion, and resolves with the
     * updated transcript (or null when the recipe cannot run).
     */
    const executeRecipe = useCallback(
        async (
            recipeId: RecipeID,
            options?: {
                prefilledOptions?: PrefilledOptions
                humanChatInput?: string
                scope?: CodyClientScopePartial
            }
        ): Promise<Transcript | null> => {
            const recipe = getRecipe(recipeId)
            if (!recipe || transcript === null || isMessageInProgress || config.needsEmailVerification) {
                return Promise.resolve(null)
            }

            const repoNames = [...codebases]
            const repoIds = [...(await codebaseIds)]
            const editor = options?.scope?.editor || (scope.includeInferredFile ? scope.editor : new NoopEditor())
            const activeEditor = editor.getActiveTextEditor()
            if (activeEditor?.repoName && !repoNames.includes(activeEditor.repoName)) {
                // NOTE(naman): We allow users to disable automatic inferrence of current file & repo
                // using `includeInferredFile` and `includeInferredRepository` options. But for editor recipes
                // like "Explain code at high level", we need to pass the current repo & file context.
                // Here we are passing the current repo & file context based on `options.scope.editor`
                // if present.
                const additionalRepoId = await graphqlClient.getRepoId(activeEditor.repoName)
                if (isError(additionalRepoId)) {
                    console.error(
                        `Cody could not access the ${activeEditor.repoName} repository on your Sourcegraph instance. Details: ${additionalRepoId.message}`
                    )
                } else {
                    repoIds.push(additionalRepoId)
                    repoNames.push(activeEditor.repoName)
                }
            }

            const unifiedContextFetcherClient = new UnifiedContextFetcherClient(graphqlClient, repoIds)
            const codebaseContext = new CodebaseContext(
                config,
                undefined,
                null,
                null,
                null,
                unifiedContextFetcherClient
            )

            const { humanChatInput = '', prefilledOptions } = options ?? {}
            // TODO(naman): save scope with each interaction
            const interaction = await recipe.getInteraction(humanChatInput, {
                editor: prefilledOptions ? withPreselectedOptions(editor, prefilledOptions) : editor,
                intentDetector,
                codebaseContext,
                responseMultiplexer: new BotResponseMultiplexer(),
                firstInteraction: transcript.isEmpty,
            })
            if (!interaction) {
                return Promise.resolve(null)
            }

            transcript.addInteraction(interaction)
            setChatMessagesState(transcript.toChat())
            setIsMessageInProgressState(true)
            onEvent?.('submit')

            const { prompt, contextFiles } = await transcript.getPromptForLastInteraction(
                getMultiRepoPreamble(repoNames)
            )
            transcript.setUsedContextFilesForLastInteraction(contextFiles)

            const responsePrefix = interaction.getAssistantMessage().prefix ?? ''
            let rawText = ''

            // Stream the completion, updating the transcript on each chunk.
            // Resolves on completion, error, or when the user aborts.
            const updatedTranscript = await new Promise<Transcript | null>(resolve => {
                const abort = chatClient.chat(prompt, {
                    onChange(_rawText) {
                        rawText = _rawText
                        const text = reformatBotMessage(rawText, responsePrefix)
                        transcript.addAssistantResponse(text)
                        setChatMessagesState(transcript.toChat())
                    },
                    onComplete() {
                        const text = reformatBotMessage(rawText, responsePrefix)
                        transcript.addAssistantResponse(text)
                        transcript
                            .toChatPromise()
                            .then(messages => {
                                setChatMessagesState(messages)
                                setIsMessageInProgressState(false)
                            })
                            .catch(() => null)
                        resolve(transcript)
                    },
                    onError(error) {
                        // Display error message as assistant response
                        transcript.addErrorAsAssistantResponse(error)
                        console.error(`Completion request failed: ${error}`)
                        transcript
                            .toChatPromise()
                            .then(messages => {
                                setChatMessagesState(messages)
                                setIsMessageInProgressState(false)
                            })
                            .catch(() => null)
                        onEvent?.('error')
                        resolve(transcript)
                    },
                })
                // Make the in-flight request abortable from the outside.
                setAbortMessageInProgress(() => () => {
                    abort()
                    resolve(transcript)
                })
            })

            // Reset the abort hook once the request has settled.
            setAbortMessageInProgress(() => () => undefined)

            return updatedTranscript
        },
        [
            config,
            scope,
            codebases,
            codebaseIds,
            graphqlClient,
            transcript,
            intentDetector,
            chatClient,
            isMessageInProgress,
            onEvent,
            setAbortMessageInProgress,
        ]
    )

    const submitMessage = useCallback(
        async (humanChatInput: string, scope?: CodyClientScope): Promise<Transcript | null> =>
            executeRecipe('chat-question', { humanChatInput, scope }),
        [executeRecipe]
    )

    // TODO(naman): load message scope from the interaction
    const editMessage = useCallback(
        async (
            humanChatInput: string,
            messageId?: string | undefined,
            scope?: CodyClientScope
        ): Promise<Transcript | null> => {
            if (!transcript) {
                return transcript
            }
            // Rewind the transcript to just before the edited message, then resubmit.
            const timestamp = messageId || transcript.getLastInteraction()?.timestamp || new Date().toISOString()
            transcript.removeInteractionsSince(timestamp)
            setChatMessagesState(transcript.toChat())
            return submitMessage(humanChatInput, scope)
        },
        [transcript, submitMessage]
    )

    // Hide the still-streaming assistant message from the committed chat list;
    // it is exposed separately as messageInProgress.
    const returningChatMessages = useMemo(
        () => (messageInProgress ? chatMessages.slice(0, -1) : chatMessages),
        [chatMessages, messageInProgress]
    )

    return useMemo(
        () => ({
            transcript,
            chatMessages: returningChatMessages,
            isMessageInProgress,
            messageInProgress,
            setTranscript,
            scope,
            setScope,
            setEditorScope,
            config,
            setConfig,
            executeRecipe,
            submitMessage,
            initializeNewChat,
            editMessage,
            toggleIncludeInferredRepository,
            toggleIncludeInferredFile,
            abortMessageInProgress,
            fetchRepositoryNames,
        }),
        [
            transcript,
            returningChatMessages,
            isMessageInProgress,
            messageInProgress,
            setTranscript,
            scope,
            setScope,
            setEditorScope,
            config,
            setConfig,
            executeRecipe,
            submitMessage,
            initializeNewChat,
            editMessage,
            toggleIncludeInferredRepository,
            toggleIncludeInferredFile,
            abortMessageInProgress,
            fetchRepositoryNames,
        ]
    )
}

View File

@ -1,22 +0,0 @@
// Matches any trailing partial of the "Human:" stop sequence so it can be
// trimmed off the end of a streamed bot message.
const STOP_SEQUENCE_REGEXP = /(H|Hu|Hum|Huma|Human|Human:)$/

/**
 * Normalizes a (possibly partial) bot message: prepends the response prefix,
 * strips a trailing partial "Human:" stop sequence, and closes any dangling
 * markdown code fence.
 */
export function reformatBotMessage(text: string, prefix: string): string {
    const combined = prefix + text.trimEnd()
    const stopMatch = combined.match(STOP_SEQUENCE_REGEXP)
    const withoutStopSequence = stopMatch ? combined.slice(0, stopMatch.index) : combined
    // TODO: Detect if bot sent unformatted code without a markdown block.
    return fixOpenMarkdownCodeBlock(withoutStopSequence)
}

/** Appends a closing ``` when the text contains an odd number of code fences. */
function fixOpenMarkdownCodeBlock(text: string): string {
    const fenceCount = text.split('```').length - 1
    return fenceCount % 2 === 1 ? text + '\n```' : text
}

View File

@ -1,267 +0,0 @@
import { Configuration } from '../configuration'
import { EmbeddingsSearch } from '../embeddings'
import { FilenameContextFetcher, KeywordContextFetcher, ContextResult } from '../local-context'
import { isMarkdownFile, populateCodeContextTemplate, populateMarkdownContextTemplate } from '../prompt/templates'
import { Message } from '../sourcegraph-api'
import { EmbeddingsSearchResult } from '../sourcegraph-api/graphql/client'
import { UnifiedContextFetcher } from '../unified-context'
import { isError } from '../utils'
import { ContextMessage, ContextFile, getContextMessageWithResponse, ContextFileSource } from './messages'
/** Limits on how many code and text results a context search may return. */
export interface ContextSearchOptions {
    numCodeResults: number
    numTextResults: number
}
/**
 * Fetches context messages for a chat prompt from one of several backends
 * (embeddings, unified remote context, or local keyword/filename search),
 * selected by the `useContext` configuration value.
 */
export class CodebaseContext {
    // Holds the most recent embeddings/local-context error text, surfaced via
    // getEmbeddingSearchErrors(); cleared on the next successful search.
    private embeddingResultsError = ''

    constructor(
        private config: Pick<Configuration, 'useContext' | 'serverEndpoint'>,
        private codebase: string | undefined,
        private embeddings: EmbeddingsSearch | null,
        private keywords: KeywordContextFetcher | null,
        private filenames: FilenameContextFetcher | null,
        private unifiedContextFetcher?: UnifiedContextFetcher | null,
        private rerank?: (query: string, results: ContextResult[]) => Promise<ContextResult[]>
    ) {}

    public getCodebase(): string | undefined {
        return this.codebase
    }

    public onConfigurationChange(newConfig: typeof this.config): void {
        this.config = newConfig
    }

    private mergeContextResults(keywordResults: ContextResult[], filenameResults: ContextResult[]): ContextResult[] {
        // Just take the single most relevant filename suggestion for now. Otherwise, because our reranking relies solely
        // on filename, the filename results would dominate the keyword results.
        const merged = filenameResults.slice(-1).concat(keywordResults)
        // Deduplicate by file name; a later entry for the same file wins.
        const uniques = new Map<string, ContextResult>()
        for (const result of merged) {
            uniques.set(result.fileName, result)
        }
        return Array.from(uniques.values())
    }

    /**
     * Returns list of context messages for a given query, sorted in *reverse* order of importance (that is,
     * the most important context message appears *last*)
     */
    public async getContextMessages(query: string, options: ContextSearchOptions): Promise<ContextMessage[]> {
        switch (this.config.useContext) {
            case 'unified':
                return this.getUnifiedContextMessages(query, options)
            case 'keyword':
                return this.getLocalContextMessages(query, options)
            case 'none':
                return []
            default:
                // Remaining modes ('embeddings', 'blended'): prefer embeddings
                // when a client is available, otherwise fall back to local search.
                return this.embeddings
                    ? this.getEmbeddingsContextMessages(query, options)
                    : this.getLocalContextMessages(query, options)
        }
    }

    public checkEmbeddingsConnection(): boolean {
        return !!this.embeddings
    }

    public getEmbeddingSearchErrors(): string {
        return this.embeddingResultsError.trim()
    }

    /** Returns raw search results (embeddings when available, else keyword) plus the endpoint queried. */
    public async getSearchResults(
        query: string,
        options: ContextSearchOptions
    ): Promise<{ results: ContextResult[] | EmbeddingsSearchResult[]; endpoint: string }> {
        if (this.embeddings && this.config.useContext !== 'keyword') {
            return {
                results: await this.getEmbeddingSearchResults(query, options),
                endpoint: this.config.serverEndpoint,
            }
        }
        return {
            results:
                (await this.keywords?.getSearchContext(query, options.numCodeResults + options.numTextResults)) || [],
            endpoint: this.config.serverEndpoint,
        }
    }

    // We split the context into multiple messages instead of joining them into a single giant message.
    // We can gradually eliminate them from the prompt, instead of losing them all at once with a single large messeage
    // when we run out of tokens.
    private async getEmbeddingsContextMessages(
        query: string,
        options: ContextSearchOptions
    ): Promise<ContextMessage[]> {
        const combinedResults = await this.getEmbeddingSearchResults(query, options)
        return groupResultsByFile(combinedResults)
            .reverse() // Reverse results so that they appear in ascending order of importance (least -> most).
            .flatMap(groupedResults => this.makeContextMessageWithResponse(groupedResults))
            .map(message => contextMessageWithSource(message, 'embeddings'))
    }

    private async getEmbeddingSearchResults(
        query: string,
        options: ContextSearchOptions
    ): Promise<EmbeddingsSearchResult[]> {
        if (!this.embeddings) {
            return []
        }
        const embeddingsSearchResults = await this.embeddings.search(
            query,
            options.numCodeResults,
            options.numTextResults
        )
        // Errors degrade to an empty result set but are recorded for display.
        if (isError(embeddingsSearchResults)) {
            console.error('Error retrieving embeddings:', embeddingsSearchResults)
            this.embeddingResultsError = `Error retrieving embeddings: ${embeddingsSearchResults}`
            return []
        }
        this.embeddingResultsError = ''
        return embeddingsSearchResults.codeResults.concat(embeddingsSearchResults.textResults)
    }

    /** Builds human/assistant message pairs for one file's grouped results, using the markdown or code template. */
    private makeContextMessageWithResponse(groupedResults: { file: ContextFile; results: string[] }): ContextMessage[] {
        const contextTemplateFn = isMarkdownFile(groupedResults.file.fileName)
            ? populateMarkdownContextTemplate
            : populateCodeContextTemplate
        return groupedResults.results.flatMap<Message>(text =>
            getContextMessageWithResponse(
                contextTemplateFn(text, groupedResults.file.fileName, groupedResults.file.repoName),
                groupedResults.file
            )
        )
    }

    private async getUnifiedContextMessages(query: string, options: ContextSearchOptions): Promise<ContextMessage[]> {
        if (!this.unifiedContextFetcher) {
            return []
        }
        const results = await this.unifiedContextFetcher.getContext(
            query,
            options.numCodeResults,
            options.numTextResults
        )
        if (isError(results)) {
            console.error('Error retrieving context:', results)
            return []
        }
        return results.flatMap(result => {
            if (result?.type === 'FileChunkContext') {
                const { content, filePath, repoName, revision } = result
                const messageText = isMarkdownFile(filePath)
                    ? populateMarkdownContextTemplate(content, filePath, repoName)
                    : populateCodeContextTemplate(content, filePath, repoName)
                return getContextMessageWithResponse(messageText, { fileName: filePath, repoName, revision })
            }
            // Unknown result types are dropped.
            return []
        })
    }

    /** Keyword + filename search, merged, optionally reranked, converted to messages. */
    private async getLocalContextMessages(query: string, options: ContextSearchOptions): Promise<ContextMessage[]> {
        try {
            const keywordResultsPromise = this.getKeywordSearchResults(query, options)
            const filenameResultsPromise = this.getFilenameSearchResults(query, options)
            const [keywordResults, filenameResults] = await Promise.all([keywordResultsPromise, filenameResultsPromise])
            const combinedResults = this.mergeContextResults(keywordResults, filenameResults)
            const rerankedResults = await (this.rerank ? this.rerank(query, combinedResults) : combinedResults)
            const messages = resultsToMessages(rerankedResults)
            this.embeddingResultsError = ''
            return messages
        } catch (error) {
            console.error('Error retrieving local context:', error)
            this.embeddingResultsError = `Error retrieving local context: ${error}`
            return []
        }
    }

    private async getKeywordSearchResults(query: string, options: ContextSearchOptions): Promise<ContextResult[]> {
        if (!this.keywords) {
            return []
        }
        const results = await this.keywords.getContext(query, options.numCodeResults + options.numTextResults)
        return results
    }

    private async getFilenameSearchResults(query: string, options: ContextSearchOptions): Promise<ContextResult[]> {
        if (!this.filenames) {
            return []
        }
        const results = await this.filenames.getContext(query, options.numCodeResults + options.numTextResults)
        return results
    }
}
/**
 * Groups embeddings results by file, preserving the order in which each file
 * first appeared, and merges each file's consecutive line-range snippets.
 */
function groupResultsByFile(results: EmbeddingsSearchResult[]): { file: ContextFile; results: string[] }[] {
    const fileOrder: ContextFile[] = []
    const resultsByFile = new Map<string, EmbeddingsSearchResult[]>()
    for (const result of results) {
        const existing = resultsByFile.get(result.fileName)
        if (existing === undefined) {
            // First time we see this file: record its position in the ordering.
            fileOrder.push({ fileName: result.fileName, repoName: result.repoName, revision: result.revision })
            resultsByFile.set(result.fileName, [result])
        } else {
            existing.push(result)
        }
    }
    return fileOrder.map(file => ({
        file,
        results: mergeConsecutiveResults(resultsByFile.get(file.fileName)!),
    }))
}
/**
 * Merges the content of results whose line ranges are consecutive
 * (a result starting exactly where the previous one ended), after ordering
 * by ascending start line.
 *
 * Fixes over the previous version: sorts a copy instead of mutating the
 * caller's array, and returns [] for empty input instead of throwing.
 *
 * @param results - Embeddings search results, typically for a single file.
 * @returns Merged content strings in ascending start-line order.
 */
function mergeConsecutiveResults(results: EmbeddingsSearchResult[]): string[] {
    if (results.length === 0) {
        return []
    }
    // Copy before sorting so the input array is left untouched.
    const sortedResults = [...results].sort((a, b) => a.startLine - b.startLine)
    const mergedResults = [sortedResults[0].content]
    for (let i = 1; i < sortedResults.length; i++) {
        const result = sortedResults[i]
        const previousResult = sortedResults[i - 1]
        if (result.startLine === previousResult.endLine) {
            // Adjacent ranges: append to the previous merged chunk.
            mergedResults[mergedResults.length - 1] = mergedResults[mergedResults.length - 1] + result.content
        } else {
            mergedResults.push(result.content)
        }
    }
    return mergedResults
}
/** Converts local context search results into human/assistant context message pairs. */
function resultsToMessages(results: ContextResult[]): ContextMessage[] {
    const messages: ContextMessage[] = []
    for (const { content, fileName, repoName, revision } of results) {
        const messageText = populateCodeContextTemplate(content, fileName, repoName)
        messages.push(...getContextMessageWithResponse(messageText, { fileName, repoName, revision }))
    }
    return messages
}
/**
 * Tags the message's file (when present) with the context source used for
 * telemetry. Mutates and returns the same message object.
 */
function contextMessageWithSource(message: ContextMessage, source: ContextFileSource): ContextMessage {
    const { file } = message
    if (file) {
        file.source = source
    }
    return message
}

View File

@ -1,35 +0,0 @@
import { Message } from '../sourcegraph-api'
/**
 * Tracked for telemetry purposes: which context source provided this context
 * file.
 *
 * For now we just track 'embeddings' since that is the main driver for
 * understanding if it is being useful.
 */
export type ContextFileSource = 'embeddings'

/** A file that contributed context to the prompt. */
export interface ContextFile {
    fileName: string
    // repoName/revision are optional — only known for indexed repository results.
    repoName?: string
    revision?: string
    // Which context backend produced this file (telemetry only).
    source?: ContextFileSource
}

/** A prompt message that optionally carries the context file it was built from. */
export interface ContextMessage extends Message {
    file?: ContextFile
}

// NOTE(review): appears to be a legacy message shape that stored only the file
// name — confirm whether it is still read anywhere before removing.
export interface OldContextMessage extends Message {
    fileName?: string
}
/**
 * Wraps context text into a two-message exchange: the human supplies the
 * context and the assistant acknowledges it (default acknowledgement: 'Ok.').
 */
export function getContextMessageWithResponse(
    text: string,
    file: ContextFile,
    response: string = 'Ok.'
): ContextMessage[] {
    const humanMessage: ContextMessage = { speaker: 'human', text, file }
    const assistantMessage: ContextMessage = { speaker: 'assistant', text: response }
    return [humanMessage, assistantMessage]
}

View File

@ -1,93 +0,0 @@
import { parseStringPromise } from 'xml2js'
import { ChatClient } from '../chat/chat'
import { ContextResult } from '../local-context'
/** Reorders context results by relevance to the user's query. */
export interface Reranker {
    rerank(userQuery: string, results: ContextResult[]): Promise<ContextResult[]>
}

/** Test double that delegates reranking to an injected function. */
export class MockReranker implements Reranker {
    constructor(private rerank_: (userQuery: string, results: ContextResult[]) => Promise<ContextResult[]>) {}
    public rerank(userQuery: string, results: ContextResult[]): Promise<ContextResult[]> {
        return this.rerank_(userQuery, results)
    }
}
/**
 * A reranker class that uses a LLM to boost high-relevance results.
 *
 * Asks the chat model which files are most relevant to the query, then moves
 * those files ahead of the rest while preserving relative order elsewhere.
 */
export class LLMReranker implements Reranker {
    constructor(private chatClient: ChatClient) {}

    public async rerank(userQuery: string, results: ContextResult[]): Promise<ContextResult[]> {
        // Reverse the results so the most important appears first
        results = [...results].reverse()

        // Stream the model's full response into a single string.
        let out = await new Promise<string>((resolve, reject) => {
            let responseText = ''
            this.chatClient.chat(
                [
                    {
                        speaker: 'human',
                        text: `I am a professional computer programmer and need help deciding which of these files to read first to answer my question. My question is <userQuestion>${userQuery}</userQuestion>. Select the files from the following list that I should read to answer my question, ranked by most relevant first. Format the result as XML, like this: <list><item><filename>filename 1</filename><explanation>this is why I chose this item</explanation></item><item><filename>filename 2</filename><explanation>why I chose this item</explanation></item></list>\n${results
                            .map(r => r.fileName)
                            .join('\n')}`,
                    },
                ],
                {
                    onChange: (text: string) => {
                        responseText = text
                    },
                    onComplete: () => {
                        resolve(responseText)
                    },
                    onError: (message: string, statusCode?: number) => {
                        reject(new Error(`Status code ${statusCode}: ${message}`))
                    },
                },
                {
                    temperature: 0,
                    fast: true,
                }
            )
        })

        // Trim any chatter the model emitted around the XML <list> element.
        if (out.indexOf('<list>') > 0) {
            out = out.slice(out.indexOf('<list>'))
        }
        if (out.indexOf('</list>') !== out.length - '</list>'.length) {
            out = out.slice(0, out.indexOf('</list>') + '</list>'.length)
        }
        const boostedFilenames = await parseXml(out)

        // Boosted files first (in the model's order), then the remaining
        // results in their original order. Filenames the model invented
        // (not present in the results) are ignored.
        const resultsMap = Object.fromEntries(results.map(r => [r.fileName, r]))
        const boostedNames = new Set<string>()
        const rerankedResults = []
        for (const boostedFilename of boostedFilenames) {
            const boostedResult = resultsMap[boostedFilename]
            if (!boostedResult) {
                continue
            }
            rerankedResults.push(boostedResult)
            boostedNames.add(boostedFilename)
        }
        for (const result of results) {
            if (!boostedNames.has(result.fileName)) {
                rerankedResults.push(result)
            }
        }
        // Restore the "most important last" ordering used by callers.
        rerankedResults.reverse()
        return rerankedResults
    }
}
/**
 * Parses the LLM's `<list>` XML reply and returns the filenames in order.
 * Each `<item>` is expected to carry both a <filename> and an <explanation>.
 */
async function parseXml(xml: string): Promise<string[]> {
    const parsed = await parseStringPromise(xml)
    const filenames: string[] = []
    for (const item of parsed.list.item) {
        // Materialize both fields (mirrors the expected item shape; throws on malformed items).
        const entry: { filename: string; explanation: string } = {
            filename: item.filename[0],
            explanation: item.explanation[0],
        }
        filenames.push(entry.filename)
    }
    return filenames
}

View File

@ -1,27 +0,0 @@
/** Strategy used to gather context for a request. */
export type ConfigurationUseContext = 'embeddings' | 'keyword' | 'none' | 'blended' | 'unified'
// Should we share VS Code specific config via cody-shared?
/**
 * User-facing configuration values for Cody.
 * NOTE(review): field semantics below are inferred from names — confirm
 * against the extension's settings schema.
 */
export interface Configuration {
    // URL of the Sourcegraph instance to send requests to.
    serverEndpoint: string
    // Repository to use as the codebase context, if any.
    codebase?: string
    debugEnable: boolean
    // Filter applied to debug output; null presumably means "no filtering" — verify.
    debugFilter: RegExp | null
    debugVerbose: boolean
    // Context-gathering strategy; see ConfigurationUseContext.
    useContext: ConfigurationUseContext
    // Extra HTTP headers to include with API requests.
    customHeaders: Record<string, string>
    autocomplete: boolean
    experimentalChatPredictions: boolean
    inlineChat: boolean
    experimentalGuardrails: boolean
    experimentalNonStop: boolean
    // Advanced autocomplete settings; null string fields fall back to defaults — TODO confirm.
    autocompleteAdvancedProvider: 'anthropic' | 'unstable-codegen' | 'unstable-huggingface'
    autocompleteAdvancedServerEndpoint: string | null
    autocompleteAdvancedAccessToken: string | null
    autocompleteAdvancedCache: boolean
    autocompleteAdvancedEmbeddings: boolean
}
/** {@link Configuration} plus the user's access token. */
export interface ConfigurationWithAccessToken extends Configuration {
    /** The access token, which is stored in the secret storage (not configuration). */
    accessToken: string | null
}

View File

@ -1,121 +0,0 @@
/** A snapshot of the editor's currently active text document. */
export interface ActiveTextEditor {
    // Full text of the document.
    content: string
    filePath: string
    repoName?: string
    revision?: string
    // Current selection range, if any.
    selection?: ActiveTextEditorSelectionRange
}
/** A start/end range in a document, in line/character coordinates. */
export interface ActiveTextEditorSelectionRange {
    start: {
        line: number
        character: number
    }
    end: {
        line: number
        character: number
    }
}
/**
 * The selected text in the active editor, together with the text immediately
 * before and after the selection.
 */
export interface ActiveTextEditorSelection {
    fileName: string
    repoName?: string
    revision?: string
    // Text before the selection.
    precedingText: string
    selectedText: string
    // Text after the selection.
    followingText: string
}
/** The content currently visible in the active editor's viewport. */
export interface ActiveTextEditorVisibleContent {
    content: string
    fileName: string
    repoName?: string
    revision?: string
}
/** Minimal interface to the VS Code inline-chat controller. */
interface VsCodeInlineController {
    selection: ActiveTextEditorSelection | null
    // Reports an error state to the inline controller.
    error(): Promise<void>
}
/** Minimal interface to the VS Code fixup (edit) controller. */
interface VsCodeFixupController {
    // Returns the instruction and surrounding text for a fixup task,
    // or undefined if the task is unknown.
    getTaskRecipeData(taskId: string): Promise<
        | {
              instruction: string
              fileName: string
              precedingText: string
              selectedText: string
              followingText: string
          }
        | undefined
    >
}
/** View controllers the host editor can expose alongside {@link Editor}. */
export interface ActiveTextEditorViewControllers {
    inline: VsCodeInlineController
    fixups: VsCodeFixupController
}
/** Abstraction over the host editor (e.g. VS Code) used by shared code. */
export interface Editor {
    controllers?: ActiveTextEditorViewControllers
    /** Root path of the current workspace, or null if none is open. */
    getWorkspaceRootPath(): string | null
    getActiveTextEditor(): ActiveTextEditor | null
    getActiveTextEditorSelection(): ActiveTextEditorSelection | null
    /**
     * Gets the active text editor's selection, or the entire file if the selected range is empty.
     */
    getActiveTextEditorSelectionOrEntireFile(): ActiveTextEditorSelection | null
    getActiveTextEditorVisibleContent(): ActiveTextEditorVisibleContent | null
    /** Replaces the given selected text in fileName with the replacement text. */
    replaceSelection(fileName: string, selectedText: string, replacement: string): Promise<void>
    /** Shows a picker; resolves to the chosen label, or undefined if dismissed. */
    showQuickPick(labels: string[]): Promise<string | undefined>
    showWarningMessage(message: string): Promise<void>
    /** Prompts for free-form input; resolves to undefined if dismissed. */
    showInputBox(prompt?: string): Promise<string | undefined>
    // TODO: When Non-Stop Fixup doesn't depend directly on the chat view,
    // move the recipe to client/cody and remove this entrypoint.
    didReceiveFixupText(id: string, text: string, state: 'streaming' | 'complete'): Promise<void>
}
/**
 * An {@link Editor} that does nothing: every query returns null/undefined and
 * every action resolves immediately. Useful as a default where no real editor
 * is wired up.
 */
export class NoopEditor implements Editor {
    public getWorkspaceRootPath(): string | null {
        return null
    }

    public getActiveTextEditor(): ActiveTextEditor | null {
        return null
    }

    public getActiveTextEditorSelection(): ActiveTextEditorSelection | null {
        return null
    }

    public getActiveTextEditorSelectionOrEntireFile(): ActiveTextEditorSelection | null {
        return null
    }

    public getActiveTextEditorVisibleContent(): ActiveTextEditorVisibleContent | null {
        return null
    }

    public replaceSelection(_fileName: string, _selectedText: string, _replacement: string): Promise<void> {
        return Promise.resolve()
    }

    public showQuickPick(_labels: string[]): Promise<string | undefined> {
        return Promise.resolve(undefined)
    }

    public showWarningMessage(_message: string): Promise<void> {
        return Promise.resolve()
    }

    public showInputBox(_prompt?: string): Promise<string | undefined> {
        return Promise.resolve(undefined)
    }

    // Consistency fix: prefix unused parameters with '_' like the sibling
    // methods (required under noUnusedParameters; call sites are positional).
    public didReceiveFixupText(_id: string, _text: string, _state: 'streaming' | 'complete'): Promise<void> {
        return Promise.resolve()
    }
}

View File

@ -1,23 +0,0 @@
import { Editor } from '.'
/** Pairs of (quick-pick option labels, the option to auto-select for that list). */
export type PrefilledOptions = [string[], string][]
/**
 * Wraps an {@link Editor} so that showQuickPick automatically answers known
 * option lists with a preselected choice instead of prompting. Unknown option
 * lists fall through to the wrapped editor. All other members are untouched.
 *
 * @param editor the editor to wrap
 * @param preselectedOptions pairs of (option labels, answer to auto-select)
 */
export function withPreselectedOptions(editor: Editor, preselectedOptions: PrefilledOptions): Editor {
    const proxy = new Proxy<Editor>(editor, {
        get(target: Editor, property: string, receiver: unknown) {
            if (property === 'showQuickPick') {
                return async function showQuickPick(options: string[]): Promise<string | undefined> {
                    for (const [candidateOptions, selectedOption] of preselectedOptions) {
                        // Bug fix: compare array contents, not references — the
                        // old identity check only matched when the caller passed
                        // the exact same array instance.
                        if (
                            candidateOptions.length === options.length &&
                            candidateOptions.every((label, i) => label === options[i])
                        ) {
                            return selectedOption
                        }
                    }
                    return target.showQuickPick(options)
                }
            }
            return Reflect.get(target, property, receiver)
        },
    })
    return proxy
}

View File

@ -1,19 +0,0 @@
import { SourcegraphGraphQLAPIClient, EmbeddingsSearchResults } from '../sourcegraph-api/graphql'
import { EmbeddingsSearch } from '.'
/**
 * Runs embeddings searches for a single repository against the Sourcegraph
 * GraphQL API, choosing between the web and legacy endpoints at construction.
 */
export class SourcegraphEmbeddingsSearchClient implements EmbeddingsSearch {
    constructor(
        private readonly graphqlClient: SourcegraphGraphQLAPIClient,
        private readonly repoId: string,
        private readonly useWebEndpoint: boolean = false
    ) {}

    public async search(
        query: string,
        codeResultsCount: number,
        textResultsCount: number
    ): Promise<EmbeddingsSearchResults | Error> {
        return this.useWebEndpoint
            ? this.graphqlClient.searchEmbeddings([this.repoId], query, codeResultsCount, textResultsCount)
            : this.graphqlClient.legacySearchEmbeddings(this.repoId, query, codeResultsCount, textResultsCount)
    }
}

View File

@ -1,5 +0,0 @@
import { EmbeddingsSearchResults } from '../sourcegraph-api/graphql/client'
/** Searches an embeddings index, returning code and text results (or an Error). */
export interface EmbeddingsSearch {
    search(query: string, codeResultsCount: number, textResultsCount: number): Promise<EmbeddingsSearchResults | Error>
}

Some files were not shown because too many files have changed in this diff Show More