Cody: basic completions (#49669)

This commit is contained in:
Beyang Liu 2023-03-24 22:31:58 -07:00 committed by GitHub
parent 4bb8dc2d84
commit 9217a6fba6
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
8 changed files with 474 additions and 7 deletions

View File

@ -107,6 +107,10 @@
{
"command": "cody.delete-access-token",
"title": "Cody: Delete Access Token"
},
{
"command": "cody.experimental.suggest",
"title": "Ask Cody: View Suggestions"
}
],
"keybindings": [
@ -218,8 +222,18 @@
"blended"
],
"default": "embeddings"
},
"cody.experimental.suggestions": {
"type": "boolean",
"default": false
},
"cody.experimental.keys.openai": {
"type": "string"
}
}
}
},
"dependencies": {
"openai": "^3.2.1"
}
}

View File

@ -1,6 +1,9 @@
import * as openai from 'openai'
import * as vscode from 'vscode'
import { ChatViewProvider } from '../chat/ChatViewProvider'
import { CodyCompletionItemProvider } from '../completions'
import { CompletionsDocumentProvider } from '../completions/docprovider'
import { getConfiguration } from '../configuration'
import { ExtensionApi } from '../extension-api'
@ -96,6 +99,25 @@ export const CommandsProvider = async (context: vscode.ExtensionContext): Promis
)
)
if (config.experimentalSuggest && config.openaiKey) {
const configuration = new openai.Configuration({
apiKey: config.openaiKey,
})
const openaiApi = new openai.OpenAIApi(configuration)
const docprovider = new CompletionsDocumentProvider()
vscode.workspace.registerTextDocumentContentProvider('cody', docprovider)
const completionsProvider = new CodyCompletionItemProvider(openaiApi, docprovider)
context.subscriptions.push(
vscode.commands.registerCommand('cody.experimental.suggest', async () => {
await completionsProvider.fetchAndShowCompletions()
})
)
context.subscriptions.push(
vscode.languages.registerInlineCompletionItemProvider({ scheme: 'file' }, completionsProvider)
)
}
// Watch all relevant configuration and secrets for changes.
context.subscriptions.push(
vscode.workspace.onDidChangeConfiguration(async event => {

View File

@ -0,0 +1,104 @@
import * as openai from 'openai'
import * as vscode from 'vscode'
// FIXME: When OpenAI's logit_bias uses a more precise type than 'object',
// specify JSON-able objects as { [prop: string]: JSONSerializable | undefined }
export type JSONSerializable = null | string | number | boolean | object | JSONSerializable[]

// Debug metadata recorded alongside a completion request, shown only when
// the 'cody.debug' setting is enabled (see CompletionsDocumentProvider.isDebug).
interface Meta {
    elapsedMillis: number // wall-clock time of the request (currently always 0 at the call site)
    prompt: string // the prefix text sent to the model
    suffix: string // the document text after the cursor
    llmOptions: JSONSerializable // extra request options, for reproducibility
}

// One batch of completions for a document position: the language (used for the
// markdown fence), the text preceding the cursor, and the raw OpenAI response.
export interface CompletionGroup {
    lang: string
    prefixText: string
    completions: openai.CreateChatCompletionResponse
    meta?: Meta
}
/**
 * Virtual-document provider backing the 'cody:' URI scheme. Accumulates
 * completion batches per URI and renders them as a markdown document with
 * fenced code blocks, one header per completion choice.
 */
export class CompletionsDocumentProvider implements vscode.TextDocumentContentProvider {
    private completionsByUri: { [uri: string]: CompletionGroup[] } = {}

    /** True when the 'cody.debug' setting is enabled; gates prompt/suffix dumps. */
    private isDebug(): boolean {
        return vscode.workspace.getConfiguration().get<boolean>('cody.debug') === true
    }

    /** Signal VS Code that the rendered document for `uri` must be re-read. */
    private fireDocumentChanged(uri: vscode.Uri): void {
        this.onDidChangeEmitter.fire(uri)
    }

    /** Drop all stored completions for `uri` and refresh the document. */
    public clearCompletions(uri: vscode.Uri): void {
        delete this.completionsByUri[uri.toString()]
        this.fireDocumentChanged(uri)
    }

    /**
     * Append a completion batch for `uri` and refresh the rendered document.
     * @param debug optional metadata shown only in debug mode
     */
    public addCompletions(
        uri: vscode.Uri,
        lang: string,
        prefixText: string,
        completions: openai.CreateChatCompletionResponse,
        debug?: Meta
    ): void {
        const key = uri.toString()
        if (!this.completionsByUri[key]) {
            this.completionsByUri[key] = []
        }
        this.completionsByUri[key].push({
            lang,
            prefixText,
            completions,
            meta: debug,
        })
        this.fireDocumentChanged(uri)
    }

    public onDidChangeEmitter = new vscode.EventEmitter<vscode.Uri>()
    public onDidChange = this.onDidChangeEmitter.event

    /** Render all completion groups for `uri` as markdown, or a placeholder while loading. */
    public provideTextDocumentContent(uri: vscode.Uri): string {
        const groups = this.completionsByUri[uri.toString()]
        if (!groups) {
            return 'Loading...'
        }
        const renderGroup = ({ completions, lang, prefixText, meta }: CompletionGroup): string =>
            completions.choices
                .map(({ message, finish_reason }, index) => {
                    if (!message?.content) {
                        return undefined
                    }
                    // Fenced block: the original prefix followed by the model's continuation.
                    let completionText = `\`\`\`${lang}\n${prefixText}${message.content}\n\`\`\``
                    if (this.isDebug() && meta) {
                        // In debug mode, sandwich the completion between the raw prompt and suffix.
                        completionText =
                            `\`\`\`\n${meta.prompt}\n\`\`\`` +
                            '\n' +
                            completionText +
                            '\n' +
                            `\`\`\`\n${meta.suffix}\n\`\`\``
                    }
                    const headerComponents = [`${index + 1} / ${completions.choices.length}`]
                    if (finish_reason) {
                        headerComponents.push(`finish_reason:${finish_reason}`)
                    }
                    return headerize(headerComponents.join(', '), 80) + '\n' + completionText
                })
                .filter(t => t)
                .join('\n\n')
        return groups.map(renderGroup).join('\n\n')
    }
}
/**
 * Render `label` as a markdown-comment section header, padded with '='
 * out to `width` columns (no padding if the label is already too long).
 */
function headerize(label: string, width: number): string {
    const prefix = '# ======= '
    const padding = Math.max(width - label.length - prefix.length - 1, 0)
    return `${prefix}${label} ${'='.repeat(padding)}`
}

View File

@ -0,0 +1,303 @@
import { OpenAIApi } from 'openai'
import * as vscode from 'vscode'
import { CompletionsDocumentProvider } from './docprovider'
/**
 * Inline completion provider backed by the OpenAI chat API.
 * Also powers the "Ask Cody: View Suggestions" command, which renders a batch
 * of completions into a virtual 'cody:' document via CompletionsDocumentProvider.
 */
export class CodyCompletionItemProvider implements vscode.InlineCompletionItemProvider {
    // Byte budgets for the document text before/after the cursor, derived in the constructor.
    private maxPrefixTokens: number
    private maxSuffixTokens: number
    constructor(
        private openai: OpenAIApi,
        private documentProvider: CompletionsDocumentProvider,
        private model = 'gpt-3.5-turbo',
        private contextWindowTokens = 2048, // 8001
        private bytesPerToken = 4, // rough bytes-per-token heuristic used for truncation
        private responseTokens = 200,
        private prefixPercentage = 0.9, // share of the prompt budget given to the prefix
        private suffixPercentage = 0.1 // share of the prompt budget given to the suffix
    ) {
        // Split the non-response portion of the context window between prefix and suffix.
        const promptTokens = this.contextWindowTokens - this.responseTokens
        this.maxPrefixTokens = Math.floor(promptTokens * this.prefixPercentage)
        this.maxSuffixTokens = Math.floor(promptTokens * this.suffixPercentage)
    }

    async provideInlineCompletionItems(
        document: vscode.TextDocument,
        position: vscode.Position,
        context: vscode.InlineCompletionContext,
        token: vscode.CancellationToken
    ): Promise<vscode.InlineCompletionItem[]> {
        // Thin wrapper: surface any failure to the user and degrade to no completions.
        try {
            return this.provideInlineCompletionItemsInner(document, position, context, token)
        } catch (error) {
            // NOTE(review): `error` is not necessarily a string here; showErrorMessage
            // expects one — consider `String(error)`.
            vscode.window.showErrorMessage(error)
            return []
        }
    }

    // Convert a token budget to an approximate byte budget.
    private tokToByte(toks: number): number {
        return Math.floor(toks * this.bytesPerToken)
    }

    private async provideInlineCompletionItemsInner(
        document: vscode.TextDocument,
        position: vscode.Position,
        context: vscode.InlineCompletionContext,
        token: vscode.CancellationToken
    ): Promise<vscode.InlineCompletionItem[]> {
        // Require manual invocation
        if (context.triggerKind === vscode.InlineCompletionTriggerKind.Automatic) {
            return []
        }
        const docContext = getCurrentDocContext(
            document,
            position,
            this.tokToByte(this.maxPrefixTokens),
            this.tokToByte(this.maxSuffixTokens)
        )
        if (!docContext) {
            return []
        }
        const { prefix, prevLine: precedingLine } = docContext
        let waitMs: number
        let completionPrefix = '' // text to require as the first part of the completion
        if (precedingLine.trim() === '') {
            // Cursor on a blank/whitespace-only line: medium debounce, allow multiple lines
            waitMs = 1000
        } else if (context.triggerKind === vscode.InlineCompletionTriggerKind.Invoke || precedingLine.endsWith('.')) {
            // Explicit invocation or right after a '.': short debounce
            waitMs = 100
        } else {
            // Mid-line: long debounce, and force the completion onto a new line
            completionPrefix = '\n'
            waitMs = 2000
            // TODO(beyang): handle this as a special case, try 2 completions, one with newline inserted, one without
        }
        // Allow VS Code's cancellation token to abort the in-flight HTTP request.
        const aborter = new AbortController()
        token.onCancellationRequested(() => aborter.abort())
        const waiter = new Promise<void>(resolve => setTimeout(() => resolve(), waitMs))
        // The request is started immediately; the debounce delay below runs concurrently
        // with it rather than before it.
        const completionsPromise = this.openai.createChatCompletion(
            {
                model: this.model,
                messages: [
                    {
                        role: 'system',
                        content: 'Complete whatever code you obtain from the user through the end of the line.',
                    },
                    {
                        role: 'user',
                        content: prefix + completionPrefix,
                    },
                ],
                max_tokens: Math.min(this.contextWindowTokens - this.maxPrefixTokens, this.responseTokens),
                n: 1,
            },
            {
                signal: aborter.signal,
            }
        )
        await waiter
        let completions
        try {
            completions = await completionsPromise
        } catch (error) {
            throw new Error(`error fetching completions from OpenAI: ${error}`)
        }
        if (token.isCancellationRequested) {
            return []
        }
        if (completions.data.choices.length === 0) {
            throw new Error('no completions')
        }
        // Re-prepend any forced prefix (e.g. the inserted newline) so the inserted
        // text matches what the model was asked to continue.
        const inlineCompletions: vscode.InlineCompletionItem[] = []
        for (const choice of completions.data.choices) {
            if (!choice.message?.content) {
                continue
            }
            inlineCompletions.push(new vscode.InlineCompletionItem(completionPrefix + choice.message.content))
        }
        return inlineCompletions
    }

    /**
     * Command handler for 'cody.experimental.suggest': fetch several completions
     * for the active editor's cursor position and render them into the virtual
     * 'cody:Completions.md' document in a side column.
     */
    async fetchAndShowCompletions(): Promise<void> {
        const currentEditor = vscode.window.activeTextEditor
        // Skip when no editor is active or when invoked from the completions view itself.
        if (!currentEditor || currentEditor?.document.uri.scheme === 'cody') {
            return
        }
        const filename = currentEditor.document.fileName
        const ext = filename.split('.').pop() || '' // used as the markdown fence language
        const completionsUri = vscode.Uri.parse('cody:Completions.md')
        this.documentProvider.clearCompletions(completionsUri)
        // Open the results document eagerly so the user sees "Loading..." while we fetch.
        // NOTE(review): floating promise — rejections here are unhandled.
        const doc = vscode.workspace.openTextDocument(completionsUri)
        doc.then(doc => {
            vscode.window.showTextDocument(doc, {
                preview: false,
                viewColumn: 2,
            })
        })
        const docContext = getCurrentDocContext(
            currentEditor.document,
            currentEditor.selection.start,
            this.tokToByte(this.maxPrefixTokens),
            this.tokToByte(this.maxSuffixTokens)
        )
        if (docContext === null) {
            console.error('not showing completions, no currently open doc')
            return
        }
        const { prefix, suffix, prevLine, prevNonEmptyLine } = docContext
        try {
            const completion = await this.openai.createChatCompletion({
                model: this.model,
                messages: [
                    {
                        role: 'system',
                        content:
                            'Complete whatever code you obtain from the user up to the end of the function or block scope.',
                    },
                    {
                        role: 'user',
                        content: prefix,
                    },
                ],
                max_tokens: Math.min(this.contextWindowTokens - this.maxPrefixTokens, this.responseTokens),
                n: 3,
            })
            // Trim lines that go past current indent
            for (const choice of completion.data.choices) {
                if (!choice.message?.content) {
                    continue
                }
                const indent = getIndent(prevNonEmptyLine)
                choice.message.content = trimToIndent(choice.message.content, indent)
            }
            this.documentProvider.addCompletions(completionsUri, ext, prevLine, completion.data, {
                prompt: prefix,
                suffix,
                elapsedMillis: 0, // elapsed time not measured yet
                llmOptions: null,
            })
        } catch (error) {
            // NOTE(review): `error` is untyped here; `.response`/`.message` access assumes
            // an axios-style error — confirm, or narrow before use.
            if (error.response) {
                console.error(error.response.status)
                console.error(error.response.data)
            } else {
                console.error(error.message)
            }
        }
    }
}
/**
 * Extract the context around `position` in `document`: a byte-budgeted prefix
 * and suffix, plus the immediately preceding line and the nearest non-empty
 * lines before and after the cursor. Returns null only in a (practically
 * unreachable) degenerate case.
 *
 * NOTE(review): the byte budgets count only line contents — the '\n' separators
 * re-added by join() are not charged against maxPrefixLength/maxSuffixLength,
 * so the returned text can slightly exceed the budgets.
 */
function getCurrentDocContext(
    document: vscode.TextDocument,
    position: vscode.Position,
    maxPrefixLength: number,
    maxSuffixLength: number
): {
    prefix: string
    suffix: string
    prevLine: string
    prevNonEmptyLine: string
    nextNonEmptyLine: string
} | null {
    const offset = document.offsetAt(position)
    const prefixLines = document.getText(new vscode.Range(new vscode.Position(0, 0), position)).split('\n')
    // NOTE(review): String.split('\n') always yields at least one element, so
    // this branch can never be taken.
    if (prefixLines.length === 0) {
        console.error('no lines')
        return null
    }
    const suffixLines = document
        .getText(new vscode.Range(position, document.positionAt(document.getText().length)))
        .split('\n')
    // First non-empty line after the cursor (empty string if none).
    let nextNonEmptyLine = ''
    if (suffixLines.length > 0) {
        for (const line of suffixLines) {
            if (line.trim().length > 0) {
                nextNonEmptyLine = line
                break
            }
        }
    }
    // Last non-empty line at or before the cursor (empty string if none).
    let prevNonEmptyLine = ''
    for (let i = prefixLines.length - 1; i >= 0; i--) {
        const line = prefixLines[i]
        if (line.trim().length > 0) {
            prevNonEmptyLine = line
            break
        }
    }
    // The (possibly partial) line the cursor is on, up to the cursor.
    const prevLine = prefixLines[prefixLines.length - 1]
    let prefix: string
    if (offset > maxPrefixLength) {
        // Over budget: keep whole lines from the end, walking backwards until
        // adding the next line would exceed the budget.
        let total = 0
        let startLine = prefixLines.length
        for (let i = prefixLines.length - 1; i >= 0; i--) {
            if (total + prefixLines[i].length > maxPrefixLength) {
                break
            }
            startLine = i
            total += prefixLines[i].length
        }
        prefix = prefixLines.slice(startLine).join('\n')
    } else {
        prefix = document.getText(new vscode.Range(new vscode.Position(0, 0), position))
    }
    // Suffix: keep whole lines from the cursor forward until the budget is exhausted.
    let totalSuffix = 0
    let endLine = 0
    for (let i = 0; i < suffixLines.length; i++) {
        if (totalSuffix + suffixLines[i].length > maxSuffixLength) {
            break
        }
        endLine = i + 1
        totalSuffix += suffixLines[i].length
    }
    const suffix = suffixLines.slice(0, endLine).join('\n')
    return {
        prefix,
        suffix,
        prevLine,
        prevNonEmptyLine,
        nextNonEmptyLine,
    }
}
/** Return the leading whitespace of `line` (empty string if none). */
function getIndent(line: string): string {
    const match = line.match(/^\s*/)
    return match === null ? '' : match[0]
}
/**
 * Cut `text` at the first non-blank line (after the first line) whose
 * indentation is a strict prefix of `indent` — i.e. the first line that has
 * dedented out of the current block. Returns `text` unchanged if no such
 * line exists.
 */
function trimToIndent(text: string, indent: string): string {
    const lines = text.split('\n')
    // The first line is always kept; scan from the second line onward.
    for (let i = 1; i < lines.length; i++) {
        const line = lines[i]
        // Blank lines carry no indentation signal.
        if (line.trim().length === 0) {
            continue
        }
        const lineIndent = /^\s*/.exec(line)![0]
        const dedented = indent.startsWith(lineIndent) && lineIndent.length < indent.length
        if (dedented) {
            return lines.slice(0, i).join('\n')
        }
    }
    return text
}

View File

@ -8,6 +8,8 @@ export interface Configuration {
codebase?: string
debug: boolean
useContext: ConfigurationUseContext
experimentalSuggest: boolean
openaiKey: string | null
}
export function getConfiguration(config: vscode.WorkspaceConfiguration): Configuration {
@ -17,6 +19,8 @@ export function getConfiguration(config: vscode.WorkspaceConfiguration): Configu
codebase: config.get('cody.codebase'),
debug: config.get('cody.debug', false),
useContext: config.get<ConfigurationUseContext>('cody.useContext') || 'embeddings',
experimentalSuggest: config.get('cody.experimental.suggestions', false),
openaiKey: config.get('cody.experimental.keys.openai', null),
}
}

View File

@ -38,7 +38,7 @@ export async function recommendSourcegraph(localStorageService: LocalStorageServ
await vscode.window
.showInformationMessage('Add Sourcegraph to your workspace recommendations', '👍 Yes', "Don't show again")
.then(async answer => {
if (answer === 'Yes') {
if (answer === '👍 Yes') {
await vscode.commands.executeCommand(
'workbench.extensions.action.addExtensionToWorkspaceRecommendations',
'sourcegraph.sourcegraph'

View File

@ -217,7 +217,7 @@
"@types/svgo": "2.6.0",
"@types/testing-library__jest-dom": "5.9.5",
"@types/uuid": "8.0.1",
"@types/vscode": "^1.63.1",
"@types/vscode": "^1.76.0",
"@types/webpack-bundle-analyzer": "^4.6.0",
"@types/webpack-stats-plugin": "^0.3.2",
"@types/yauzl": "^2.9.2",

View File

@ -181,7 +181,7 @@ importers:
'@types/svgo': 2.6.0
'@types/testing-library__jest-dom': 5.9.5
'@types/uuid': 8.0.1
'@types/vscode': ^1.63.1
'@types/vscode': ^1.76.0
'@types/webpack-bundle-analyzer': ^4.6.0
'@types/webpack-stats-plugin': ^0.3.2
'@types/yauzl': ^2.9.2
@ -687,7 +687,7 @@ importers:
'@types/svgo': 2.6.0
'@types/testing-library__jest-dom': 5.9.5
'@types/uuid': 8.0.1
'@types/vscode': 1.63.1
'@types/vscode': 1.76.0
'@types/webpack-bundle-analyzer': 4.6.0_cf7cgeqdkm72g3fdehkr7aaod4
'@types/webpack-stats-plugin': 0.3.2_cf7cgeqdkm72g3fdehkr7aaod4
'@types/yauzl': 2.10.0
@ -918,7 +918,10 @@ importers:
'@sourcegraph/extension-api-types': link:../extension-api-types
client/cody:
specifiers: {}
specifiers:
openai: ^3.2.1
dependencies:
openai: 3.2.1
client/common:
specifiers:
@ -9160,8 +9163,8 @@ packages:
'@types/node': 13.13.5
dev: true
/@types/vscode/1.63.1:
resolution: {integrity: sha512-Z+ZqjRcnGfHP86dvx/BtSwWyZPKQ/LBdmAVImY82TphyjOw2KgTKcp7Nx92oNwCTsHzlshwexAG/WiY2JuUm3g==}
/@types/vscode/1.76.0:
resolution: {integrity: sha512-CQcY3+Fe5hNewHnOEAVYj4dd1do/QHliXaknAEYSXx2KEHUzFibDZSKptCon+HPgK55xx20pR+PBJjf0MomnBA==}
dev: true
/@types/webpack-bundle-analyzer/4.6.0_cf7cgeqdkm72g3fdehkr7aaod4:
@ -10606,6 +10609,14 @@ packages:
- debug
dev: true
/axios/0.26.1:
resolution: {integrity: sha512-fPwcX4EvnSHuInCMItEhAGnaSEXRBjtzh9fOtsE6E1G6p7vl7edEeZe11QHf18+6+9gR5PbKV/sGKNaD8YaMeA==}
dependencies:
follow-redirects: 1.15.1
transitivePeerDependencies:
- debug
dev: false
/axobject-query/2.2.0:
resolution: {integrity: sha512-Td525n+iPOOyUQIeBfcASuG6uJsDOITl7Mds5gFyerkWiX7qhUTdYUBlSgNMyVqtSJqwpt1kXGLdUt6SykLMRA==}
dev: true
@ -21287,6 +21298,15 @@ packages:
is-docker: 2.2.1
is-wsl: 2.2.0
/openai/3.2.1:
resolution: {integrity: sha512-762C9BNlJPbjjlWZi4WYK9iM2tAVAv0uUp1UmI34vb0CN5T2mjB/qM6RYBmNKMh/dN9fC+bxqPwWJZUTWW052A==}
dependencies:
axios: 0.26.1
form-data: 4.0.0
transitivePeerDependencies:
- debug
dev: false
/opener/1.5.2:
resolution: {integrity: sha512-ur5UIdyw5Y7yEj9wLzhqXiy6GZ3Mwx0yGI+5sMn2r0N0v3cKJvUmFH5yPP+WXh9e0xfyzyJX95D8l088DNFj7A==}
hasBin: true