Use 8B version of llama3 in metadata generation (#63212)

In offline evals this seems as good as 70b while being a fair bit
faster.

## Test plan
Tested locally.
This commit is contained in:
Jan Hartman 2024-06-13 13:40:56 +02:00 committed by GitHub
parent a3a6545612
commit c59ece1fd2
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
2 changed files with 2 additions and 1 deletion

View File

@@ -44,7 +44,7 @@ Respond with nothing else, only the entry names and the documentation. Code: ` +
MaxTokensToSample: 2000,
Temperature: 0,
TopP: 1,
Model: fireworks.Llama370bInstruct,
Model: fireworks.Llama38bInstruct,
}, c.logger)
if err != nil {

View File

@@ -28,6 +28,7 @@ const Llama27bCode = "accounts/fireworks/models/llama-v2-7b-code"
const Llama213bCode = "accounts/fireworks/models/llama-v2-13b-code"
const Llama213bCodeInstruct = "accounts/fireworks/models/llama-v2-13b-code-instruct"
const Llama234bCodeInstruct = "accounts/fireworks/models/llama-v2-34b-code-instruct"
const Llama38bInstruct = "accounts/fireworks/models/llama-v3-8b-instruct"
const Llama370bInstruct = "accounts/fireworks/models/llama-v3-70b-instruct"
const Mistral7bInstruct = "accounts/fireworks/models/mistral-7b-instruct-4k"
const Mixtral8x7bInstruct = "accounts/fireworks/models/mixtral-8x7b-instruct"