working one

This commit is contained in:
YiMing Han 2023-08-18 15:07:41 -04:00
parent bd182289c5
commit 8607b11ea1
12 changed files with 1210 additions and 1449 deletions

View File

@ -0,0 +1,20 @@
{
"configVersion": 2,
"packages": [
{
"name": "args",
"rootUri": "file:///Users/yiminghan/.pub-cache/hosted/pub.dev/args-2.4.2",
"packageUri": "lib/",
"languageVersion": "2.19"
},
{
"name": "llama2.dart",
"rootUri": "../",
"packageUri": "lib/",
"languageVersion": "3.1"
}
],
"generated": "2023-08-18T18:58:12.764817Z",
"generator": "pub",
"generatorVersion": "3.1.0"
}

View File

@ -1,60 +0,0 @@
# choose your compiler, e.g. gcc/clang
# example override to clang: make run CC=clang
CC = gcc
# the most basic way of building that is most likely to work on most systems
.PHONY: run
run: run.c
$(CC) -O3 -o run run.c -lm
# useful for a debug build, can then e.g. analyze with valgrind, example:
# $ valgrind --leak-check=full ./run out/model.bin -n 3
rundebug: run.c
$(CC) -g -o run run.c -lm
# https://gcc.gnu.org/onlinedocs/gcc/Optimize-Options.html
# https://simonbyrne.github.io/notes/fastmath/
# -Ofast enables all -O3 optimizations.
# Disregards strict standards compliance.
# It also enables optimizations that are not valid for all standard-compliant programs.
# It turns on -ffast-math, -fallow-store-data-races and the Fortran-specific
# -fstack-arrays, unless -fmax-stack-var-size is specified, and -fno-protect-parens.
# It turns off -fsemantic-interposition.
# In our specific application this is *probably* okay to use
.PHONY: runfast
runfast: run.c
$(CC) -Ofast -o run run.c -lm
# additionally compiles with OpenMP, allowing multithreaded runs
# make sure to also enable multiple threads when running, e.g.:
# OMP_NUM_THREADS=4 ./run out/model.bin
.PHONY: runomp
runomp: run.c
$(CC) -Ofast -fopenmp -march=native run.c -lm -o run
.PHONY: win64
win64:
x86_64-w64-mingw32-gcc -Ofast -D_WIN32 -o run.exe -I. run.c win.c
# compiles with gnu99 standard flags for amazon linux, coreos, etc. compatibility
.PHONY: rungnu
rungnu:
$(CC) -Ofast -std=gnu11 -o run run.c -lm
.PHONY: runompgnu
runompgnu:
$(CC) -Ofast -fopenmp -std=gnu11 run.c -lm -o run
# run all tests
.PHONY: test
test:
pytest
# run only tests for run.c C implementation (is a bit faster if only C code changed)
.PHONY: testc
testc:
pytest -k runc
.PHONY: clean
clean:
rm -f run

322
ORIGINAL.md Normal file
View File

@ -0,0 +1,322 @@
## llama2.c
<p align="center">
<img src="assets/llama_cute.jpg" width="300" height="300" alt="Cute Llama">
</p>
Train the Llama 2 LLM architecture in PyTorch then inference it with one simple 700-line C file ([run.c](run.c)). You might think that you need many billion parameter LLMs to do anything useful, but in fact very small LLMs can have surprisingly strong performance if you make the domain narrow enough (ref: [TinyStories](https://huggingface.co/datasets/roneneldan/TinyStories) paper). This repo is a "fullstack" train + inference solution for Llama 2 LLM, with focus on minimalism and simplicity.
As the architecture is identical, you can also load and inference Meta's Llama 2 models. However, the current code only inferences models in fp32, so you will most likely not be able to productively load models larger than 7B. Work on model quantization is currently ongoing.
Please note that this repo started recently as a fun weekend project: I took my earlier [nanoGPT](https://github.com/karpathy/nanoGPT), tuned it to implement the Llama-2 architecture instead of GPT-2, and the meat of it was writing the C inference engine in [run.c](run.c). So the project is young and moving quickly. Hat tip to the awesome [llama.cpp](https://github.com/ggerganov/llama.cpp) for inspiring this project. Compared to llama.cpp, I wanted something super simple, minimal, and educational, so I chose to hard-code the Llama 2 architecture and just roll one inference file of pure C with no dependencies.
## feel the magic
[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/karpathy/llama2.c/blob/master/run.ipynb)
First, navigate to the folder where you keep your projects and clone this repository into it:
```bash
git clone https://github.com/karpathy/llama2.c.git
```
Then, open the repository folder:
```bash
cd llama2.c
```
Now, let's just run a baby Llama 2 model in C. You need a model checkpoint. Download this 15M parameter model I trained on the [TinyStories](https://huggingface.co/datasets/roneneldan/TinyStories) dataset (~60MB download):
```bash
wget https://huggingface.co/karpathy/tinyllamas/resolve/main/stories15M.bin
```
Compile and run the C code:
```bash
make run
./run stories15M.bin
```
You'll see the text stream a sample. On my M1 MacBook Air this runs at ~110 tokens/s. See [performance](#performance) or the Makefile for compile flags that can significantly speed this up. We can also try a bit bigger 42M parameter model:
```bash
wget https://huggingface.co/karpathy/tinyllamas/resolve/main/stories42M.bin
./run stories42M.bin
```
This still runs at interactive rates and samples more coherent and diverse stories:
> Once upon a time, there was a little girl named Lily. She loved playing with her toys on top of her bed. One day, she decided to have a tea party with her stuffed animals. She poured some tea into a tiny teapot and put it on top of the teapot. Suddenly, her little brother Max came into the room and wanted to join the tea party too. Lily didn't want to share her tea and she told Max to go away. Max started to cry and Lily felt bad. She decided to yield her tea party to Max and they both shared the teapot. But then, something unexpected happened. The teapot started to shake and wiggle. Lily and Max were scared and didn't know what to do. Suddenly, the teapot started to fly towards the ceiling and landed on the top of the bed. Lily and Max were amazed and they hugged each other. They realized that sharing was much more fun than being selfish. From that day on, they always shared their tea parties and toys.
You can also prompt the model with a prefix or a number of additional command line arguments, e.g. to sample at temperature 0.8 for 256 steps and with a prompt:
```bash
./run stories42M.bin -t 0.8 -n 256 -i "One day, Lily met a Shoggoth"
```
> One day, Lily met a Shoggoth. He was very shy, but was also very generous. Lily said “Hello Shoggy! Can I be your friend?” Shoggy was happy to have a friend and said “Yes, lets explore the universe together!” So they set off on a journey to explore the universe. As they travelled, Shoggy was happy to explain to Lily about all the wonderful things in the universe. At the end of the day, Lily and Shoggy had gathered lots of wonderful things from the universe, and they both felt very proud. They promised to explore the universe as one big pair and to never stop being generous to each other.
There is also an even better 110M param model available, see [models](#models).
Quick note on sampling, the recommendation for ~best results is to sample with `-t 1.0 -p 0.9`, i.e. temperature 1.0 (default) but also top-p sampling at 0.9 (default). Intuitively, top-p ensures that tokens with tiny probabilities do not get sampled, so we can't get "unlucky" during sampling, and we are less likely to go "off the rails" afterwards. More generally, to control the diversity of samples use either the temperature (i.e. vary `-t` between 0 and 1 and keep top-p off with `-p 0`) or the top-p value (i.e. vary `-p` between 0 and 1 and keep `-t 1`), but not both. Nice explainers on LLM sampling strategies include [this](https://peterchng.com/blog/2023/05/02/token-selection-strategies-top-k-top-p-and-temperature/), [this](https://docs.cohere.com/docs/controlling-generation-with-top-k-top-p) or [this](https://huggingface.co/blog/how-to-generate).
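For intuition, here is a minimal sketch in C of how top-p (nucleus) sampling works, in the spirit of the `ProbIndex`-based sampler in run.c (the function name and buffer handling here are illustrative, not the exact run.c API): sort tokens by probability, keep the smallest prefix whose cumulative probability exceeds `p`, then sample from that renormalized prefix.
```c
#include <stdlib.h>

typedef struct { float prob; int index; } ProbIndex;

// sort comparator: higher probabilities first
static int cmp_desc(const void* a, const void* b) {
    float pa = ((const ProbIndex*)a)->prob, pb = ((const ProbIndex*)b)->prob;
    return (pa < pb) - (pa > pb);
}

// probs: softmax output over vocab_size tokens; coin: uniform random number in [0,1)
// buf: caller-provided scratch array of vocab_size ProbIndex entries
int sample_topp(const float* probs, int vocab_size, float topp, float coin, ProbIndex* buf) {
    for (int i = 0; i < vocab_size; i++) { buf[i].prob = probs[i]; buf[i].index = i; }
    qsort(buf, vocab_size, sizeof(ProbIndex), cmp_desc);
    // keep the smallest prefix whose cumulative probability exceeds topp
    float cum = 0.0f; int last = vocab_size - 1;
    for (int i = 0; i < vocab_size; i++) {
        cum += buf[i].prob;
        if (cum > topp) { last = i; break; }
    }
    // sample from the truncated distribution, renormalized by cum
    float r = coin * cum, acc = 0.0f;
    for (int i = 0; i <= last; i++) {
        acc += buf[i].prob;
        if (r < acc) return buf[i].index;
    }
    return buf[last].index; // fallback for rounding errors
}
```
Tokens with tiny probabilities fall past the cutoff and can never be picked, which is exactly the "can't get unlucky" property described above.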
## Meta's Llama 2 models
As the neural net architecture is identical, we can also inference the Llama 2 models released by Meta. Sadly there is a bit of friction here due to licensing (I can't directly upload the checkpoints, I think). So Step 1, get the Llama 2 checkpoints by following the [Meta instructions](https://github.com/facebookresearch/llama). Once we have those checkpoints, we have to convert them into the llama2.c format.
For this we need to install the python dependencies (`pip install -r requirements.txt`) and then use the `export_meta_llama_bin.py` file, e.g. for 7B model:
```bash
python export_meta_llama_bin.py path/to/llama/model/7B llama2_7b.bin
```
The export will take ~10 minutes or so and generate a 26GB file (the weights of the 7B model in float32) called `llama2_7b.bin` in the current directory. It has been [reported](https://github.com/karpathy/llama2.c/pull/85) that despite efforts, the 13B export currently doesn't work for unknown reasons (accepting PRs for fix). We can run the model as normal:
```bash
./run llama2_7b.bin
```
This ran at about 4 tokens/s compiled with [OpenMP](#OpenMP) on 96 threads on my CPU Linux box in the cloud. (On my MacBook Air M1, currently it's closer to 30 seconds per token if you just build with `make runfast`.) Example output:
> The purpose of this document is to highlight the state-of-the-art of CoO generation technologies, both recent developments and those in commercial use. The focus is on the technologies with the highest merit to become the dominating processes of the future and therefore to be technologies of interest to S&T ... R&D. As such, CoO generation technologies developed in Russia, Japan and Europe are described in some depth. The document starts with an introduction to cobalt oxides as complex products and a short view on cobalt as an essential material. The document continues with the discussion of the available CoO generation processes with respect to energy and capital consumption as well as to environmental damage.
base models... ¯\\_(ツ)_/¯. Since we can inference the base model, it should be possible to also inference the chat model quite easily, and have a conversation with it. And if we can find a way to run 7B more efficiently, we can start adding LoRA to our training script, and going wild with finetunes all within the repo!
## models
For the sake of examples of smaller, from-scratch models, I trained a small model series on TinyStories. All of these trained in a few hours on my training setup (4X A100 40GB GPUs). The 110M took around 24 hours. I am hosting them on huggingface hub [tinyllamas](https://huggingface.co/karpathy/tinyllamas), both in the original PyTorch .pt, and also in the llama2.c format .bin:
| model | dim | n_layers | n_heads | n_kv_heads | max context length | parameters | val loss | download |
| ----- | --- | -------- | ------- | ---------- | ------------------ | ---------- | -------- | ------------------------------------------------------------------------------------------ |
| 260K | 64 | 5 | 8 | 4 | 512 | 260K | 1.297 | [stories260K](https://huggingface.co/karpathy/tinyllamas/tree/main/stories260K) |
| OG | 288 | 6 | 6 | 6 | 256 | 15M | 1.072 | [stories15M.bin](https://huggingface.co/karpathy/tinyllamas/resolve/main/stories15M.bin) |
| 42M | 512 | 8 | 8 | 8 | 1024 | 42M | 0.847 | [stories42M.bin](https://huggingface.co/karpathy/tinyllamas/resolve/main/stories42M.bin) |
| 110M | 768 | 12 | 12 | 12 | 1024 | 110M | 0.760 | [stories110M.bin](https://huggingface.co/karpathy/tinyllamas/resolve/main/stories110M.bin) |
You'll notice that the 110M model is equivalent to GPT-1 in size. Alternatively, this is also the smallest model in the GPT-2 series (`GPT-2 small`), except the max context length is only 1024 instead of 2048. The only notable changes from the GPT-1/2 architecture are that Llama uses RoPE relative positional embeddings instead of absolute/learned positional embeddings, a slightly fancier SwiGLU non-linearity in the MLP, RMSNorm instead of LayerNorm, bias=False on all Linear layers, and optional multiquery attention (not yet supported in llama2.c).
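As a concrete example of one of those differences, here is a minimal sketch of RMSNorm in C, mirroring the version in run.c: unlike LayerNorm there is no mean subtraction and no bias, just scaling by the reciprocal root-mean-square and a learned per-channel weight (the `1e-5f` epsilon matches the constant used in run.c).
```c
#include <math.h>

// out = weight * x / sqrt(mean(x^2) + eps)
void rmsnorm(float* out, const float* x, const float* weight, int size) {
    // mean of squares, plus a small epsilon for numerical stability
    float ss = 0.0f;
    for (int i = 0; i < size; i++) { ss += x[i] * x[i]; }
    ss = ss / size + 1e-5f;
    float inv_rms = 1.0f / sqrtf(ss);
    // normalize and apply the learned scale
    for (int i = 0; i < size; i++) { out[i] = weight[i] * (x[i] * inv_rms); }
}
```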
## training
Let's see how we can train a baby Llama 2 from scratch using the code in this repo. First let's download and pretokenize some source dataset, e.g. I like [TinyStories](https://huggingface.co/datasets/roneneldan/TinyStories) so this is the only example currently available in this repo. But it should be very easy to add datasets, see the code.
```bash
python tinystories.py download
python tinystories.py pretokenize
```
Then train our model:
```bash
python train.py
```
**brief training guide**. See the train.py script for more exotic launches and hyperparameter overrides. Here is a brief guide to how to set the parameters. Look at the table at the very end of the [Chinchilla paper](https://arxiv.org/abs/2203.15556) to get a sense of how the Transformer parameters (dim, n_layers, n_heads) grow or shrink together. Extrapolate/interpolate this pattern to get bigger or smaller transformers. Set the max context length however you wish, depending on the problem: this should be the max number of tokens that matter to predict the next token. E.g. Llama 2 uses 2048. Next, you want the _total_ batch size per update (printed by the script as "tokens per iteration will be:") to be somewhere around 100K tokens for medium-sized applications. For tiny applications it could be lower, for large training (e.g. GPTs/LLamas) it is usually ~0.5M, or even more. You get there by first maxing out the batch_size to whatever your system allows (e.g. mine was 16 in a recent run because after that my GPU runs out of memory), and then you want to increase gradient_accumulation_steps to be as high as necessary to reach the total batch size of ~100K. Finally, you want to tune your learning_rate (LR). You want this to be as high as your training allows. Very small networks can get away with a large LR (e.g. 1e-3 or even higher). Large networks need lower LRs. 3e-4 is a safe choice in most medium-sized applications, but can be too low for small networks, so try to increase it! Finally, max_iters is the length of training. Play with different settings. I mostly only ever tune these parameters and leave most of the others unchanged. Here is an example of how I trained the 110M model, which I don't think is anywhere near optimal, but looked sensible to me: dim 768, n_layers 12, n_heads 12 (so size of each head is 768 / 12 = 64 channels), seq len of 1024, batch size 16 (this is the most that fit my A100 40GB GPU), gradient_accumulation_steps = 8 was needed to get total tokens batch size to be 16 batch size * 1024 tokens in sequence * 8 grad_accum = 131,072 tokens per update. Good. Learning rate 4e-4 (probably a little too low). max_iters 200K (probably a bit too high). Dropout 0.1, as that usually helps a bit at medium size. That was it. I ran using Distributed Data Parallel (DDP) on 4 GPUs on my cloud machine, training took ~day or so.
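To make the batch-size arithmetic above concrete, here is a tiny sketch in C (using the 110M numbers from this paragraph purely as an illustration) of how the tokens-per-update figure falls out:
```c
#include <stdio.h>

int main(void) {
    // illustrative values from the 110M run described above; tune for your own GPU
    int batch_size = 16;   // largest micro-batch that fits in memory
    int seq_len = 1024;    // max context length
    int grad_accum = 8;    // gradient_accumulation_steps
    long tokens_per_update = (long)batch_size * seq_len * grad_accum;
    printf("tokens per iteration will be: %ld\n", tokens_per_update); // 131072, near the ~100K target
    return 0;
}
```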
Totally understand if you want to skip model training, for simple demo just download one of the pretrained models (see [models](#models) section), e.g.:
```bash
wget https://huggingface.co/karpathy/tinyllamas/resolve/main/stories15M.bin
```
Once we have the model.bin file, we can inference in C. Compile the C code first:
```bash
make run
```
You can now run it simply as
```bash
./run stories15M.bin
```
Watch the tokens stream by, fun! We can also run the PyTorch inference script for a comparison. Download one of the models again from huggingface hub and point the `sample.py` script at it:
```bash
wget https://huggingface.co/karpathy/tinyllamas/resolve/main/stories15M.pt -P out15M
python sample.py --checkpoint=out15M/stories15M.pt
```
Which gives the same results.
## custom tokenizers
In everything above, we've assumed the custom Llama 2 tokenizer with 32,000 tokens. However, in many boutique LLMs, using a vocabulary this big might be overkill. If you have a small application in mind, you might be much better off training your own tokenizer. This can make everything nicer - with smaller vocabs your model has fewer parameters (because the token embedding table is a lot smaller), the inference is faster (because there are fewer tokens to predict), and your average sequence length per example could also get smaller (because the compression is a lot more efficient on your data). So let's see how we train a custom tokenizer.
By default, to pretokenize the tinystories dataset we had to run, in order:
```
python tinystories.py download
python tinystories.py pretokenize
```
The `pretokenize` stage here loads the Llama 2 tokenizer (vocab size 32,000) and uses it to convert the downloaded text into integers, and saves that to file. We now change this as follows, to train an example 4096-token tokenizer:
```
python tinystories.py download
python tinystories.py train_vocab --vocab_size=4096
python tinystories.py pretokenize --vocab_size=4096
```
The `train_vocab` stage will call the `train_vocab.sh` script, which calls the `sentencepiece` library to train the tokenizer, storing it in a new file `data/tok4096.model`. I tried to reproduce as well as I could the settings that (I think) Meta used to train their vocabulary. This uses the Byte Pair Encoding algorithm that starts out with raw utf8 byte sequences of the text data and then iteratively merges the most common consecutive pairs of tokens to form the vocabulary. Inspect the `tinystories.py` file - the custom tokenizers are stored in a special directory structure indexed by the vocab size.
A quick note of interest is that vocab size of 4096 trained specifically on tinystories creates integer sequences with about the same sequence length per example as the default Llama 2 tokenizer of 32000 tokens! This means that our custom, tailored tokenizer is a lot better adapted to our specific text, and can compress it very effectively. So our trained models are smaller and faster.
Now that we have pretokenized the dataset with our custom tokenizer, we can train the model. The training script `train.py` doesn't care about the exact tokens, it only cares about the vocabulary size so it can correctly initialize the model. So when training your model, make sure to pass in
```
python train.py --vocab_source=custom --vocab_size=4096
```
(The defaults are `llama2` and `32000` respectively, which indicates the default Llama 2 tokenizer). This trains the model. Finally we are ready to run inference with our `run.c` script. For that we need two things. Number one, we have to export our tokenizer in the `.bin` format, do that with:
```
python tokenizer.py --tokenizer-model=data/tok4096.model
```
This writes the tokenizer to `data/tok4096.bin`. Now we can run inference, pointing it to this tokenizer using the `-z` flag:
```
./run out/model.bin -z data/tok4096.bin
```
This should print the samples. If you leave out the `-z` flag, it will use the default Llama 2 tokenizer, which would generate a good sequence of integers, but they would get translated using a different vocabulary to text, so it would look like gibberish.
## performance
There are many ways to potentially speed up this code depending on your system. Have a look at the [Makefile](Makefile), which contains a lot of notes. The `make run` command currently uses the `-O3` optimization by default, i.e.:
```bash
gcc -O3 -o run run.c -lm
```
-O3 includes optimizations that are expensive in terms of compile time and memory usage, including vectorization, loop unrolling, and branch prediction.
To get much better performance, try to compile with `make runfast`. This turns on the `-Ofast` flag, which includes additional optimizations that may break compliance with the C/IEEE specifications, in addition to `-O3`. See [the GCC docs](https://gcc.gnu.org/onlinedocs/gcc/Optimize-Options.html) for more information.
Try `-march=native` to compile the program to use the architecture of the machine you're compiling on rather than a more generic CPU. This may enable additional optimizations and hardware-specific tuning such as improved vector instructions/width.
The fastest throughput I've seen so far on my MacBook Air (M1) is with `make runfast`.
You can also experiment with replacing `gcc` with `clang`.
If compiling with gcc, try experimenting with `-funroll-all-loops`, see PR [#183](https://github.com/karpathy/llama2.c/pull/183)
### OpenMP
Big improvements can also be achieved by compiling with OpenMP, which "activates" the `#pragma omp parallel for` inside the matmul and attention, allowing the work in the loops to be split up over multiple processors.
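For intuition, this is roughly the shape of the loop that pragma parallelizes (a minimal sketch mirroring the matmul in run.c): with `-fopenmp` each thread computes a slice of the output rows, and without it the pragma is simply ignored and the loop runs single-threaded.
```c
// W (d,n) @ x (n,) -> out (d,) -- the hot loop of inference
void matmul(float* out, const float* x, const float* w, int n, int d) {
    #pragma omp parallel for
    for (int i = 0; i < d; i++) {
        float val = 0.0f;
        for (int j = 0; j < n; j++) { val += w[i * n + j] * x[j]; }
        out[i] = val;
    }
}
```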
You'll need to install the OpenMP library and the clang compiler first (e.g. `apt install clang libomp-dev` on ubuntu). Then you can compile with `make runomp`, which does:
```bash
clang -Ofast -fopenmp -march=native run.c -lm -o run
```
When you run inference make sure to use OpenMP flags to set the number of threads, e.g.:
```bash
OMP_NUM_THREADS=4 ./run out/model.bin
```
Depending on your system resources you may want to tweak these hyperparameters and use more threads. But more is not always better, usually this is a bit U shaped.
## platforms
On **Windows**, use `build_msvc.bat` in a Visual Studio Command Prompt to build with msvc, or you can use `make win64` to use mingw compiler toolchain from linux or windows to build the windows target. MSVC build will automatically use openmp and max threads appropriate for your CPU unless you set `OMP_NUM_THREADS` env.
On **Centos 7**, **Amazon Linux 2018** use `rungnu` Makefile target: `make rungnu` or `make runompgnu` to use openmp.
On **Mac**, use clang from brew for openmp build. Install clang as `brew install llvm` and use the installed clang binary to compile with openmp: `make runomp CC=/opt/homebrew/opt/llvm/bin/clang`
## tests
You can run tests simply with pytest:
```bash
$ pip install pytest
$ pytest
```
This will currently invoke two tests inside `test_all.py`, which forward the model in both C and Python for 200 steps and check the output against a known good expected output. The tests currently run in only a few seconds, but will have to download and cache the stories260K models in a temporary `test` directory (only ~2MB download).
## ack
I trained the llama2.c storyteller models on a 4X A100 40GB box graciously provided by the excellent [Lambda labs](https://lambdalabs.com/service/gpu-cloud), thank you.
## discord
Figured it's possible to reuse my existing discord channel (that I use for my [zero to hero youtube series](https://karpathy.ai/zero-to-hero.html)), see #llama2c channel on [discord](https://discord.gg/3zy8kqD9Cp), for any quick questions, related discussions, etc.
## contributing
A few words on this repo and the kinds of PRs that are likely to be accepted. What is the goal of this repo? Basically I think there will be a lot of interest in training or finetuning custom micro-LLMs (think ~100M - ~1B params, but let's say up to ~10B params) across a large diversity of applications, and deploying them in edge-adjacent environments (think MCUs, phones, web browsers, laptops, etc.). I'd like this repo to be the simplest, smallest, most hackable repo to support this workflow, both training and inference. In particular, this repo is not a complex framework with 1000 knobs controlling inscrutable code across a nested directory structure of hundreds of files. Instead, I expect most applications will wish to create a fork of this repo and hack it to their specific needs and deployment platforms.
People who care about deployment efficiency above all else should look at [llama.cpp](https://github.com/ggerganov/llama.cpp). This repo still cares about efficiency, but not at the cost of simplicity, readability or portability. Basically, I expect that a lot of people come to this repo because the training code is 2 readable .py files and the inference code is 500 lines of C. So I'd like this to continue to be a kind of simplest "reference implementation" that can be easily hacked in a separate fork into whatever downstream application people are excited about. It shouldn't be full-featured. It shouldn't take 100 different options or settings. It shouldn't be the most efficient. A few examples:
- someone re-ordered two loops to improve data locality for a small efficiency win => instant merge.
- someone added the one line "pragma omp parallel for", which allows you to compile with OpenMP and dramatically speed up the code, or acts as just a comment if you don't compile it that way => instant merge.
- bug fixes and touchups etc. => happy to merge
A few examples of PRs that are not an excellent fit:
- adding more than several #ifdefs all over the place in code. If they are localized / few, might be okay.
- adding a lot of code that is very specific to some specific platform (e.g. MCUs, or some special version of linux or processor). These may be a better fit for forks of the project, and I am very happy to maintain a list of these forks in section below.
- adding hundreds of lines of code to run.c that are only active in specific scenarios or platforms.
If your candidate PRs have elements of these it doesn't mean they won't get merged, it just means they will fall into gray territory. TLDR: I am eager to merge any mostly small, mostly localized, broadly applicable, clean changes that improve the efficiency and portability of the repo, while keeping its hackability and readability. I appreciate all PRs seeking to help me improve the project, thank you! <3
## notable forks
- Rust
- [llama2.rs](https://github.com/gaxler/llama2.rs) by @[gaxler](https://github.com/gaxler): a Rust port of this project
- [llama2.rs](https://github.com/leo-du/llama2.rs) by @[leo-du](https://github.com/leo-du): A Rust port of this project
- [llama2-rs](https://github.com/danielgrittner/llama2-rs) by @[danielgrittner](https://github.com/danielgrittner): a Rust port of this project
- [llama2.rs](https://github.com/lintian06/llama2.rs) by @[lintian06](https://github.com/lintian06): A Rust port of this project
- Go
- [go-llama2](https://github.com/tmc/go-llama2) by @[tmc](https://github.com/tmc): a Go port of this project
- [llama2.go](https://github.com/nikolaydubina/llama2.go) by @[nikolaydubina](https://github.com/nikolaydubina): a Go port of this project
- [llama2.go](https://github.com/haormj/llama2.go) by @[haormj](https://github.com/haormj): a Go port of this project
- [llama2.go](https://github.com/saracen/llama2.go) by @[saracen](https://github.com/saracen): a Go port of this project
- Android
- [llama2.c-android](https://github.com/Manuel030/llama2.c-android): by @[Manuel030](https://github.com/Manuel030): adds Android binaries of this project
- [llama2.c-android-wrapper](https://github.com/celikin/llama2.c-android-wrapper): by @[celikin](https://github.com/celikin): added JNI wrapper, PoC
- C++
- [llama2.cpp](https://github.com/leloykun/llama2.cpp) by @[leloykun](https://github.com/leloykun): a C++ port of this project
- JavaScript
- [llama2.js](https://github.com/epicure/llama2.js) by @[epicure](https://github.com/epicure): a JavaScript port of this project
- [llama2.ts](https://github.com/wizzard0/llama2.ts) by @[oleksandr_now](https://twitter.com/oleksandr_now): a TypeScript port of this project. Full Llama2-7B capable.
- [llama2.c-emscripten](https://github.com/gohai/llama2.c-emscripten) by @[gohai](https://github.com/gohai): Emscripten (JavaScript) port, based on @ggerganov's initial prototype
- Zig
- [llama2.zig](https://github.com/cgbur/llama2.zig) by @[cgbur](https://github.com/cgbur): A Zig port of this project
- [llama2.zig](https://github.com/vodkaslime/llama2.zig) by @[vodkaslime](https://github.com/vodkaslime): a Zig port of this project
- [llama2.zig](https://github.com/clebert/llama2.zig) by @[clebert](https://github.com/clebert): a Zig port of this project
- Julia
- [llama2.jl](https://github.com/juvi21/llama2.jl) by @[juvi21](https://github.com/juvi21): a Julia port of this project
- Scala
- [llama2.scala](https://github.com/jrudolph/llama2.scala) by @[jrudolph](https://github.com/jrudolph): a Scala port of this project
- Java
- [llama2.java](https://github.com/mukel/llama2.java) by @[mukel](https://github.com/mukel): a Java port of this project
- Kotlin
- [llama2.kt](https://github.com/madroidmaq/llama2.kt) by @[madroidmaq](https://github.com/madroidmaq): a Kotlin port of this project
- Python
- [llama2.py](https://github.com/tairov/llama2.py) by @[tairov](https://github.com/tairov): a simple one file pure Python port of this project with zero dependencies
- C#
- [llama2.cs](https://github.com/trrahul/llama2.cs) by @[trrahul](https://github.com/trrahul): a C# port of this project
- WebAssembly
- [icpp-llm](https://github.com/icppWorld/icpp-llm): LLMs for the Internet Computer
- [llama2.c - Llama 2 Everywhere](https://github.com/trholding/llama2.c) by @[trholding](https://github.com/trholding): Standalone, Bootable & Portable Binary Llama 2
- [llama2.c-zh - Bilingual Chinese and English](https://github.com/chenyangMl/llama2.c-zh) by @[chenyangMl](https://github.com/chenyangMl): Expand tokenizer to support training and inference in both Chinese and English
## unsorted todos
- make it easier to add a new dataset with not too much pain
- should calculate freq_cis online in the script run.c instead of loading them
- int4/8 quantization
- export the model in a more sensible output format with a proper header, etc.
- support Llama 2 7B Chat models and tune run.c to Chat UI/UX
- llama2.cu investigate and merge
- (LoRA) finetuning and export of Llama 2 models
## License
MIT

356
README.md
View File

@ -1,4 +1,48 @@
## llama2.c
## llama2.dart
This is a fork of Andrej Karpathy's [llama2.c](https://github.com/karpathy/llama2.c), implemented in (almost) pure Dart, except for a small args-parsing utility library.
### To run:
Install Dart:
```bash
brew tap dart-lang/dart
brew install dart
```
Install the args parsing dependency:
```bash
dart pub add args
```
Download the model checkpoints:
```bash
wget https://huggingface.co/karpathy/tinyllamas/resolve/main/stories15M.bin
wget https://huggingface.co/karpathy/tinyllamas/resolve/main/stories42M.bin
wget https://huggingface.co/karpathy/tinyllamas/resolve/main/stories110M.bin
```
Then run inference, pointing it at a checkpoint and an optional prompt:
```bash
dart run run.dart -c ./stories15M.bin -i "PROMPT GOES HERE"
```
## Performance
Dart has surprisingly OK performance for a single-threaded language, though it starts to struggle at 110M. Tested on an M2 Max chip:
| Model | Tokens/s |
| ----- | -------- |
| 15M   | 17.78    |
| 42M   | 6.43     |
| 110M  | 2.47     |
### Original README
Extract from the original repo:
<p align="center">
<img src="assets/llama_cute.jpg" width="300" height="300" alt="Cute Llama">
@ -10,312 +54,4 @@ As the architecture is identical, you can also load and inference Meta's Llama 2
Please note that this repo started recently as a fun weekend project: I took my earlier [nanoGPT](https://github.com/karpathy/nanoGPT), tuned it to implement the Llama-2 architecture instead of GPT-2, and the meat of it was writing the C inference engine in [run.c](run.c). So the project is young and moving quickly. Hat tip to the awesome [llama.cpp](https://github.com/ggerganov/llama.cpp) for inspiring this project. Compared to llama.cpp, I wanted something super simple, minimal, and educational, so I chose to hard-code the Llama 2 architecture and just roll one inference file of pure C with no dependencies.
## feel the magic
[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/karpathy/llama2.c/blob/master/run.ipynb)
First, navigate to the folder where you keep your projects and clone this repository into it:
```bash
git clone https://github.com/karpathy/llama2.c.git
```
Then, open the repository folder:
```bash
cd llama2.c
```
Now, let's just run a baby Llama 2 model in C. You need a model checkpoint. Download this 15M parameter model I trained on the [TinyStories](https://huggingface.co/datasets/roneneldan/TinyStories) dataset (~60MB download):
```bash
wget https://huggingface.co/karpathy/tinyllamas/resolve/main/stories15M.bin
```
Compile and run the C code:
```bash
make run
./run stories15M.bin
```
You'll see the text stream a sample. On my M1 MacBook Air this runs at ~110 tokens/s. See [performance](#performance) or the Makefile for compile flags that can significantly speed this up. We can also try a bit bigger 42M parameter model:
```bash
wget https://huggingface.co/karpathy/tinyllamas/resolve/main/stories42M.bin
./run stories42M.bin
```
This still runs at interactive rates and samples more coherent and diverse stories:
> Once upon a time, there was a little girl named Lily. She loved playing with her toys on top of her bed. One day, she decided to have a tea party with her stuffed animals. She poured some tea into a tiny teapot and put it on top of the teapot. Suddenly, her little brother Max came into the room and wanted to join the tea party too. Lily didn't want to share her tea and she told Max to go away. Max started to cry and Lily felt bad. She decided to yield her tea party to Max and they both shared the teapot. But then, something unexpected happened. The teapot started to shake and wiggle. Lily and Max were scared and didn't know what to do. Suddenly, the teapot started to fly towards the ceiling and landed on the top of the bed. Lily and Max were amazed and they hugged each other. They realized that sharing was much more fun than being selfish. From that day on, they always shared their tea parties and toys.
You can also prompt the model with a prefix or a number of additional command line arguments, e.g. to sample at temperature 0.8 for 256 steps and with a prompt:
```bash
./run stories42M.bin -t 0.8 -n 256 -i "One day, Lily met a Shoggoth"
```
> One day, Lily met a Shoggoth. He was very shy, but was also very generous. Lily said “Hello Shoggy! Can I be your friend?” Shoggy was happy to have a friend and said “Yes, lets explore the universe together!” So they set off on a journey to explore the universe. As they travelled, Shoggy was happy to explain to Lily about all the wonderful things in the universe. At the end of the day, Lily and Shoggy had gathered lots of wonderful things from the universe, and they both felt very proud. They promised to explore the universe as one big pair and to never stop being generous to each other.
There is also an even better 110M param model available, see [models](#models).
Quick note on sampling, the recommendation for ~best results is to sample with `-t 1.0 -p 0.9`, i.e. temperature 1.0 (default) but also top-p sampling at 0.9 (default). Intuitively, top-p ensures that tokens with tiny probabilities do not get sampled, so we can't get "unlucky" during sampling, and we are less likely to go "off the rails" afterwards. More generally, to control the diversity of samples use either the temperature (i.e. vary `-t` between 0 and 1 and keep top-p off with `-p 0`) or the top-p value (i.e. vary `-p` between 0 and 1 and keep `-t 1`), but not both. Nice explainers on LLM sampling strategies include [this](https://peterchng.com/blog/2023/05/02/token-selection-strategies-top-k-top-p-and-temperature/), [this](https://docs.cohere.com/docs/controlling-generation-with-top-k-top-p) or [this](https://huggingface.co/blog/how-to-generate).
## Meta's Llama 2 models
As the neural net architecture is identical, we can also inference the Llama 2 models released by Meta. Sadly there is a bit of friction here due to licensing (I can't directly upload the checkpoints, I think). So Step 1, get the Llama 2 checkpoints by following the [Meta instructions](https://github.com/facebookresearch/llama). Once we have those checkpoints, we have to convert them into the llama2.c format.
For this we need to install the python dependencies (`pip install -r requirements.txt`) and then use the `export_meta_llama_bin.py` file, e.g. for 7B model:
```bash
python export_meta_llama_bin.py path/to/llama/model/7B llama2_7b.bin
```
The export will take ~10 minutes or so and generate a 26GB file (the weights of the 7B model in float32) called `llama2_7b.bin` in the current directory. It has been [reported](https://github.com/karpathy/llama2.c/pull/85) that despite efforts, the 13B export currently doesn't work for unknown reasons (accepting PRs for fix). We can run the model as normal:
```bash
./run llama2_7b.bin
```
This ran at about 4 tokens/s compiled with [OpenMP](#OpenMP) on 96 threads on my CPU Linux box in the cloud. (On my MacBook Air M1, currently it's closer to 30 seconds per token if you just build with `make runfast`.) Example output:
> The purpose of this document is to highlight the state-of-the-art of CoO generation technologies, both recent developments and those in commercial use. The focus is on the technologies with the highest merit to become the dominating processes of the future and therefore to be technologies of interest to S&T ... R&D. As such, CoO generation technologies developed in Russia, Japan and Europe are described in some depth. The document starts with an introduction to cobalt oxides as complex products and a short view on cobalt as an essential material. The document continues with the discussion of the available CoO generation processes with respect to energy and capital consumption as well as to environmental damage.
base models... ¯\\_(ツ)_/¯. Since we can inference the base model, it should be possible to also inference the chat model quite easily, and have a conversation with it. And if we can find a way to run 7B more efficiently, we can start adding LoRA to our training script, and going wild with finetunes all within the repo!
## models
For the sake of examples of smaller, from-scratch models, I trained a small model series on TinyStories. All of these trained in a few hours on my training setup (4X A100 40GB GPUs). The 110M took around 24 hours. I am hosting them on huggingface hub [tinyllamas](https://huggingface.co/karpathy/tinyllamas), both in the original PyTorch .pt, and also in the llama2.c format .bin:
| model | dim | n_layers | n_heads | n_kv_heads | max context length | parameters | val loss | download
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| 260K | 64 | 5 | 8 | 4 | 512 | 260K | 1.297 | [stories260K](https://huggingface.co/karpathy/tinyllamas/tree/main/stories260K)
| OG | 288 | 6 | 6 | 6 | 256 | 15M | 1.072 | [stories15M.bin](https://huggingface.co/karpathy/tinyllamas/resolve/main/stories15M.bin) |
| 42M| 512 | 8 | 8 | 8 | 1024 | 42M | 0.847 | [stories42M.bin](https://huggingface.co/karpathy/tinyllamas/resolve/main/stories42M.bin) |
| 110M| 768 | 12 | 12 | 12 | 1024 | 110M | 0.760 | [stories110M.bin](https://huggingface.co/karpathy/tinyllamas/resolve/main/stories110M.bin) |
You'll notice that the 110M model is equivalent to GPT-1 in size. Alternatively, this is also the smallest model in the GPT-2 series (`GPT-2 small`), except the max context length is only 1024 instead of 2048. The only notable changes from the GPT-1/2 architecture are that Llama uses RoPE relative positional embeddings instead of absolute/learned positional embeddings, a slightly fancier SwiGLU non-linearity in the MLP, RMSNorm instead of LayerNorm, bias=False on all Linear layers, and optional multiquery attention (not yet supported in llama2.c).
## training
Let's see how we can train a baby Llama 2 from scratch using the code in this repo. First let's download and pretokenize some source dataset, e.g. I like [TinyStories](https://huggingface.co/datasets/roneneldan/TinyStories) so this is the only example currently available in this repo. But it should be very easy to add datasets, see the code.
```bash
python tinystories.py download
python tinystories.py pretokenize
```
Then train our model:
```bash
python train.py
```
**brief training guide**. See the train.py script for more exotic launches and hyperparameter overrides. Here is a brief guide to how to set the parameters. Look at the table at the very end of the [Chinchilla paper](https://arxiv.org/abs/2203.15556) to get a sense of how the Transformer parameters (dim, n_layers, n_heads) grow or shrink together. Extrapolate/interpolate this pattern to get bigger or smaller transformers. Set the max context length however you wish, depending on the problem: this should be the max number of tokens that matter to predict the next token. E.g. Llama 2 uses 2048. Next, you want the _total_ batch size per update (printed by the script as "tokens per iteration will be:") to be somewhere around 100K tokens for medium-sized applications. For tiny applications it could be lower, for large training (e.g. GPTs/LLamas) it is usually ~0.5M, or even more. You get there by first maxing out the batch_size to whatever your system allows (e.g. mine was 16 in a recent run because after that my GPU runs out of memory), and then you want to increase gradient_accumulation_steps to be as high as necessary to reach the total batch size of ~100K. Finally, you want to tune your learning_rate (LR). You want this to be as high as your training allows. Very small networks can get away with a large LR (e.g. 1e-3 or even higher). Large networks need lower LRs. 3e-4 is a safe choice in most medium-sized applications, but can be too low for small networks, so try to increase it! Finally, max_iters is the length of training. Play with different settings. I mostly only ever tune these parameters and leave most of the others unchanged. Here is an example of how I trained the 110M model, which I don't think is anywhere near optimal, but looked sensible to me: dim 768, n_layers 12, n_heads 12 (so size of each head is 768 / 12 = 64 channels), seq len of 1024, batch size 16 (this is the most that fit my A100 40GB GPU), gradient_accumulation_steps = 8 was needed to get total tokens batch size to be 16 batch size * 1024 tokens in sequence * 8 grad_accum = 131,072 tokens per update. Good. Learning rate 4e-4 (probably a little too low). max_iters 200K (probably a bit too high). Dropout 0.1, as that usually helps a bit at medium size. That was it. I ran using Distributed Data Parallel (DDP) on 4 GPUs on my cloud machine, training took ~day or so.
Totally understand if you want to skip model training, for simple demo just download one of the pretrained models (see [models](#models) section), e.g.:
```bash
wget https://huggingface.co/karpathy/tinyllamas/resolve/main/stories15M.bin
```
Once we have the model.bin file, we can inference in C. Compile the C code first:
```bash
make run
```
You can now run it simply as
```bash
./run stories15M.bin
```
Watch the tokens stream by, fun! We can also run the PyTorch inference script for a comparison. Download one of the models again from huggingface hub and point the `sample.py` script at it:
```bash
wget https://huggingface.co/karpathy/tinyllamas/resolve/main/stories15M.pt -P out15M
python sample.py --checkpoint=out15M/stories15M.pt
```
Which gives the same results.
## custom tokenizers
In everything above, we've assumed the custom Llama 2 tokenizer with 32,000 tokens. However, in many boutique LLMs, using a vocabulary this big might be overkill. If you have a small application in mind, you might be much better off training your own tokenizer. This can make everything nicer - with smaller vocabs your model has fewer parameters (because the token embedding table is a lot smaller), the inference is faster (because there are fewer tokens to predict), and your average sequence length per example could also get smaller (because the compression is a lot more efficient on your data). So let's see how we train a custom tokenizer.
By default, to pretokenize the tinystories dataset we had to run, in order:
```
python tinystories.py download
python tinystories.py pretokenize
```
The `pretokenize` stage here loads the Llama 2 tokenizer (vocab size 32,000) and uses it to convert the downloaded text into integers, and saves that to file. We now change this as follows, to train an example 4096-token tokenizer:
```
python tinystories.py download
python tinystories.py train_vocab --vocab_size=4096
python tinystories.py pretokenize --vocab_size=4096
```
The `train_vocab` stage will call the `train_vocab.sh` script, which calls the `sentencepiece` library to train the tokenizer, storing it in a new file `data/tok4096.model`. I tried to reproduce as well as I could the settings that (I think) Meta used to train their vocabulary. This uses the Byte Pair Encoding algorithm that starts out with raw utf8 byte sequences of the text data and then iteratively merges the most common consecutive pairs of tokens to form the vocabulary. Inspect the `tinystories.py` file - the custom tokenizers are stored in a special directory structure indexed by the vocab size.
A quick note of interest is that vocab size of 4096 trained specifically on tinystories creates integer sequences with about the same sequence length per example as the default Llama 2 tokenizer of 32000 tokens! This means that our custom, tailored tokenizer is a lot better adapted to our specific text, and can compress it very effectively. So our trained models are smaller and faster.
Now that we have pretokenized the dataset with our custom tokenizer, we can train the model. The training script `train.py` doesn't care about the exact tokens, it only cares about the vocabulary size so it can correctly initialize the model. So when training your model, make sure to pass in
```
python train.py --vocab_source=custom --vocab_size=4096
```
(The defaults are `llama2` and `32000` respectively, which indicates the default Llama 2 tokenizer). This trains the model. Finally we are ready to run inference with our `run.c` script. For that we need two things. Number one, we have to export our tokenizer in the `.bin` format, do that with:
```
python tokenizer.py --tokenizer-model=data/tok4096.model
```
This writes the tokenizer to `data/tok4096.bin`. Now we can run inference, pointing it to this tokenizer using the `-z` flag:
```
./run out/model.bin -z data/tok4096.bin
```
This should print the samples. If you leave out the `-z` flag, it will use the default Llama 2 tokenizer, which would generate a good sequence of integers, but they would get translated using a different vocabulary to text, so it would look like gibberish.
## performance
There are many ways to potentially speed up this code depending on your system. Have a look at the [Makefile](Makefile), which contains a lot of notes. The `make run` command currently uses the `-O3` optimization by default, i.e.:
```bash
gcc -O3 -o run run.c -lm
```
-O3 includes optimizations that are expensive in terms of compile time and memory usage, including vectorization, loop unrolling, and branch prediction.
To get much better performance, try to compile with `make runfast`. This turns on the `-Ofast` flag, which includes additional optimizations that may break compliance with the C/IEEE specifications, in addition to `-O3`. See [the GCC docs](https://gcc.gnu.org/onlinedocs/gcc/Optimize-Options.html) for more information.
Try `-march=native` to compile the program to use the architecture of the machine you're compiling on rather than a more generic CPU. This may enable additional optimizations and hardware-specific tuning such as improved vector instructions/width.
The fastest throughput I've seen so far on my MacBook Air (M1) is with `make runfast`.
You can also experiment with replacing `gcc` with `clang`.
If compiling with gcc, try experimenting with `-funroll-all-loops`, see PR [#183](https://github.com/karpathy/llama2.c/pull/183)
### OpenMP
Big improvements can also be achieved by compiling with OpenMP, which "activates" the `#pragma omp parallel for` inside the matmul and attention, allowing the work in the loops to be split up over multiple processors.
You'll need to install the OpenMP library and the clang compiler first (e.g. `apt install clang libomp-dev` on ubuntu). Then you can compile with `make runomp`, which does:
```bash
clang -Ofast -fopenmp -march=native run.c -lm -o run
```
When you run inference make sure to use OpenMP flags to set the number of threads, e.g.:
```bash
OMP_NUM_THREADS=4 ./run out/model.bin
```
Depending on your system resources you may want to tweak these hyperparameters and use more threads. But more is not always better, usually this is a bit U shaped.
## platforms
On **Windows**, use `build_msvc.bat` in a Visual Studio Command Prompt to build with msvc, or you can use `make win64` to use mingw compiler toolchain from linux or windows to build the windows target. MSVC build will automatically use openmp and max threads appropriate for your CPU unless you set `OMP_NUM_THREADS` env.
On **Centos 7**, **Amazon Linux 2018** use `rungnu` Makefile target: `make rungnu` or `make runompgnu` to use openmp.
On **Mac**, use clang from brew for openmp build. Install clang as `brew install llvm` and use the installed clang binary to compile with openmp: `make runomp CC=/opt/homebrew/opt/llvm/bin/clang`
## tests
You can run tests simply with pytest:
```bash
$ pip install pytest
$ pytest
```
This will currently invoke two tests inside `test_all.py`, which forward the model in both C and Python for 200 steps and check the output against a known good expected output. The tests currently run in only a few seconds, but will have to download and cache the stories260K models in a temporary `test` directory (only ~2MB download).
## ack
I trained the llama2.c storyteller models on a 4X A100 40GB box graciously provided by the excellent [Lambda labs](https://lambdalabs.com/service/gpu-cloud), thank you.
## discord
Figured it's possible to reuse my existing discord channel (that I use for my [zero to hero youtube series](https://karpathy.ai/zero-to-hero.html)), see #llama2c channel on [discord](https://discord.gg/3zy8kqD9Cp), for any quick questions, related discussions, etc.
## contributing
A few words on this repo and the kinds of PRs that are likely to be accepted. What is the goal of this repo? Basically I think there will be a lot of interest in training or finetuning custom micro-LLMs (think ~100M - ~1B params, but let's say up to ~10B params) across a large diversity of applications, and deploying them in edge-adjacent environments (think MCUs, phones, web browsers, laptops, etc.). I'd like this repo to be the simplest, smallest, most hackable repo to support this workflow, both training and inference. In particular, this repo is not a complex framework with 1000 knobs controlling inscrutable code across a nested directory structure of hundreds of files. Instead, I expect most applications will wish to create a fork of this repo and hack it to their specific needs and deployment platforms.
People who care about deployment efficiency above all else should look at [llama.cpp](https://github.com/ggerganov/llama.cpp). This repo still cares about efficiency, but not at the cost of simplicity, readability or portability. Basically, I expect that a lot of people come to this repo because the training code is 2 readable .py files and the inference code is 500 lines of C. So I'd like this to continue to be a kind of simplest "reference implementation" that can be easily hacked in a separate fork into whatever downstream application people are excited about. It shouldn't be full-featured. It shouldn't take 100 different options or settings. It shouldn't be the most efficient. A few examples:
- someone re-ordered two loops to improve data locality for a small efficiency win => instant merge.
- someone added the one line "pragma omp parallel for", which allows you to compile with OpenMP and dramatically speed up the code, or acts as just a comment if you don't compile it that way => instant merge.
- bug fixes and touchups etc. => happy to merge
A few examples of PRs that are not an excellent fit:
- adding more than several #ifdefs all over the place in code. If they are localized / few, might be okay.
- adding a lot of code that is very specific to some specific platform (e.g. MCUs, or some special version of linux or processor). These may be a better fit for forks of the project, and I am very happy to maintain a list of these forks in section below.
- adding hundreds of lines of code to run.c that are only active in specific scenarios or platforms.
If your candidate PRs have elements of these it doesn't mean they won't get merged, it just means they will fall into gray territory. TLDR: I am eager to merge any mostly small, mostly localized, broadly applicable, clean changes that improve the efficiency and portability of the repo, while keeping its hackability and readability. I appreciate all PRs seeking to help me improve the project, thank you! <3
## notable forks
- Rust
- [llama2.rs](https://github.com/gaxler/llama2.rs) by @[gaxler](https://github.com/gaxler): a Rust port of this project
- [llama2.rs](https://github.com/leo-du/llama2.rs) by @[leo-du](https://github.com/leo-du): A Rust port of this project
- [llama2-rs](https://github.com/danielgrittner/llama2-rs) by @[danielgrittner](https://github.com/danielgrittner): a Rust port of this project
- [llama2.rs](https://github.com/lintian06/llama2.rs) by @[lintian06](https://github.com/lintian06): A Rust port of this project
- Go
- [go-llama2](https://github.com/tmc/go-llama2) by @[tmc](https://github.com/tmc): a Go port of this project
- [llama2.go](https://github.com/nikolaydubina/llama2.go) by @[nikolaydubina](https://github.com/nikolaydubina): a Go port of this project
- [llama2.go](https://github.com/haormj/llama2.go) by @[haormj](https://github.com/haormj): a Go port of this project
- [llama2.go](https://github.com/saracen/llama2.go) by @[saracen](https://github.com/saracen): a Go port of this project
- Android
- [llama2.c-android](https://github.com/Manuel030/llama2.c-android): by @[Manuel030](https://github.com/Manuel030): adds Android binaries of this project
- [llama2.c-android-wrapper](https://github.com/celikin/llama2.c-android-wrapper): by @[celikin](https://github.com/celikin): added JNI wrapper, PoC
- C++
- [llama2.cpp](https://github.com/leloykun/llama2.cpp) by @[leloykun](https://github.com/leloykun): a C++ port of this project
- JavaScript
- [llama2.js](https://github.com/epicure/llama2.js) by @[epicure](https://github.com/epicure): a JavaScript port of this project
- [llama2.ts](https://github.com/wizzard0/llama2.ts) by @[oleksandr_now](https://twitter.com/oleksandr_now): a TypeScript port of this project. Full Llama2-7B capable.
- [llama2.c-emscripten](https://github.com/gohai/llama2.c-emscripten) by @[gohai](https://github.com/gohai): Emscripten (JavaScript) port, based on @ggerganov's initial prototype
- Zig
- [llama2.zig](https://github.com/cgbur/llama2.zig) by @[cgbur](https://github.com/cgbur): A Zig port of this project
- [llama2.zig](https://github.com/vodkaslime/llama2.zig) by @[vodkaslime](https://github.com/vodkaslime): a Zig port of this project
- [llama2.zig](https://github.com/clebert/llama2.zig) by @[clebert](https://github.com/clebert): a Zig port of this project
- Julia
- [llama2.jl](https://github.com/juvi21/llama2.jl) by @[juvi21](https://github.com/juvi21): a Julia port of this project
- Scala
- [llama2.scala](https://github.com/jrudolph/llama2.scala) by @[jrudolph](https://github.com/jrudolph): a Scala port of this project
- Java
- [llama2.java](https://github.com/mukel/llama2.java) by @[mukel](https://github.com/mukel): a Java port of this project
- Kotlin
- [llama2.kt](https://github.com/madroidmaq/llama2.kt) by @[madroidmaq](https://github.com/madroidmaq): a Kotlin port of this project
- Python
- [llama2.py](https://github.com/tairov/llama2.py) by @[tairov](https://github.com/tairov): a simple one file pure Python port of this project with zero dependencies
- C#
- [llama2.cs](https://github.com/trrahul/llama2.cs) by @[trrahul](https://github.com/trrahul): a C# port of this project
- WebAssembly
- [icpp-llm](https://github.com/icppWorld/icpp-llm): LLMs for the Internet Computer
- [llama2.c - Llama 2 Everywhere](https://github.com/trholding/llama2.c) by @[trholding](https://github.com/trholding): Standalone, Bootable & Portable Binary Llama 2
- [llama2.c-zh - Bilingual Chinese and English](https://github.com/chenyangMl/llama2.c-zh) by @[chenyangMl](https://github.com/chenyangMl): Expand tokenizer to support training and inference in both Chinese and English
## unsorted todos
- make it easier to add a new dataset with not too much pain
- should calculate freq_cis online in the script run.c instead of loading them
- int4/8 quantization
- export the model in a more sensible output format with a proper header, etc.
- support Llama 2 7B Chat models and tune run.c to Chat UI/UX
- llama2.cu investigate and merge
- (LoRA) finetuning and export of Llama 2 models
## License
MIT
Please refer to the [Original README](/ORIGINAL.md) or the upstream repo for more information on llama2.c.

View File

@ -1 +0,0 @@
cl.exe /fp:fast /Ox /openmp /I. run.c win.c

pubspec.lock (new file, 13 lines):
# Generated by pub
# See https://dart.dev/tools/pub/glossary#lockfile
packages:
args:
dependency: "direct main"
description:
name: args
sha256: eef6c46b622e0494a36c5a12d10d77fb4e855501a91c1b9ef9339326e58f0596
url: "https://pub.dev"
source: hosted
version: "2.4.2"
sdks:
dart: ">=3.1.0 <4.0.0"

pubspec.yaml (new file, 10 lines):
name: llama2.dart
description: A one file implementation of llama2 inference
version: 1.0.0
environment:
sdk: ^3.1.0
# Add regular dependencies here.
dependencies:
args: ^2.4.2
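
With the manifest above, one plausible way to try the port locally (a sketch, not part of the commit) is to run `dart pub get` once and then something like `dart run.dart -c stories15M.bin -n 256 -i "Once upon a time"`. The flags map onto the ArgParser options defined in run.dart below, and the command assumes a stories15M.bin checkpoint and a tokenizer.bin file have already been placed in the project root, matching the defaults in run.dart's main.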

run.c (deleted, 740 lines):
/* Inference for Llama-2 Transformer model in pure C */
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
#include <time.h>
#include <math.h>
#include <string.h>
#include <fcntl.h>
#if defined _WIN32
#include "win.h"
#else
#include <unistd.h>
#include <sys/mman.h>
#endif
// ----------------------------------------------------------------------------
// Transformer and RunState structs, and related memory management
typedef struct {
int dim; // transformer dimension
int hidden_dim; // for ffn layers
int n_layers; // number of layers
int n_heads; // number of query heads
int n_kv_heads; // number of key/value heads (can be < query heads because of multiquery)
int vocab_size; // vocabulary size, usually 256 (byte-level)
int seq_len; // max sequence length
} Config;
typedef struct {
// token embedding table
float* token_embedding_table; // (vocab_size, dim)
// weights for rmsnorms
float* rms_att_weight; // (layer, dim) rmsnorm weights
float* rms_ffn_weight; // (layer, dim)
// weights for matmuls. note dim == n_heads * head_size
float* wq; // (layer, dim, n_heads * head_size)
float* wk; // (layer, dim, n_kv_heads * head_size)
float* wv; // (layer, dim, n_kv_heads * head_size)
float* wo; // (layer, n_heads * head_size, dim)
// weights for ffn
float* w1; // (layer, hidden_dim, dim)
float* w2; // (layer, dim, hidden_dim)
float* w3; // (layer, hidden_dim, dim)
// final rmsnorm
float* rms_final_weight; // (dim,)
// freq_cis for RoPE relative positional embeddings (not used anymore)
float* freq_cis_real; // (seq_len, head_size/2)
float* freq_cis_imag; // (seq_len, head_size/2)
// (optional) classifier weights for the logits, on the last layer
float* wcls;
} TransformerWeights;
typedef struct {
float prob;
int index;
} ProbIndex; // struct used when sorting probabilities during top-p sampling
typedef struct {
// current wave of activations
float *x; // activation at current time stamp (dim,)
float *xb; // same, but inside a residual branch (dim,)
float *xb2; // an additional buffer just for convenience (dim,)
float *hb; // buffer for hidden dimension in the ffn (hidden_dim,)
float *hb2; // buffer for hidden dimension in the ffn (hidden_dim,)
float *q; // query (dim,)
float *k; // key (dim,)
float *v; // value (dim,)
float *att; // buffer for scores/attention values (n_heads, seq_len)
float *logits; // output logits
ProbIndex *probindex; // buffer used in top-p sampling
// kv cache
float* key_cache; // (layer, seq_len, dim)
float* value_cache; // (layer, seq_len, dim)
} RunState;
void malloc_run_state(RunState* s, Config* p) {
// we calloc instead of malloc to keep valgrind happy
int kv_dim = (p->dim * p->n_kv_heads) / p->n_heads;
s->x = calloc(p->dim, sizeof(float));
s->xb = calloc(p->dim, sizeof(float));
s->xb2 = calloc(p->dim, sizeof(float));
s->hb = calloc(p->hidden_dim, sizeof(float));
s->hb2 = calloc(p->hidden_dim, sizeof(float));
s->q = calloc(p->dim, sizeof(float));
s->k = calloc(kv_dim, sizeof(float));
s->v = calloc(kv_dim, sizeof(float));
s->att = calloc(p->n_heads * p->seq_len, sizeof(float));
s->logits = calloc(p->vocab_size, sizeof(float));
s->probindex = calloc(p->vocab_size, sizeof(ProbIndex));
s->key_cache = calloc(p->n_layers * p->seq_len * kv_dim, sizeof(float));
s->value_cache = calloc(p->n_layers * p->seq_len * kv_dim, sizeof(float));
// ensure all mallocs went fine
if (!s->x || !s->xb || !s->xb2 || !s->hb || !s->hb2 || !s->q
|| !s->k || !s->v || !s->att || !s->logits || !s->key_cache
|| !s->value_cache || !s->probindex) {
fprintf(stderr, "malloc failed!\n");
exit(EXIT_FAILURE);
}
}
void free_run_state(RunState* s) {
free(s->x);
free(s->xb);
free(s->xb2);
free(s->hb);
free(s->hb2);
free(s->q);
free(s->k);
free(s->v);
free(s->att);
free(s->logits);
free(s->probindex);
free(s->key_cache);
free(s->value_cache);
}
// ----------------------------------------------------------------------------
// initialization: read from checkpoint
void checkpoint_init_weights(TransformerWeights *w, Config* p, float* ptr, int shared_weights) {
int head_size = p->dim / p->n_heads;
w->token_embedding_table = ptr;
ptr += p->vocab_size * p->dim;
w->rms_att_weight = ptr;
ptr += p->n_layers * p->dim;
w->wq = ptr;
ptr += p->n_layers * p->dim * (p->n_heads * head_size);
w->wk = ptr;
ptr += p->n_layers * p->dim * (p->n_kv_heads * head_size);
w->wv = ptr;
ptr += p->n_layers * p->dim * (p->n_kv_heads * head_size);
w->wo = ptr;
ptr += p->n_layers * (p->n_heads * head_size) * p->dim;
w->rms_ffn_weight = ptr;
ptr += p->n_layers * p->dim;
w->w1 = ptr;
ptr += p->n_layers * p->dim * p->hidden_dim;
w->w2 = ptr;
ptr += p->n_layers * p->hidden_dim * p->dim;
w->w3 = ptr;
ptr += p->n_layers * p->dim * p->hidden_dim;
w->rms_final_weight = ptr;
ptr += p->dim;
w->freq_cis_real = ptr;
ptr += p->seq_len * head_size / 2;
w->freq_cis_imag = ptr;
ptr += p->seq_len * head_size / 2;
w->wcls = shared_weights ? w->token_embedding_table : ptr;
}
// ----------------------------------------------------------------------------
// neural net blocks
void rmsnorm(float* o, float* x, float* weight, int size) {
// calculate sum of squares
float ss = 0.0f;
for (int j = 0; j < size; j++) {
ss += x[j] * x[j];
}
ss /= size;
ss += 1e-5f;
ss = 1.0f / sqrtf(ss);
// normalize and scale
for (int j = 0; j < size; j++) {
o[j] = weight[j] * (ss * x[j]);
}
}
void softmax(float* x, int size) {
// find max value (for numerical stability)
float max_val = x[0];
for (int i = 1; i < size; i++) {
if (x[i] > max_val) {
max_val = x[i];
}
}
// exp and sum
float sum = 0.0f;
for (int i = 0; i < size; i++) {
x[i] = expf(x[i] - max_val);
sum += x[i];
}
// normalize
for (int i = 0; i < size; i++) {
x[i] /= sum;
}
}
void matmul(float* xout, float* x, float* w, int n, int d) {
// W (d,n) @ x (n,) -> xout (d,)
// by far the most time is spent inside this little function
int i;
#pragma omp parallel for private(i)
for (i = 0; i < d; i++) {
float val = 0.0f;
for (int j = 0; j < n; j++) {
val += w[i * n + j] * x[j];
}
xout[i] = val;
}
}
void transformer(int token, int pos, Config* p, RunState* s, TransformerWeights* w) {
// a few convenience variables
float *x = s->x;
int dim = p->dim;
int kv_dim = (p->dim * p->n_kv_heads) / p->n_heads;
int kv_mul = p->n_heads / p->n_kv_heads; // integer multiplier of the kv sharing in multiquery
int hidden_dim = p->hidden_dim;
int head_size = dim / p->n_heads;
// copy the token embedding into x
float* content_row = &(w->token_embedding_table[token * dim]);
memcpy(x, content_row, dim*sizeof(*x));
// forward all the layers
for(int l = 0; l < p->n_layers; l++) {
// attention rmsnorm
rmsnorm(s->xb, x, w->rms_att_weight + l*dim, dim);
// qkv matmuls for this position
matmul(s->q, s->xb, w->wq + l*dim*dim, dim, dim);
matmul(s->k, s->xb, w->wk + l*dim*kv_dim, dim, kv_dim);
matmul(s->v, s->xb, w->wv + l*dim*kv_dim, dim, kv_dim);
// RoPE relative positional encoding: complex-valued rotate q and k in each head
for (int i = 0; i < dim; i+=2) {
int head_dim = i % head_size;
float freq = 1.0f / powf(10000.0f, head_dim / (float)head_size);
float val = pos * freq;
float fcr = cosf(val);
float fci = sinf(val);
int rotn = i < kv_dim ? 2 : 1; // how many vectors? 2 = q & k, 1 = q only
for (int v = 0; v < rotn; v++) {
float* vec = v == 0 ? s->q : s->k; // the vector to rotate (query or key)
float v0 = vec[i];
float v1 = vec[i+1];
vec[i] = v0 * fcr - v1 * fci;
vec[i+1] = v0 * fci + v1 * fcr;
}
}
// save key,value at this time step (pos) to our kv cache
int loff = l * p->seq_len * kv_dim; // kv cache layer offset for convenience
float* key_cache_row = s->key_cache + loff + pos * kv_dim;
float* value_cache_row = s->value_cache + loff + pos * kv_dim;
memcpy(key_cache_row, s->k, kv_dim * sizeof(*key_cache_row));
memcpy(value_cache_row, s->v, kv_dim * sizeof(*value_cache_row));
// multihead attention. iterate over all heads
int h;
#pragma omp parallel for private(h)
for (h = 0; h < p->n_heads; h++) {
// get the query vector for this head
float* q = s->q + h * head_size;
// attention scores for this head
float* att = s->att + h * p->seq_len;
// iterate over all timesteps, including the current one
for (int t = 0; t <= pos; t++) {
// get the key vector for this head and at this timestep
float* k = s->key_cache + loff + t * kv_dim + (h / kv_mul) * head_size;
// calculate the attention score as the dot product of q and k
float score = 0.0f;
for (int i = 0; i < head_size; i++) {
score += q[i] * k[i];
}
score /= sqrtf(head_size);
// save the score to the attention buffer
att[t] = score;
}
// softmax the scores to get attention weights, from 0..pos inclusively
softmax(att, pos + 1);
// weighted sum of the values, store back into xb
float* xb = s->xb + h * head_size;
memset(xb, 0, head_size * sizeof(float));
for (int t = 0; t <= pos; t++) {
// get the value vector for this head and at this timestep
float* v = s->value_cache + loff + t * kv_dim + (h / kv_mul) * head_size;
// get the attention weight for this timestep
float a = att[t];
// accumulate the weighted value into xb
for (int i = 0; i < head_size; i++) {
xb[i] += a * v[i];
}
}
}
// final matmul to get the output of the attention
matmul(s->xb2, s->xb, w->wo + l*dim*dim, dim, dim);
// residual connection back into x
for (int i = 0; i < dim; i++) {
x[i] += s->xb2[i];
}
// ffn rmsnorm
rmsnorm(s->xb, x, w->rms_ffn_weight + l*dim, dim);
// Now for FFN in PyTorch we have: self.w2(F.silu(self.w1(x)) * self.w3(x))
// first calculate self.w1(x) and self.w3(x)
matmul(s->hb, s->xb, w->w1 + l*dim*hidden_dim, dim, hidden_dim);
matmul(s->hb2, s->xb, w->w3 + l*dim*hidden_dim, dim, hidden_dim);
// F.silu; silu(x)=x*σ(x),where σ(x) is the logistic sigmoid
for (int i = 0; i < hidden_dim; i++) {
s->hb[i] = s->hb[i] * (1.0f / (1.0f + expf(-s->hb[i])));
}
// elementwise multiply with w3(x)
for (int i = 0; i < hidden_dim; i++) {
s->hb[i] = s->hb[i] * s->hb2[i];
}
// final matmul to get the output of the ffn
matmul(s->xb, s->hb, w->w2 + l*dim*hidden_dim, hidden_dim, dim);
// residual connection
for (int i = 0; i < dim; i++) {
x[i] += s->xb[i];
}
}
// final rmsnorm
rmsnorm(x, x, w->rms_final_weight, dim);
// classifier into logits
matmul(s->logits, x, w->wcls, p->dim, p->vocab_size);
}
// ----------------------------------------------------------------------------
// byte pair encoding (BPE) tokenizer, encodes strings into tokens so we can prompt
typedef struct {
char *str;
int id;
} TokenIndex;
int compare_tokens(const void *a, const void *b) {
return strcmp(((TokenIndex*)a)->str, ((TokenIndex*)b)->str);
}
int str_lookup(char *str, TokenIndex *sorted_vocab, int vocab_size) {
// efficiently find the perfect match for str in vocab, return its index or -1 if not found
TokenIndex tok = { .str = str }; // acts as the key to search for
TokenIndex *res = bsearch(&tok, sorted_vocab, vocab_size, sizeof(TokenIndex), compare_tokens);
return res != NULL ? res->id : -1;
}
void bpe_encode(char *text, char **vocab, float *vocab_scores, int vocab_size, unsigned int max_token_length, int *tokens, int *n_tokens) {
// sort vocabulary
TokenIndex *sorted_vocab = malloc(vocab_size * sizeof(TokenIndex));
for (int i = 0; i < vocab_size; i++) {
sorted_vocab[i].str = vocab[i];
sorted_vocab[i].id = i;
}
qsort(sorted_vocab, vocab_size, sizeof(TokenIndex), compare_tokens);
// create a temporary buffer that will store merge candidates of always two consecutive tokens
char* str_buffer = malloc((max_token_length*2 +1 +2) * sizeof(char)); // *2 for concat, +1 for null terminator, +2 for UTF8 (in case max_token_length is 1)
size_t str_len = 0;
// add_dummy_prefix is true by default
tokens[0] = str_lookup(" ", sorted_vocab, vocab_size);
*n_tokens = 1; // the number of tokens
// Okay UTF-8 time. This will get messy. Here is the reference from Wikipedia:
// Code point ↔ UTF-8 conversion
// First code point Last code point Byte 1 Byte 2 Byte 3 Byte 4
// U+0000 U+007F 0xxxxxxx
// U+0080 U+07FF 110xxxxx 10xxxxxx
// U+0800 U+FFFF 1110xxxx 10xxxxxx 10xxxxxx
// U+10000 U+10FFFF 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
// process the raw (UTF-8) byte sequence of the input string
for (char *c = text; *c != '\0'; c++) {
// reset buffer if the current byte is ASCII or a leading byte
// 0xC0 is 11000000, so (*c & 0xC0) keeps the first 2 bits and zeros the rest
// 0x80 is 10000000
// in UTF-8, all continuation bytes start with "10" in first two bits
// so in English this is: "if this byte is not a continuation byte"
if ((*c & 0xC0) != 0x80) {
// this byte must be either a leading byte (11...) or an ASCII char (0x...)
// => reset our location, as we're starting a new UTF-8 codepoint
str_len = 0;
}
// append the current byte to the buffer
str_buffer[str_len++] = *c; // ++ is post-increment, incremented after this line
str_buffer[str_len] = '\0';
// while the next character is a continuation byte, continue appending
// but if there are too many of them, just stop to avoid overrunning the str_buffer size.
if ((*(c+1) & 0xC0) == 0x80 && str_len < 4) {
continue;
}
// ok c+1 is not a continuation byte, so we've read in a full codepoint
int id = str_lookup(str_buffer, sorted_vocab, vocab_size);
if (id != -1) {
// we found this codepoint in vocab, add it as a token
tokens[(*n_tokens)++] = id;
} else {
// byte_fallback encoding: just encode each byte as a token
// +3 is here because the first 3 vocab elements are <unk>, <s>, </s>
// so the individual bytes only start at index 3
for (int i=0; i < str_len; i++) {
tokens[(*n_tokens)++] = (unsigned char)str_buffer[i] + 3;
}
}
str_len = 0; // protect against a sequence of stray UTF8 continuation bytes
}
// merge the best consecutive pair each iteration, according to the scores in vocab_scores
while (1) {
float best_score = -1e10;
int best_id = -1;
int best_idx = -1;
for (int i=0; i < (*n_tokens-1); i++) {
// check if we can merge the pair (tokens[i], tokens[i+1])
sprintf(str_buffer, "%s%s", vocab[tokens[i]], vocab[tokens[i+1]]);
int id = str_lookup(str_buffer, sorted_vocab, vocab_size);
if (id != -1 && vocab_scores[id] > best_score) {
// this merge pair exists in vocab! record its score and position
best_score = vocab_scores[id];
best_id = id;
best_idx = i;
}
}
if (best_idx == -1) {
break; // we couldn't find any more pairs to merge, so we're done
}
// merge the consecutive pair (best_idx, best_idx+1) into new token best_id
tokens[best_idx] = best_id;
// delete token at position best_idx+1, shift the entire sequence back 1
for (int i = best_idx+1; i < (*n_tokens-1); i++) {
tokens[i] = tokens[i+1];
}
(*n_tokens)--; // token length decreased
}
free(str_buffer);
free(sorted_vocab);
}
// ----------------------------------------------------------------------------
// utilities: time / rng
long time_in_ms() {
// return time in milliseconds, for benchmarking the model speed
struct timespec time;
clock_gettime(CLOCK_REALTIME, &time);
return time.tv_sec * 1000 + time.tv_nsec / 1000000;
}
unsigned long long rng_seed;
unsigned int random_u32() {
// xorshift rng: https://en.wikipedia.org/wiki/Xorshift#xorshift.2A
rng_seed ^= rng_seed >> 12;
rng_seed ^= rng_seed << 25;
rng_seed ^= rng_seed >> 27;
return (rng_seed * 0x2545F4914F6CDD1Dull) >> 32;
}
float random_f32() { // random float32 in [0,1)
return (random_u32() >> 8) / 16777216.0f;
}
// ----------------------------------------------------------------------------
// sampling can be done in a few ways: greedy argmax, sampling, top-p sampling
int argmax(float* probabilities, int n) {
// return the index that has the highest probability
int max_i = 0;
float max_p = probabilities[0];
for (int i = 1; i < n; i++) {
if (probabilities[i] > max_p) {
max_i = i;
max_p = probabilities[i];
}
}
return max_i;
}
int sample(float* probabilities, int n) {
// sample index from probabilities (they must sum to 1!)
float r = random_f32();
float cdf = 0.0f;
for (int i = 0; i < n; i++) {
cdf += probabilities[i];
if (r < cdf) {
return i;
}
}
return n - 1; // in case of rounding errors
}
int compare(const void* a, const void* b) {
ProbIndex* a_ = (ProbIndex*) a;
ProbIndex* b_ = (ProbIndex*) b;
if (a_->prob > b_->prob) return -1;
if (a_->prob < b_->prob) return 1;
return 0;
}
int sample_topp(float* probabilities, int n, float topp, ProbIndex* probindex) {
// top-p sampling (or "nucleus sampling") samples from the smallest set of
// tokens that exceed probability topp. This way we never sample tokens that
// have very low probabilities and are less likely to go "off the rails".
int n0 = 0;
// quicksort indices in descending order of probabilities
// values smaller than (1 - topp) / (n - 1) cannot be part of the result
// so for efficiency we crop these out as candidates before sorting
const float cutoff = (1.0f - topp) / (n - 1);
for (int i = 0; i < n; i++) {
if (probabilities[i] >= cutoff) {
probindex[n0].index = i;
probindex[n0].prob = probabilities[i];
n0++;
}
}
qsort(probindex, n0, sizeof(ProbIndex), compare);
// truncate the list where cumulative probability exceeds topp
float cumulative_prob = 0.0f;
int last_idx = n0 - 1; // in case of rounding errors consider all elements
for (int i = 0; i < n0; i++) {
cumulative_prob += probindex[i].prob;
if (cumulative_prob > topp) {
last_idx = i;
break; // we've exceeded topp by including last_idx
}
}
// sample from the truncated list
float r = random_f32() * cumulative_prob;
float cdf = 0.0f;
for (int i = 0; i <= last_idx; i++) {
cdf += probindex[i].prob;
if (r < cdf) {
return probindex[i].index;
}
}
return probindex[last_idx].index; // in case of rounding errors
}
// ----------------------------------------------------------------------------
// int main
void error_usage() {
fprintf(stderr, "Usage: run <checkpoint> [options]\n");
fprintf(stderr, "Example: run model.bin -n 256 -i \"Once upon a time\"\n");
fprintf(stderr, "Options:\n");
fprintf(stderr, " -t <float> temperature, default 1.0\n");
fprintf(stderr, " -p <float> p value in top-p (nucleus) sampling. default 0.9\n");
fprintf(stderr, " -s <int> random seed, default time(NULL)\n");
fprintf(stderr, " -n <int> number of steps to run for, default 256. 0 = max_seq_len\n");
fprintf(stderr, " -i <string> input prompt\n");
fprintf(stderr, " -z <string> optional path to custom tokenizer\n");
exit(EXIT_FAILURE);
}
int main(int argc, char *argv[]) {
// default inits
char *checkpoint = NULL; // e.g. out/model.bin
char *tokenizer = "tokenizer.bin";
float temperature = 1.0f; // 0.0 = greedy deterministic. 1.0 = original. don't set higher
float topp = 0.9f; // top-p in nucleus sampling. 1.0 = off. 0.9 works well, but slower
rng_seed = 0; // seed rng with time by default
int steps = 256; // number of steps to run for
char *prompt = NULL; // prompt string
// poor man's C argparse so we can override the defaults above from the command line
if (argc >= 2) { checkpoint = argv[1]; } else { error_usage(); }
for (int i = 2; i < argc; i+=2) {
// do some basic validation
if (i + 1 >= argc) { error_usage(); } // must have arg after flag
if (argv[i][0] != '-') { error_usage(); } // must start with dash
if (strlen(argv[i]) != 2) { error_usage(); } // must be -x (one dash, one letter)
// read in the args
if (argv[i][1] == 't') { temperature = atof(argv[i + 1]); }
else if (argv[i][1] == 'p') { topp = atof(argv[i + 1]); }
else if (argv[i][1] == 's') { rng_seed = atoi(argv[i + 1]); }
else if (argv[i][1] == 'n') { steps = atoi(argv[i + 1]); }
else if (argv[i][1] == 'i') { prompt = argv[i + 1]; }
else if (argv[i][1] == 'z') { tokenizer = argv[i + 1]; }
else { error_usage(); }
}
if(rng_seed == 0) { rng_seed = (unsigned int)time(NULL);}
// read in the model.bin file
Config config;
TransformerWeights weights;
int fd = 0; // file descriptor for memory mapping
float* data = NULL; // memory mapped data pointer
ssize_t file_size; // size of the checkpoint file in bytes
{
FILE *file = fopen(checkpoint, "rb");
if (!file) { fprintf(stderr, "Couldn't open file %s\n", checkpoint); return 1; }
// read in the config header
if (fread(&config, sizeof(Config), 1, file) != 1) { return 1; }
// negative vocab size is hacky way of signaling unshared weights. bit yikes.
int shared_weights = config.vocab_size > 0 ? 1 : 0;
config.vocab_size = abs(config.vocab_size);
// figure out the file size
fseek(file, 0, SEEK_END); // move file pointer to end of file
file_size = ftell(file); // get the file size, in bytes
fclose(file);
// memory map the Transformer weights into the data pointer
fd = open(checkpoint, O_RDONLY); // open in read only mode
if (fd == -1) { fprintf(stderr, "open failed!\n"); return 1; }
data = mmap(NULL, file_size, PROT_READ, MAP_PRIVATE, fd, 0);
if (data == MAP_FAILED) { fprintf(stderr, "mmap failed!\n"); return 1; }
float* weights_ptr = data + sizeof(Config)/sizeof(float);
checkpoint_init_weights(&weights, &config, weights_ptr, shared_weights);
}
// right now we cannot run for more than config.seq_len steps
if (steps <= 0 || steps > config.seq_len) { steps = config.seq_len; }
// read in the tokenizer .bin file
char** vocab = (char**)malloc(config.vocab_size * sizeof(char*));
float* vocab_scores = (float*)malloc(config.vocab_size * sizeof(float));
unsigned int max_token_length;
{
FILE *file = fopen(tokenizer, "rb");
if (!file) { fprintf(stderr, "couldn't load %s\n", tokenizer); return 1; }
if (fread(&max_token_length, sizeof(int), 1, file) != 1) { fprintf(stderr, "failed read\n"); return 1; }
int len;
for (int i = 0; i < config.vocab_size; i++) {
if (fread(vocab_scores + i, sizeof(float), 1, file) != 1) { fprintf(stderr, "failed read\n"); return 1;}
if (fread(&len, sizeof(int), 1, file) != 1) { fprintf(stderr, "failed read\n"); return 1; }
vocab[i] = (char *)malloc(len + 1);
if (fread(vocab[i], len, 1, file) != 1) { fprintf(stderr, "failed read\n"); return 1; }
vocab[i][len] = '\0'; // add the string terminating token
}
fclose(file);
}
// create and init the application RunState
RunState state;
malloc_run_state(&state, &config);
// process the prompt, if any
int *prompt_tokens = NULL;
int num_prompt_tokens = 0;
if (prompt != NULL) {
prompt_tokens = (int*)malloc((strlen(prompt)+1) * sizeof(int));
bpe_encode(prompt, vocab, vocab_scores, config.vocab_size, max_token_length, prompt_tokens, &num_prompt_tokens);
}
// start the main loop
long start = 0; // used to time our code, only initialized after first iteration
int next; // will store the next token in the sequence
int token = 1; // init with token 1 (=BOS), as done in Llama-2 sentencepiece tokenizer
int pos = 0; // position in the sequence
while (pos < steps) {
// forward the transformer to get logits for the next token
transformer(token, pos, &config, &state, &weights);
// advance the state machine
if(pos < num_prompt_tokens) {
// if we are still processing the input prompt, force the next prompt token
next = prompt_tokens[pos];
} else {
// sample the next token
if (temperature == 0.0f) {
// greedy argmax sampling: take the token with the highest probability
next = argmax(state.logits, config.vocab_size);
} else {
// apply the temperature to the logits
for (int q=0; q<config.vocab_size; q++) { state.logits[q] /= temperature; }
// apply softmax to the logits to get the probabilities for next token
softmax(state.logits, config.vocab_size);
// we sample from this distribution to get the next token
if (topp <= 0 || topp >= 1) {
// simply sample from the predicted probability distribution
next = sample(state.logits, config.vocab_size);
} else {
// top-p (nucleus) sampling, clamping the least likely tokens to zero
next = sample_topp(state.logits, config.vocab_size, topp, state.probindex);
}
}
}
pos++;
// data-dependent terminating condition: the BOS (1) token delimits sequences
if (next == 1) { break; }
// following BOS (1) token, sentencepiece decoder strips any leading whitespace (see PR #89)
char *token_str = (token == 1 && vocab[next][0] == ' ') ? vocab[next]+1 : vocab[next];
// careful, some tokens designate raw bytes, and look like e.g. '<0x01>'
unsigned char byte_val;
if (sscanf(token_str, "<0x%02hhX>", &byte_val) == 1) {
// ok this token is a raw byte token; be careful to only print printable chars or whitespace
// some of the other bytes can be various control codes, backspace, etc. => skip
if (isprint(byte_val) || isspace(byte_val)) {
char byte_piece[2];
byte_piece[0] = byte_val;
byte_piece[1] = '\0';
printf("%s", byte_piece);
}
} else {
printf("%s", token_str);
}
fflush(stdout);
token = next;
// init the timer here because the first iteration can be slower
if (start == 0) { start = time_in_ms(); }
}
printf("\n");
// report achieved tok/s (pos-1 because the timer starts after first iteration)
if (pos > 1) {
long end = time_in_ms();
fprintf(stderr, "achieved tok/s: %f\n", (pos-1) / (double)(end-start)*1000);
}
// memory and file handles cleanup
free_run_state(&state);
for (int i = 0; i < config.vocab_size; i++) { free(vocab[i]); }
free(vocab);
free(vocab_scores);
if (prompt_tokens != NULL) free(prompt_tokens);
if (data != MAP_FAILED) munmap(data, file_size);
if (fd != -1) close(fd);
return 0;
}

run.dart (new file, 799 lines):
import 'dart:convert';
import 'dart:developer';
import 'dart:io';
import 'dart:math';
import 'dart:typed_data';
import 'package:args/args.dart';
class Config {
// transformer dimension
late int dim;
// for ffn layers
late int hidden_dim;
// number of layers
late int n_layers;
// number of query heads
late int n_heads;
// number of key/value heads (can be < query heads because of multiquery)
late int n_kv_heads;
// vocabulary size, usually 256 (byte-level)
late int vocab_size;
// max sequence length
late int seq_len;
@override
String toString() {
return "Config(dim: $dim, hidden_dim: $hidden_dim, n_layers: $n_layers, n_heads: $n_heads, n_kv_heads: $n_kv_heads, vocab_size: $vocab_size, seq_len: $seq_len)";
}
}
const configByteSize = 7 * 4;
// We are using 32-bit precision floats here
class TransformerWeights {
// token embedding table
late Float32List token_embedding_table; // (vocab_size, dim)
// weights for rmsnorms
late Float32List rms_att_weight; // (layer, dim) rmsnorm weights
late Float32List rms_ffn_weight; // (layer, dim)
// weights for matmuls. note dim == n_heads * head_size
late Float32List wq; // (layer, dim, n_heads * head_size)
late Float32List wk; // (layer, dim, n_kv_heads * head_size)
late Float32List wv; // (layer, dim, n_kv_heads * head_size)
late Float32List wo; // (layer, n_heads * head_size, dim)
// weights for ffn
late Float32List w1; // (layer, hidden_dim, dim)
late Float32List w2; // (layer, dim, hidden_dim)
late Float32List w3; // (layer, hidden_dim, dim)
// final rmsnorm
late Float32List rms_final_weight; // (dim,)
// freq_cis for RoPE relative positional embeddings
late Float32List freq_cis_real; // (seq_len, head_size/2)
late Float32List freq_cis_imag; // (seq_len, head_size/2)
// (optional) classifier weights for the logits, on the last layer
late Float32List wcls;
}
class ProbIndex {
double prob;
int index;
ProbIndex(this.prob, this.index);
}
class TokenIndex {
String str;
int id;
TokenIndex(this.str, this.id);
}
class RunState {
// current wave of activations
late Float32List x; // activation at current time stamp (dim,)
late Float32List xb; // same, but inside a residual branch (dim,)
late Float32List xb2; // an additional buffer just for convenience (dim,)
late Float32List hb; // buffer for hidden dimension in the ffn (hidden_dim,)
late Float32List hb2; // buffer for hidden dimension in the ffn (hidden_dim,)
late Float32List q; // query (dim,)
late Float32List k; // key (dim,)
late Float32List v; // value (dim,)
late Float32List att; // buffer for scores/attention values (n_heads, seq_len)
late Float32List logits; // output logits
late List<ProbIndex> probindex; // buffer used in top-p sampling
// kv cache
late Float32List key_cache; // (layer, seq_len, dim)
late Float32List value_cache; // (layer, seq_len, dim)
}
initialize_run_state(RunState s, Config config) {
// Float32List is zero-initialized, which matches the calloc used in run.c
int kv_dim = (config.dim * config.n_kv_heads) ~/ config.n_heads;
s.x = Float32List(config.dim);
s.xb = Float32List(config.dim);
s.xb2 = Float32List(config.dim);
s.hb = Float32List(config.hidden_dim);
s.hb2 = Float32List(config.hidden_dim);
s.q = Float32List(config.dim);
s.k = Float32List(kv_dim);
s.v = Float32List(kv_dim);
s.att = Float32List(config.n_heads * config.seq_len);
s.logits = Float32List(config.vocab_size);
s.probindex = [];
s.key_cache = Float32List(config.n_layers * config.seq_len * kv_dim);
s.value_cache = Float32List(config.n_layers * config.seq_len * kv_dim);
}
class Tokenizer {
List<String> vocab;
List<double> vocab_scores;
Tokenizer(
this.vocab,
this.vocab_scores,
);
bpe_encode(String text, List<int> tokens, int n_tokens) {
tokens = [];
// First pass, combine raw tokens
text.runes.forEach((element) {
String decoded = utf8.decode([element]);
if (vocab.contains(decoded)) {
tokens.add(vocab.indexOf(decoded));
}
});
// Second pass, combine bpe tokens
while (true) {
double best_score = -1e10;
int best_id = -1;
int best_index = -1;
for (int i = 0; i < tokens.length - 1; i++) {
String newStr = vocab[tokens[i]] + vocab[tokens[i + 1]];
int newStrIndex = vocab.indexOf(newStr);
if (newStrIndex != -1 && vocab_scores[newStrIndex] > best_score) {
best_score = vocab_scores[newStrIndex];
best_id = newStrIndex;
best_index = i;
}
}
if (best_index == -1) break;
tokens[best_index] = best_id;
tokens.removeAt(best_index + 1);
}
return tokens;
}
}
// ----------------------------------------------------------------------------
// sampling can be done in a few ways: greedy argmax, sampling, top-p sampling
int argmax(Float32List probabilities) {
// return the index that has the highest probability
int max_i = 0;
double max_p = probabilities[0];
for (int i = 1; i < probabilities.length; i++) {
if (probabilities[i] > max_p) {
max_i = i;
max_p = probabilities[i];
}
}
return max_i;
}
int sample(Float32List probabilities) {
// sample index from probabilities (they must sum to 1!)
double r = Random().nextDouble();
double cdf = 0.0;
for (int i = 0; i < probabilities.length; i++) {
cdf += probabilities[i];
if (r < cdf) return i;
}
return probabilities.length - 1; // in case of rounding errors
}
int sample_topp(Float32List probabilities, double topp) {
// top-p sampling (or "nucleus sampling") samples from the smallest set of
// tokens that exceed probability topp. This way we never sample tokens that
// have very low probabilities and are less likely to go "off the rails".
// quicksort indices in descending order of probabilities
// values smaller than (1 - topp) / (n - 1) cannot be part of the result
// In the original llama.c they crop these out as candidates before sorting
List<ProbIndex> probindex = [];
double cutoff = (1.0 - topp) / (probabilities.length - 1);
for (int i = 0; i < probabilities.length; i++) {
if (probabilities[i] >= cutoff) {
probindex.add(ProbIndex(probabilities[i], i));
}
}
probindex.sort((a, b) => b.prob.compareTo(a.prob));
// truncate the list where cumulative probability exceeds topp
double cumulative_prob = 0.0;
int last_idx =
probindex.length - 1; // in case of rounding errors consider all elements
for (int i = 0; i < probindex.length; i++) {
cumulative_prob += probindex[i].prob;
if (cumulative_prob > topp) {
last_idx = i;
break; // we've exceeded topp by including last_idx
}
}
probindex.removeRange(last_idx + 1, probindex.length);
// sample from the truncated list
double r = new Random().nextDouble() * cumulative_prob;
double cdf = 0.0;
for (int i = 0; i <= last_idx; i++) {
cdf += probindex[i].prob;
if (r < cdf) {
return probindex[i].index;
}
}
return probindex[last_idx].index; // in case of rounding errors
}
rmsnorm(Float32List out, Float32List x, Float32List weight) {
assert(out.length == x.length);
assert(x.length == weight.length);
// calculate sum of squares
double ss = 0.0;
x.forEach((element) {
ss += element * element;
});
ss /= x.length;
ss += 1e-5;
ss = 1.0 / sqrt(ss); // sqr mean sum of squares
// normalize and scale
for (int j = 0; j < x.length; j++) {
out[j] = weight[j] * (ss * x[j]);
}
}
void softmax(Float32List x, int size) {
// find max value (for numerical stability)
double max_val = x[0];
for (int i = 1; i < size; i++) {
if (x[i] > max_val) {
max_val = x[i];
}
}
// exp and sum
double sum = 0.0;
for (int i = 0; i < size; i++) {
x[i] = exp(x[i] - max_val);
sum += x[i];
}
// normalize
for (int i = 0; i < size; i++) x[i] /= sum;
}
void matmul(Float32List out, Float32List x, Float32List w, int n, int d) {
assert(out.length == d);
assert(x.length == n);
assert(w.length == n * d);
// W (d,n) @ x (n,) -> xout (d,)
// by far the most time is spent inside this little function
for (int i = 0; i < d; i++) {
double val = 0.0;
for (int j = 0; j < n; j++) {
val += w[i * n + j] * x[j];
}
out[i] = val;
}
}
transformer(int token, int pos, Config config, RunState state,
TransformerWeights weights) {
int dim = config.dim;
int kv_dim = config.dim * config.n_kv_heads ~/ config.n_heads;
int kv_mul = config.n_heads ~/ config.n_kv_heads; // integer multiplier of the kv sharing in multiquery
int hidden_dim = config.hidden_dim;
int head_size = config.dim ~/ config.n_heads;
// copy the token embedding into x
Float32List current_row = Float32List.sublistView(
weights.token_embedding_table,
token * config.dim,
(token + 1) * config.dim);
for (int i = 0; i < config.dim; i++) state.x[i] = current_row[i];
// Note: divide by 2 here because RoPE parameters repeat every 2 dimensions
Float32List freq_cis_real_row = weights.freq_cis_real
.sublist(pos * head_size ~/ 2, (pos + 1) * head_size ~/ 2);
Float32List freq_cis_imag_row = weights.freq_cis_imag
.sublist(pos * head_size ~/ 2, (pos + 1) * head_size ~/ 2);
// forward all the layers
for (int l = 0; l < config.n_layers; l++) {
rmsnorm(
state.xb,
state.x,
Float32List.sublistView(
weights.rms_att_weight, l * dim, (l + 1) * dim));
// qkv matmuls for this position
// NOTE(yiming): this looks like a place for lots of parallel work :thinking:
// x = x @ wq, wq with dim * dim
matmul(
state.q,
state.xb,
Float32List.sublistView(weights.wq, l * dim * dim, (l + 1) * dim * dim),
dim,
dim);
// x = x @ wk, wk with dim * kv_dim
matmul(
state.k,
state.xb,
Float32List.sublistView(
weights.wk, l * dim * kv_dim, (l + 1) * dim * kv_dim),
dim,
kv_dim);
// x = x @ wv, wv with dim * kv_dim
matmul(
state.v,
state.xb,
Float32List.sublistView(
weights.wv, l * dim * kv_dim, (l + 1) * dim * kv_dim),
dim,
kv_dim);
// RoPE relative positional encoding: complex-valued rotate q and k by freq_cis in each head
// https://arxiv.org/pdf/2104.09864v4.pdf
// We are just reusing the loop for k and q distance calculation
for (int v = 0; v < 2; v++) {
Float32List vec =
v == 0 ? state.q : state.k; // the vector to rotate (query or key)
int vec_size = v == 0 ? dim : kv_dim; // the size of the vector
// We are only rotating in a group of 2
for (int i = 0; i < vec_size; i += 2) {
double v0 = vec[i];
double v1 = vec[i + 1];
double fcr = freq_cis_real_row[(i % head_size) ~/ 2];
double fci = freq_cis_imag_row[(i % head_size) ~/ 2];
// See the RoPE paper for this section
// 3.4.2 Computationally efficient realization of rotary matrix multiplication
// x1' = x1 * cos(mθ) - x2 * sin(mθ)
vec[i] = v0 * fcr - v1 * fci;
// x2' = x1 * sin(mθ) + x2 * cos(mθ)
vec[i + 1] = v0 * fci + v1 * fcr;
}
}
// save key,value at this time step (pos) to our kv cache
// offset by n_layer * seq_len * kv_dim
int loff =
l * config.seq_len * kv_dim; // kv cache layer offset for convenience
// key cache = loff + pos * kv_dim
int key_cache_row_offset = loff + pos * kv_dim;
// save k,v into kv cache
for (int i = 0; i < state.k.length; i++)
state.key_cache[key_cache_row_offset + i] = state.k[i];
for (int i = 0; i < state.v.length; i++)
state.value_cache[key_cache_row_offset + i] = state.v[i];
// multihead attention. iterate over all heads
for (int h = 0; h < config.n_heads; h++) {
// get the query vector for this head
Float32List q =
Float32List.sublistView(state.q, h * head_size, (h + 1) * head_size);
// attention scores for this head
Float32List att = Float32List.sublistView(
state.att, h * config.seq_len, (h + 1) * config.seq_len);
// iterate over all timesteps, including the current one
for (int t = 0; t <= pos; t++) {
// get the key vector for this head and at this timestep
// kv_mul query heads share each kv head (kv_mul is just 1 when n_heads == n_kv_heads)
int key_cache_offset = loff +
t * kv_dim +
(h ~/ kv_mul) *
head_size; // still offset by head_size within the cached row (kv_dim = n_kv_heads * head_size)
// with multiquery, multiple query heads can share the same key_cache entry
Float32List k = Float32List.sublistView(
state.key_cache, key_cache_offset, key_cache_offset + kv_dim);
// calculate the attention score as the dot product of q and k
double score = 0.0;
for (int ll = 0; ll < head_size; ll++) {
score += q[ll] * k[ll];
}
// TODO(yiming): reread the paper to understand better
score /= sqrt(head_size);
// save the score to the attention buffer
att[t] = score;
}
// softmax the scores to get attention weights, from 0..pos inclusively
// soft max happens before attention * v
// softmax is done on the entire attention
// I think there's some trick in pytorch for this
softmax(att, pos + 1);
// Now we have calculated the weighted attention vector, it's time to apply attention value
// weighted sum of the values, store back into xb
// Clear out xb for the next stage
for (int i = 0; i < head_size; i++) {
state.xb[h * head_size + i] = 0.0;
}
Float32List xb_off =
Float32List.sublistView(state.xb, h * head_size, (h + 1) * head_size);
for (int t = 0; t <= pos; t++) {
// get the value vector for this head and at this timestep
int v_cache_offset = loff + t * kv_dim + (h ~/ kv_mul) * head_size;
Float32List v = Float32List.sublistView(
state.value_cache, v_cache_offset, v_cache_offset + head_size);
// get the attention weight for this timestep
double a = att[t];
// accumulate the weighted value into xb
for (int i = 0; i < head_size; i++) {
xb_off[i] += a * v[i];
}
}
}
// final matmul to get the output of the attention
// The "Aggregate output" of all the attention heads
matmul(
state.xb2,
state.xb,
Float32List.sublistView(weights.wo, l * dim * dim, (l + 1) * dim * dim),
dim,
dim);
// residual connection back into x
for (int i = 0; i < dim; i++) {
state.x[i] += state.xb2[i];
}
// ffn rmsnorm
rmsnorm(
state.xb,
state.x,
Float32List.sublistView(
weights.rms_ffn_weight, l * dim, (l + 1) * dim));
// Now for FFN in PyTorch we have: self.w2(F.silu(self.w1(x)) * self.w3(x))
// first calculate self.w1(x) and self.w3(x)
matmul(
state.hb,
state.xb,
Float32List.sublistView(
weights.w1, (l * dim * hidden_dim), (l + 1) * dim * hidden_dim),
dim,
hidden_dim);
matmul(
state.hb2,
state.xb,
Float32List.sublistView(
weights.w3, (l * dim * hidden_dim), (l + 1) * dim * hidden_dim),
dim,
hidden_dim);
// F.silu; silu(x)=x*σ(x),where σ(x) is the logistic sigmoid
for (int i = 0; i < hidden_dim; i++) {
state.hb[i] = state.hb[i] * (1.0 / (1.0 + exp(-state.hb[i])));
}
// elementwise multiply with w3(x)
// F.silu(self.w1(x)) * self.w3(x)
for (int i = 0; i < hidden_dim; i++) {
state.hb[i] = state.hb[i] * state.hb2[i];
}
// final matmul to get the output of the ffn
// here we are reusing xb again!
// x = self.w2(F.silu(self.w1(x)) * self.w3(x))
matmul(
state.xb,
state.hb,
Float32List.sublistView(
weights.w2, l * dim * hidden_dim, (l + 1) * dim * hidden_dim),
hidden_dim,
dim);
// residual connection
for (int i = 0; i < dim; i++) {
state.x[i] += state.xb[i];
}
}
// final rmsnorm
rmsnorm(state.x, state.x, weights.rms_final_weight);
// classifier into logits
matmul(state.logits, state.x, weights.wcls, config.dim, config.vocab_size);
}
void main(List<String> args) {
String? checkpoint_path = "./stories15M.bin";
String tokenizer_path = "tokenizer.bin";
double temperature = 1.0;
double top_p = 0.9;
int rng_seed = 0; // seed rng with time by default
int steps = 256; // number of steps to run for
String? prompt = " One";
var parser = ArgParser();
parser.addOption(
'checkpoint_path',
abbr: 'c',
callback: (value) => checkpoint_path = value,
);
parser.addOption('temp',
abbr: 't',
callback: (value) =>
{if (value != null) temperature = double.parse(value)},
defaultsTo: "1.0");
parser.addOption('topp',
abbr: 'p',
callback: (value) => {if (value != null) top_p = double.parse(value)},
defaultsTo: "0.9");
parser.addOption('seed',
abbr: 's',
callback: (value) => {if (value != null) rng_seed = int.parse(value)},
defaultsTo: "0");
parser.addOption('steps',
abbr: 'n',
callback: (value) => {if (value != null) steps = int.parse(value)},
defaultsTo: "256");
parser.addOption('prompt',
abbr: 'i',
callback: (value) => {if (value != null) prompt = value},
defaultsTo: "");
parser.addOption('tokenizer_path',
abbr: 'z',
callback: (value) => {if (value != null) tokenizer_path = value});
parser.parse(args);
if (rng_seed == 0) rng_seed = Timeline.now;
print("===========llama2.dart===========");
print("check_point_path: $checkpoint_path");
print("tokenizer_path: $tokenizer_path");
print("temperature: $temperature");
print("top_p: $top_p");
print("rng_seed: $rng_seed");
print("steps: $steps");
print("prompt: $prompt");
var config = Config();
var weights = TransformerWeights();
if (checkpoint_path == null) return print("No checkpoint path provided");
print("========= Reading Weights =========");
// Read Weights and Config from file
{
Uint8List checkpoint_bytes = File(checkpoint_path!).readAsBytesSync();
print("Read ${checkpoint_bytes.length} bytes from $checkpoint_path");
{
// Reading Config
Uint8List config_bytes = checkpoint_bytes.sublist(0, configByteSize);
Int32List config_ints = config_bytes.buffer.asInt32List();
config.dim = config_ints[0];
config.hidden_dim = config_ints[1];
config.n_layers = config_ints[2];
config.n_heads = config_ints[3];
config.n_kv_heads = config_ints[4];
config.vocab_size = config_ints[5];
config.seq_len = config_ints[6];
print("Read Config: $config");
}
{
bool shared_weights = config.vocab_size > 0;
// negative vocab size is hacky way of signaling unshared weights. bit yikes.
config.vocab_size = config.vocab_size.abs();
// Load the weights
int offset = 0;
Float32List weight_floats =
checkpoint_bytes.buffer.asFloat32List(configByteSize);
int head_size = config.dim ~/ config.n_heads;
weights.token_embedding_table = weight_floats.sublist(
offset, offset + config.vocab_size * config.dim);
offset += config.vocab_size * config.dim;
print(
"Read ${weights.token_embedding_table.lengthInBytes} bytes into token_embedding_table");
weights.rms_att_weight =
weight_floats.sublist(offset, offset + config.n_layers * config.dim);
offset += config.n_layers * config.dim;
print(
"Read ${weights.rms_att_weight.lengthInBytes} bytes into rms_att_weight");
weights.wq = weight_floats.sublist(offset,
offset + config.n_layers * config.dim * config.n_heads * head_size);
offset += config.n_layers * config.dim * config.n_heads * head_size;
print("Read ${weights.wq.lengthInBytes} bytes into wq");
weights.wk = weight_floats.sublist(
offset,
offset +
config.n_layers * config.dim * config.n_kv_heads * head_size);
offset += config.n_layers * config.dim * config.n_kv_heads * head_size;
print("Read ${weights.wk.lengthInBytes} bytes into wk");
weights.wv = weight_floats.sublist(
offset,
offset +
config.n_layers * config.dim * config.n_kv_heads * head_size);
offset += config.n_layers * config.dim * config.n_kv_heads * head_size;
print("Read ${weights.wv.lengthInBytes} bytes into wv");
weights.wo = weight_floats.sublist(offset,
offset + config.n_layers * config.n_heads * head_size * config.dim);
offset += config.n_layers * config.n_heads * head_size * config.dim;
print("Read ${weights.wo.lengthInBytes} bytes into wo");
weights.rms_ffn_weight =
weight_floats.sublist(offset, offset + config.n_layers * config.dim);
offset += config.n_layers * config.dim;
print(
"Read ${weights.rms_ffn_weight.lengthInBytes} bytes into rms_ffn_weight");
weights.w1 = weight_floats.sublist(
offset, offset + config.n_layers * config.hidden_dim * config.dim);
offset += config.n_layers * config.hidden_dim * config.dim;
print("Read ${weights.w1.lengthInBytes} bytes into w1");
weights.w2 = weight_floats.sublist(
offset, offset + config.n_layers * config.dim * config.hidden_dim);
offset += config.n_layers * config.dim * config.hidden_dim;
print("Read ${weights.w2.lengthInBytes} bytes into w2");
weights.w3 = weight_floats.sublist(
offset, offset + config.n_layers * config.hidden_dim * config.dim);
offset += config.n_layers * config.hidden_dim * config.dim;
print("Read ${weights.w3.lengthInBytes} bytes into w3");
weights.rms_final_weight =
weight_floats.sublist(offset, offset + config.dim);
offset += config.dim;
print(
"Read ${weights.rms_final_weight.lengthInBytes} bytes into rms_final_weight");
weights.freq_cis_real = weight_floats.sublist(
offset, offset + config.seq_len * head_size ~/ 2);
offset += config.seq_len * head_size ~/ 2;
print(
"Read ${weights.freq_cis_real.lengthInBytes} bytes into freq_cis_real");
weights.freq_cis_imag = weight_floats.sublist(
offset, offset + config.seq_len * head_size ~/ 2);
offset += config.seq_len * head_size ~/ 2;
print(
"Read ${weights.freq_cis_imag.lengthInBytes} bytes into freq_cis_imag");
if (shared_weights) {
print("Read shared weights into wcls");
weights.wcls = weights.token_embedding_table;
} else {
weights.wcls = weight_floats.sublist(
offset, offset + config.vocab_size * config.dim);
offset += config.vocab_size * config.dim;
print("Read ${weights.wcls.lengthInBytes} bytes into wcls");
}
}
}
// clamp number of steps to supported range
if (steps <= 0 || steps > config.seq_len) {
steps = config.seq_len;
}
// read in the tokenizer .bin file
List<Uint8List> vocab = new List.filled(
config.vocab_size, new Uint8List(0)); // config.vocab_size;
Float32List vocab_scores = new Float32List(config.vocab_size);
{
ByteData tokenizer_bytes =
File(tokenizer_path).readAsBytesSync().buffer.asByteData(0);
int offset = 0;
// Not used afterwards, but read anyway to advance the offset
int max_token_length = tokenizer_bytes.getUint32(offset, Endian.little);
offset += 4;
int next_str_length = 0;
for (int i = 0; i < config.vocab_size; i++) {
double score = tokenizer_bytes.getFloat32(offset, Endian.little);
offset += 4;
next_str_length = tokenizer_bytes.getUint32(offset, Endian.little);
offset += 4;
Uint8List next_chunk =
tokenizer_bytes.buffer.asUint8List(offset, next_str_length);
vocab_scores[i] = score;
offset += next_str_length;
vocab[i] = next_chunk;
}
}
print("=====beginning generation=====");
Tokenizer tokenizer;
tokenizer =
Tokenizer(vocab.map((e) => utf8.decode(e)).toList(), vocab_scores);
// process the prompt, if any
List<int> prompt_tokens = [];
int num_prompt_tokens = 0;
if (prompt != null) {
prompt_tokens =
tokenizer.bpe_encode(prompt!, prompt_tokens, num_prompt_tokens);
}
RunState state = RunState();
initialize_run_state(state, config);
// Finally! the main loop
// used to time our code, only initialized after first iteration
int start = 0;
int next; // will store the next token in the sequence
// init with token 1 (=BOS), as done in Llama-2 sentencepiece tokenizer
int token = 1;
int pos = 0; // position in the sequence
while (pos < steps) {
// transformer! Run the model
transformer(token, pos, config, state, weights);
// advance the state machine
if (pos < prompt_tokens.length) {
// if we are still processing the input prompt, force the next prompt token
next = prompt_tokens[pos];
} else {
// sample the next token
if (temperature == 0.0) {
// greedy argmax sampling: take the token with the highest probability
next = argmax(state.logits);
} else {
// apply the temperature to the logits
for (int q = 0; q < config.vocab_size; q++) {
state.logits[q] /= temperature;
}
// apply softmax to the logits to get the probabilities for next token
softmax(state.logits, state.logits.length);
// we sample from this distribution to get the next token
if (top_p <= 0 || top_p >= 1) {
// simply sample from the predicted probability distribution
next = sample(state.logits);
} else {
// top-p (nucleus) sampling, clamping the least likely tokens to zero
next = sample_topp(state.logits, top_p);
}
}
}
pos++;
// data-dependent terminating condition: the BOS (1) token delimits sequences
if (next == 1) {
break;
}
// following BOS (1) token, sentencepiece decoder strips any leading whitespace (see PR #89)
Uint8List token_str = (token == 1 && vocab[next].isNotEmpty && vocab[next][0] == 0x20 /* ' ' */)
? Uint8List.sublistView(vocab[next], 1)
: vocab[next];
// careful, some tokens designate raw bytes, and look like e.g. '<0x01>'
String str;
str = utf8.decode(token_str);
// In the original llama2.c they check for a lot of special tokens, but I've only seen this token really being used
// Being a little lazy here Hehe.
if (str == "<0x0A>") {
str = "\n";
}
stdout.write("$str");
token = next;
// init the timer here because the first iteration can be slower
if (start == 0) {
start = DateTime.now().millisecondsSinceEpoch;
}
}
stdout.write("\n");
// report achieved tok/s (pos-1 because the timer starts after first iteration)
if (pos > 1) {
int end = DateTime.now().millisecondsSinceEpoch;
print("achieved tok/s: ${(pos - 1) / (end - start) * 1000} \n");
}
}
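
Note that, unlike run.c above, which derives the RoPE frequencies on the fly (freq = 1 / 10000^(head_dim / head_size)), this port still loads the precomputed freq_cis_real / freq_cis_imag rows from the checkpoint; the "calculate freq_cis online" item in the todo list refers to removing exactly that dependency. Below is a minimal sketch of how the same rows could be computed online in Dart, mirroring the formula in run.c (illustrative only; ropeRow and the head size of 48 are assumptions, not part of the commit):

import 'dart:math';
import 'dart:typed_data';

// Computes the cos/sin row for one position, mirroring run.c's on-the-fly
// formula: freq = 1 / 10000^(head_dim / head_size). Each row has
// head_size / 2 entries because RoPE rotates dimensions in pairs.
(Float32List, Float32List) ropeRow(int pos, int headSize) {
  final cosRow = Float32List(headSize ~/ 2);
  final sinRow = Float32List(headSize ~/ 2);
  for (int i = 0; i < headSize; i += 2) {
    final freq = 1.0 / pow(10000.0, i / headSize);
    final val = pos * freq;
    cosRow[i ~/ 2] = cos(val);
    sinRow[i ~/ 2] = sin(val);
  }
  return (cosRow, sinRow);
}

void main() {
  // Example: first few cos/sin values for position 3 with an illustrative head size of 48.
  final (cosRow, sinRow) = ropeRow(3, 48);
  print(cosRow.sublist(0, 4));
  print(sinRow.sublist(0, 4));
}

A transformer layer could then use ropeRow(pos, head_size) in place of the freq_cis_real_row / freq_cis_imag_row slices read in transformer() above.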

Deleted file (89 lines, Python tests):
"""
Run simply with
$ pytest
"""
import os
import pytest # pip install pytest
import requests
import subprocess
import torch
from model import ModelArgs, Transformer
from tokenizer import Tokenizer
# -----------------------------------------------------------------------------
# test utilities
test_ckpt_dir = "test"
def download_file(url, filename):
print(f"Downloading {url} to {filename}")
response = requests.get(url, stream=True)
response.raise_for_status() # Raise an HTTPError on bad status code
with open(filename, 'wb') as file:
for chunk in response.iter_content(chunk_size=8192):
file.write(chunk)
def attempt_download_files():
os.makedirs(test_ckpt_dir, exist_ok=True)
root_url = "https://huggingface.co/karpathy/tinyllamas/resolve/main/stories260K"
need = ["stories260K.bin", "stories260K.pt", "tok512.bin", "tok512.model"]
for file in need:
url = root_url + '/' + file #os.path.join inserts \\ on windows
filename = os.path.join(test_ckpt_dir, file)
if not os.path.exists(filename):
download_file(url, filename)
expected_stdout = b'Once upon a time, there was a little girl named Lily. She loved to play outside in the park. One day, she saw a big, red ball. She wanted to play with it, but it was too high.\nLily\'s mom said, "Lily, let\'s go to the park." Lily was sad and didn\'t know what to do. She said, "I want to play with your ball, but I can\'t find it."\nLily was sad and didn\'t know what to do. She said, "I\'m sorry, Lily. I didn\'t know what to do."\nLily didn\'t want to help her mom, so she'
# -----------------------------------------------------------------------------
# actual tests
def test_runc():
""" Forwards a model against a known-good desired outcome in run.c for 200 steps"""
attempt_download_files()
model_path = os.path.join(test_ckpt_dir, "stories260K.bin")
tokenizer_path = os.path.join(test_ckpt_dir, "tok512.bin")
command = ["./run", model_path, "-z", tokenizer_path, "-t", "0.0", "-n", "200"]
with open('err.txt', mode='wb') as fe:
with open('stdout.txt', mode='wb') as fo:
proc = subprocess.Popen(command, stdout=fo, stderr=fe) #pipe in windows terminal does funny things like replacing \n with \r\n
proc.wait()
with open('stdout.txt', mode='r') as f:
stdout = f.read()
# strip the very last \n that is added by run.c for aesthetic reasons
stdout = stdout[:-1].encode('ascii')
assert stdout == expected_stdout
def test_python():
""" Forwards a model against a known-good desired outcome in sample.py for 200 steps"""
attempt_download_files()
device = "cpu" # stories260K is small enough to just breeze through it on CPU
checkpoint = os.path.join(test_ckpt_dir, "stories260K.pt")
checkpoint_dict = torch.load(checkpoint, map_location=device)
gptconf = ModelArgs(**checkpoint_dict['model_args'])
model = Transformer(gptconf)
state_dict = checkpoint_dict['model']
unwanted_prefix = '_orig_mod.'
for k,v in list(state_dict.items()):
if k.startswith(unwanted_prefix):
state_dict[k[len(unwanted_prefix):]] = state_dict.pop(k)
model.load_state_dict(state_dict, strict=False)
model.eval()
model.to(device)
x = torch.tensor([[1]], dtype=torch.long, device=device) # 1 is BOS
with torch.inference_mode():
y = model.generate(x, max_new_tokens=200, temperature=0.0)
pt_tokens = y[0].tolist()
tokenizer_model = os.path.join(test_ckpt_dir, "tok512.model")
enc = Tokenizer(tokenizer_model=tokenizer_model)
text = enc.decode(pt_tokens)
text = text.encode('ascii') # turn into bytes
assert text == expected_stdout

win.c (deleted, 180 lines):
#include "win.h"
#include <errno.h>
#include <io.h>
#ifndef FILE_MAP_EXECUTE
#define FILE_MAP_EXECUTE 0x0020
#endif /* FILE_MAP_EXECUTE */
static int __map_mman_error(const uint32_t err, const int deferr)
{
if (err == 0)
return 0;
//TODO: implement
return err;
}
static uint32_t __map_mmap_prot_page(const int prot)
{
uint32_t protect = 0;
if (prot == PROT_NONE)
return protect;
if ((prot & PROT_EXEC) != 0)
{
protect = ((prot & PROT_WRITE) != 0) ?
PAGE_EXECUTE_READWRITE : PAGE_EXECUTE_READ;
}
else
{
protect = ((prot & PROT_WRITE) != 0) ?
PAGE_READWRITE : PAGE_READONLY;
}
return protect;
}
static uint32_t __map_mmap_prot_file(const int prot)
{
uint32_t desiredAccess = 0;
if (prot == PROT_NONE)
return desiredAccess;
if ((prot & PROT_READ) != 0)
desiredAccess |= FILE_MAP_READ;
if ((prot & PROT_WRITE) != 0)
desiredAccess |= FILE_MAP_WRITE;
if ((prot & PROT_EXEC) != 0)
desiredAccess |= FILE_MAP_EXECUTE;
return desiredAccess;
}
void* mmap(void *addr, size_t len, int prot, int flags, int fildes, ssize_t off)
{
HANDLE fm, h;
void * map = MAP_FAILED;
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable: 4293)
#endif
const uint32_t dwFileOffsetLow = (uint32_t)(off & 0xFFFFFFFFL);
const uint32_t dwFileOffsetHigh = (uint32_t)((off >> 32) & 0xFFFFFFFFL);
const uint32_t protect = __map_mmap_prot_page(prot);
const uint32_t desiredAccess = __map_mmap_prot_file(prot);
const ssize_t maxSize = off + (ssize_t)len;
const uint32_t dwMaxSizeLow = (uint32_t)(maxSize & 0xFFFFFFFFL);
const uint32_t dwMaxSizeHigh = (uint32_t)((maxSize >> 32) & 0xFFFFFFFFL);
#ifdef _MSC_VER
#pragma warning(pop)
#endif
errno = 0;
if (len == 0
/* Unsupported flag combinations */
|| (flags & MAP_FIXED) != 0
/* Unsupported protection combinations */
|| prot == PROT_EXEC)
{
errno = EINVAL;
return MAP_FAILED;
}
h = ((flags & MAP_ANONYMOUS) == 0) ?
(HANDLE)_get_osfhandle(fildes) : INVALID_HANDLE_VALUE;
if ((flags & MAP_ANONYMOUS) == 0 && h == INVALID_HANDLE_VALUE)
{
errno = EBADF;
return MAP_FAILED;
}
fm = CreateFileMapping(h, NULL, protect, dwMaxSizeHigh, dwMaxSizeLow, NULL);
if (fm == NULL)
{
errno = __map_mman_error(GetLastError(), EPERM);
return MAP_FAILED;
}
map = MapViewOfFile(fm, desiredAccess, dwFileOffsetHigh, dwFileOffsetLow, len);
CloseHandle(fm);
if (map == NULL)
{
errno = __map_mman_error(GetLastError(), EPERM);
return MAP_FAILED;
}
return map;
}
int munmap(void *addr, size_t len)
{
if (UnmapViewOfFile(addr))
return 0;
errno = __map_mman_error(GetLastError(), EPERM);
return -1;
}
int mprotect(void *addr, size_t len, int prot)
{
uint32_t newProtect = __map_mmap_prot_page(prot);
uint32_t oldProtect = 0;
if (VirtualProtect(addr, len, newProtect, &oldProtect))
return 0;
errno = __map_mman_error(GetLastError(), EPERM);
return -1;
}
int msync(void *addr, size_t len, int flags)
{
if (FlushViewOfFile(addr, len))
return 0;
errno = __map_mman_error(GetLastError(), EPERM);
return -1;
}
int mlock(const void *addr, size_t len)
{
if (VirtualLock((LPVOID)addr, len))
return 0;
errno = __map_mman_error(GetLastError(), EPERM);
return -1;
}
int munlock(const void *addr, size_t len)
{
if (VirtualUnlock((LPVOID)addr, len))
return 0;
errno = __map_mman_error(GetLastError(), EPERM);
return -1;
}
// Portable clock_gettime function for Windows
int clock_gettime(int clk_id, struct timespec *tp) {
uint32_t ticks = GetTickCount();
tp->tv_sec = ticks / 1000;
tp->tv_nsec = (ticks % 1000) * 1000000;
return 0;
}

win.h (deleted, 69 lines):
#ifndef _WIN_H_
#define _WIN_H_
#define WIN32_LEAN_AND_MEAN // Exclude rarely-used stuff from Windows headers
#include <windows.h>
#include <time.h>
#include <stdint.h>
#define ssize_t int64_t
#define ftell _ftelli64
// Below code is originally from mman-win32
//
/*
* sys/mman.h
* mman-win32
*/
#ifndef _WIN32_WINNT // Allow use of features specific to Windows XP or later.
#define _WIN32_WINNT 0x0501 // Change this to the appropriate value to target other versions of Windows.
#endif
/* All the headers include this file. */
#ifndef _MSC_VER
#include <_mingw.h>
#endif
#include <sys/types.h>
#ifdef __cplusplus
extern "C" {
#endif
#define PROT_NONE 0
#define PROT_READ 1
#define PROT_WRITE 2
#define PROT_EXEC 4
#define MAP_FILE 0
#define MAP_SHARED 1
#define MAP_PRIVATE 2
#define MAP_TYPE 0xf
#define MAP_FIXED 0x10
#define MAP_ANONYMOUS 0x20
#define MAP_ANON MAP_ANONYMOUS
#define MAP_FAILED ((void *)-1)
/* Flags for msync. */
#define MS_ASYNC 1
#define MS_SYNC 2
#define MS_INVALIDATE 4
/* Flags for portable clock_gettime call. */
#define CLOCK_REALTIME 0
void* mmap(void *addr, size_t len, int prot, int flags, int fildes, ssize_t off);
int munmap(void *addr, size_t len);
int mprotect(void *addr, size_t len, int prot);
int msync(void *addr, size_t len, int flags);
int mlock(const void *addr, size_t len);
int munlock(const void *addr, size_t len);
int clock_gettime(int clk_id, struct timespec *tp);
#ifdef __cplusplus
};
#endif
#endif /* _WIN_H_ */