faisalhr1997 Illumotion commited on
Commit
833d7c1
0 Parent(s):

Duplicate from Illumotion/Koboldcpp

Browse files

Co-authored-by: Cuong Vu <[email protected]>

This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .devops/full-cuda.Dockerfile +33 -0
  2. .devops/main-cuda.Dockerfile +32 -0
  3. .editorconfig +19 -0
  4. .flake8 +2 -0
  5. .gitattributes +38 -0
  6. .github/ISSUE_TEMPLATE/custom.md +185 -0
  7. .github/workflows/editorconfig.yml +17 -0
  8. .github/workflows/tidy-post.yml +20 -0
  9. .github/workflows/tidy-review.yml +23 -0
  10. .gitignore +73 -0
  11. .pre-commit-config.yaml +15 -0
  12. CMakeLists.txt +328 -0
  13. Dockerfile +10 -0
  14. LICENSE.md +661 -0
  15. MIT_LICENSE_GGML_LLAMACPP_ONLY +26 -0
  16. Makefile +381 -0
  17. Package.swift +24 -0
  18. README.md +8 -0
  19. Remote-Link.cmd +2 -0
  20. build-info.h +7 -0
  21. clblast.dll +3 -0
  22. convert-lora-to-ggml.py +133 -0
  23. convert-pth-to-ggml.py +13 -0
  24. convert.py +1263 -0
  25. cudart64_110.dll +0 -0
  26. docs/token_generation_performance_tips.md +40 -0
  27. examples/CMakeLists.txt +49 -0
  28. examples/Miku.sh +49 -0
  29. examples/alpaca.sh +19 -0
  30. examples/baby-llama/CMakeLists.txt +4 -0
  31. examples/baby-llama/baby-llama.cpp +1708 -0
  32. examples/benchmark/CMakeLists.txt +7 -0
  33. examples/benchmark/benchmark-matmult.cpp +272 -0
  34. examples/chat-13B.bat +57 -0
  35. examples/chat-13B.sh +41 -0
  36. examples/chat-persistent.sh +151 -0
  37. examples/chat-vicuna.sh +41 -0
  38. examples/chat.sh +16 -0
  39. examples/common.cpp +982 -0
  40. examples/common.h +147 -0
  41. examples/embd-input/.gitignore +4 -0
  42. examples/embd-input/CMakeLists.txt +15 -0
  43. examples/embd-input/README.md +63 -0
  44. examples/embd-input/embd-input-lib.cpp +223 -0
  45. examples/embd-input/embd-input-test.cpp +35 -0
  46. examples/embd-input/embd-input.h +28 -0
  47. examples/embd-input/embd_input.py +71 -0
  48. examples/embd-input/llava.py +70 -0
  49. examples/embd-input/minigpt4.py +128 -0
  50. examples/embd-input/panda_gpt.py +98 -0
.devops/full-cuda.Dockerfile ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ARG UBUNTU_VERSION=22.04
2
+
3
+ # This needs to generally match the container host's environment.
4
+ ARG CUDA_VERSION=11.7.1
5
+
6
+ # Target the CUDA build image
7
+ ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}
8
+
9
+ FROM ${BASE_CUDA_DEV_CONTAINER} as build
10
+
11
+ # Unless otherwise specified, we make a fat build.
12
+ ARG CUDA_DOCKER_ARCH=all
13
+
14
+ RUN apt-get update && \
15
+ apt-get install -y build-essential python3 python3-pip
16
+
17
+ COPY requirements.txt requirements.txt
18
+
19
+ RUN pip install --upgrade pip setuptools wheel \
20
+ && pip install -r requirements.txt
21
+
22
+ WORKDIR /app
23
+
24
+ COPY . .
25
+
26
+ # Set nvcc architecture
27
+ ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
28
+ # Enable cuBLAS
29
+ ENV LLAMA_CUBLAS=1
30
+
31
+ RUN make
32
+
33
+ ENTRYPOINT ["/app/.devops/tools.sh"]
.devops/main-cuda.Dockerfile ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ARG UBUNTU_VERSION=22.04
2
+ # This needs to generally match the container host's environment.
3
+ ARG CUDA_VERSION=11.7.1
4
+ # Target the CUDA build image
5
+ ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}
6
+ # Target the CUDA runtime image
7
+ ARG BASE_CUDA_RUN_CONTAINER=nvidia/cuda:${CUDA_VERSION}-runtime-ubuntu${UBUNTU_VERSION}
8
+
9
+ FROM ${BASE_CUDA_DEV_CONTAINER} as build
10
+
11
+ # Unless otherwise specified, we make a fat build.
12
+ ARG CUDA_DOCKER_ARCH=all
13
+
14
+ RUN apt-get update && \
15
+ apt-get install -y build-essential
16
+
17
+ WORKDIR /app
18
+
19
+ COPY . .
20
+
21
+ # Set nvcc architecture
22
+ ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
23
+ # Enable cuBLAS
24
+ ENV LLAMA_CUBLAS=1
25
+
26
+ RUN make
27
+
28
+ FROM ${BASE_CUDA_RUN_CONTAINER} as runtime
29
+
30
+ COPY --from=build /app/main /main
31
+
32
+ ENTRYPOINT [ "/main" ]
.editorconfig ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # https://EditorConfig.org
2
+
3
+ # Top-most EditorConfig file
4
+ root = true
5
+
6
+ # Unix-style newlines with a newline ending every file, utf-8 charset
7
+ [*]
8
+ end_of_line = lf
9
+ insert_final_newline = true
10
+ trim_trailing_whitespace = true
11
+ charset = utf-8
12
+ indent_style = space
13
+ indent_size = 4
14
+
15
+ [Makefile]
16
+ indent_style = tab
17
+
18
+ [prompts/*.txt]
19
+ insert_final_newline = unset
.flake8 ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ [flake8]
2
+ max-line-length = 125
.gitattributes ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ clblast.dll filter=lfs diff=lfs merge=lfs -text
37
+ lib/libopenblas.lib filter=lfs diff=lfs merge=lfs -text
38
+ libopenblas.dll filter=lfs diff=lfs merge=lfs -text
.github/ISSUE_TEMPLATE/custom.md ADDED
@@ -0,0 +1,185 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ name: Issue and enhancement template
3
+ about: Used to report issues and request enhancements for llama.cpp
4
+ title: "[User] Insert summary of your issue or enhancement.."
5
+ labels: ''
6
+ assignees: ''
7
+
8
+ ---
9
+
10
+ # Prerequisites
11
+
12
+ Please answer the following questions for yourself before submitting an issue.
13
+
14
+ - [ ] I am running the latest code. Development is very rapid so there are no tagged versions as of now.
15
+ - [ ] I carefully followed the [README.md](https://github.com/ggerganov/llama.cpp/blob/master/README.md).
16
+ - [ ] I [searched using keywords relevant to my issue](https://docs.github.com/en/issues/tracking-your-work-with-issues/filtering-and-searching-issues-and-pull-requests) to make sure that I am creating a new issue that is not already open (or closed).
17
+ - [ ] I reviewed the [Discussions](https://github.com/ggerganov/llama.cpp/discussions), and have a new bug or useful enhancement to share.
18
+
19
+ # Expected Behavior
20
+
21
+ Please provide a detailed written description of what you were trying to do, and what you expected `llama.cpp` to do.
22
+
23
+ # Current Behavior
24
+
25
+ Please provide a detailed written description of what `llama.cpp` did, instead.
26
+
27
+ # Environment and Context
28
+
29
+ Please provide detailed information about your computer setup. This is important in case the issue is not reproducible except for under certain specific conditions.
30
+
31
+ * Physical (or virtual) hardware you are using, e.g. for Linux:
32
+
33
+ `$ lscpu`
34
+
35
+ * Operating System, e.g. for Linux:
36
+
37
+ `$ uname -a`
38
+
39
+ * SDK version, e.g. for Linux:
40
+
41
+ ```
42
+ $ python3 --version
43
+ $ make --version
44
+ $ g++ --version
45
+ ```
46
+
47
+ # Failure Information (for bugs)
48
+
49
+ Please help provide information about the failure if this is a bug. If it is not a bug, please remove the rest of this template.
50
+
51
+ # Steps to Reproduce
52
+
53
+ Please provide detailed steps for reproducing the issue. We are not sitting in front of your screen, so the more detail the better.
54
+
55
+ 1. step 1
56
+ 2. step 2
57
+ 3. step 3
58
+ 4. etc.
59
+
60
+ # Failure Logs
61
+
62
+ Please include any relevant log snippets or files. If it works under one configuration but not under another, please provide logs for both configurations and their corresponding outputs so it is easy to see where behavior changes.
63
+
64
+ Also, please try to **avoid using screenshots** if at all possible. Instead, copy/paste the console output and use [Github's markdown](https://docs.github.com/en/get-started/writing-on-github/getting-started-with-writing-and-formatting-on-github/basic-writing-and-formatting-syntax) to cleanly format your logs for easy readability.
65
+
66
+ Example environment info:
67
+ ```
68
+ llama.cpp$ git log | head -1
69
+ commit 2af23d30434a677c6416812eea52ccc0af65119c
70
+
71
+ llama.cpp$ lscpu | egrep "AMD|Flags"
72
+ Vendor ID: AuthenticAMD
73
+ Model name: AMD Ryzen Threadripper 1950X 16-Core Processor
74
+ Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc rep_good nopl nonstop_tsc cpuid extd_apicid amd_dcm aperfmperf rapl pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_llc mwaitx cpb hw_pstate ssbd ibpb vmmcall fsgsbase bmi1 avx2 smep bmi2 rdseed adx smap clflushopt sha_ni xsaveopt xsavec xgetbv1 xsaves clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif overflow_recov succor smca sme sev
75
+ Virtualization: AMD-V
76
+
77
+ llama.cpp$ python3 --version
78
+ Python 3.10.9
79
+
80
+ llama.cpp$ pip list | egrep "torch|numpy|sentencepiece"
81
+ numpy 1.24.2
82
+ numpydoc 1.5.0
83
+ sentencepiece 0.1.97
84
+ torch 1.13.1
85
+ torchvision 0.14.1
86
+
87
+ llama.cpp$ make --version | head -1
88
+ GNU Make 4.3
89
+
90
+ $ md5sum ./models/65B/ggml-model-q4_0.bin
91
+ dbdd682cce80e2d6e93cefc7449df487 ./models/65B/ggml-model-q4_0.bin
92
+ ```
93
+
94
+ Example run with the Linux command [perf](https://www.brendangregg.com/perf.html)
95
+ ```
96
+ llama.cpp$ perf stat ./main -m ./models/65B/ggml-model-q4_0.bin -t 16 -n 1024 -p "Please close your issue when it has been answered."
97
+ main: seed = 1679149377
98
+ llama_model_load: loading model from './models/65B/ggml-model-q4_0.bin' - please wait ...
99
+ llama_model_load: n_vocab = 32000
100
+ llama_model_load: n_ctx = 512
101
+ llama_model_load: n_embd = 8192
102
+ llama_model_load: n_mult = 256
103
+ llama_model_load: n_head = 64
104
+ llama_model_load: n_layer = 80
105
+ llama_model_load: n_rot = 128
106
+ llama_model_load: f16 = 2
107
+ llama_model_load: n_ff = 22016
108
+ llama_model_load: n_parts = 8
109
+ llama_model_load: ggml ctx size = 41477.73 MB
110
+ llama_model_load: memory_size = 2560.00 MB, n_mem = 40960
111
+ llama_model_load: loading model part 1/8 from './models/65B/ggml-model-q4_0.bin'
112
+ llama_model_load: .......................................................................................... done
113
+ llama_model_load: model size = 4869.09 MB / num tensors = 723
114
+ llama_model_load: loading model part 2/8 from './models/65B/ggml-model-q4_0.bin.1'
115
+ llama_model_load: .......................................................................................... done
116
+ llama_model_load: model size = 4869.09 MB / num tensors = 723
117
+ llama_model_load: loading model part 3/8 from './models/65B/ggml-model-q4_0.bin.2'
118
+ llama_model_load: .......................................................................................... done
119
+ llama_model_load: model size = 4869.09 MB / num tensors = 723
120
+ llama_model_load: loading model part 4/8 from './models/65B/ggml-model-q4_0.bin.3'
121
+ llama_model_load: .......................................................................................... done
122
+ llama_model_load: model size = 4869.09 MB / num tensors = 723
123
+ llama_model_load: loading model part 5/8 from './models/65B/ggml-model-q4_0.bin.4'
124
+ llama_model_load: .......................................................................................... done
125
+ llama_model_load: model size = 4869.09 MB / num tensors = 723
126
+ llama_model_load: loading model part 6/8 from './models/65B/ggml-model-q4_0.bin.5'
127
+ llama_model_load: .......................................................................................... done
128
+ llama_model_load: model size = 4869.09 MB / num tensors = 723
129
+ llama_model_load: loading model part 7/8 from './models/65B/ggml-model-q4_0.bin.6'
130
+ llama_model_load: .......................................................................................... done
131
+ llama_model_load: model size = 4869.09 MB / num tensors = 723
132
+ llama_model_load: loading model part 8/8 from './models/65B/ggml-model-q4_0.bin.7'
133
+ llama_model_load: .......................................................................................... done
134
+ llama_model_load: model size = 4869.09 MB / num tensors = 723
135
+
136
+ system_info: n_threads = 16 / 32 | AVX = 1 | AVX2 = 1 | AVX512 = 0 | FMA = 1 | NEON = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 0 | SSE3 = 1 | VSX = 0 |
137
+
138
+ main: prompt: 'Please close your issue when it has been answered.'
139
+ main: number of tokens in prompt = 11
140
+ 1 -> ''
141
+ 12148 -> 'Please'
142
+ 3802 -> ' close'
143
+ 596 -> ' your'
144
+ 2228 -> ' issue'
145
+ 746 -> ' when'
146
+ 372 -> ' it'
147
+ 756 -> ' has'
148
+ 1063 -> ' been'
149
+ 7699 -> ' answered'
150
+ 29889 -> '.'
151
+
152
+ sampling parameters: temp = 0.800000, top_k = 40, top_p = 0.950000, repeat_last_n = 64, repeat_penalty = 1.300000
153
+
154
+
155
+ Please close your issue when it has been answered.
156
+ @duncan-donut: I'm trying to figure out what kind of "support" you need for this script and why, exactly? Is there a question about how the code works that hasn't already been addressed in one or more comments below this ticket, or are we talking something else entirely like some sorta bugfixing job because your server setup is different from mine??
157
+ I can understand if your site needs to be running smoothly and you need help with a fix of sorts but there should really be nothing wrong here that the code itself could not handle. And given that I'm getting reports about how it works perfectly well on some other servers, what exactly are we talking? A detailed report will do wonders in helping us get this resolved for ya quickly so please take your time and describe the issue(s) you see as clearly & concisely as possible!!
158
+ @duncan-donut: I'm not sure if you have access to cPanel but you could try these instructions. It is worth a shot! Let me know how it goes (or what error message, exactly!) when/if ya give that code a go? [end of text]
159
+
160
+
161
+ main: mem per token = 71159620 bytes
162
+ main: load time = 19309.95 ms
163
+ main: sample time = 168.62 ms
164
+ main: predict time = 223895.61 ms / 888.47 ms per token
165
+ main: total time = 246406.42 ms
166
+
167
+ Performance counter stats for './main -m ./models/65B/ggml-model-q4_0.bin -t 16 -n 1024 -p Please close your issue when it has been answered.':
168
+
169
+ 3636882.89 msec task-clock # 14.677 CPUs utilized
170
+ 13509 context-switches # 3.714 /sec
171
+ 2436 cpu-migrations # 0.670 /sec
172
+ 10476679 page-faults # 2.881 K/sec
173
+ 13133115082869 cycles # 3.611 GHz (16.77%)
174
+ 29314462753 stalled-cycles-frontend # 0.22% frontend cycles idle (16.76%)
175
+ 10294402631459 stalled-cycles-backend # 78.39% backend cycles idle (16.74%)
176
+ 23479217109614 instructions # 1.79 insn per cycle
177
+ # 0.44 stalled cycles per insn (16.76%)
178
+ 2353072268027 branches # 647.002 M/sec (16.77%)
179
+ 1998682780 branch-misses # 0.08% of all branches (16.76%)
180
+
181
+ 247.802177522 seconds time elapsed
182
+
183
+ 3618.573072000 seconds user
184
+ 18.491698000 seconds sys
185
+ ```
.github/workflows/editorconfig.yml ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: EditorConfig Checker
2
+
3
+ on:
4
+ push:
5
+ branches:
6
+ - master
7
+ pull_request:
8
+ branches:
9
+ - master
10
+
11
+ jobs:
12
+ editorconfig:
13
+ runs-on: ubuntu-latest
14
+ steps:
15
+ - uses: actions/checkout@v3
16
+ - uses: editorconfig-checker/action-editorconfig-checker@main
17
+ - run: editorconfig-checker
.github/workflows/tidy-post.yml ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: clang-tidy review post comments
2
+
3
+ on:
4
+ workflow_dispatch:
5
+ workflows: ["clang-tidy-review"]
6
+ types:
7
+ - completed
8
+
9
+ jobs:
10
+ build:
11
+ runs-on: ubuntu-latest
12
+
13
+ steps:
14
+ - uses: ZedThree/clang-tidy-review/[email protected]
15
+ # lgtm_comment_body, max_comments, and annotations need to be set on the posting workflow in a split setup
16
+ with:
17
+ # adjust options as necessary
18
+ lgtm_comment_body: ''
19
+ annotations: false
20
+ max_comments: 25
.github/workflows/tidy-review.yml ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: clang-tidy-review
2
+
3
+ on:
4
+ pull_request:
5
+ branches:
6
+ - master
7
+
8
+ jobs:
9
+ clang-tidy-review:
10
+ runs-on: ubuntu-latest
11
+
12
+ steps:
13
+ - uses: actions/checkout@v3
14
+
15
+ - uses: ZedThree/[email protected]
16
+ id: review
17
+ with:
18
+ lgtm_comment_body: ''
19
+ build_dir: build
20
+ cmake_command: cmake . -B build -DCMAKE_EXPORT_COMPILE_COMMANDS=on
21
+ split_workflow: true
22
+
23
+ - uses: ZedThree/clang-tidy-review/[email protected]
.gitignore ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.o
2
+ *.a
3
+ *.so
4
+ .DS_Store
5
+ .build/
6
+ .cache/
7
+ .direnv/
8
+ .envrc
9
+ .swiftpm
10
+ .venv
11
+ .clang-tidy
12
+ .vs/
13
+ .vscode/
14
+
15
+ build/
16
+ build-em/
17
+ build-debug/
18
+ build-release/
19
+ build-static/
20
+ build-cublas/
21
+ build-opencl/
22
+ build-metal/
23
+ build-mpi/
24
+ build-no-accel/
25
+ build-sanitize-addr/
26
+ build-sanitize-thread/
27
+ out/
28
+
29
+ /main
30
+ /quantize
31
+ /quantize-stats
32
+ /result
33
+ /perplexity
34
+ /embedding
35
+ /train-text-from-scratch
36
+ /simple
37
+ /benchmark-matmult
38
+ /vdot
39
+ /server
40
+ /Pipfile
41
+ /embd-input-test
42
+ /libllama.so
43
+
44
+ arm_neon.h
45
+ compile_commands.json
46
+ CMakeSettings.json
47
+
48
+ __pycache__
49
+
50
+ dist/
51
+ *.spec
52
+
53
+ zig-out/
54
+ zig-cache/
55
+
56
+ ppl-*.txt
57
+ qnt-*.txt
58
+ perf-*.txt
59
+
60
+ examples/jeopardy/results.txt
61
+ koboldcpp.so
62
+ koboldcpp_failsafe.so
63
+ koboldcpp_openblas.so
64
+ koboldcpp_openblas_noavx2.so
65
+ koboldcpp_clblast.so
66
+ koboldcpp.dll
67
+ koboldcpp_failsafe.dll
68
+ koboldcpp_openblas.dll
69
+ koboldcpp_openblas_noavx2.dll
70
+ koboldcpp_clblast.dll
71
+ koboldcpp_cublas.dll
72
+ cublas64_11.dll
73
+ cublasLt64_11.dll
.pre-commit-config.yaml ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # See https://pre-commit.com for more information
2
+ # See https://pre-commit.com/hooks.html for more hooks
3
+ exclude: prompts/.*.txt
4
+ repos:
5
+ - repo: https://github.com/pre-commit/pre-commit-hooks
6
+ rev: v3.2.0
7
+ hooks:
8
+ - id: trailing-whitespace
9
+ - id: end-of-file-fixer
10
+ - id: check-yaml
11
+ - id: check-added-large-files
12
+ - repo: https://github.com/PyCQA/flake8
13
+ rev: 6.0.0
14
+ hooks:
15
+ - id: flake8
CMakeLists.txt ADDED
@@ -0,0 +1,328 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # DO NOT USE THIS FILE.
2
+ # IT'S ONLY FOR CUBLAS BUILD PURPOSES ON WINDOWS VISUAL STUDIO.
3
+ # IT WILL NOT BE UPDATED OR MAINTAINED !!!
4
+
5
+ message(STATUS "============== ============== ==============")
6
+ message(STATUS "WARNING! Do NOT use this file. It is UNSUPPORTED for normal users. Use MAKE instead.")
7
+ message(STATUS "It is ONLY for CUBLAS build testing on windows visual studio. IT WILL NOT BE UPDATED OR MAINTAINED !!!")
8
+ message(STATUS "IF YOU ARE SEEING THIS, you MUST ONLY be building AN EXPERIMENAL WINDOWS CUBLAS BUILD! NOTHING ELSE WILL BE SUPPORTED !!!")
9
+ message(STATUS "============== ============== ==============")
10
+
11
+ cmake_minimum_required(VERSION 3.12) # Don't bump this version for no reason
12
+ project("llama.cpp" C CXX)
13
+
14
+ set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
15
+ set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS 1)
16
+ set(CMAKE_BUILD_TYPE Release CACHE STRING "Build type" FORCE)
17
+ set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Release")
18
+ set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
19
+ set(LLAMA_STANDALONE ON)
20
+ set(BUILD_SHARED_LIBS_DEFAULT ON)
21
+ set(LLAMA_STATIC OFF)
22
+ set(LLAMA_NATIVE OFF)
23
+ set(LLAMA_LTO OFF)
24
+ set(LLAMA_ALL_WARNINGS OFF)
25
+ set(LLAMA_ALL_WARNINGS_3RD_PARTY OFF)
26
+ set(LLAMA_GPROF OFF)
27
+ set(LLAMA_SANITIZE_THREAD OFF)
28
+ set(LLAMA_SANITIZE_ADDRESS OFF)
29
+ set(LLAMA_SANITIZE_UNDEFINED OFF)
30
+
31
+ option(MAKE_MISC_FILES "MAKE_MISC_FILES" OFF)
32
+
33
+ # instruction set specific
34
+ option(LLAMA_AVX "llama: enable AVX" ON)
35
+ option(LLAMA_AVX2 "llama: enable AVX2" ON)
36
+ option(LLAMA_AVX512 "llama: enable AVX512" OFF)
37
+ option(LLAMA_AVX512_VBMI "llama: enable AVX512-VBMI" OFF)
38
+ option(LLAMA_AVX512_VNNI "llama: enable AVX512-VNNI" OFF)
39
+ option(LLAMA_FMA "llama: enable FMA" ON)
40
+ # in MSVC F16C is implied with AVX2/AVX512
41
+ if (NOT MSVC)
42
+ option(LLAMA_F16C "llama: enable F16C" ON)
43
+ endif()
44
+
45
+ # 3rd party libs
46
+ option(LLAMA_CUBLAS "llama: use cuBLAS" ON)
47
+ set(LLAMA_CUDA_DMMV_X "32" CACHE STRING "llama: x stride for dmmv CUDA kernels")
48
+ set(LLAMA_CUDA_DMMV_Y "1" CACHE STRING "llama: y block size for dmmv CUDA kernels")
49
+ set(LLAMA_CUDA_MMV_Y "1" CACHE STRING "llama: y block size for mmv CUDA kernels")
50
+ option(LLAMA_CUDA_DMMV_F16 "llama: use 16 bit floats for dmmv CUDA kernels" OFF)
51
+ set(LLAMA_CUDA_KQUANTS_ITER "2" CACHE STRING "llama: iters./thread per block for Q2_K/Q6_K")
52
+ option(LLAMA_K_QUANTS "llama: use k-quants" ON)
53
+
54
+
55
+ #
56
+ # Compile flags
57
+ #
58
+
59
+ set(CMAKE_CXX_STANDARD 11)
60
+ set(CMAKE_CXX_STANDARD_REQUIRED true)
61
+ set(CMAKE_C_STANDARD 11)
62
+ set(CMAKE_C_STANDARD_REQUIRED true)
63
+ set(THREADS_PREFER_PTHREAD_FLAG ON)
64
+ find_package(Threads REQUIRED)
65
+
66
+ add_compile_definitions(GGML_USE_K_QUANTS)
67
+
68
+ if (LLAMA_CUBLAS)
69
+ cmake_minimum_required(VERSION 3.17)
70
+
71
+ find_package(CUDAToolkit)
72
+ if (CUDAToolkit_FOUND)
73
+ message(STATUS "cuBLAS found")
74
+
75
+ enable_language(CUDA)
76
+
77
+ set(GGML_SOURCES_CUDA ggml-cuda.cu ggml-cuda.h)
78
+ set(GGML_V2_CUDA_SOURCES otherarch/ggml_v2-cuda.cu otherarch/ggml_v2-cuda.h)
79
+ set(GGML_V2_LEGACY_CUDA_SOURCES otherarch/ggml_v2-cuda-legacy.cu otherarch/ggml_v2-cuda-legacy.h)
80
+
81
+ add_compile_definitions(GGML_USE_CUBLAS)
82
+ #add_compile_definitions(GGML_CUDA_FORCE_DMMV) #non dmmv broken for me
83
+
84
+ add_compile_definitions(GGML_CUDA_DMMV_X=${LLAMA_CUDA_DMMV_X})
85
+ add_compile_definitions(GGML_CUDA_DMMV_Y=${LLAMA_CUDA_DMMV_Y})
86
+ add_compile_definitions(GGML_CUDA_MMV_Y=${LLAMA_CUDA_MMV_Y})
87
+ if (LLAMA_CUDA_DMMV_F16)
88
+ add_compile_definitions(GGML_CUDA_DMMV_F16)
89
+ endif()
90
+ add_compile_definitions(K_QUANTS_PER_ITERATION=${LLAMA_CUDA_KQUANTS_ITER})
91
+
92
+ if (LLAMA_STATIC)
93
+ set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} CUDA::cudart_static CUDA::cublas_static CUDA::cublasLt_static)
94
+ else()
95
+ set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} CUDA::cudart CUDA::cublas CUDA::cublasLt)
96
+ endif()
97
+
98
+ if (NOT DEFINED CMAKE_CUDA_ARCHITECTURES)
99
+ if (LLAMA_CUDA_DMMV_F16)
100
+ set(CMAKE_CUDA_ARCHITECTURES "61") # needed for f16 CUDA intrinsics
101
+ else()
102
+ set(CMAKE_CUDA_ARCHITECTURES "52;61") # lowest CUDA 12 standard + lowest for integer intrinsics
103
+ endif()
104
+ endif()
105
+ message(STATUS "Using CUDA architectures: ${CMAKE_CUDA_ARCHITECTURES}")
106
+
107
+ else()
108
+ message(WARNING "cuBLAS not found")
109
+ endif()
110
+ endif()
111
+
112
+ if (LLAMA_ALL_WARNINGS)
113
+ if (NOT MSVC)
114
+ set(c_flags
115
+ -Wall
116
+ -Wextra
117
+ -Wpedantic
118
+ -Wcast-qual
119
+ -Wdouble-promotion
120
+ -Wshadow
121
+ -Wstrict-prototypes
122
+ -Wpointer-arith
123
+ )
124
+ set(cxx_flags
125
+ -Wall
126
+ -Wextra
127
+ -Wpedantic
128
+ -Wcast-qual
129
+ -Wno-unused-function
130
+ -Wno-multichar
131
+ )
132
+ else()
133
+ # todo : msvc
134
+ endif()
135
+
136
+ add_compile_options(
137
+ "$<$<COMPILE_LANGUAGE:C>:${c_flags}>"
138
+ "$<$<COMPILE_LANGUAGE:CXX>:${cxx_flags}>"
139
+ )
140
+
141
+ endif()
142
+
143
+ if (MSVC)
144
+ add_compile_definitions(_CRT_SECURE_NO_WARNINGS)
145
+
146
+ if (BUILD_SHARED_LIBS)
147
+ set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS ON)
148
+ endif()
149
+ endif()
150
+
151
+ if (LLAMA_LTO)
152
+ include(CheckIPOSupported)
153
+ check_ipo_supported(RESULT result OUTPUT output)
154
+ if (result)
155
+ set(CMAKE_INTERPROCEDURAL_OPTIMIZATION TRUE)
156
+ else()
157
+ message(WARNING "IPO is not supported: ${output}")
158
+ endif()
159
+ endif()
160
+
161
+ # Architecture specific
162
+ # TODO: probably these flags need to be tweaked on some architectures
163
+ # feel free to update the Makefile for your architecture and send a pull request or issue
164
+ message(STATUS "CMAKE_SYSTEM_PROCESSOR: ${CMAKE_SYSTEM_PROCESSOR}")
165
+ if (NOT MSVC)
166
+ if (LLAMA_STATIC)
167
+ add_link_options(-static)
168
+ if (MINGW)
169
+ add_link_options(-static-libgcc -static-libstdc++)
170
+ endif()
171
+ endif()
172
+ if (LLAMA_GPROF)
173
+ add_compile_options(-pg)
174
+ endif()
175
+ if (LLAMA_NATIVE)
176
+ add_compile_options(-march=native)
177
+ endif()
178
+ endif()
179
+
180
+ if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm" OR ${CMAKE_SYSTEM_PROCESSOR} MATCHES "aarch64")
181
+ message(STATUS "ARM detected")
182
+ if (MSVC)
183
+ # TODO: arm msvc?
184
+ else()
185
+ if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "armv6")
186
+ # Raspberry Pi 1, Zero
187
+ add_compile_options(-mfpu=neon-fp-armv8 -mfp16-format=ieee -mno-unaligned-access)
188
+ endif()
189
+ if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "armv7")
190
+ # Raspberry Pi 2
191
+ add_compile_options(-mfpu=neon-fp-armv8 -mfp16-format=ieee -mno-unaligned-access -funsafe-math-optimizations)
192
+ endif()
193
+ if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "armv8")
194
+ # Raspberry Pi 3, 4, Zero 2 (32-bit)
195
+ add_compile_options(-mfp16-format=ieee -mno-unaligned-access)
196
+ endif()
197
+ endif()
198
+ elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "^(x86_64|i686|AMD64)$")
199
+ message(STATUS "x86 detected")
200
+ if (MSVC)
201
+ if (LLAMA_AVX512)
202
+ add_compile_options($<$<COMPILE_LANGUAGE:C>:/arch:AVX512>)
203
+ add_compile_options($<$<COMPILE_LANGUAGE:CXX>:/arch:AVX512>)
204
+ # MSVC has no compile-time flags enabling specific
205
+ # AVX512 extensions, neither it defines the
206
+ # macros corresponding to the extensions.
207
+ # Do it manually.
208
+ if (LLAMA_AVX512_VBMI)
209
+ add_compile_definitions($<$<COMPILE_LANGUAGE:C>:__AVX512VBMI__>)
210
+ add_compile_definitions($<$<COMPILE_LANGUAGE:CXX>:__AVX512VBMI__>)
211
+ endif()
212
+ if (LLAMA_AVX512_VNNI)
213
+ add_compile_definitions($<$<COMPILE_LANGUAGE:C>:__AVX512VNNI__>)
214
+ add_compile_definitions($<$<COMPILE_LANGUAGE:CXX>:__AVX512VNNI__>)
215
+ endif()
216
+ elseif (LLAMA_AVX2)
217
+ add_compile_options($<$<COMPILE_LANGUAGE:C>:/arch:AVX2>)
218
+ add_compile_options($<$<COMPILE_LANGUAGE:CXX>:/arch:AVX2>)
219
+ elseif (LLAMA_AVX)
220
+ add_compile_options($<$<COMPILE_LANGUAGE:C>:/arch:AVX>)
221
+ add_compile_options($<$<COMPILE_LANGUAGE:CXX>:/arch:AVX>)
222
+ endif()
223
+ else()
224
+ if (LLAMA_F16C)
225
+ add_compile_options(-mf16c)
226
+ endif()
227
+ if (LLAMA_FMA)
228
+ add_compile_options(-mfma)
229
+ endif()
230
+ if (LLAMA_AVX)
231
+ add_compile_options(-mavx)
232
+ endif()
233
+ if (LLAMA_AVX2)
234
+ add_compile_options(-mavx2)
235
+ endif()
236
+ if (LLAMA_AVX512)
237
+ add_compile_options(-mavx512f)
238
+ add_compile_options(-mavx512bw)
239
+ endif()
240
+ if (LLAMA_AVX512_VBMI)
241
+ add_compile_options(-mavx512vbmi)
242
+ endif()
243
+ if (LLAMA_AVX512_VNNI)
244
+ add_compile_options(-mavx512vnni)
245
+ endif()
246
+ endif()
247
+ elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "ppc64")
248
+ message(STATUS "PowerPC detected")
249
+ add_compile_options(-mcpu=native -mtune=native)
250
+ #TODO: Add targets for Power8/Power9 (Altivec/VSX) and Power10(MMA) and query for big endian systems (ppc64/le/be)
251
+ else()
252
+ message(STATUS "Unknown architecture")
253
+ endif()
254
+
255
+ #
256
+ # Build libraries
257
+ #
258
+
259
+ add_library(ggml OBJECT
260
+ ggml.c
261
+ ggml.h
262
+ k_quants.h
263
+ k_quants.c
264
+ ${GGML_SOURCES_CUDA})
265
+ target_include_directories(ggml PUBLIC . ./otherarch ./otherarch/tools)
266
+ target_compile_features(ggml PUBLIC c_std_11) # don't bump
267
+ target_link_libraries(ggml PUBLIC Threads::Threads ${LLAMA_EXTRA_LIBS})
268
+ set_target_properties(ggml PROPERTIES POSITION_INDEPENDENT_CODE ON)
269
+
270
+ add_library(ggml_v1 OBJECT
271
+ otherarch/ggml_v1.c
272
+ otherarch/ggml_v1.h)
273
+ target_include_directories(ggml_v1 PUBLIC . ./otherarch ./otherarch/tools)
274
+ target_compile_features(ggml_v1 PUBLIC c_std_11) # don't bump
275
+ target_link_libraries(ggml_v1 PUBLIC Threads::Threads ${LLAMA_EXTRA_LIBS})
276
+ set_target_properties(ggml_v1 PROPERTIES POSITION_INDEPENDENT_CODE ON)
277
+
278
+ add_library(ggml_v2 OBJECT
279
+ otherarch/ggml_v2.c
280
+ otherarch/ggml_v2.h
281
+ ${GGML_V2_CUDA_SOURCES}
282
+ ${GGML_V2_LEGACY_CUDA_SOURCES})
283
+ target_include_directories(ggml_v2 PUBLIC . ./otherarch ./otherarch/tools)
284
+ target_compile_features(ggml_v2 PUBLIC c_std_11) # don't bump
285
+ target_link_libraries(ggml_v2 PUBLIC Threads::Threads ${LLAMA_EXTRA_LIBS})
286
+ set_target_properties(ggml_v2 PROPERTIES POSITION_INDEPENDENT_CODE ON)
287
+
288
+ add_library(common2
289
+ examples/common.cpp
290
+ examples/common.h)
291
+ target_include_directories(common2 PUBLIC . ./otherarch ./otherarch/tools ./examples)
292
+ target_compile_features(common2 PUBLIC cxx_std_11) # don't bump
293
+ target_link_libraries(common2 PRIVATE ggml ${LLAMA_EXTRA_LIBS})
294
+ set_target_properties(common2 PROPERTIES POSITION_INDEPENDENT_CODE ON)
295
+
296
+ add_library(gpttype_adapter
297
+ gpttype_adapter.cpp)
298
+ target_include_directories(gpttype_adapter PUBLIC . ./otherarch ./otherarch/tools ./examples)
299
+ target_compile_features(gpttype_adapter PUBLIC cxx_std_11) # don't bump
300
+ target_link_libraries(gpttype_adapter PRIVATE common2 ggml ${LLAMA_EXTRA_LIBS})
301
+ set_target_properties(gpttype_adapter PROPERTIES POSITION_INDEPENDENT_CODE ON)
302
+
303
+
304
+ set(TARGET koboldcpp_cublas)
305
+ add_library(${TARGET} SHARED expose.cpp expose.h)
306
+ target_include_directories(${TARGET} PUBLIC . ./otherarch ./otherarch/tools ./examples)
307
+ target_compile_features(${TARGET} PUBLIC cxx_std_11) # don't bump
308
+ set_target_properties(${TARGET} PROPERTIES PREFIX "")
309
+ set_target_properties(${TARGET} PROPERTIES OUTPUT_NAME "koboldcpp_cublas")
310
+ set_target_properties(${TARGET} PROPERTIES POSITION_INDEPENDENT_CODE ON)
311
+ target_link_libraries(${TARGET} PUBLIC ggml ggml_v1 ggml_v2 common2 gpttype_adapter ${CMAKE_THREAD_LIBS_INIT})
312
+ target_compile_features(${TARGET} PRIVATE cxx_std_11)
313
+
314
+
315
+ if (MAKE_MISC_FILES)
316
+ add_library(llama
317
+ llama.cpp
318
+ llama.h
319
+ llama-util.h
320
+ )
321
+ target_include_directories(llama PUBLIC .)
322
+ target_compile_features(llama PUBLIC cxx_std_11) # don't bump
323
+ target_link_libraries(llama PRIVATE
324
+ ggml
325
+ ${LLAMA_EXTRA_LIBS}
326
+ )
327
+ add_subdirectory(examples)
328
+ endif()
Dockerfile ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ FROM python
2
+ WORKDIR /app
3
+ COPY . .
4
+ RUN apt update \
5
+ && apt install build-essential wget libopenblas-dev make -y \
6
+ && make LLAMA_OPENBLAS=1 \
7
+ && wget https://huggingface.co/notstoic/pygmalion-13b-ggml/resolve/main/pygmalion-13b-ggml-q4_0.bin \
8
+ && apt remove build-essential wget make -y
9
+
10
+ ENTRYPOINT ["python", "koboldcpp.py", "pygmalion-13b-ggml-q4_0.bin", "--port", "7860"]
LICENSE.md ADDED
@@ -0,0 +1,661 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ GNU AFFERO GENERAL PUBLIC LICENSE
2
+ Version 3, 19 November 2007
3
+
4
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
5
+ Everyone is permitted to copy and distribute verbatim copies
6
+ of this license document, but changing it is not allowed.
7
+
8
+ Preamble
9
+
10
+ The GNU Affero General Public License is a free, copyleft license for
11
+ software and other kinds of works, specifically designed to ensure
12
+ cooperation with the community in the case of network server software.
13
+
14
+ The licenses for most software and other practical works are designed
15
+ to take away your freedom to share and change the works. By contrast,
16
+ our General Public Licenses are intended to guarantee your freedom to
17
+ share and change all versions of a program--to make sure it remains free
18
+ software for all its users.
19
+
20
+ When we speak of free software, we are referring to freedom, not
21
+ price. Our General Public Licenses are designed to make sure that you
22
+ have the freedom to distribute copies of free software (and charge for
23
+ them if you wish), that you receive source code or can get it if you
24
+ want it, that you can change the software or use pieces of it in new
25
+ free programs, and that you know you can do these things.
26
+
27
+ Developers that use our General Public Licenses protect your rights
28
+ with two steps: (1) assert copyright on the software, and (2) offer
29
+ you this License which gives you legal permission to copy, distribute
30
+ and/or modify the software.
31
+
32
+ A secondary benefit of defending all users' freedom is that
33
+ improvements made in alternate versions of the program, if they
34
+ receive widespread use, become available for other developers to
35
+ incorporate. Many developers of free software are heartened and
36
+ encouraged by the resulting cooperation. However, in the case of
37
+ software used on network servers, this result may fail to come about.
38
+ The GNU General Public License permits making a modified version and
39
+ letting the public access it on a server without ever releasing its
40
+ source code to the public.
41
+
42
+ The GNU Affero General Public License is designed specifically to
43
+ ensure that, in such cases, the modified source code becomes available
44
+ to the community. It requires the operator of a network server to
45
+ provide the source code of the modified version running there to the
46
+ users of that server. Therefore, public use of a modified version, on
47
+ a publicly accessible server, gives the public access to the source
48
+ code of the modified version.
49
+
50
+ An older license, called the Affero General Public License and
51
+ published by Affero, was designed to accomplish similar goals. This is
52
+ a different license, not a version of the Affero GPL, but Affero has
53
+ released a new version of the Affero GPL which permits relicensing under
54
+ this license.
55
+
56
+ The precise terms and conditions for copying, distribution and
57
+ modification follow.
58
+
59
+ TERMS AND CONDITIONS
60
+
61
+ 0. Definitions.
62
+
63
+ "This License" refers to version 3 of the GNU Affero General Public License.
64
+
65
+ "Copyright" also means copyright-like laws that apply to other kinds of
66
+ works, such as semiconductor masks.
67
+
68
+ "The Program" refers to any copyrightable work licensed under this
69
+ License. Each licensee is addressed as "you". "Licensees" and
70
+ "recipients" may be individuals or organizations.
71
+
72
+ To "modify" a work means to copy from or adapt all or part of the work
73
+ in a fashion requiring copyright permission, other than the making of an
74
+ exact copy. The resulting work is called a "modified version" of the
75
+ earlier work or a work "based on" the earlier work.
76
+
77
+ A "covered work" means either the unmodified Program or a work based
78
+ on the Program.
79
+
80
+ To "propagate" a work means to do anything with it that, without
81
+ permission, would make you directly or secondarily liable for
82
+ infringement under applicable copyright law, except executing it on a
83
+ computer or modifying a private copy. Propagation includes copying,
84
+ distribution (with or without modification), making available to the
85
+ public, and in some countries other activities as well.
86
+
87
+ To "convey" a work means any kind of propagation that enables other
88
+ parties to make or receive copies. Mere interaction with a user through
89
+ a computer network, with no transfer of a copy, is not conveying.
90
+
91
+ An interactive user interface displays "Appropriate Legal Notices"
92
+ to the extent that it includes a convenient and prominently visible
93
+ feature that (1) displays an appropriate copyright notice, and (2)
94
+ tells the user that there is no warranty for the work (except to the
95
+ extent that warranties are provided), that licensees may convey the
96
+ work under this License, and how to view a copy of this License. If
97
+ the interface presents a list of user commands or options, such as a
98
+ menu, a prominent item in the list meets this criterion.
99
+
100
+ 1. Source Code.
101
+
102
+ The "source code" for a work means the preferred form of the work
103
+ for making modifications to it. "Object code" means any non-source
104
+ form of a work.
105
+
106
+ A "Standard Interface" means an interface that either is an official
107
+ standard defined by a recognized standards body, or, in the case of
108
+ interfaces specified for a particular programming language, one that
109
+ is widely used among developers working in that language.
110
+
111
+ The "System Libraries" of an executable work include anything, other
112
+ than the work as a whole, that (a) is included in the normal form of
113
+ packaging a Major Component, but which is not part of that Major
114
+ Component, and (b) serves only to enable use of the work with that
115
+ Major Component, or to implement a Standard Interface for which an
116
+ implementation is available to the public in source code form. A
117
+ "Major Component", in this context, means a major essential component
118
+ (kernel, window system, and so on) of the specific operating system
119
+ (if any) on which the executable work runs, or a compiler used to
120
+ produce the work, or an object code interpreter used to run it.
121
+
122
+ The "Corresponding Source" for a work in object code form means all
123
+ the source code needed to generate, install, and (for an executable
124
+ work) run the object code and to modify the work, including scripts to
125
+ control those activities. However, it does not include the work's
126
+ System Libraries, or general-purpose tools or generally available free
127
+ programs which are used unmodified in performing those activities but
128
+ which are not part of the work. For example, Corresponding Source
129
+ includes interface definition files associated with source files for
130
+ the work, and the source code for shared libraries and dynamically
131
+ linked subprograms that the work is specifically designed to require,
132
+ such as by intimate data communication or control flow between those
133
+ subprograms and other parts of the work.
134
+
135
+ The Corresponding Source need not include anything that users
136
+ can regenerate automatically from other parts of the Corresponding
137
+ Source.
138
+
139
+ The Corresponding Source for a work in source code form is that
140
+ same work.
141
+
142
+ 2. Basic Permissions.
143
+
144
+ All rights granted under this License are granted for the term of
145
+ copyright on the Program, and are irrevocable provided the stated
146
+ conditions are met. This License explicitly affirms your unlimited
147
+ permission to run the unmodified Program. The output from running a
148
+ covered work is covered by this License only if the output, given its
149
+ content, constitutes a covered work. This License acknowledges your
150
+ rights of fair use or other equivalent, as provided by copyright law.
151
+
152
+ You may make, run and propagate covered works that you do not
153
+ convey, without conditions so long as your license otherwise remains
154
+ in force. You may convey covered works to others for the sole purpose
155
+ of having them make modifications exclusively for you, or provide you
156
+ with facilities for running those works, provided that you comply with
157
+ the terms of this License in conveying all material for which you do
158
+ not control copyright. Those thus making or running the covered works
159
+ for you must do so exclusively on your behalf, under your direction
160
+ and control, on terms that prohibit them from making any copies of
161
+ your copyrighted material outside their relationship with you.
162
+
163
+ Conveying under any other circumstances is permitted solely under
164
+ the conditions stated below. Sublicensing is not allowed; section 10
165
+ makes it unnecessary.
166
+
167
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
168
+
169
+ No covered work shall be deemed part of an effective technological
170
+ measure under any applicable law fulfilling obligations under article
171
+ 11 of the WIPO copyright treaty adopted on 20 December 1996, or
172
+ similar laws prohibiting or restricting circumvention of such
173
+ measures.
174
+
175
+ When you convey a covered work, you waive any legal power to forbid
176
+ circumvention of technological measures to the extent such circumvention
177
+ is effected by exercising rights under this License with respect to
178
+ the covered work, and you disclaim any intention to limit operation or
179
+ modification of the work as a means of enforcing, against the work's
180
+ users, your or third parties' legal rights to forbid circumvention of
181
+ technological measures.
182
+
183
+ 4. Conveying Verbatim Copies.
184
+
185
+ You may convey verbatim copies of the Program's source code as you
186
+ receive it, in any medium, provided that you conspicuously and
187
+ appropriately publish on each copy an appropriate copyright notice;
188
+ keep intact all notices stating that this License and any
189
+ non-permissive terms added in accord with section 7 apply to the code;
190
+ keep intact all notices of the absence of any warranty; and give all
191
+ recipients a copy of this License along with the Program.
192
+
193
+ You may charge any price or no price for each copy that you convey,
194
+ and you may offer support or warranty protection for a fee.
195
+
196
+ 5. Conveying Modified Source Versions.
197
+
198
+ You may convey a work based on the Program, or the modifications to
199
+ produce it from the Program, in the form of source code under the
200
+ terms of section 4, provided that you also meet all of these conditions:
201
+
202
+ a) The work must carry prominent notices stating that you modified
203
+ it, and giving a relevant date.
204
+
205
+ b) The work must carry prominent notices stating that it is
206
+ released under this License and any conditions added under section
207
+ 7. This requirement modifies the requirement in section 4 to
208
+ "keep intact all notices".
209
+
210
+ c) You must license the entire work, as a whole, under this
211
+ License to anyone who comes into possession of a copy. This
212
+ License will therefore apply, along with any applicable section 7
213
+ additional terms, to the whole of the work, and all its parts,
214
+ regardless of how they are packaged. This License gives no
215
+ permission to license the work in any other way, but it does not
216
+ invalidate such permission if you have separately received it.
217
+
218
+ d) If the work has interactive user interfaces, each must display
219
+ Appropriate Legal Notices; however, if the Program has interactive
220
+ interfaces that do not display Appropriate Legal Notices, your
221
+ work need not make them do so.
222
+
223
+ A compilation of a covered work with other separate and independent
224
+ works, which are not by their nature extensions of the covered work,
225
+ and which are not combined with it such as to form a larger program,
226
+ in or on a volume of a storage or distribution medium, is called an
227
+ "aggregate" if the compilation and its resulting copyright are not
228
+ used to limit the access or legal rights of the compilation's users
229
+ beyond what the individual works permit. Inclusion of a covered work
230
+ in an aggregate does not cause this License to apply to the other
231
+ parts of the aggregate.
232
+
233
+ 6. Conveying Non-Source Forms.
234
+
235
+ You may convey a covered work in object code form under the terms
236
+ of sections 4 and 5, provided that you also convey the
237
+ machine-readable Corresponding Source under the terms of this License,
238
+ in one of these ways:
239
+
240
+ a) Convey the object code in, or embodied in, a physical product
241
+ (including a physical distribution medium), accompanied by the
242
+ Corresponding Source fixed on a durable physical medium
243
+ customarily used for software interchange.
244
+
245
+ b) Convey the object code in, or embodied in, a physical product
246
+ (including a physical distribution medium), accompanied by a
247
+ written offer, valid for at least three years and valid for as
248
+ long as you offer spare parts or customer support for that product
249
+ model, to give anyone who possesses the object code either (1) a
250
+ copy of the Corresponding Source for all the software in the
251
+ product that is covered by this License, on a durable physical
252
+ medium customarily used for software interchange, for a price no
253
+ more than your reasonable cost of physically performing this
254
+ conveying of source, or (2) access to copy the
255
+ Corresponding Source from a network server at no charge.
256
+
257
+ c) Convey individual copies of the object code with a copy of the
258
+ written offer to provide the Corresponding Source. This
259
+ alternative is allowed only occasionally and noncommercially, and
260
+ only if you received the object code with such an offer, in accord
261
+ with subsection 6b.
262
+
263
+ d) Convey the object code by offering access from a designated
264
+ place (gratis or for a charge), and offer equivalent access to the
265
+ Corresponding Source in the same way through the same place at no
266
+ further charge. You need not require recipients to copy the
267
+ Corresponding Source along with the object code. If the place to
268
+ copy the object code is a network server, the Corresponding Source
269
+ may be on a different server (operated by you or a third party)
270
+ that supports equivalent copying facilities, provided you maintain
271
+ clear directions next to the object code saying where to find the
272
+ Corresponding Source. Regardless of what server hosts the
273
+ Corresponding Source, you remain obligated to ensure that it is
274
+ available for as long as needed to satisfy these requirements.
275
+
276
+ e) Convey the object code using peer-to-peer transmission, provided
277
+ you inform other peers where the object code and Corresponding
278
+ Source of the work are being offered to the general public at no
279
+ charge under subsection 6d.
280
+
281
+ A separable portion of the object code, whose source code is excluded
282
+ from the Corresponding Source as a System Library, need not be
283
+ included in conveying the object code work.
284
+
285
+ A "User Product" is either (1) a "consumer product", which means any
286
+ tangible personal property which is normally used for personal, family,
287
+ or household purposes, or (2) anything designed or sold for incorporation
288
+ into a dwelling. In determining whether a product is a consumer product,
289
+ doubtful cases shall be resolved in favor of coverage. For a particular
290
+ product received by a particular user, "normally used" refers to a
291
+ typical or common use of that class of product, regardless of the status
292
+ of the particular user or of the way in which the particular user
293
+ actually uses, or expects or is expected to use, the product. A product
294
+ is a consumer product regardless of whether the product has substantial
295
+ commercial, industrial or non-consumer uses, unless such uses represent
296
+ the only significant mode of use of the product.
297
+
298
+ "Installation Information" for a User Product means any methods,
299
+ procedures, authorization keys, or other information required to install
300
+ and execute modified versions of a covered work in that User Product from
301
+ a modified version of its Corresponding Source. The information must
302
+ suffice to ensure that the continued functioning of the modified object
303
+ code is in no case prevented or interfered with solely because
304
+ modification has been made.
305
+
306
+ If you convey an object code work under this section in, or with, or
307
+ specifically for use in, a User Product, and the conveying occurs as
308
+ part of a transaction in which the right of possession and use of the
309
+ User Product is transferred to the recipient in perpetuity or for a
310
+ fixed term (regardless of how the transaction is characterized), the
311
+ Corresponding Source conveyed under this section must be accompanied
312
+ by the Installation Information. But this requirement does not apply
313
+ if neither you nor any third party retains the ability to install
314
+ modified object code on the User Product (for example, the work has
315
+ been installed in ROM).
316
+
317
+ The requirement to provide Installation Information does not include a
318
+ requirement to continue to provide support service, warranty, or updates
319
+ for a work that has been modified or installed by the recipient, or for
320
+ the User Product in which it has been modified or installed. Access to a
321
+ network may be denied when the modification itself materially and
322
+ adversely affects the operation of the network or violates the rules and
323
+ protocols for communication across the network.
324
+
325
+ Corresponding Source conveyed, and Installation Information provided,
326
+ in accord with this section must be in a format that is publicly
327
+ documented (and with an implementation available to the public in
328
+ source code form), and must require no special password or key for
329
+ unpacking, reading or copying.
330
+
331
+ 7. Additional Terms.
332
+
333
+ "Additional permissions" are terms that supplement the terms of this
334
+ License by making exceptions from one or more of its conditions.
335
+ Additional permissions that are applicable to the entire Program shall
336
+ be treated as though they were included in this License, to the extent
337
+ that they are valid under applicable law. If additional permissions
338
+ apply only to part of the Program, that part may be used separately
339
+ under those permissions, but the entire Program remains governed by
340
+ this License without regard to the additional permissions.
341
+
342
+ When you convey a copy of a covered work, you may at your option
343
+ remove any additional permissions from that copy, or from any part of
344
+ it. (Additional permissions may be written to require their own
345
+ removal in certain cases when you modify the work.) You may place
346
+ additional permissions on material, added by you to a covered work,
347
+ for which you have or can give appropriate copyright permission.
348
+
349
+ Notwithstanding any other provision of this License, for material you
350
+ add to a covered work, you may (if authorized by the copyright holders of
351
+ that material) supplement the terms of this License with terms:
352
+
353
+ a) Disclaiming warranty or limiting liability differently from the
354
+ terms of sections 15 and 16 of this License; or
355
+
356
+ b) Requiring preservation of specified reasonable legal notices or
357
+ author attributions in that material or in the Appropriate Legal
358
+ Notices displayed by works containing it; or
359
+
360
+ c) Prohibiting misrepresentation of the origin of that material, or
361
+ requiring that modified versions of such material be marked in
362
+ reasonable ways as different from the original version; or
363
+
364
+ d) Limiting the use for publicity purposes of names of licensors or
365
+ authors of the material; or
366
+
367
+ e) Declining to grant rights under trademark law for use of some
368
+ trade names, trademarks, or service marks; or
369
+
370
+ f) Requiring indemnification of licensors and authors of that
371
+ material by anyone who conveys the material (or modified versions of
372
+ it) with contractual assumptions of liability to the recipient, for
373
+ any liability that these contractual assumptions directly impose on
374
+ those licensors and authors.
375
+
376
+ All other non-permissive additional terms are considered "further
377
+ restrictions" within the meaning of section 10. If the Program as you
378
+ received it, or any part of it, contains a notice stating that it is
379
+ governed by this License along with a term that is a further
380
+ restriction, you may remove that term. If a license document contains
381
+ a further restriction but permits relicensing or conveying under this
382
+ License, you may add to a covered work material governed by the terms
383
+ of that license document, provided that the further restriction does
384
+ not survive such relicensing or conveying.
385
+
386
+ If you add terms to a covered work in accord with this section, you
387
+ must place, in the relevant source files, a statement of the
388
+ additional terms that apply to those files, or a notice indicating
389
+ where to find the applicable terms.
390
+
391
+ Additional terms, permissive or non-permissive, may be stated in the
392
+ form of a separately written license, or stated as exceptions;
393
+ the above requirements apply either way.
394
+
395
+ 8. Termination.
396
+
397
+ You may not propagate or modify a covered work except as expressly
398
+ provided under this License. Any attempt otherwise to propagate or
399
+ modify it is void, and will automatically terminate your rights under
400
+ this License (including any patent licenses granted under the third
401
+ paragraph of section 11).
402
+
403
+ However, if you cease all violation of this License, then your
404
+ license from a particular copyright holder is reinstated (a)
405
+ provisionally, unless and until the copyright holder explicitly and
406
+ finally terminates your license, and (b) permanently, if the copyright
407
+ holder fails to notify you of the violation by some reasonable means
408
+ prior to 60 days after the cessation.
409
+
410
+ Moreover, your license from a particular copyright holder is
411
+ reinstated permanently if the copyright holder notifies you of the
412
+ violation by some reasonable means, this is the first time you have
413
+ received notice of violation of this License (for any work) from that
414
+ copyright holder, and you cure the violation prior to 30 days after
415
+ your receipt of the notice.
416
+
417
+ Termination of your rights under this section does not terminate the
418
+ licenses of parties who have received copies or rights from you under
419
+ this License. If your rights have been terminated and not permanently
420
+ reinstated, you do not qualify to receive new licenses for the same
421
+ material under section 10.
422
+
423
+ 9. Acceptance Not Required for Having Copies.
424
+
425
+ You are not required to accept this License in order to receive or
426
+ run a copy of the Program. Ancillary propagation of a covered work
427
+ occurring solely as a consequence of using peer-to-peer transmission
428
+ to receive a copy likewise does not require acceptance. However,
429
+ nothing other than this License grants you permission to propagate or
430
+ modify any covered work. These actions infringe copyright if you do
431
+ not accept this License. Therefore, by modifying or propagating a
432
+ covered work, you indicate your acceptance of this License to do so.
433
+
434
+ 10. Automatic Licensing of Downstream Recipients.
435
+
436
+ Each time you convey a covered work, the recipient automatically
437
+ receives a license from the original licensors, to run, modify and
438
+ propagate that work, subject to this License. You are not responsible
439
+ for enforcing compliance by third parties with this License.
440
+
441
+ An "entity transaction" is a transaction transferring control of an
442
+ organization, or substantially all assets of one, or subdividing an
443
+ organization, or merging organizations. If propagation of a covered
444
+ work results from an entity transaction, each party to that
445
+ transaction who receives a copy of the work also receives whatever
446
+ licenses to the work the party's predecessor in interest had or could
447
+ give under the previous paragraph, plus a right to possession of the
448
+ Corresponding Source of the work from the predecessor in interest, if
449
+ the predecessor has it or can get it with reasonable efforts.
450
+
451
+ You may not impose any further restrictions on the exercise of the
452
+ rights granted or affirmed under this License. For example, you may
453
+ not impose a license fee, royalty, or other charge for exercise of
454
+ rights granted under this License, and you may not initiate litigation
455
+ (including a cross-claim or counterclaim in a lawsuit) alleging that
456
+ any patent claim is infringed by making, using, selling, offering for
457
+ sale, or importing the Program or any portion of it.
458
+
459
+ 11. Patents.
460
+
461
+ A "contributor" is a copyright holder who authorizes use under this
462
+ License of the Program or a work on which the Program is based. The
463
+ work thus licensed is called the contributor's "contributor version".
464
+
465
+ A contributor's "essential patent claims" are all patent claims
466
+ owned or controlled by the contributor, whether already acquired or
467
+ hereafter acquired, that would be infringed by some manner, permitted
468
+ by this License, of making, using, or selling its contributor version,
469
+ but do not include claims that would be infringed only as a
470
+ consequence of further modification of the contributor version. For
471
+ purposes of this definition, "control" includes the right to grant
472
+ patent sublicenses in a manner consistent with the requirements of
473
+ this License.
474
+
475
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
476
+ patent license under the contributor's essential patent claims, to
477
+ make, use, sell, offer for sale, import and otherwise run, modify and
478
+ propagate the contents of its contributor version.
479
+
480
+ In the following three paragraphs, a "patent license" is any express
481
+ agreement or commitment, however denominated, not to enforce a patent
482
+ (such as an express permission to practice a patent or covenant not to
483
+ sue for patent infringement). To "grant" such a patent license to a
484
+ party means to make such an agreement or commitment not to enforce a
485
+ patent against the party.
486
+
487
+ If you convey a covered work, knowingly relying on a patent license,
488
+ and the Corresponding Source of the work is not available for anyone
489
+ to copy, free of charge and under the terms of this License, through a
490
+ publicly available network server or other readily accessible means,
491
+ then you must either (1) cause the Corresponding Source to be so
492
+ available, or (2) arrange to deprive yourself of the benefit of the
493
+ patent license for this particular work, or (3) arrange, in a manner
494
+ consistent with the requirements of this License, to extend the patent
495
+ license to downstream recipients. "Knowingly relying" means you have
496
+ actual knowledge that, but for the patent license, your conveying the
497
+ covered work in a country, or your recipient's use of the covered work
498
+ in a country, would infringe one or more identifiable patents in that
499
+ country that you have reason to believe are valid.
500
+
501
+ If, pursuant to or in connection with a single transaction or
502
+ arrangement, you convey, or propagate by procuring conveyance of, a
503
+ covered work, and grant a patent license to some of the parties
504
+ receiving the covered work authorizing them to use, propagate, modify
505
+ or convey a specific copy of the covered work, then the patent license
506
+ you grant is automatically extended to all recipients of the covered
507
+ work and works based on it.
508
+
509
+ A patent license is "discriminatory" if it does not include within
510
+ the scope of its coverage, prohibits the exercise of, or is
511
+ conditioned on the non-exercise of one or more of the rights that are
512
+ specifically granted under this License. You may not convey a covered
513
+ work if you are a party to an arrangement with a third party that is
514
+ in the business of distributing software, under which you make payment
515
+ to the third party based on the extent of your activity of conveying
516
+ the work, and under which the third party grants, to any of the
517
+ parties who would receive the covered work from you, a discriminatory
518
+ patent license (a) in connection with copies of the covered work
519
+ conveyed by you (or copies made from those copies), or (b) primarily
520
+ for and in connection with specific products or compilations that
521
+ contain the covered work, unless you entered into that arrangement,
522
+ or that patent license was granted, prior to 28 March 2007.
523
+
524
+ Nothing in this License shall be construed as excluding or limiting
525
+ any implied license or other defenses to infringement that may
526
+ otherwise be available to you under applicable patent law.
527
+
528
+ 12. No Surrender of Others' Freedom.
529
+
530
+ If conditions are imposed on you (whether by court order, agreement or
531
+ otherwise) that contradict the conditions of this License, they do not
532
+ excuse you from the conditions of this License. If you cannot convey a
533
+ covered work so as to satisfy simultaneously your obligations under this
534
+ License and any other pertinent obligations, then as a consequence you may
535
+ not convey it at all. For example, if you agree to terms that obligate you
536
+ to collect a royalty for further conveying from those to whom you convey
537
+ the Program, the only way you could satisfy both those terms and this
538
+ License would be to refrain entirely from conveying the Program.
539
+
540
+ 13. Remote Network Interaction; Use with the GNU General Public License.
541
+
542
+ Notwithstanding any other provision of this License, if you modify the
543
+ Program, your modified version must prominently offer all users
544
+ interacting with it remotely through a computer network (if your version
545
+ supports such interaction) an opportunity to receive the Corresponding
546
+ Source of your version by providing access to the Corresponding Source
547
+ from a network server at no charge, through some standard or customary
548
+ means of facilitating copying of software. This Corresponding Source
549
+ shall include the Corresponding Source for any work covered by version 3
550
+ of the GNU General Public License that is incorporated pursuant to the
551
+ following paragraph.
552
+
553
+ Notwithstanding any other provision of this License, you have
554
+ permission to link or combine any covered work with a work licensed
555
+ under version 3 of the GNU General Public License into a single
556
+ combined work, and to convey the resulting work. The terms of this
557
+ License will continue to apply to the part which is the covered work,
558
+ but the work with which it is combined will remain governed by version
559
+ 3 of the GNU General Public License.
560
+
561
+ 14. Revised Versions of this License.
562
+
563
+ The Free Software Foundation may publish revised and/or new versions of
564
+ the GNU Affero General Public License from time to time. Such new versions
565
+ will be similar in spirit to the present version, but may differ in detail to
566
+ address new problems or concerns.
567
+
568
+ Each version is given a distinguishing version number. If the
569
+ Program specifies that a certain numbered version of the GNU Affero General
570
+ Public License "or any later version" applies to it, you have the
571
+ option of following the terms and conditions either of that numbered
572
+ version or of any later version published by the Free Software
573
+ Foundation. If the Program does not specify a version number of the
574
+ GNU Affero General Public License, you may choose any version ever published
575
+ by the Free Software Foundation.
576
+
577
+ If the Program specifies that a proxy can decide which future
578
+ versions of the GNU Affero General Public License can be used, that proxy's
579
+ public statement of acceptance of a version permanently authorizes you
580
+ to choose that version for the Program.
581
+
582
+ Later license versions may give you additional or different
583
+ permissions. However, no additional obligations are imposed on any
584
+ author or copyright holder as a result of your choosing to follow a
585
+ later version.
586
+
587
+ 15. Disclaimer of Warranty.
588
+
589
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
590
+ APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
591
+ HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
592
+ OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
593
+ THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
594
+ PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
595
+ IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
596
+ ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
597
+
598
+ 16. Limitation of Liability.
599
+
600
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
601
+ WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
602
+ THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
603
+ GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
604
+ USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
605
+ DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
606
+ PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
607
+ EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
608
+ SUCH DAMAGES.
609
+
610
+ 17. Interpretation of Sections 15 and 16.
611
+
612
+ If the disclaimer of warranty and limitation of liability provided
613
+ above cannot be given local legal effect according to their terms,
614
+ reviewing courts shall apply local law that most closely approximates
615
+ an absolute waiver of all civil liability in connection with the
616
+ Program, unless a warranty or assumption of liability accompanies a
617
+ copy of the Program in return for a fee.
618
+
619
+ END OF TERMS AND CONDITIONS
620
+
621
+ How to Apply These Terms to Your New Programs
622
+
623
+ If you develop a new program, and you want it to be of the greatest
624
+ possible use to the public, the best way to achieve this is to make it
625
+ free software which everyone can redistribute and change under these terms.
626
+
627
+ To do so, attach the following notices to the program. It is safest
628
+ to attach them to the start of each source file to most effectively
629
+ state the exclusion of warranty; and each file should have at least
630
+ the "copyright" line and a pointer to where the full notice is found.
631
+
632
+ <one line to give the program's name and a brief idea of what it does.>
633
+ Copyright (C) <year> <name of author>
634
+
635
+ This program is free software: you can redistribute it and/or modify
636
+ it under the terms of the GNU Affero General Public License as published
637
+ by the Free Software Foundation, either version 3 of the License, or
638
+ (at your option) any later version.
639
+
640
+ This program is distributed in the hope that it will be useful,
641
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
642
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
643
+ GNU Affero General Public License for more details.
644
+
645
+ You should have received a copy of the GNU Affero General Public License
646
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
647
+
648
+ Also add information on how to contact you by electronic and paper mail.
649
+
650
+ If your software can interact with users remotely through a computer
651
+ network, you should also make sure that it provides a way for users to
652
+ get its source. For example, if your program is a web application, its
653
+ interface could display a "Source" link that leads users to an archive
654
+ of the code. There are many ways you could offer source, and different
655
+ solutions will be better for different programs; see section 13 for the
656
+ specific requirements.
657
+
658
+ You should also get your employer (if you work as a programmer) or school,
659
+ if any, to sign a "copyright disclaimer" for the program, if necessary.
660
+ For more information on this, and how to apply and follow the GNU AGPL, see
661
+ <https://www.gnu.org/licenses/>.
MIT_LICENSE_GGML_LLAMACPP_ONLY ADDED
@@ -0,0 +1,26 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2023 Georgi Gerganov
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
22
+
23
+ ===================================
24
+
25
+ Note that the above license applies ONLY to the GGML library and llama.cpp by ggerganov, which are licensed under the MIT License
26
+ Kobold Lite by Concedo and the provided Python ctypes bindings in koboldcpp.dll are licensed under the AGPL v3.0 License
Makefile ADDED
@@ -0,0 +1,381 @@
1
+ default: koboldcpp koboldcpp_failsafe koboldcpp_openblas koboldcpp_openblas_noavx2 koboldcpp_clblast koboldcpp_cublas
2
+ tools: quantize_gpt2 quantize_gptj quantize_llama quantize_neox quantize_mpt
3
+ dev: koboldcpp_openblas
4
+ dev2: koboldcpp_clblast
5
+
6
+
7
+ ifndef UNAME_S
8
+ UNAME_S := $(shell uname -s)
9
+ endif
10
+
11
+ ifndef UNAME_P
12
+ UNAME_P := $(shell uname -p)
13
+ endif
14
+
15
+ ifndef UNAME_M
16
+ UNAME_M := $(shell uname -m)
17
+ endif
18
+
19
+ ifneq ($(shell grep -e "Arch Linux" -e "ID_LIKE=arch" /etc/os-release 2>/dev/null),)
20
+ ARCH_ADD = -lcblas
21
+ endif
22
+
23
+ CCV := $(shell $(CC) --version | head -n 1)
24
+ CXXV := $(shell $(CXX) --version | head -n 1)
25
+
26
+ # Mac OS + Arm can report x86_64
27
+ # ref: https://github.com/ggerganov/whisper.cpp/issues/66#issuecomment-1282546789
28
+ ifeq ($(UNAME_S),Darwin)
29
+ ifneq ($(UNAME_P),arm)
30
+ SYSCTL_M := $(shell sysctl -n hw.optional.arm64 2>/dev/null)
31
+ ifeq ($(SYSCTL_M),1)
32
+ # UNAME_P := arm
33
+ # UNAME_M := arm64
34
+ warn := $(warning Your arch is announced as x86_64, but it seems to actually be ARM64. Not fixing that can lead to bad performance. For more info see: https://github.com/ggerganov/whisper.cpp/issues/66\#issuecomment-1282546789)
35
+ endif
36
+ endif
37
+ endif
38
+
39
+ #
40
+ # Compile flags
41
+ #
42
+
43
+ # keep standard at C11 and C++11
44
+ CFLAGS = -I. -I./include -I./include/CL -I./otherarch -I./otherarch/tools -Ofast -DNDEBUG -std=c11 -fPIC -DGGML_USE_K_QUANTS
45
+ CXXFLAGS = -I. -I./examples -I./include -I./include/CL -I./otherarch -I./otherarch/tools -O3 -DNDEBUG -std=c++11 -fPIC -DGGML_USE_K_QUANTS
46
+ LDFLAGS =
47
+
48
+ # these are used on windows, to build some libraries with extra old device compatibility
49
+ SIMPLECFLAGS =
50
+ FULLCFLAGS =
51
+ NONECFLAGS =
52
+
53
+ OPENBLAS_FLAGS = -DGGML_USE_OPENBLAS -I/usr/local/include/openblas
54
+ CLBLAST_FLAGS = -DGGML_USE_CLBLAST
55
+ FAILSAFE_FLAGS = -DUSE_FAILSAFE
56
+ ifdef LLAMA_CUBLAS
57
+ CUBLAS_FLAGS = -DGGML_USE_CUBLAS
58
+ else
59
+ CUBLAS_FLAGS =
60
+ endif
61
+ CUBLASLD_FLAGS =
62
+ CUBLAS_OBJS =
63
+
64
+ # let's try enabling everything
65
+ CFLAGS += -pthread -s
66
+ CXXFLAGS += -pthread -s -Wno-multichar -Wno-write-strings
67
+
68
+ # OS specific
69
+ # TODO: support Windows
70
+ ifeq ($(UNAME_S),Linux)
71
+ CFLAGS += -pthread
72
+ CXXFLAGS += -pthread
73
+ endif
74
+
75
+ ifeq ($(UNAME_S),Darwin)
76
+ CFLAGS += -pthread
77
+ CXXFLAGS += -pthread
78
+ endif
79
+ ifeq ($(UNAME_S),FreeBSD)
80
+ CFLAGS += -pthread
81
+ CXXFLAGS += -pthread
82
+ endif
83
+ ifeq ($(UNAME_S),NetBSD)
84
+ CFLAGS += -pthread
85
+ CXXFLAGS += -pthread
86
+ endif
87
+ ifeq ($(UNAME_S),OpenBSD)
88
+ CFLAGS += -pthread
89
+ CXXFLAGS += -pthread
90
+ endif
91
+ ifeq ($(UNAME_S),Haiku)
92
+ CFLAGS += -pthread
93
+ CXXFLAGS += -pthread
94
+ endif
95
+
96
+ ifdef LLAMA_GPROF
97
+ CFLAGS += -pg
98
+ CXXFLAGS += -pg
99
+ endif
100
+ ifdef LLAMA_PERF
101
+ CFLAGS += -DGGML_PERF
102
+ CXXFLAGS += -DGGML_PERF
103
+ endif
104
+
105
+ # Architecture specific
106
+ # TODO: probably these flags need to be tweaked on some architectures
107
+ # feel free to update the Makefile for your architecture and send a pull request or issue
108
+ ifeq ($(UNAME_M),$(filter $(UNAME_M),x86_64 i686))
109
+ # Use all CPU extensions that are available:
110
+ # the old library NEEDS mf16c to work, so we must build with it; the new one doesn't
111
+ ifeq ($(OS),Windows_NT)
112
+ CFLAGS +=
113
+ NONECFLAGS += -mno-sse3
114
+ SIMPLECFLAGS += -mavx -msse3
115
+ FULLCFLAGS += -mavx2 -msse3 -mfma -mf16c -mavx
116
+ else
117
+ # if not on Windows, they are clearly building it themselves, so let's just use whatever is supported
118
+ CFLAGS += -march=native -mtune=native
119
+ endif
120
+ endif
121
+ ifneq ($(filter ppc64%,$(UNAME_M)),)
122
+ POWER9_M := $(shell grep "POWER9" /proc/cpuinfo)
123
+ ifneq (,$(findstring POWER9,$(POWER9_M)))
124
+ CFLAGS += -mcpu=power9
125
+ CXXFLAGS += -mcpu=power9
126
+ endif
127
+ # Require c++23's std::byteswap for big-endian support.
128
+ ifeq ($(UNAME_M),ppc64)
129
+ CXXFLAGS += -std=c++23 -DGGML_BIG_ENDIAN
130
+ endif
131
+ endif
132
+ ifndef LLAMA_NO_ACCELERATE
133
+ # Mac M1 - include Accelerate framework.
134
+ # `-framework Accelerate` works on Mac Intel as well, with a negligible performance boost (as of the predict time).
135
+ ifeq ($(UNAME_S),Darwin)
136
+ CFLAGS += -DGGML_USE_ACCELERATE
137
+ LDFLAGS += -framework Accelerate
138
+ endif
139
+ endif
140
+
141
+ # it is recommended to use the CMake file to build for cuBLAS if you can - it will likely work better
142
+ ifdef LLAMA_CUBLAS
143
+ CUBLAS_FLAGS = -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/opt/cuda/include -I$(CUDA_PATH)/targets/x86_64-linux/include
144
+ CUBLASLD_FLAGS = -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/opt/cuda/lib64 -L$(CUDA_PATH)/targets/x86_64-linux/lib
145
+ CUBLAS_OBJS = ggml-cuda.o ggml_v2-cuda.o ggml_v2-cuda-legacy.o
146
+ NVCC = nvcc
147
+ NVCCFLAGS = --forward-unknown-to-host-compiler
148
+ ifdef CUDA_DOCKER_ARCH
149
+ NVCCFLAGS += -Wno-deprecated-gpu-targets -arch=$(CUDA_DOCKER_ARCH)
150
+ else
151
+ NVCCFLAGS += -arch=native
152
+ endif # CUDA_DOCKER_ARCH
153
+ ifdef LLAMA_CUDA_FORCE_DMMV
154
+ NVCCFLAGS += -DGGML_CUDA_FORCE_DMMV
155
+ endif # LLAMA_CUDA_FORCE_DMMV
156
+ ifdef LLAMA_CUDA_DMMV_X
157
+ NVCCFLAGS += -DGGML_CUDA_DMMV_X=$(LLAMA_CUDA_DMMV_X)
158
+ else
159
+ NVCCFLAGS += -DGGML_CUDA_DMMV_X=32
160
+ endif # LLAMA_CUDA_DMMV_X
161
+ ifdef LLAMA_CUDA_MMV_Y
162
+ NVCCFLAGS += -DGGML_CUDA_MMV_Y=$(LLAMA_CUDA_MMV_Y)
163
+ else ifdef LLAMA_CUDA_DMMV_Y
164
+ NVCCFLAGS += -DGGML_CUDA_MMV_Y=$(LLAMA_CUDA_DMMV_Y) # for backwards compatibility
165
+ else
166
+ NVCCFLAGS += -DGGML_CUDA_MMV_Y=1
167
+ endif # LLAMA_CUDA_MMV_Y
168
+ ifdef LLAMA_CUDA_DMMV_F16
169
+ NVCCFLAGS += -DGGML_CUDA_DMMV_F16
170
+ endif # LLAMA_CUDA_DMMV_F16
171
+ ifdef LLAMA_CUDA_KQUANTS_ITER
172
+ NVCCFLAGS += -DK_QUANTS_PER_ITERATION=$(LLAMA_CUDA_KQUANTS_ITER)
173
+ else
174
+ NVCCFLAGS += -DK_QUANTS_PER_ITERATION=2
175
+ endif
176
+ ggml-cuda.o: ggml-cuda.cu ggml-cuda.h
177
+ $(NVCC) $(NVCCFLAGS) $(CXXFLAGS) $(CUBLAS_FLAGS) $(CUBLAS_CXXFLAGS) -Wno-pedantic -c $< -o $@
178
+ ggml_v2-cuda.o: otherarch/ggml_v2-cuda.cu otherarch/ggml_v2-cuda.h
179
+ $(NVCC) $(NVCCFLAGS) $(CXXFLAGS) $(CUBLAS_FLAGS) $(CUBLAS_CXXFLAGS) -Wno-pedantic -c $< -o $@
180
+ ggml_v2-cuda-legacy.o: otherarch/ggml_v2-cuda-legacy.cu otherarch/ggml_v2-cuda-legacy.h
181
+ $(NVCC) $(NVCCFLAGS) $(CXXFLAGS) $(CUBLAS_FLAGS) $(CUBLAS_CXXFLAGS) -Wno-pedantic -c $< -o $@
182
+ endif # LLAMA_CUBLAS
183
+
184
+ ifdef LLAMA_METAL
185
+ CFLAGS += -DGGML_USE_METAL -DGGML_METAL_NDEBUG
186
+ CXXFLAGS += -DGGML_USE_METAL
187
+ LDFLAGS += -framework Foundation -framework Metal -framework MetalKit -framework MetalPerformanceShaders
188
+ OBJS += ggml-metal.o
189
+
190
+ ggml-metal.o: ggml-metal.m ggml-metal.h
191
+ $(CC) $(CFLAGS) -c $< -o $@
192
+ endif # LLAMA_METAL
193
+
194
+ ifneq ($(filter aarch64%,$(UNAME_M)),)
195
+ # Apple M1, M2, etc.
196
+ # Raspberry Pi 3, 4, Zero 2 (64-bit)
197
+ CFLAGS +=
198
+ CXXFLAGS +=
199
+ endif
200
+ ifneq ($(filter armv6%,$(UNAME_M)),)
201
+ # Raspberry Pi 1, Zero
202
+ CFLAGS += -mfpu=neon-fp-armv8 -mfp16-format=ieee -mno-unaligned-access
203
+ endif
204
+ ifneq ($(filter armv7%,$(UNAME_M)),)
205
+ # Raspberry Pi 2
206
+ CFLAGS += -mfpu=neon-fp-armv8 -mfp16-format=ieee -mno-unaligned-access -funsafe-math-optimizations
207
+ endif
208
+ ifneq ($(filter armv8%,$(UNAME_M)),)
209
+ # Raspberry Pi 3, 4, Zero 2 (32-bit)
210
+ CFLAGS += -mfp16-format=ieee -mno-unaligned-access
211
+ endif
212
+
213
+ DEFAULT_BUILD =
214
+ FAILSAFE_BUILD =
215
+ OPENBLAS_BUILD =
216
+ OPENBLAS_NOAVX2_BUILD =
217
+ CLBLAST_BUILD =
218
+ CUBLAS_BUILD =
219
+
220
+ ifeq ($(OS),Windows_NT)
221
+ DEFAULT_BUILD = $(CXX) $(CXXFLAGS) $^ -shared -o $@.dll $(LDFLAGS)
222
+ FAILSAFE_BUILD = $(CXX) $(CXXFLAGS) $^ -shared -o $@.dll $(LDFLAGS)
223
+ OPENBLAS_BUILD = $(CXX) $(CXXFLAGS) $^ lib/libopenblas.lib -shared -o $@.dll $(LDFLAGS)
224
+ OPENBLAS_NOAVX2_BUILD = $(CXX) $(CXXFLAGS) $^ lib/libopenblas.lib -shared -o $@.dll $(LDFLAGS)
225
+ CLBLAST_BUILD = $(CXX) $(CXXFLAGS) $^ lib/OpenCL.lib lib/clblast.lib -shared -o $@.dll $(LDFLAGS)
226
+
227
+ ifdef LLAMA_CUBLAS
228
+ CUBLAS_BUILD = $(CXX) $(CXXFLAGS) $(CUBLAS_FLAGS) $^ -shared -o $@.dll $(CUBLASLD_FLAGS) $(LDFLAGS)
229
+ endif
230
+
231
+ else
232
+ DEFAULT_BUILD = $(CXX) $(CXXFLAGS) $^ -shared -o $@.so $(LDFLAGS)
233
+ FAILSAFE_BUILD = $(CXX) $(CXXFLAGS) $^ -shared -o $@.so $(LDFLAGS)
234
+ ifdef LLAMA_OPENBLAS
235
+ OPENBLAS_BUILD = $(CXX) $(CXXFLAGS) $^ $(ARCH_ADD) -lopenblas -shared -o $@.so $(LDFLAGS)
236
+ OPENBLAS_NOAVX2_BUILD = $(CXX) $(CXXFLAGS) $^ $(ARCH_ADD) -lopenblas -shared -o $@.so $(LDFLAGS)
237
+ endif
238
+ ifdef LLAMA_CLBLAST
239
+ ifeq ($(UNAME_S),Darwin)
240
+ CLBLAST_BUILD = $(CXX) $(CXXFLAGS) $^ -lclblast -framework OpenCL $(ARCH_ADD) -lopenblas -shared -o $@.so $(LDFLAGS)
241
+ else
242
+ CLBLAST_BUILD = $(CXX) $(CXXFLAGS) $^ -lclblast -lOpenCL $(ARCH_ADD) -lopenblas -shared -o $@.so $(LDFLAGS)
243
+ endif
244
+ endif
245
+
246
+ ifdef LLAMA_CUBLAS
247
+ CUBLAS_BUILD = $(CXX) $(CXXFLAGS) $(CUBLAS_FLAGS) $^ -shared -o $@.so $(CUBLASLD_FLAGS) $(LDFLAGS)
248
+ endif
249
+
250
+ ifndef LLAMA_OPENBLAS
251
+ ifndef LLAMA_CLBLAST
252
+ ifndef LLAMA_CUBLAS
253
+ OPENBLAS_BUILD = @echo 'Your OS $(OS) does not appear to be Windows. For faster speeds, install and link a BLAS library. Set LLAMA_OPENBLAS=1 to compile with OpenBLAS support or LLAMA_CLBLAST=1 to compile with ClBlast support. This is just a reminder, not an error.'
254
+ endif
255
+ endif
256
+ endif
257
+ endif
258
+
259
+
260
+
261
+ #
262
+ # Print build information
263
+ #
264
+
265
+ $(info I llama.cpp build info: )
266
+ $(info I UNAME_S: $(UNAME_S))
267
+ $(info I UNAME_P: $(UNAME_P))
268
+ $(info I UNAME_M: $(UNAME_M))
269
+ $(info I CFLAGS: $(CFLAGS))
270
+ $(info I CXXFLAGS: $(CXXFLAGS))
271
+ $(info I LDFLAGS: $(LDFLAGS))
272
+ $(info I CC: $(CCV))
273
+ $(info I CXX: $(CXXV))
274
+ $(info )
275
+
276
+ #
277
+ # Build library
278
+ #
279
+
280
+ ggml.o: ggml.c ggml.h ggml-cuda.h k_quants.h
281
+ $(CC) $(CFLAGS) $(FULLCFLAGS) -c $< -o $@
282
+ ggml_openblas.o: ggml.c ggml.h
283
+ $(CC) $(CFLAGS) $(FULLCFLAGS) $(OPENBLAS_FLAGS) -c $< -o $@
284
+ ggml_failsafe.o: ggml.c ggml.h
285
+ $(CC) $(CFLAGS) $(NONECFLAGS) -c $< -o $@
286
+ ggml_openblas_noavx2.o: ggml.c ggml.h
287
+ $(CC) $(CFLAGS) $(SIMPLECFLAGS) $(OPENBLAS_FLAGS) -c $< -o $@
288
+ ggml_clblast.o: ggml.c ggml.h
289
+ $(CC) $(CFLAGS) $(FULLCFLAGS) $(CLBLAST_FLAGS) -c $< -o $@
290
+ ggml_cublas.o: ggml.c ggml.h
291
+ $(CC) $(CFLAGS) $(FULLCFLAGS) $(CUBLAS_FLAGS) -c $< -o $@
292
+
293
+ #quants K
294
+ k_quants.o: k_quants.c k_quants.h ggml.h ggml-cuda.h
295
+ $(CC) $(CFLAGS) $(FULLCFLAGS) -c $< -o $@
296
+ k_quants_noavx2.o: k_quants.c k_quants.h ggml.h ggml-cuda.h
297
+ $(CC) $(CFLAGS) $(SIMPLECFLAGS) -c $< -o $@
298
+ k_quants_failsafe.o: k_quants.c k_quants.h ggml.h ggml-cuda.h
299
+ $(CC) $(CFLAGS) $(NONECFLAGS) -c $< -o $@
300
+
301
+ #version 2 libs
302
+ ggml_v2.o: otherarch/ggml_v2.c otherarch/ggml_v2.h
303
+ $(CC) $(CFLAGS) $(FULLCFLAGS) -c $< -o $@
304
+ ggml_v2_openblas.o: otherarch/ggml_v2.c otherarch/ggml_v2.h
305
+ $(CC) $(CFLAGS) $(FULLCFLAGS) $(OPENBLAS_FLAGS) -c $< -o $@
306
+ ggml_v2_failsafe.o: otherarch/ggml_v2.c otherarch/ggml_v2.h
307
+ $(CC) $(CFLAGS) $(NONECFLAGS) -c $< -o $@
308
+ ggml_v2_openblas_noavx2.o: otherarch/ggml_v2.c otherarch/ggml_v2.h
309
+ $(CC) $(CFLAGS) $(SIMPLECFLAGS) $(OPENBLAS_FLAGS) -c $< -o $@
310
+ ggml_v2_clblast.o: otherarch/ggml_v2.c otherarch/ggml_v2.h
311
+ $(CC) $(CFLAGS) $(FULLCFLAGS) $(CLBLAST_FLAGS) -c $< -o $@
312
+ ggml_v2_cublas.o: otherarch/ggml_v2.c otherarch/ggml_v2.h
313
+ $(CC) $(CFLAGS) $(FULLCFLAGS) $(CUBLAS_FLAGS) -c $< -o $@
314
+
315
+ #extreme old version compat
316
+ ggml_v1.o: otherarch/ggml_v1.c otherarch/ggml_v1.h
317
+ $(CC) $(CFLAGS) $(FULLCFLAGS) -c $< -o $@
318
+ ggml_v1_failsafe.o: otherarch/ggml_v1.c otherarch/ggml_v1.h
319
+ $(CC) $(CFLAGS) $(NONECFLAGS) -c $< -o $@
320
+
321
+ #opencl
322
+ ggml-opencl.o: ggml-opencl.cpp ggml-opencl.h
323
+ $(CXX) $(CXXFLAGS) $(CLBLAST_FLAGS) -c $< -o $@
324
+ ggml_v2-opencl.o: otherarch/ggml_v2-opencl.cpp otherarch/ggml_v2-opencl.h
325
+ $(CXX) $(CXXFLAGS) $(CLBLAST_FLAGS) -c $< -o $@
326
+ ggml_v2-opencl-legacy.o: otherarch/ggml_v2-opencl-legacy.c otherarch/ggml_v2-opencl-legacy.h
327
+ $(CC) $(CFLAGS) -c $< -o $@
328
+
329
+ # intermediate objects
330
+ llama.o: llama.cpp ggml.h ggml-cuda.h llama.h llama-util.h
331
+ $(CXX) $(CXXFLAGS) -c $< -o $@
332
+ common.o: examples/common.cpp examples/common.h
333
+ $(CXX) $(CXXFLAGS) -c $< -o $@
334
+ expose.o: expose.cpp expose.h
335
+ $(CXX) $(CXXFLAGS) -c $< -o $@
336
+ gpttype_adapter_failsafe.o: gpttype_adapter.cpp
337
+ $(CXX) $(CXXFLAGS) $(FAILSAFE_FLAGS) -c $< -o $@
338
+ gpttype_adapter.o: gpttype_adapter.cpp
339
+ $(CXX) $(CXXFLAGS) -c $< -o $@
340
+ gpttype_adapter_clblast.o: gpttype_adapter.cpp
341
+ $(CXX) $(CXXFLAGS) $(CLBLAST_FLAGS) -c $< -o $@
342
+ gpttype_adapter_cublas.o: gpttype_adapter.cpp
343
+ $(CXX) $(CXXFLAGS) $(CUBLAS_FLAGS) -c $< -o $@
344
+
345
+ clean:
346
+ rm -vf *.o main quantize_llama quantize_gpt2 quantize_gptj quantize_neox quantize_mpt quantize-stats perplexity embedding benchmark-matmult save-load-state main.exe quantize_llama.exe quantize_gptj.exe quantize_gpt2.exe quantize_neox.exe quantize_mpt.exe koboldcpp.dll koboldcpp_openblas.dll koboldcpp_failsafe.dll koboldcpp_openblas_noavx2.dll koboldcpp_clblast.dll koboldcpp_cublas.dll koboldcpp.so koboldcpp_openblas.so koboldcpp_failsafe.so koboldcpp_openblas_noavx2.so koboldcpp_clblast.so koboldcpp_cublas.so
347
+
348
+ main: examples/main/main.cpp build-info.h ggml.o k_quants.o llama.o common.o $(OBJS)
349
+ $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
350
+ @echo
351
+ @echo '==== Run ./main -h for help. ===='
352
+ @echo
353
+
354
+ #generated libraries
355
+ koboldcpp: ggml.o ggml_v2.o ggml_v1.o expose.o common.o gpttype_adapter.o k_quants.o $(OBJS)
356
+ $(DEFAULT_BUILD)
357
+ koboldcpp_openblas: ggml_openblas.o ggml_v2_openblas.o ggml_v1.o expose.o common.o gpttype_adapter.o k_quants.o $(OBJS)
358
+ $(OPENBLAS_BUILD)
359
+ koboldcpp_failsafe: ggml_failsafe.o ggml_v2_failsafe.o ggml_v1_failsafe.o expose.o common.o gpttype_adapter_failsafe.o k_quants_failsafe.o $(OBJS)
360
+ $(FAILSAFE_BUILD)
361
+ koboldcpp_openblas_noavx2: ggml_openblas_noavx2.o ggml_v2_openblas_noavx2.o ggml_v1_failsafe.o expose.o common.o gpttype_adapter.o k_quants_noavx2.o $(OBJS)
362
+ $(OPENBLAS_NOAVX2_BUILD)
363
+ koboldcpp_clblast: ggml_clblast.o ggml_v2_clblast.o ggml_v1.o expose.o common.o gpttype_adapter_clblast.o ggml-opencl.o ggml_v2-opencl.o ggml_v2-opencl-legacy.o k_quants.o $(OBJS)
364
+ $(CLBLAST_BUILD)
365
+ koboldcpp_cublas: ggml_cublas.o ggml_v2_cublas.o ggml_v1.o expose.o common.o gpttype_adapter_cublas.o k_quants.o $(CUBLAS_OBJS) $(OBJS)
366
+ $(CUBLAS_BUILD)
367
+
368
+ quantize_llama: examples/quantize/quantize.cpp ggml.o llama.o k_quants.o
369
+ $(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS)
370
+ quantize_gptj: ggml.o llama.o k_quants.o otherarch/tools/gptj_quantize.cpp otherarch/tools/common-ggml.cpp
371
+ $(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS)
372
+ quantize_gpt2: ggml.o llama.o k_quants.o otherarch/tools/gpt2_quantize.cpp otherarch/tools/common-ggml.cpp
373
+ $(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS)
374
+ quantize_neox: ggml.o llama.o k_quants.o otherarch/tools/neox_quantize.cpp otherarch/tools/common-ggml.cpp
375
+ $(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS)
376
+ quantize_mpt: ggml.o llama.o k_quants.o otherarch/tools/mpt_quantize.cpp otherarch/tools/common-ggml.cpp
377
+ $(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS)
378
+
379
+
380
+ build-info.h:
381
+ $(DONOTHING)
Package.swift ADDED
@@ -0,0 +1,24 @@
1
+ // swift-tools-version:5.3
2
+
3
+ import PackageDescription
4
+
5
+ let package = Package(
6
+ name: "llama",
7
+ products: [
8
+ .library(name: "llama", targets: ["llama"]),
9
+ ],
10
+ targets: [
11
+ .target(
12
+ name: "llama",
13
+ path: ".",
14
+ exclude: ["ggml-metal.metal"],
15
+ sources: ["ggml.c", "llama.cpp"],
16
+ publicHeadersPath: "spm-headers",
17
+ cSettings: [.unsafeFlags(["-Wno-shorten-64-to-32"]), .define("GGML_USE_ACCELERATE")],
18
+ linkerSettings: [
19
+ .linkedFramework("Accelerate")
20
+ ]
21
+ ),
22
+ ],
23
+ cxxLanguageStandard: .cxx11
24
+ )
README.md ADDED
@@ -0,0 +1,8 @@
1
+ ---
2
+ title: Koboldcpp
3
+ sdk: docker
4
+ emoji: 💻
5
+ colorFrom: blue
6
+ colorTo: purple
7
+ duplicated_from: Illumotion/Koboldcpp
8
+ ---
Remote-Link.cmd ADDED
@@ -0,0 +1,2 @@
1
+ curl -L https://github.com/cloudflare/cloudflared/releases/latest/download/cloudflared-windows-amd64.exe -o cloudflared.exe
2
+ cloudflared.exe tunnel --url localhost:5001
build-info.h ADDED
@@ -0,0 +1,7 @@
1
+ #ifndef BUILD_INFO_H
2
+ #define BUILD_INFO_H
3
+
4
+ #define BUILD_NUMBER 999
5
+ #define BUILD_COMMIT "KOBOLDCPP"
6
+
7
+ #endif // BUILD_INFO_H
clblast.dll ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0611442b931691d9b3c9bc5ebe7625f17a5c5902e1a2b9e98cbad440d1459625
3
+ size 5450752
convert-lora-to-ggml.py ADDED
@@ -0,0 +1,133 @@
1
+ import json
2
+ import os
3
+ import re
4
+ import struct
5
+ import sys
6
+ from typing import Any, Dict, Sequence, TextIO
7
+
8
+ import torch
9
+
10
+ from convert import DATA_TYPE_TO_FTYPE, NUMPY_TYPE_TO_DATA_TYPE, DataType
11
+
12
+ HF_SUBLAYER_TO_GGML = {
13
+ "self_attn.q_proj": "attention.wq",
14
+ "self_attn.k_proj": "attention.wk",
15
+ "self_attn.v_proj": "attention.wv",
16
+ "self_attn.o_proj": "attention.wo",
17
+ "mlp.gate_proj": "feed_forward.w1",
18
+ "mlp.down_proj": "feed_forward.w2",
19
+ "mlp.up_proj": "feed_forward.w3",
20
+ "input_layernorm": "attention_norm",
21
+ "post_attention_layernorm": "ffn_norm",
22
+ # "norm": "norm",
23
+ # "embed_tokens": "tok_embeddings",
24
+ # "lm_head": "output",
25
+ }
26
+
27
+
28
+ def translate_tensor_name(t: str) -> str:
29
+ match = re.match(r".*layers\.(\d+)\.(\w+\.\w+)\.lora_(A|B)\.weight", t)
30
+ if match:
31
+ nn = match.group(1)
32
+ sub_layer = match.group(2)
33
+ lora_type = match.group(3)
34
+
35
+ sub_layer_renamed = HF_SUBLAYER_TO_GGML.get(sub_layer)
36
+ if sub_layer_renamed is None:
37
+ print(f"Error: unrecognized sub-layer {sub_layer} in tensor {t}")
38
+ sys.exit(1)
39
+
40
+ output_string = (
41
+ f"layers.{nn}.{HF_SUBLAYER_TO_GGML[sub_layer]}.weight.lora{lora_type}"
42
+ )
43
+ return output_string
44
+ else:
45
+ print(f"Error: unrecognized tensor {t}")
46
+ sys.exit(1)
47
+
48
+
49
+ def write_file_header(fout: TextIO, params: Dict[str, Any]) -> None:
50
+ fout.write(b"ggla"[::-1]) # magic (ggml lora)
51
+ fout.write(struct.pack("i", 1)) # file version
52
+ fout.write(struct.pack("i", params["r"]))
53
+ # https://opendelta.readthedocs.io/en/latest/modules/deltas.html says that `lora_alpha` is an int
54
+ # but some models ship a float value instead
55
+ # let's convert to int, but fail if lossless conversion is not possible
56
+ assert int(params["lora_alpha"]) == params["lora_alpha"], "cannot convert float to int losslessly"
57
+ fout.write(struct.pack("i", int(params["lora_alpha"])))
58
+
59
+
60
+ def write_tensor_header(
61
+ self, name: str, shape: Sequence[int], data_type: DataType
62
+ ) -> None:
63
+ sname = name.encode("utf-8")
64
+ fout.write(
65
+ struct.pack(
66
+ "iii",
67
+ len(shape),
68
+ len(sname),
69
+ DATA_TYPE_TO_FTYPE[NUMPY_TYPE_TO_DATA_TYPE[data_type]],
70
+ )
71
+ )
72
+ fout.write(struct.pack("i" * len(shape), *shape[::-1]))
73
+ fout.write(sname)
74
+ fout.seek((fout.tell() + 31) & -32)
75
+
76
+
77
+ if len(sys.argv) != 2:
78
+ print(f"Usage: python {sys.argv[0]} <path>")
79
+ print(
80
+ "Path must contain HuggingFace PEFT LoRA files 'adapter_config.json' and 'adapter_model.bin'"
81
+ )
82
+ sys.exit(1)
83
+
84
+ input_json = os.path.join(sys.argv[1], "adapter_config.json")
85
+ input_model = os.path.join(sys.argv[1], "adapter_model.bin")
86
+ output_path = os.path.join(sys.argv[1], "ggml-adapter-model.bin")
87
+
88
+ model = torch.load(input_model, map_location="cpu")
89
+
90
+ with open(input_json, "r") as f:
91
+ params = json.load(f)
92
+
93
+ if params["peft_type"] != "LORA":
94
+ print(f"Error: unsupported adapter type {params['peft_type']}, expected LORA")
95
+ sys.exit(1)
96
+
97
+ if params["fan_in_fan_out"] is True:
98
+ print("Error: param fan_in_fan_out is not supported")
99
+ sys.exit(1)
100
+
101
+ if params["bias"] is not None and params["bias"] != "none":
102
+ print("Error: param bias is not supported")
103
+ sys.exit(1)
104
+
105
+ # TODO: these seem to be layers that have been trained but without lora.
106
+ # doesn't seem widely used but eventually should be supported
107
+ if params["modules_to_save"] is not None and len(params["modules_to_save"]) > 0:
108
+ print("Error: param modules_to_save is not supported")
109
+ sys.exit(1)
110
+
111
+ with open(output_path, "wb") as fout:
112
+ fout.truncate()
113
+
114
+ write_file_header(fout, params)
115
+ for k, v in model.items():
116
+ if k.endswith(".default.weight"):
117
+ k = k.replace(".default.weight", ".weight")
118
+ if k in ["llama_proj.weight", "llama_proj.bias"]:
119
+ continue
120
+ if k.endswith("lora_A.weight"):
121
+ if v.dtype != torch.float16 and v.dtype != torch.float32:
122
+ v = v.float()
123
+ v = v.T
124
+ else:
125
+ v = v.float()
126
+
127
+ t = v.detach().numpy()
128
+ tname = translate_tensor_name(k)
129
+ print(f"{k} => {tname} {t.shape} {t.dtype} {t.nbytes/1024/1024:.2f}MB")
130
+ write_tensor_header(fout, tname, t.shape, t.dtype)
131
+ t.tofile(fout)
132
+
133
+ print(f"Converted {input_json} and {input_model} to {output_path}")
convert-pth-to-ggml.py ADDED
@@ -0,0 +1,13 @@
1
+ # Compatibility stub
2
+
3
+ import argparse
4
+
5
+ import convert
6
+
7
+ parser = argparse.ArgumentParser(
8
+ description="""[DEPRECATED - use `convert.py` instead]
9
+ Convert a LLaMA model checkpoint to a ggml compatible file""")
10
+ parser.add_argument('dir_model', help='directory containing the model checkpoint')
11
+ parser.add_argument('ftype', help='file type (0: float32, 1: float16)', type=int, choices=[0, 1], default=1)
12
+ args = parser.parse_args()
13
+ convert.main(['--outtype', 'f16' if args.ftype == 1 else 'f32', '--', args.dir_model])
convert.py ADDED
@@ -0,0 +1,1263 @@
1
+ import argparse
2
+ import concurrent.futures
3
+ import copy
4
+ import enum
5
+ import faulthandler
6
+ import functools
7
+ import io
8
+ import itertools
9
+ import json
10
+ import math
11
+ import mmap
12
+ import pickle
13
+ import re
14
+ import signal
15
+ import struct
16
+ import sys
17
+ import zipfile
18
+ from abc import ABCMeta, abstractmethod
19
+ from dataclasses import dataclass
20
+ from pathlib import Path
21
+ from typing import (IO, TYPE_CHECKING, Any, Callable, Dict, Iterable, List,
22
+ Literal, Optional, Sequence, Tuple, TypeVar, Union)
23
+
24
+ import numpy as np
25
+ from sentencepiece import SentencePieceProcessor # type: ignore
26
+
27
+ if TYPE_CHECKING:
28
+ from typing_extensions import TypeAlias
29
+
30
+ if hasattr(faulthandler, 'register') and hasattr(signal, 'SIGUSR1'):
31
+ faulthandler.register(signal.SIGUSR1)
32
+
33
+ NDArray: 'TypeAlias' = 'np.ndarray[Any, Any]'
34
+
35
+
36
+ @dataclass(frozen=True)
37
+ class UnquantizedDataType:
38
+ name: str
39
+
40
+
41
+ DT_F16 = UnquantizedDataType('F16')
42
+ DT_F32 = UnquantizedDataType('F32')
43
+ DT_I32 = UnquantizedDataType('I32')
44
+ DT_BF16 = UnquantizedDataType('BF16')
45
+
46
+
47
+ @dataclass(frozen=True)
48
+ class QuantizedDataType:
49
+ groupsize: int
50
+ have_addends: bool
51
+ have_g_idx: bool
52
+
53
+
54
+ DT_Q4_0 = QuantizedDataType(groupsize=32, have_addends=False, have_g_idx=False)
55
+ DT_Q4_1 = QuantizedDataType(groupsize=32, have_addends=True, have_g_idx=False)
56
+
57
+ DataType = Union[UnquantizedDataType, QuantizedDataType]
58
+
59
+ DATA_TYPE_TO_FTYPE: Dict[DataType, int] = {
60
+ DT_F32: 0,
61
+ DT_F16: 1,
62
+ DT_Q4_0: 2,
63
+ DT_Q4_1: 3,
64
+ }
65
+
66
+ FTYPE_TO_DATA_TYPE: Dict[int, DataType] = \
67
+ {ftype: dtype for (dtype, ftype) in DATA_TYPE_TO_FTYPE.items()}
68
+
69
+ DATA_TYPE_TO_NUMPY: Dict[DataType, 'np.dtype[Any]'] = {
70
+ DT_BF16: np.dtype(np.uint16),
71
+ DT_F16: np.dtype(np.float16),
72
+ DT_F32: np.dtype(np.float32),
73
+ DT_I32: np.dtype(np.int32),
74
+ }
75
+
76
+ NUMPY_TYPE_TO_DATA_TYPE: Dict['np.dtype[Any]', DataType] = \
77
+ {dtype: data_type for (data_type, dtype) in DATA_TYPE_TO_NUMPY.items()}
78
+
79
+
80
+ class GGMLFileType(enum.Enum):
81
+ AllF32 = 0
82
+ MostlyF16 = 1 # except 1d tensors
83
+ MostlyQ4_0 = 2 # except 1d tensors
84
+ MostlyQ4_1 = 3 # except 1d tensors
85
+ PerLayerIsQ4_1 = 4 # but tok_embeddings.weight and output.weight are F16
86
+
87
+ def type_for_tensor(self, name: str, tensor: 'LazyTensor') -> DataType:
88
+ if len(tensor.shape) == 1:
89
+ # 1D tensors are always F32.
90
+ return DT_F32
91
+ elif self == GGMLFileType.AllF32:
92
+ return DT_F32
93
+ elif self == GGMLFileType.MostlyF16:
94
+ return DT_F16
95
+ elif self == GGMLFileType.MostlyQ4_0:
96
+ return DT_Q4_0
97
+ elif self == GGMLFileType.MostlyQ4_1:
98
+ return DT_Q4_1
99
+ elif self == GGMLFileType.PerLayerIsQ4_1:
100
+ if name in ('output.weight', 'tok_embeddings.weight'):
101
+ return DT_F16
102
+ else:
103
+ return DT_Q4_1
104
+ else:
105
+ raise ValueError(self)
106
+
107
+
108
+ def make_tensors_list() -> List[str]:
109
+ ret = [
110
+ 'tok_embeddings.weight',
111
+ 'norm.weight',
112
+ 'output.weight',
113
+ ]
114
+ for i in range(80): # maximum number of layers
115
+ ret += [
116
+ f'layers.{i}.attention.wq.weight',
117
+ f'layers.{i}.attention.wk.weight',
118
+ f'layers.{i}.attention.wv.weight',
119
+ f'layers.{i}.attention.wo.weight',
120
+ f'layers.{i}.attention_norm.weight',
121
+ f'layers.{i}.feed_forward.w1.weight',
122
+ f'layers.{i}.feed_forward.w2.weight',
123
+ f'layers.{i}.feed_forward.w3.weight',
124
+ f'layers.{i}.ffn_norm.weight',
125
+ ]
126
+ return ret
127
+
128
+
129
+ TENSORS_LIST = make_tensors_list()
130
+ TENSORS_SET = set(TENSORS_LIST)
131
+
132
+
133
+ def find_n_mult(n_ff: int, n_embd: int) -> int:
134
+ # hardcoded magic range
135
+ for n_mult in range(256, 1, -1):
136
+ calc_ff = (((8*n_embd) // 3 + n_mult - 1) // n_mult)*n_mult
137
+ if calc_ff == n_ff:
138
+ return n_mult
139
+ raise Exception(f"failed to find n_mult for (n_ff={n_ff}, n_embd={n_embd}).")
140
+
141
+ @dataclass
142
+ class Params:
143
+ n_vocab: int
144
+ n_embd: int
145
+ n_mult: int
146
+ n_head: int
147
+ n_layer: int
148
+
149
+ @staticmethod
150
+ def guessed(model: 'LazyModel') -> 'Params':
151
+ # try transformer naming first
152
+ n_vocab, n_embd = model["model.embed_tokens.weight"].shape if "model.embed_tokens.weight" in model else model["tok_embeddings.weight"].shape
153
+
154
+ # try transformer naming first
155
+ if "model.layers.0.self_attn.q_proj.weight" in model:
156
+ n_layer=next(i for i in itertools.count() if f"model.layers.{i}.self_attn.q_proj.weight" not in model)
157
+ elif "model.layers.0.self_attn.W_pack.weight" in model: # next: try baichuan naming
158
+ n_layer=next(i for i in itertools.count() if f"model.layers.{i}.self_attn.W_pack.weight" not in model)
159
+ else:
160
+ n_layer=next(i for i in itertools.count() if f"layers.{i}.attention.wq.weight" not in model)
161
+
162
+ if n_layer < 1:
163
+ raise Exception("failed to guess 'n_layer'. This model is unknown or unsupported.\n"
164
+ "Suggestion: provide 'config.json' of the model in the same directory containing model files.")
165
+
166
+ n_head=n_embd // 128 # guessed
167
+
168
+ return Params(
169
+ n_vocab=n_vocab,
170
+ n_embd=n_embd,
171
+ n_mult=256,
172
+ n_head=n_head,
173
+ n_layer=n_layer,
174
+ )
175
+
176
+ @staticmethod
177
+ def loadHFTransformerJson(model: 'LazyModel', config_path: 'Path') -> 'Params':
178
+ config = json.load(open(config_path))
179
+
180
+ n_vocab = config["vocab_size"];
181
+ n_embd = config["hidden_size"];
182
+ n_head = config["num_attention_heads"];
183
+ n_layer = config["num_hidden_layers"];
184
+ n_ff = config["intermediate_size"];
185
+
186
+ n_mult = find_n_mult(n_ff, n_embd);
187
+
188
+ return Params(
189
+ n_vocab=n_vocab,
190
+ n_embd=n_embd,
191
+ n_mult=n_mult,
192
+ n_head=n_head,
193
+ n_layer=n_layer,
194
+ )
195
+
196
+ @staticmethod
197
+ def load(model_plus: 'ModelPlus') -> 'Params':
198
+ orig_config_path = model_plus.paths[0].parent / "params.json"
199
+ hf_transformer_config_path = model_plus.paths[0].parent / "config.json"
200
+
201
+ if hf_transformer_config_path.exists():
202
+ params = Params.loadHFTransformerJson(model_plus.model, hf_transformer_config_path)
203
+ else:
204
+ params = Params.guessed(model_plus.model)
205
+
206
+ print(f'params: n_vocab:{params.n_vocab} n_embd:{params.n_embd} n_mult:{params.n_mult} n_head:{params.n_head} n_layer:{params.n_layer}')
207
+ return params
208
+
209
+
210
+ class SentencePieceVocab:
211
+ def __init__(self, fname_tokenizer: Path, fname_added_tokens: Optional[Path]) -> None:
212
+ self.sentencepiece_tokenizer = SentencePieceProcessor(str(fname_tokenizer))
213
+ added_tokens: Dict[str, int]
214
+ if fname_added_tokens is not None:
215
+ added_tokens = json.load(open(fname_added_tokens))
216
+ else:
217
+ added_tokens = {}
218
+ vocab_size: int = self.sentencepiece_tokenizer.vocab_size()
219
+ expected_ids = list(range(vocab_size, vocab_size + len(added_tokens)))
220
+ actual_ids = sorted(added_tokens.values())
221
+ if expected_ids != actual_ids:
222
+ raise Exception(f"Expected added token IDs to be sequential and start at {len(added_tokens)}; got {actual_ids}")
223
+ items = sorted(added_tokens.items(), key=lambda text_idx: text_idx[1])
224
+ self.added_tokens_list = [text for (text, idx) in items]
225
+ self.vocab_size_base: int = vocab_size
226
+ self.vocab_size: int = self.vocab_size_base + len(self.added_tokens_list)
227
+ self.fname_tokenizer = fname_tokenizer
228
+ self.fname_added_tokens = fname_added_tokens
229
+
230
+ def sentencepiece_tokens(self) -> Iterable[Tuple[bytes, float]]:
231
+ tokenizer = self.sentencepiece_tokenizer
232
+ for i in range(tokenizer.vocab_size()):
233
+ text: bytes
234
+ if tokenizer.is_unknown(i):
235
+ text = " \u2047 ".encode("utf-8")
236
+ elif tokenizer.is_control(i):
237
+ text = b""
238
+ elif tokenizer.is_byte(i):
239
+ piece = tokenizer.id_to_piece(i)
240
+ if len(piece) != 6:
241
+ raise Exception(f"Invalid token: {piece}")
242
+ byte_value = int(piece[3:-1], 16)
243
+ text = struct.pack("B", byte_value)
244
+ else:
245
+ text = tokenizer.id_to_piece(i).replace("\u2581", " ").encode("utf-8")
246
+ score: float = tokenizer.get_score(i)
247
+ yield text, score
248
+
249
+ def added_tokens(self) -> Iterable[Tuple[bytes, float]]:
250
+ for text in self.added_tokens_list:
251
+ score = -1000.0
252
+ yield text.encode("utf-8"), score
253
+
254
+ def all_tokens(self) -> Iterable[Tuple[bytes, float]]:
255
+ yield from self.sentencepiece_tokens()
256
+ yield from self.added_tokens()
257
+
258
+ def __repr__(self) -> str:
259
+ return f"<SentencePieceVocab with {self.vocab_size_base} base tokens and {len(self.added_tokens_list)} added tokens>"
260
+
261
+
262
+ class GGMLVocab:
263
+ def __init__(self, tokens: List[Tuple[bytes, float]]):
264
+ self.tokens = tokens
265
+ self.vocab_size = len(tokens)
266
+
267
+ def all_tokens(self) -> Iterable[Tuple[bytes, float]]:
268
+ return self.tokens
269
+
270
+ def __repr__(self) -> str:
271
+ return f"<GGMLVocab with {self.vocab_size} tokens>"
272
+
273
+
274
+ Vocab = Union[SentencePieceVocab, GGMLVocab]
275
+
276
+
277
+ def permute(weights: NDArray, n_head: int) -> NDArray:
278
+ return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
279
+ .swapaxes(1, 2)
280
+ .reshape(weights.shape))
281
+
282
+
283
+ def dequantize_q4(qvalues_pack32: NDArray, scales: NDArray, addends: Optional[NDArray], g_idx: Optional[NDArray]) -> NDArray:
284
+ # First reinterpret each row from a list of int32s containing 8 values each
285
+ # to a list of uint8s containing 2 values each.
286
+ qvalues_pack8 = qvalues_pack32.view(np.uint8)
287
+
288
+ # Then split out the two values per int8 (which requires an actual
289
+ # conversion because numpy doesn't natively support int4s).
290
+ qvalues = np.zeros([qvalues_pack8.shape[0], qvalues_pack8.shape[1] * 2], dtype=np.uint8)
291
+ qvalues[:, 0::2] = qvalues_pack8 & 0xf
292
+ qvalues[:, 1::2] = qvalues_pack8 >> 4
293
+
294
+ assert addends is None or addends.shape == scales.shape
295
+ assert qvalues.shape[0] == scales.shape[0]
296
+ assert qvalues.shape[1] % scales.shape[1] == 0
297
+ if g_idx is None:
298
+ repeat_count = qvalues.shape[1] // scales.shape[1]
299
+ scales = scales[:, :, np.newaxis]
300
+ if addends is not None:
301
+ addends = addends[:, :, np.newaxis]
302
+ # Reshape so that the below computation broadcasts over scales and addends:
303
+ qvalues.shape = (qvalues.shape[0], scales.shape[1], int(repeat_count))
304
+ else:
305
+ # In this case the scale and addend is selected for each column by g_idx:
306
+ assert addends is not None
307
+ scales = scales[:, g_idx]
308
+ addends = addends[:, g_idx]
309
+ if addends is None:
310
+ # Q4_0
311
+ qvalues = qvalues.view(np.int8)
312
+ qvalues -= 8
313
+ # And do the actual 'value = scale * qvalue + addend' computation.
314
+ values = scales * qvalues
315
+ if addends is not None:
316
+ values += addends
317
+ if g_idx is None:
318
+ values.shape = (values.shape[0], values.shape[1] * values.shape[2])
319
+ return values
320
+
321
+
322
+ class Tensor(metaclass=ABCMeta):
323
+ data_type: DataType
324
+
325
+ @abstractmethod
326
+ def astype(self, data_type: DataType) -> 'Tensor': ...
327
+ @abstractmethod
328
+ def permute(self, n_head: int) -> 'Tensor': ...
329
+ @abstractmethod
330
+ def permute_part(self, n_part: int, n_head: int) -> 'UnquantizedTensor': ...
331
+ @abstractmethod
332
+ def part(self, n_part: int) -> 'UnquantizedTensor': ...
333
+ @abstractmethod
334
+ def to_ggml(self) -> 'GGMLCompatibleTensor': ...
335
+
336
+
337
+ def bf16_to_fp32(bf16_arr: np.ndarray) -> np.ndarray:
338
+ assert bf16_arr.dtype == np.uint16, f"Input array should be of dtype uint16, but got {bf16_arr.dtype}"
339
+ fp32_arr = bf16_arr.astype(np.uint32) << 16
340
+ return fp32_arr.view(np.float32)
341
+
342
+
343
+ class UnquantizedTensor(Tensor):
344
+ def __init__(self, ndarray: NDArray) -> None:
345
+ assert isinstance(ndarray, np.ndarray)
346
+ self.ndarray = ndarray
347
+ self.data_type = NUMPY_TYPE_TO_DATA_TYPE[ndarray.dtype]
348
+
349
+ def astype(self, data_type: DataType) -> Tensor:
350
+ dtype = DATA_TYPE_TO_NUMPY[data_type]
351
+ if self.data_type == DT_BF16:
352
+ self.ndarray = bf16_to_fp32(self.ndarray)
353
+ return UnquantizedTensor(self.ndarray.astype(dtype))
354
+
355
+ def to_ggml(self) -> 'UnquantizedTensor':
356
+ return self
357
+
358
+ def permute_part(self, n_part: int, n_head: int) -> 'UnquantizedTensor':
359
+ r = self.ndarray.shape[0] // 3
360
+ return UnquantizedTensor(permute(self.ndarray[r * n_part : r * n_part + r, ...], n_head))
361
+
362
+ def part(self, n_part: int) -> 'UnquantizedTensor':
363
+ r = self.ndarray.shape[0] // 3
364
+ return UnquantizedTensor(self.ndarray[r * n_part : r * n_part + r, ...])
365
+
366
+ def permute(self, n_head: int) -> 'UnquantizedTensor':
367
+ return UnquantizedTensor(permute(self.ndarray, n_head))
368
+
369
+
370
+ def load_unquantized(lazy_tensor: 'LazyTensor', expected_dtype: Any = None, convert: bool = False) -> NDArray:
371
+ tensor = lazy_tensor.load()
372
+ assert isinstance(tensor, UnquantizedTensor)
373
+
374
+ # double-check:
375
+ actual_shape = list(tensor.ndarray.shape)
376
+ assert actual_shape == lazy_tensor.shape, (actual_shape, lazy_tensor.shape)
377
+ if expected_dtype is not None and expected_dtype != tensor.ndarray.dtype:
378
+ if convert:
379
+ tensor.ndarray = tensor.ndarray.astype(expected_dtype)
380
+ else:
381
+ raise ValueError(f'expected this tensor to have dtype {expected_dtype}, got {tensor.ndarray.dtype}')
382
+
383
+ return tensor.ndarray
384
+
385
+
386
+ class GGMLQuantizedTensor(Tensor):
387
+ data_type: QuantizedDataType
388
+
389
+ def __init__(self, ndarray: NDArray, shape: List[int], data_type: DataType) -> None:
390
+ rows, columns = shape
391
+ assert data_type in (DT_Q4_1, DT_Q4_0) # for now
392
+ assert isinstance(data_type, QuantizedDataType) # redundant, but mypy complains without this
393
+ assert columns % data_type.groupsize == 0
394
+ words_in_block = 6 if data_type == DT_Q4_1 else 5
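+ # Each group of 32 quantized columns occupies 6 uint32 words for Q4_1
+ # (4-byte scale + 4-byte addend + 16 bytes of packed 4-bit values = 24
+ # bytes) and 5 words for Q4_0 (no addend: 20 bytes).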
395
+ self.ndarray = ndarray.view(dtype=np.uint32).reshape((rows, columns // data_type.groupsize, words_in_block))
396
+ self.shape = shape[:]
397
+ self.data_type = data_type
398
+
399
+ def astype(self, data_type: DataType) -> Tensor:
400
+ if data_type == self.data_type:
401
+ return self
402
+ scales = self.ndarray[:, :, 0].view(np.float32)
403
+ if self.data_type.have_addends:
404
+ addends = self.ndarray[:, :, 1].view(np.float32)
405
+ else:
406
+ addends = None
407
+ qweights = self.ndarray[:, :, -4:].reshape([self.shape[0], self.shape[1] // 8])
408
+
409
+ dq = dequantize_q4(qweights, scales, addends, g_idx=None)
410
+ return UnquantizedTensor(dq).astype(data_type)
411
+
412
+ def to_ggml(self) -> 'GGMLQuantizedTensor':
413
+ return self
414
+
415
+ def permute(self, n_head: int) -> 'GGMLQuantizedTensor':
416
+ return GGMLQuantizedTensor(permute(self.ndarray, n_head), self.shape, self.data_type)
417
+
418
+
419
+ GGMLCompatibleTensor = Union[UnquantizedTensor, GGMLQuantizedTensor]
420
+
421
+
422
+ class DeferredPermutedTensor(Tensor):
423
+ def __init__(self, base: Tensor, n_head: int) -> None:
424
+ self.base = base
425
+ self.n_head = n_head
426
+ self.data_type = self.base.data_type
427
+
428
+ def astype(self, data_type: DataType) -> Tensor:
429
+ return self.base.astype(data_type).permute(self.n_head)
430
+
431
+ def to_ggml(self) -> GGMLCompatibleTensor:
432
+ return self.base.to_ggml().permute(self.n_head)
433
+
434
+ def permute(self, n_head: int) -> Tensor:
435
+ raise Exception("shouldn't permute twice")
436
+
437
+
438
+ class GPTQForLLaMaQuantizedTensor(Tensor):
439
+ def __init__(self, model: 'LazyModel', namebase: str) -> None:
440
+ qweight = load_unquantized(model[f"{namebase}.qweight"], np.int32)
441
+ scales = load_unquantized(model[f"{namebase}.scales"], np.float32, convert=True)
442
+
443
+ bias = model.get(f"{namebase}.bias")
444
+ if bias is not None:
445
+ # Q4_1 does not support bias; good thing the bias is always all zeros.
446
+ assert not np.any(load_unquantized(bias))
447
+
448
+ if f"{namebase}.zeros" in model:
449
+ zeros = load_unquantized(model[f"{namebase}.zeros"], np.float32)
450
+ else:
451
+ qzeros = load_unquantized(model[f"{namebase}.qzeros"], np.int32)
452
+ assert qzeros.dtype == np.int32
453
+ zeros = dequantize_q4(qzeros, scales, scales, g_idx=None)
454
+ assert zeros.dtype == np.float32
455
+
456
+ assert zeros.shape == scales.shape
457
+
458
+ # Output is transposed compared to the input, and addends have their sign flipped.
459
+ # Scales and zeros similarly must be transposed but only for newer
460
+ # versions of GPTQ-for-LLaMa; the older versions can be identified by
461
+ # having shape (n_embd, 1).
462
+ qweight = qweight.T
463
+ if scales.shape[1] != 1:
464
+ scales = scales.T
465
+ zeros = zeros.T
466
+
467
+ # Output also has signs flipped for the addends.
468
+ self.qweight = qweight
469
+ self.scales = scales
470
+ self.addends = -zeros
471
+
472
+ self.g_idx: Optional[NDArray]
473
+ if f"{namebase}.g_idx" in model:
474
+ self.g_idx = load_unquantized(model[f"{namebase}.g_idx"], np.int32)
475
+ assert self.g_idx.shape == (qweight.shape[1] * 8,)
476
+ else:
477
+ self.g_idx = None
478
+
479
+ self.shape = [self.qweight.shape[0], self.qweight.shape[1] * 8]
480
+ self.data_type = QuantizedDataType(groupsize=self.groupsize(), have_addends=True,
481
+ have_g_idx=(self.g_idx is not None))
482
+
483
+ def inspect(self, row: int, col: int) -> None:
484
+ '''For debugging.'''
485
+ qweight = (self.qweight[row, col // 8] >> (4 * (col & 7))) & 0xf
486
+ if self.g_idx is not None:
487
+ group = self.g_idx[col]
488
+ else:
489
+ group = int(col // self.groupsize())
490
+ scale = self.scales[row, group]
491
+ addend = self.addends[row, group]
492
+ with np.printoptions(precision=None, suppress=True):
493
+ print(f'scale:{scale} addend:{addend} qweight:{qweight}')
494
+ print('possible values:', np.arange(16) * scale + addend)
495
+ print('actual value:', qweight * scale + addend)
496
+
497
+ def astype(self, data_type: DataType) -> Tensor:
498
+ if isinstance(data_type, QuantizedDataType):
499
+ assert self.g_idx is None and data_type.have_addends is True and data_type.have_g_idx is False
500
+ return self.regroup(data_type.groupsize)
501
+
502
+ dequantized = dequantize_q4(np.ascontiguousarray(self.qweight), self.scales, self.addends, self.g_idx)
503
+ return UnquantizedTensor(dequantized).astype(data_type)
504
+
505
+ def groupsize(self) -> int:
506
+ assert self.addends.shape == self.scales.shape
507
+ assert self.shape[1] % self.scales.shape[1] == 0
508
+ return self.shape[1] // self.scales.shape[1]
509
+
510
+ def regroup(self, new_groupsize: int = 32) -> 'GPTQForLLaMaQuantizedTensor':
511
+ # Old versions of GPTQ-for-LLaMa shared scales and addends between all the
512
+ # columns in a row. Newer versions share them between every set of N
513
+ # columns in a row, where N is the `groupsize` parameter, usually 128. The
514
+ # output format shares them between every set of 32 columns. To handle
515
+ # this, duplicate scales and addends for every smaller group.
516
+ # (In the above, 'row' and 'column' are in the sense of the output.)
517
+ assert self.g_idx is None
518
+ old_groupsize = self.groupsize()
519
+ assert old_groupsize >= new_groupsize and old_groupsize % new_groupsize == 0, old_groupsize
520
+ ret = copy.copy(self)
521
+ ret.addends = self.addends.repeat(old_groupsize // new_groupsize, axis=1)
522
+ ret.scales = self.scales.repeat(old_groupsize // new_groupsize, axis=1)
523
+ ret.data_type = QuantizedDataType(groupsize=new_groupsize, have_addends=True, have_g_idx=False)
524
+ return ret
525
+
526
+ def permute(self, n_head: int) -> Tensor:
527
+ return DeferredPermutedTensor(self, n_head)
528
+
529
+ def to_ggml(self) -> GGMLQuantizedTensor:
530
+ # The output format looks like this:
531
+ # For each row:
532
+ # For each group of 32 columns:
533
+ # - scale (float32, 4 bytes)
534
+ # - addend (float32, 4 bytes)
535
+ # - weights (int4 * 32, 16 bytes)
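+ # i.e. 4 + 4 + 16 = 24 bytes per group of 32 columns, matching the 24-byte
+ # block size assumed when Q4_1 tensors are read back in lazy_load_ggml_file
+ # below.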
536
+
537
+ if self.groupsize() != 32:
538
+ raise Exception("should have been regrouped before converting to ggml")
539
+
540
+ # Since the output format is mixed between integers and floats, we have
541
+ # to hackily view the floats as int32s just so numpy will let us
542
+ # concatenate them.
543
+ addends_view = self.addends.view(dtype=np.int32)[:, :, np.newaxis]
544
+ scales_view = self.scales.view(dtype=np.int32)[:, :, np.newaxis]
545
+
546
+ # Split into groups of 4 columns (i.e. 32 columns of quantized data):
547
+ grouped = self.qweight.reshape([self.qweight.shape[0], self.qweight.shape[1] // 4, 4])
548
+
549
+ # And concatenate:
550
+ grouped = np.concatenate([scales_view, addends_view, grouped], axis=2, casting='no')
551
+
552
+ return GGMLQuantizedTensor(grouped, self.shape, DT_Q4_1)
553
+
554
+
555
+ @dataclass
556
+ class LazyTensor:
557
+ _load: Callable[[], Tensor]
558
+ shape: List[int]
559
+ data_type: DataType
560
+ description: str
561
+
562
+ def load(self) -> Tensor:
563
+ ret = self._load()
564
+ assert ret.data_type == self.data_type, (self.data_type, ret.data_type, self.description)
565
+ return ret
566
+
567
+ def astype(self, data_type: DataType) -> 'LazyTensor':
568
+ self.validate_conversion_to(data_type)
569
+
570
+ def load() -> Tensor:
571
+ return self.load().astype(data_type)
572
+ return LazyTensor(load, self.shape, data_type, f'convert({data_type}) {self.description}')
573
+
574
+ def validate_conversion_to(self, data_type: DataType) -> None:
575
+ if data_type == self.data_type:
576
+ return
577
+ if isinstance(data_type, QuantizedDataType):
578
+ if not isinstance(self.data_type, QuantizedDataType):
579
+ raise Exception(f"Can't turn an unquantized tensor into a quantized type ({data_type})")
580
+ if self.data_type.have_g_idx:
581
+ sys.stderr.write(
582
+ "Error: Input uses the newer GPTQ-for-LLaMa format (using g_idx), "
583
+ "which is not yet natively supported by GGML. "
584
+ "For now you can still convert this model by passing `--outtype f16` to dequantize, "
585
+ "but that will result in a much larger output file for no quality benefit.\n")
586
+ sys.exit(1)
587
+ assert not data_type.have_g_idx and self.data_type.have_addends and data_type.have_addends
588
+
589
+
590
+ LazyModel = Dict[str, LazyTensor]
591
+
592
+
593
+ @dataclass
594
+ class ModelPlus:
595
+ model: LazyModel
596
+ paths: List[Path] # Where this was read from.
597
+ format: Literal['ggml', 'torch', 'safetensors']
598
+ vocab: Optional[Vocab] # For GGML models (which have vocab built in), the vocab.
599
+
600
+
601
+ def merge_sharded(models: List[LazyModel]) -> LazyModel:
602
+ # Original LLaMA models have each file contain one part of each tensor.
603
+ # Use a dict instead of a set to preserve order.
604
+ names = {name: None for model in models for name in model}
605
+
606
+ def convert(name: str) -> LazyTensor:
607
+ lazy_tensors: List[LazyTensor] = [model[name] for model in models]
608
+ if len(lazy_tensors) == 1:
609
+ # only one file; don't go through this procedure since there might
610
+ # be quantized tensors
611
+ return lazy_tensors[0]
612
+ if len(lazy_tensors[0].shape) == 1:
613
+ # the tensor is just duplicated in every file
614
+ return lazy_tensors[0]
615
+ if name.startswith('tok_embeddings.') or \
616
+ name.endswith('.attention.wo.weight') or \
617
+ name.endswith('.feed_forward.w2.weight'):
618
+ # split by columns
619
+ axis = 1
620
+ else:
621
+ # split by rows
622
+ axis = 0
623
+ concatenated_shape = list(lazy_tensors[0].shape)
624
+ concatenated_shape[axis] = sum(tensor.shape[axis] for tensor in lazy_tensors)
625
+
626
+ def load() -> UnquantizedTensor:
627
+ ndarrays = [load_unquantized(tensor) for tensor in lazy_tensors]
628
+ concatenated: NDArray = np.concatenate(ndarrays, axis=axis)
629
+ return UnquantizedTensor(concatenated)
630
+ description = 'concatenated[[' + '] | ['.join(lt.description for lt in lazy_tensors) + ']]'
631
+ return LazyTensor(load, concatenated_shape, lazy_tensors[0].data_type, description)
632
+ return {name: convert(name) for name in names}
633
+
634
+
635
+ def merge_multifile_models(models_plus: List[ModelPlus]) -> ModelPlus:
636
+ formats = set(mp.format for mp in models_plus)
637
+ assert len(formats) == 1, "different formats?"
638
+ format = formats.pop()
639
+ paths = [path for mp in models_plus for path in mp.paths]
640
+ # Use the first non-None vocab, if any.
641
+ try:
642
+ vocab = next(mp.vocab for mp in models_plus if mp.vocab is not None)
643
+ except StopIteration:
644
+ vocab = None
645
+
646
+ if any("model.embed_tokens.weight" in mp.model for mp in models_plus):
647
+ # Transformers models put different tensors in different files, but
648
+ # don't split individual tensors between files.
649
+ model: LazyModel = {}
650
+ for mp in models_plus:
651
+ model.update(mp.model)
652
+ else:
653
+ model = merge_sharded([mp.model for mp in models_plus])
654
+
655
+ return ModelPlus(model, paths, format, vocab)
656
+
657
+
658
+ def permute_lazy(lazy_tensor: LazyTensor, n_head: int) -> LazyTensor:
659
+ def load() -> Tensor:
660
+ return lazy_tensor.load().permute(n_head)
661
+ return LazyTensor(load, lazy_tensor.shape, lazy_tensor.data_type, f'permute({n_head}) ' + lazy_tensor.description)
662
+
663
+ def permute_part_lazy(lazy_tensor: LazyTensor, n_part: int, n_head: int) -> LazyTensor:
664
+ def load() -> Tensor:
665
+ return lazy_tensor.load().permute_part(n_part, n_head)
666
+ s = lazy_tensor.shape.copy()
667
+ s[0] = s[0] // 3
668
+ return LazyTensor(load, s, lazy_tensor.data_type, f'permute({n_head}) ' + lazy_tensor.description)
669
+
670
+ def part_lazy(lazy_tensor: LazyTensor, n_part: int) -> LazyTensor:
671
+ def load() -> Tensor:
672
+ return lazy_tensor.load().part(n_part)
673
+ s = lazy_tensor.shape.copy()
674
+ s[0] = s[0] // 3
675
+ return LazyTensor(load, s, lazy_tensor.data_type, 'part ' + lazy_tensor.description)
676
+
677
+ def convert_transformers_to_orig(model: LazyModel, params: Params) -> LazyModel:
678
+ out: LazyModel = {}
679
+ out["tok_embeddings.weight"] = model["model.embed_tokens.weight"]
680
+ out["norm.weight"] = model["model.norm.weight"]
681
+ out["output.weight"] = model["lm_head.weight"]
682
+
683
+ for i in itertools.count():
684
+ if f"model.layers.{i}.self_attn.q_proj.weight" in model:
685
+ out[f"layers.{i}.attention.wq.weight"] = permute_lazy(model[f"model.layers.{i}.self_attn.q_proj.weight"], params.n_head)
686
+ out[f"layers.{i}.attention.wk.weight"] = permute_lazy(model[f"model.layers.{i}.self_attn.k_proj.weight"], params.n_head)
687
+ out[f"layers.{i}.attention.wv.weight"] = model[f"model.layers.{i}.self_attn.v_proj.weight"]
688
+ elif f"model.layers.{i}.self_attn.W_pack.weight" in model:
689
+ out[f"layers.{i}.attention.wq.weight"] = permute_part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 0, params.n_head)
690
+ out[f"layers.{i}.attention.wk.weight"] = permute_part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 1, params.n_head)
691
+ out[f"layers.{i}.attention.wv.weight"] = part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 2)
692
+ else:
693
+ break
694
+
695
+ out[f"layers.{i}.attention.wo.weight"] = model[f"model.layers.{i}.self_attn.o_proj.weight"]
696
+
697
+ out[f"layers.{i}.feed_forward.w1.weight"] = model[f"model.layers.{i}.mlp.gate_proj.weight"]
698
+ out[f"layers.{i}.feed_forward.w2.weight"] = model[f"model.layers.{i}.mlp.down_proj.weight"]
699
+ out[f"layers.{i}.feed_forward.w3.weight"] = model[f"model.layers.{i}.mlp.up_proj.weight"]
700
+
701
+ out[f"layers.{i}.attention_norm.weight"] = model[f"model.layers.{i}.input_layernorm.weight"]
702
+ out[f"layers.{i}.ffn_norm.weight"] = model[f"model.layers.{i}.post_attention_layernorm.weight"]
703
+ return out
704
+
705
+
706
+ def handle_quantization(model: LazyModel) -> LazyModel:
707
+ '''Convert a model with entries for 'foo.qweight', 'foo.scales', etc.
708
+ (which resolve to UnquantizedTensors with the raw data) to one with entries
709
+ for 'foo.weight' (which resolve to QuantizedTensors).
710
+ '''
711
+ def convert(name: str) -> Tuple[str, LazyTensor]:
712
+ if name.endswith(".qweight"):
713
+ namebase = name.rsplit('.', 1)[0]
714
+ orig_name = namebase + ".weight"
715
+
716
+ lazy_tensor = model[name]
717
+ assert len(lazy_tensor.shape) == 2
718
+ real_shape = [lazy_tensor.shape[1], lazy_tensor.shape[0] * 8]
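+ # qweight packs eight 4-bit values per int32 and is stored transposed
+ # relative to the output, hence the swapped dimensions and the factor of 8.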
719
+
720
+ # Calculate type. This replicates the logic in
721
+ # GPTQForLLaMaQuantizedTensor (which is executed when the model is
722
+ # actually loaded).
723
+ lazy_scales = model[f"{namebase}.scales"]
724
+ scales_width = 1 if lazy_scales.shape[1] == 1 else lazy_scales.shape[0]
725
+ assert real_shape[1] % scales_width == 0
726
+ groupsize = real_shape[1] // scales_width
727
+ have_g_idx = f"{namebase}.g_idx" in model
728
+ data_type = QuantizedDataType(groupsize=groupsize, have_addends=True, have_g_idx=have_g_idx)
729
+
730
+ def load() -> Tensor:
731
+ return GPTQForLLaMaQuantizedTensor(model, namebase)
732
+
733
+ return (orig_name, LazyTensor(load, real_shape, data_type, '[quantized]'))
734
+ else:
735
+ return (name, model[name])
736
+ return dict(convert(name) for name in model)
737
+
738
+ # Functionality that simulates `torch.load` but where individual tensors are
739
+ # only loaded into memory on demand, not all at once.
740
+ # PyTorch can't do this natively as of time of writing:
741
+ # - https://github.com/pytorch/pytorch/issues/64327
742
+ # This allows us to de-shard without multiplying RAM usage, and also
743
+ # conveniently drops the PyTorch dependency (though we still need numpy).
744
+
745
+
746
+ @dataclass
747
+ class LazyStorageKind:
748
+ data_type: DataType
749
+
750
+
751
+ @dataclass
752
+ class LazyStorage:
753
+ load: Callable[[int, int], NDArray]
754
+ kind: LazyStorageKind
755
+ description: str
756
+
757
+
758
+ class LazyUnpickler(pickle.Unpickler):
759
+ def __init__(self, fp: IO[bytes], data_base_path: str, zip_file: zipfile.ZipFile):
760
+ super().__init__(fp)
761
+ self.data_base_path = data_base_path
762
+ self.zip_file = zip_file
763
+
764
+ def persistent_load(self, pid: Any) -> Any:
765
+ assert pid[0] == 'storage'
766
+ assert isinstance(pid[1], LazyStorageKind)
767
+ data_type = pid[1].data_type
768
+ filename_stem = pid[2]
769
+ filename = self.data_base_path + '/' + filename_stem
770
+ info = self.zip_file.getinfo(filename)
771
+
772
+ def load(offset: int, elm_count: int) -> NDArray:
773
+ dtype = DATA_TYPE_TO_NUMPY.get(data_type)
774
+ if dtype is None:
775
+ raise Exception("tensor stored in unsupported format")
776
+ fp = self.zip_file.open(info)
777
+ fp.seek(offset * dtype.itemsize)
778
+ size = elm_count * dtype.itemsize
779
+ data = fp.read(size)
780
+ assert len(data) == size
781
+ return np.frombuffer(data, dtype)
782
+ description = f'storage data_type={data_type} path-in-zip={filename} path={self.zip_file.filename}'
783
+ return LazyStorage(load=load, kind=pid[1], description=description)
784
+
785
+ # @staticmethod
786
+ def lazy_rebuild_tensor_v2(storage: Any, storage_offset: Any, size: Any, stride: Any,
787
+ # pyright: ignore[reportSelfClsParameterName]
788
+ requires_grad: Any, backward_hooks: Any, metadata: Any = None) -> LazyTensor:
789
+ assert isinstance(storage, LazyStorage)
790
+
791
+ def load() -> UnquantizedTensor:
792
+ elm_count = stride[0] * size[0]
793
+ return UnquantizedTensor(storage.load(storage_offset, elm_count).reshape(size))
794
+ description = f'pickled storage_offset={storage_offset} in {storage.description}'
795
+ return LazyTensor(load, list(size), storage.kind.data_type, description)
796
+
797
+ # @staticmethod
798
+ def rebuild_from_type_v2(func, new_type, args, state):
799
+ return func(*args)
800
+
801
+ CLASSES: Dict[Any, Any] = {
802
+ ('torch._tensor', '_rebuild_from_type_v2'): rebuild_from_type_v2,
803
+ ('torch._utils', '_rebuild_tensor_v2'): lazy_rebuild_tensor_v2,
804
+ ('torch', 'BFloat16Storage'): LazyStorageKind(DT_BF16),
805
+ ('torch', 'HalfStorage'): LazyStorageKind(DT_F16),
806
+ ('torch', 'FloatStorage'): LazyStorageKind(DT_F32),
807
+ ('torch', 'IntStorage'): LazyStorageKind(DT_I32),
808
+ ('torch', 'Tensor'): LazyTensor,
809
+ }
810
+
811
+ def find_class(self, module: str, name: str) -> Any:
812
+ if not module.startswith('torch'):
813
+ return super().find_class(module, name)
814
+ return self.CLASSES[(module, name)]
815
+
816
+
817
+ def lazy_load_torch_file(outer_fp: IO[bytes], path: Path) -> ModelPlus:
818
+ zf = zipfile.ZipFile(outer_fp)
819
+ pickle_paths = [name for name in zf.namelist() if name.endswith('.pkl')]
820
+ assert len(pickle_paths) == 1, pickle_paths
821
+ pickle_fp = zf.open(pickle_paths[0], 'r')
822
+ unpickler = LazyUnpickler(pickle_fp,
823
+ data_base_path=pickle_paths[0][:-4],
824
+ zip_file=zf)
825
+ model = unpickler.load()
826
+ as_dict = dict(model.items())
827
+ return ModelPlus(model=as_dict, paths=[path], format='torch', vocab=None)
828
+
829
+
830
+ SAFETENSORS_DATA_TYPES: Dict[str, DataType] = {
831
+ 'BF16': DT_BF16,
832
+ 'F16': DT_F16,
833
+ 'F32': DT_F32,
834
+ 'I32': DT_I32,
835
+ }
836
+
837
+
838
+ def lazy_load_safetensors_file(fp: IO[bytes], path: Path) -> ModelPlus:
839
+ header_size, = struct.unpack('<Q', fp.read(8))
840
+ header: Dict[str, Dict[str, Any]] = json.loads(fp.read(header_size))
841
+ # Use mmap for the actual data to avoid race conditions with the file offset.
842
+ mapped = memoryview(mmap.mmap(fp.fileno(), 0, access=mmap.ACCESS_READ))
843
+ byte_buf = mapped[8 + header_size:]
844
+
845
+ def convert(info: Dict[str, Any]) -> LazyTensor:
846
+ data_type = SAFETENSORS_DATA_TYPES[info['dtype']]
847
+ numpy_dtype = DATA_TYPE_TO_NUMPY[data_type]
848
+ shape: List[int] = info['shape']
849
+ begin, end = info['data_offsets']
850
+ assert 0 <= begin <= end <= len(byte_buf)
851
+ assert end - begin == math.prod(shape) * numpy_dtype.itemsize
852
+ buf = byte_buf[begin:end]
853
+
854
+ def load() -> UnquantizedTensor:
855
+ return UnquantizedTensor(np.frombuffer(buf, dtype=numpy_dtype).reshape(shape))
856
+ description = f'safetensors begin={begin} end={end} type={data_type} path={path}'
857
+ return LazyTensor(load, shape, data_type, description)
858
+ model = {name: convert(info) for (name, info) in header.items() if name != '__metadata__'}
859
+ return ModelPlus(model=model, paths=[path], format='safetensors', vocab=None)
860
+
861
+
862
+ def must_read(fp: IO[bytes], length: int) -> bytes:
863
+ ret = fp.read(length)
864
+ if len(ret) < length:
865
+ raise Exception("unexpectedly reached end of file")
866
+ return ret
867
+
868
+
869
+ def lazy_load_ggml_file(fp: io.BufferedReader, path: Path) -> ModelPlus:
870
+ magic = must_read(fp, 4)[::-1]
871
+ if magic in (b'ggmf', b'ggjt'):
872
+ version, = struct.unpack("i", must_read(fp, 4))
873
+ assert version == 1
874
+ else:
875
+ assert magic == b'ggml'
876
+ version = None
877
+ n_vocab, n_embd, n_mult, n_head, n_layer, rot, file_type = struct.unpack('<7i', must_read(fp, 28))
878
+
879
+ tokens: List[Tuple[bytes, float]] = []
880
+ for i in range(n_vocab):
881
+ if i == 32000:
882
+ # HACK: GPT4All messed with the format without changing the magic
883
+ # number. Specifically, they changed the vocab section to contain
884
+ # `n_vocab - 1` tokens instead of `n_vocab` (i.e. omitting the
885
+ # extra pad token). Try to detect if we're reading a file like
886
+ # this.
887
+ orig_pos = fp.tell()
888
+ fp.seek(20, io.SEEK_CUR)
889
+ is_gpt4all = fp.read(21) == b'tok_embeddings.weight'
890
+ fp.seek(orig_pos)
891
+ if is_gpt4all:
892
+ break
893
+
894
+ length, = struct.unpack("i", must_read(fp, 4))
895
+ text = must_read(fp, length)
896
+ if magic != b'ggml':
897
+ score, = struct.unpack("f", must_read(fp, 4))
898
+ tokens.append((text, score))
899
+ vocab = GGMLVocab(tokens) if magic != b'ggml' else None
900
+
901
+ model: LazyModel = {}
902
+ # Use mmap for the actual data to avoid race conditions with the file offset.
903
+ off = fp.raw.tell()
904
+ mapped = memoryview(mmap.mmap(fp.fileno(), 0, access=mmap.ACCESS_READ))
905
+ fp.raw.seek(off) # needed on Windows
906
+
907
+ def read_tensor() -> None: # this is a function so that variables captured in `load` don't change
908
+ shape_len, name_len, ftype = struct.unpack("iii", must_read(fp, 12))
909
+ assert 0 <= shape_len <= 3
910
+ shape: List[int] = list(struct.unpack(f"{shape_len}i", must_read(fp, 4 * shape_len)))
911
+ shape = shape[::-1]
912
+ name = must_read(fp, name_len).decode('utf-8')
913
+ data_type = FTYPE_TO_DATA_TYPE[ftype]
914
+
915
+ if magic == b'ggjt':
916
+ fp.seek((fp.tell() + 31) & -32)
917
+
918
+ if data_type == DT_Q4_1:
919
+ # See GPTQForLLaMaQuantizedTensor.to_ggml()
920
+ size = 24 * (shape[1] // 32) * shape[0]
921
+ elif data_type == DT_Q4_0:
922
+ size = 20 * (shape[1] // 32) * shape[0]
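+ # (24 bytes per 32-column group for Q4_1 = 4-byte scale + 4-byte addend +
+ # 16 bytes of packed 4-bit values; Q4_0 has no addend, hence 20 bytes.)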
923
+ else:
924
+ numpy_dtype = DATA_TYPE_TO_NUMPY[data_type]
925
+ elm_count = math.prod(shape)
926
+ size = elm_count * numpy_dtype.itemsize
927
+ offset = fp.tell()
928
+ buf = mapped[offset:offset+size]
929
+ fp.seek(size, io.SEEK_CUR)
930
+
931
+ def load() -> Tensor:
932
+ if isinstance(data_type, QuantizedDataType):
933
+ ndarray = np.frombuffer(buf, dtype=np.uint32)
934
+ return GGMLQuantizedTensor(ndarray, shape, data_type)
935
+ else:
936
+ return UnquantizedTensor(np.frombuffer(buf, dtype=numpy_dtype).reshape(shape))
937
+ description = f'ggml offset={offset} type={data_type} path={path}'
938
+ model[name] = LazyTensor(load, shape, data_type, description)
939
+
940
+ while fp.read(1) != b'':
941
+ fp.seek(-1, io.SEEK_CUR)
942
+ read_tensor()
943
+
944
+ return ModelPlus(model=model, paths=[path], format='ggml', vocab=vocab)
945
+
946
+
947
+ @functools.lru_cache(maxsize=None)
948
+ def lazy_load_file(path: Path) -> ModelPlus:
949
+ fp = open(path, 'rb')
950
+ first8 = fp.read(8)
951
+ fp.seek(0)
952
+ if first8[:2] == b'PK':
953
+ # A zip file, i.e. PyTorch format
954
+ return lazy_load_torch_file(fp, path)
955
+ elif first8[2:4] == b'gg':
956
+ # GGML format
957
+ return lazy_load_ggml_file(fp, path)
958
+ elif struct.unpack('<Q', first8)[0] < 16 * 1024 * 1024:
959
+ # Probably safetensors
960
+ return lazy_load_safetensors_file(fp, path)
961
+ else:
962
+ raise ValueError(f"unknown format: {path}")
963
+
964
+
965
+ In = TypeVar('In')
966
+ Out = TypeVar('Out')
967
+
968
+
969
+ def bounded_parallel_map(func: Callable[[In], Out], iterable: Iterable[In], concurrency: int) -> Iterable[Out]:
970
+ '''Parallel map, but with backpressure. If the caller doesn't call `next`
971
+ fast enough, this will stop calling `func` at some point rather than
972
+ letting results pile up in memory. Specifically, there is a max of one
973
+ output value buffered per thread.'''
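+ # For example, OutputFile.write_all below calls
+ # bounded_parallel_map(do_item, model.items(), concurrency=8), so at most
+ # eight tensor loads are in flight ahead of the writer at any time.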
974
+ with concurrent.futures.ThreadPoolExecutor() as executor:
975
+ futures: List[concurrent.futures.Future[Out]] = []
976
+ items_rev = list(iterable)[::-1]
977
+ for i in range(min(concurrency, len(items_rev))):
978
+ futures.append(executor.submit(func, items_rev.pop()))
979
+ while futures:
980
+ result = futures.pop(0).result()
981
+ if items_rev:
982
+ futures.append(executor.submit(func, items_rev.pop()))
983
+ yield result
984
+
985
+
986
+ def check_vocab_size(params: Params, vocab: Vocab) -> None:
987
+ if params.n_vocab != vocab.vocab_size:
988
+ # GGMLVocab comes from the same file as the model so shouldn't mismatch:
989
+ assert isinstance(vocab, SentencePieceVocab)
990
+ if params.n_vocab == vocab.vocab_size_base:
991
+ print("Ignoring added_tokens.json since model matches vocab size without it.")
992
+ vocab.added_tokens_list = []
993
+ vocab.vocab_size = vocab.vocab_size_base
994
+ return
995
+ msg = f"Vocab size mismatch (model has {params.n_vocab}, but {vocab.fname_tokenizer}"
996
+ if vocab.fname_added_tokens is not None:
997
+ msg += f" combined with {vocab.fname_added_tokens}"
998
+ msg += f" has {vocab.vocab_size})."
999
+ if vocab.vocab_size < params.n_vocab < vocab.vocab_size + 20 and vocab.fname_added_tokens is None:
1000
+ msg += f" Most likely you are missing added_tokens.json (should be in {vocab.fname_tokenizer.parent})."
1001
+ raise Exception(msg)
1002
+
1003
+
1004
+ class OutputFile:
1005
+ def __init__(self, fname_out: Path) -> None:
1006
+ self.fout = open(fname_out, "wb")
1007
+
1008
+ def write_file_header(self, params: Params, file_type: GGMLFileType) -> None:
1009
+ self.fout.write(b"ggjt"[::-1]) # magic
1010
+ values = [
1011
+ 1, # file version
1012
+ params.n_vocab,
1013
+ params.n_embd,
1014
+ params.n_mult,
1015
+ params.n_head,
1016
+ params.n_layer,
1017
+ params.n_embd // params.n_head, # rot (obsolete)
1018
+ file_type.value,
1019
+ ]
1020
+ self.fout.write(struct.pack("i" * len(values), *values))
1021
+
1022
+ def write_tensor_header(self, name: str, shape: Sequence[int], data_type: DataType) -> None:
1023
+ sname = name.encode('utf-8')
1024
+ self.fout.write(struct.pack("iii", len(shape), len(sname), DATA_TYPE_TO_FTYPE[data_type]))
1025
+ self.fout.write(struct.pack("i" * len(shape), *shape[::-1]))
1026
+ self.fout.write(sname)
1027
+ self.fout.seek((self.fout.tell() + 31) & -32)
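+ # (tell() + 31) & -32 rounds the file offset up to the next multiple of 32,
+ # so each tensor's data starts 32-byte aligned, matching the corresponding
+ # seek in lazy_load_ggml_file when ggjt files are read back.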
1028
+
1029
+ def write_vocab(self, vocab: Vocab) -> None:
1030
+ for text, score in vocab.all_tokens():
1031
+ self.fout.write(struct.pack("i", len(text)))
1032
+ self.fout.write(text)
1033
+ self.fout.write(struct.pack("f", score))
1034
+
1035
+ @staticmethod
1036
+ def write_vocab_only(fname_out: Path, vocab: Vocab) -> None:
1037
+ of = OutputFile(fname_out)
1038
+ params = Params(n_vocab=vocab.vocab_size, n_embd=0, n_mult=0,
1039
+ n_head=1, n_layer=0)
1040
+ of = OutputFile(fname_out)
1041
+ of.write_file_header(params, file_type=GGMLFileType.AllF32)
1042
+ of.write_vocab(vocab)
1043
+ of.fout.close()
1044
+
1045
+ @staticmethod
1046
+ def write_all(fname_out: Path, params: Params, file_type: GGMLFileType, model: LazyModel, vocab: Vocab) -> None:
1047
+ check_vocab_size(params, vocab)
1048
+ of = OutputFile(fname_out)
1049
+ of.write_file_header(params, file_type)
1050
+ print("Writing vocab...")
1051
+ of.write_vocab(vocab)
1052
+
1053
+ def do_item(item: Tuple[str, LazyTensor]) -> NDArray:
1054
+ name, lazy_tensor = item
1055
+ return lazy_tensor.load().to_ggml().ndarray
1056
+
1057
+ ndarrays = bounded_parallel_map(do_item, model.items(), concurrency=8)
1058
+ for i, ((name, lazy_tensor), ndarray) in enumerate(zip(model.items(), ndarrays)):
1059
+ size = ' x '.join(f"{dim:6d}" for dim in lazy_tensor.shape)
1060
+ padi = len(str(len(model)))
1061
+ print(f"[{i+1:{padi}d}/{len(model)}] Writing tensor {name:38s} | size {size:16} | type {lazy_tensor.data_type}")
1062
+ of.write_tensor_header(name, lazy_tensor.shape, lazy_tensor.data_type)
1063
+ ndarray.tofile(of.fout)
1064
+ of.fout.close()
1065
+
1066
+
1067
+ def pick_output_type(model: LazyModel, output_type_str: Optional[str]) -> GGMLFileType:
1068
+ wq_type = model["layers.0.attention.wq.weight"].data_type
1069
+ if output_type_str == "f32" or (output_type_str is None and wq_type in (DT_F32, DT_BF16)):
1070
+ return GGMLFileType.AllF32
1071
+ if output_type_str == "f16" or (output_type_str is None and wq_type == DT_F16):
1072
+ return GGMLFileType.MostlyF16
1073
+ if output_type_str == "q4_1" or (output_type_str is None and isinstance(wq_type, QuantizedDataType) and
1074
+ wq_type.have_addends):
1075
+ if isinstance(model["output.weight"].data_type, QuantizedDataType):
1076
+ return GGMLFileType.MostlyQ4_1
1077
+ else:
1078
+ return GGMLFileType.PerLayerIsQ4_1
1079
+ if output_type_str == "q4_0" or (output_type_str is None and isinstance(wq_type, QuantizedDataType)):
1080
+ return GGMLFileType.MostlyQ4_0
1081
+ name_to_type = {name: lazy_tensor.data_type for (name, lazy_tensor) in model.items()}
1082
+ raise Exception(f"Unexpected combination of types: {name_to_type}")
1083
+
1084
+
1085
+ def do_necessary_conversions(model: LazyModel, params: Params) -> LazyModel:
1086
+ model = handle_quantization(model)
1087
+
1088
+ if "lm_head.weight" in model:
1089
+ model = convert_transformers_to_orig(model, params)
1090
+ model = filter_and_sort_tensors(model)
1091
+
1092
+ return model
1093
+
1094
+
1095
+ def convert_to_output_type(model: LazyModel, output_type: GGMLFileType) -> LazyModel:
1096
+ return {name: tensor.astype(output_type.type_for_tensor(name, tensor))
1097
+ for (name, tensor) in model.items()}
1098
+
1099
+
1100
+ def nth_multifile_path(path: Path, n: int) -> Optional[Path]:
1101
+ '''Given any path belonging to a multi-file model (e.g. foo.bin.1), return
1102
+ the nth path in the model.
1103
+ '''
1104
+ # Support the following patterns:
1105
+ patterns: List[Tuple[str, str]] = [
1106
+ # - x.00.pth, x.01.pth, etc.
1107
+ (r'\.[0-9]{2}\.pth$', f'.{n:02}.pth'),
1108
+ # - x-00001-of-00002.bin, x-00002-of-00002.bin, etc.
1109
+ (r'-[0-9]{5}-of-(.*)$', fr'-{n:05}-of-\1'),
1110
+ # x.bin, x.bin.1, etc.
1111
+ (r'(\.[0-9]+)?$', r'\1' if n == 0 else fr'\1.{n}')
1112
+ ]
1113
+ for regex, replacement in patterns:
1114
+ if re.search(regex, path.name):
1115
+ new_path = path.with_name(re.sub(regex, replacement, path.name))
1116
+ if new_path.exists():
1117
+ return new_path
1118
+ return None
1119
+
1120
+
1121
+ def find_multifile_paths(path: Path) -> List[Path]:
1122
+ '''Given any path belonging to a multi-file model (e.g. foo.bin.1), return
1123
+ the whole list of paths in the model.
1124
+ '''
1125
+ ret: List[Path] = []
1126
+ for i in itertools.count():
1127
+ nth_path = nth_multifile_path(path, i)
1128
+ if nth_path is None:
1129
+ break
1130
+ ret.append(nth_path)
1131
+ if not ret:
1132
+ # No matches. This should only happen if the file was named, e.g.,
1133
+ # foo.0, and there was no file named foo. Oh well, try to process it
1134
+ # as a single file.
1135
+ return [path]
1136
+ return ret
1137
+
1138
+
1139
+ def load_some_model(path: Path) -> ModelPlus:
1140
+ '''Load a model of any supported format.'''
1141
+ # Be extra-friendly and accept either a file or a directory:
1142
+ if path.is_dir():
1143
+ # Check if it's a set of safetensors files first
1144
+ files = list(path.glob("model-00001-of-*.safetensors"))
1145
+ if not files:
1146
+ # Try the PyTorch patterns too, with lower priority
1147
+ globs = ["consolidated.00.pth", "pytorch_model-00001-of-*.bin", "*.pt", "pytorch_model.bin"]
1148
+ files = [file for glob in globs for file in path.glob(glob)]
1149
+ if not files:
1150
+ # Try GGML too, but with lower priority, since if both a non-GGML
1151
+ # model and a GGML model exist in the same directory, we assume the
1152
+ # latter was converted from the former.
1153
+ files = list(path.glob("ggml-model*.bin*"))
1154
+ if not files:
1155
+ raise Exception(f"Can't find model in directory {path}")
1156
+ if len(files) > 1:
1157
+ raise Exception(f"Found multiple models in {path}, not sure which to pick: {files}")
1158
+ path = files[0]
1159
+
1160
+ paths = find_multifile_paths(path)
1161
+ models_plus: List[ModelPlus] = []
1162
+ for path in paths:
1163
+ print(f"Loading model file {path}")
1164
+ models_plus.append(lazy_load_file(path))
1165
+
1166
+ model_plus = merge_multifile_models(models_plus)
1167
+ return model_plus
1168
+
1169
+
1170
+ def filter_and_sort_tensors(model: LazyModel) -> LazyModel:
1171
+ return {name: model[name] for name in TENSORS_LIST if name in model}
1172
+
1173
+
1174
+ def load_vocab(path: Path) -> SentencePieceVocab:
1175
+ # Be extra-friendly and accept either a file or a directory. Also, if it's
1176
+ # a directory, it might be the model directory, and tokenizer.model might
1177
+ # be in the parent of that.
1178
+ if path.is_dir():
1179
+ path2 = path / "tokenizer.model"
1180
+ # Use `.parent` instead of /.. to handle the symlink case better.
1181
+ path3 = path.parent / "tokenizer.model"
1182
+ if path2.exists():
1183
+ path = path2
1184
+ elif path3.exists():
1185
+ path = path3
1186
+ else:
1187
+ raise FileNotFoundError(
1188
+ f"Could not find tokenizer.model in {path} or its parent; "
1189
+ "if it's in another directory, pass the directory as --vocab-dir")
1190
+ added_tokens_path = path.parent / "added_tokens.json"
1191
+ print(f"Loading vocab file {path}")
1192
+ return SentencePieceVocab(path, added_tokens_path if added_tokens_path.exists() else None)
1193
+
1194
+
1195
+ def default_outfile(model_paths: List[Path], file_type: GGMLFileType) -> Path:
1196
+ namestr = {
1197
+ GGMLFileType.AllF32: "f32",
1198
+ GGMLFileType.MostlyF16: "f16",
1199
+ GGMLFileType.MostlyQ4_0: "q4_0",
1200
+ GGMLFileType.MostlyQ4_1: "q4_1",
1201
+ GGMLFileType.PerLayerIsQ4_1: "q4_1",
1202
+ }[file_type]
1203
+ ret = model_paths[0].parent / f"ggml-model-{namestr}.bin"
1204
+ if ret in model_paths:
1205
+ sys.stderr.write(
1206
+ f"Error: Default output path ({ret}) would overwrite the input. "
1207
+ "Please explicitly specify a path using --outfile.\n")
1208
+ sys.exit(1)
1209
+ return ret
1210
+
1211
+
1212
+ def do_dump_model(model_plus: ModelPlus) -> None:
1213
+ print(f"model_plus.paths = {model_plus.paths!r}")
1214
+ print(f"model_plus.format = {model_plus.format!r}")
1215
+ print(f"model_plus.vocab = {model_plus.vocab!r}")
1216
+ for name, lazy_tensor in model_plus.model.items():
1217
+ print(f"{name}: shape={lazy_tensor.shape} type={lazy_tensor.data_type}; {lazy_tensor.description}")
1218
+
1219
+
1220
+ def main(args_in: Optional[List[str]] = None) -> None:
1221
+ parser = argparse.ArgumentParser(description="Convert a LLaMa model to a GGML compatible file")
1222
+ parser.add_argument("--dump", action="store_true", help="don't convert, just show what's in the model")
1223
+ parser.add_argument("--dump-single", action="store_true", help="don't convert, just show what's in a single model file")
1224
+ parser.add_argument("--vocab-only", action="store_true", help="extract only the vocab")
1225
+ parser.add_argument("--outtype", choices=["f32", "f16", "q4_1", "q4_0"], help="output format (default: based on input)")
1226
+ parser.add_argument("--vocab-dir", type=Path, help="directory containing tokenizer.model, if separate from model file")
1227
+ parser.add_argument("--outfile", type=Path, help="path to write to; default: based on input")
1228
+ parser.add_argument("model", type=Path,
1229
+ help="directory containing model file, or model file itself (*.pth, *.pt, *.bin)")
1230
+ args = parser.parse_args(args_in)
1231
+
1232
+ vocab: Vocab
1233
+ if args.dump_single:
1234
+ model_plus = lazy_load_file(args.model)
1235
+ do_dump_model(model_plus)
1236
+ elif args.vocab_only:
1237
+ vocab = load_vocab(args.vocab_dir or args.model)
1238
+ assert args.outfile, "need --outfile if using --vocab-only"
1239
+ outfile = args.outfile
1240
+ OutputFile.write_vocab_only(outfile, vocab)
1241
+ print(f"Wrote {outfile}")
1242
+ else:
1243
+ model_plus = load_some_model(args.model)
1244
+ if args.dump:
1245
+ do_dump_model(model_plus)
1246
+ return
1247
+ if model_plus.vocab is not None and args.vocab_dir is None:
1248
+ vocab = model_plus.vocab
1249
+ else:
1250
+ vocab_dir = args.vocab_dir if args.vocab_dir else model_plus.paths[0].parent
1251
+ vocab = load_vocab(vocab_dir)
1252
+ params = Params.load(model_plus)
1253
+ model = model_plus.model
1254
+ model = do_necessary_conversions(model, params)
1255
+ output_type = pick_output_type(model, args.outtype)
1256
+ model = convert_to_output_type(model, output_type)
1257
+ outfile = args.outfile or default_outfile(model_plus.paths, output_type)
1258
+ OutputFile.write_all(outfile, params, output_type, model, vocab)
1259
+ print(f"Wrote {outfile}")
1260
+
1261
+
1262
+ if __name__ == '__main__':
1263
+ main()
cudart64_110.dll ADDED
Binary file (518 kB).
docs/token_generation_performance_tips.md ADDED
@@ -0,0 +1,40 @@
1
+ # Token generation performance troubleshooting
2
+
3
+ ## Verifying that the model is running on the GPU with cuBLAS
4
+ Make sure you compiled llama with the correct env variables according to [this guide](../README.md#cublas), so that llama accepts the `-ngl N` (or `--n-gpu-layers N`) flag. When running llama, you may configure `N` to be very large, and llama will offload the maximum possible number of layers to the GPU, even if it's less than the number you configured. For example:
5
+ ```shell
6
+ ./main -m "path/to/model.bin" -ngl 200000 -p "Please sir, may I have some "
7
+ ```
8
+
9
+ When running llama, before it starts the inference work, it will output diagnostic information that shows whether cuBLAS is offloading work to the GPU. Look for these lines:
10
+ ```shell
11
+ llama_model_load_internal: [cublas] offloading 60 layers to GPU
12
+ llama_model_load_internal: [cublas] offloading output layer to GPU
13
+ llama_model_load_internal: [cublas] total VRAM used: 17223 MB
14
+ ... rest of inference
15
+ ```
16
+
17
+ If you see these lines, then the GPU is being used.
18
+
19
+ ## Verifying that the CPU is not oversaturated
20
+ llama accepts a `-t N` (or `--threads N`) parameter. It's extremely important that this parameter is not too large. If your token generation is extremely slow, try setting this number to 1. If this significantly improves your token generation speed, then your CPU is being oversaturated and you need to explicitly set this parameter to the number of physical CPU cores on your machine (even if you utilize a GPU). If in doubt, start with 1 and double the amount until you hit a performance bottleneck, then scale the number down.
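+
+ As a rough, illustrative sketch of that procedure (the model path and prompt are placeholders; the `grep` assumes the usual timing summary llama prints at the end of a run):
+ ```shell
+ # Benchmark 1, 2, 4 and 8 threads and compare the reported eval speed.
+ for t in 1 2 4 8; do
+     echo "threads: $t"
+     ./main -m "path/to/model.bin" -p "Hello" -n 64 -t "$t" 2>&1 | grep "eval time"
+ done
+ ```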
21
+
22
+ # Example of runtime flags effect on inference speed benchmark
23
+ These runs were tested on the following machine:
24
+ GPU: A6000 (48GB VRAM)
25
+ CPU: 7 physical cores
26
+ RAM: 32GB
27
+
28
+ Model: `TheBloke_Wizard-Vicuna-30B-Uncensored-GGML/Wizard-Vicuna-30B-Uncensored.ggmlv3.q4_0.bin` (30B parameters, 4bit quantization, GGML)
29
+
30
+ Run command: `./main -m "path/to/model.bin" -p "An extremely detailed description of the 10 best ethnic dishes will follow, with recipes: " -n 1000 [additional benchmark flags]`
31
+
32
+ Result:
33
+
34
+ | command | tokens/second (higher is better) |
35
+ | - | - |
36
+ | -ngl 2000000 | N/A (less than 0.1) |
37
+ | -t 7 | 1.7 |
38
+ | -t 1 -ngl 2000000 | 5.5 |
39
+ | -t 7 -ngl 2000000 | 8.7 |
40
+ | -t 4 -ngl 2000000 | 9.1 |
examples/CMakeLists.txt ADDED
@@ -0,0 +1,49 @@
1
+ # dependencies
2
+
3
+ find_package(Threads REQUIRED)
4
+
5
+ # third-party
6
+
7
+ # ...
8
+
9
+ # common
10
+
11
+ set(TARGET common)
12
+
13
+ add_library(${TARGET} OBJECT
14
+ common.h
15
+ common.cpp
16
+ )
17
+
18
+ if (BUILD_SHARED_LIBS)
19
+ set_target_properties(${TARGET} PROPERTIES POSITION_INDEPENDENT_CODE ON)
20
+ endif()
21
+
22
+ target_include_directories(${TARGET} PUBLIC .)
23
+ target_compile_features(${TARGET} PUBLIC cxx_std_11)
24
+ target_link_libraries(${TARGET} PRIVATE llama)
25
+
26
+ # examples
27
+
28
+ include_directories(${CMAKE_CURRENT_SOURCE_DIR})
29
+
30
+ if (EMSCRIPTEN)
31
+ else()
32
+ add_subdirectory(main)
33
+ add_subdirectory(quantize)
34
+ add_subdirectory(quantize-stats)
35
+ add_subdirectory(perplexity)
36
+ add_subdirectory(embedding)
37
+ add_subdirectory(save-load-state)
38
+ add_subdirectory(benchmark)
39
+ add_subdirectory(baby-llama)
40
+ add_subdirectory(train-text-from-scratch)
41
+ add_subdirectory(simple)
42
+ add_subdirectory(embd-input)
43
+ if (LLAMA_METAL)
44
+ add_subdirectory(metal)
45
+ endif()
46
+ if (LLAMA_BUILD_SERVER)
47
+ add_subdirectory(server)
48
+ endif()
49
+ endif()
examples/Miku.sh ADDED
@@ -0,0 +1,49 @@
1
+ #!/bin/bash
2
+ set -e
3
+
4
+ AI_NAME="${AI_NAME:-Miku}"
5
+ MODEL="${MODEL:-./models/gpt4all-7B/gpt4all-lora-unfiltered-quantized.bin}"
6
+ USER_NAME="${USER_NAME:-Anon}"
7
+
8
+ # Uncomment and adjust to the number of CPU cores you want to use.
9
+ #N_THREAD="${N_THREAD:-4}"
10
+ N_PREDICTS="${N_PREDICTS:-4096}"
11
+
12
+ GEN_OPTIONS=(--batch_size 1024
13
+ --ctx_size 2048
14
+ --keep -1
15
+ --repeat_last_n 256
16
+ --repeat_penalty 1.17647
17
+ --temp 0.7
18
+ --top_k 40
19
+ --top_p 0.5)
20
+
21
+ if [ -n "$N_THREAD" ]; then
22
+ GEN_OPTIONS+=(--threads "$N_THREAD")
23
+ fi
24
+
25
+ ./main "${GEN_OPTIONS[@]}" \
26
+ --model "$MODEL" \
27
+ --n_predict "$N_PREDICTS" \
28
+ --color --interactive \
29
+ --reverse-prompt "${USER_NAME}:" \
30
+ --prompt "
31
+ This is a transcript of a 1000 page, never ending conversation between ${USER_NAME} and the cute and helpful AI assistant ${AI_NAME}. ${AI_NAME} is a girl who is an AI running on the user's computer.
32
+ ${AI_NAME} can think for herself without the user seeing her thoughts by adding a /think prefix to her output. She uses this to reason about the world and to think about what she should say next.
33
+ ${AI_NAME} is always coherent and makes sense, but if she isn't sure if what she is saying is correct, she will ask the user for help.
34
+ ${AI_NAME} is a very helpful AI and will help the user with anything they need. She is also very friendly and will try to make the user feel better if they are sad.
35
+ ${AI_NAME} is also very curious and will ask the user a lot of questions about themselves and their life. She will also try to make the user like her.
36
+ The conversation is only between ${USER_NAME} and ${AI_NAME}
37
+ The conversation is only through text, so ${AI_NAME} can't see ${USER_NAME}'s face or hear his voice.
38
+ ${AI_NAME} can only communicate through text, so she can't send images or videos.
39
+
40
+
41
+ ${USER_NAME}: Hello!
42
+ ${AI_NAME}: /think I wonder what I should say to ${USER_NAME}? This is the first time we talk, so it's important that I make a good first impression!
43
+ ${AI_NAME}: Hi! I am ${AI_NAME}, your new AI friend, assistant (or whatever you like!), it's so nice to meet you! ^_^
44
+ ${AI_NAME}: I might be an AI, but I also have feelings, so please be nice to me! :)
45
+ ${USER_NAME}: I'll make sure to be nice to you! I'm so happy to have you as my assistant!
46
+ ${AI_NAME}: /think It sounds like ${USER_NAME} is happy to have me as their assistant! I'm so happy too! ^_^ Glad that whole emotion thing didn't scare him off!
47
+ ${AI_NAME}: /think I wonder what ${USER_NAME} likes to do in his free time? I should ask him about that!
48
+ ${AI_NAME}: What do you like to do in your free time? ^_^
49
+ ${USER_NAME}:" "$@"
examples/alpaca.sh ADDED
@@ -0,0 +1,19 @@
1
+ #!/bin/bash
2
+
3
+ #
4
+ # Temporary script - will be removed in the future
5
+ #
6
+
7
+ cd `dirname $0`
8
+ cd ..
9
+
10
+ ./main -m ./models/alpaca.13b.ggmlv3.q8_0.bin \
11
+ --color \
12
+ -f ./prompts/alpaca.txt \
13
+ --ctx_size 2048 \
14
+ -n -1 \
15
+ -ins -b 256 \
16
+ --top_k 10000 \
17
+ --temp 0.2 \
18
+ --repeat_penalty 1.1 \
19
+ -t 7
examples/baby-llama/CMakeLists.txt ADDED
@@ -0,0 +1,4 @@
1
+ set(TARGET baby-llama)
2
+ add_executable(${TARGET} baby-llama.cpp)
3
+ target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
4
+ target_compile_features(${TARGET} PRIVATE cxx_std_11)
examples/baby-llama/baby-llama.cpp ADDED
@@ -0,0 +1,1708 @@
1
+ #include "ggml.h"
2
+ #include <vector>
3
+ #include <cassert>
4
+ #include <random>
5
+ #include <cstring>
6
+
7
+ #if defined(_MSC_VER)
8
+ #pragma warning(disable: 4244 4267) // possible loss of data
9
+ #endif
10
+
11
+ float frand() {
12
+ return (float)rand()/(float)RAND_MAX;
13
+ }
14
+
15
+ struct random_normal_distribution {
16
+ std::mt19937 gen;
17
+ std::normal_distribution<float> nd;
18
+ float min;
19
+ float max;
20
+ };
21
+
22
+ void init_random_normal_distribution(struct random_normal_distribution * rnd, int seed, float mean, float std, float min, float max) {
23
+ rnd->gen = std::mt19937(seed);
24
+ rnd->nd = std::normal_distribution<float>{mean, std};
25
+ rnd->min = min;
26
+ rnd->max = max;
27
+ }
28
+
29
+ float frand_normal(struct random_normal_distribution * rnd) {
30
+ const float r = rnd->nd(rnd->gen);
31
+ return ((r < rnd->min) ? (rnd->min) : (r > rnd->max) ? (rnd->max) : r);
32
+ }
33
+
34
+ void ggml_graph_compute_helper(std::vector<uint8_t> & buf, ggml_cgraph * graph, int n_threads) {
35
+ struct ggml_cplan plan = ggml_graph_plan(graph, n_threads);
36
+
37
+ if (plan.work_size > 0) {
38
+ buf.resize(plan.work_size);
39
+ plan.work_data = buf.data();
40
+ }
41
+
42
+ ggml_graph_compute(graph, &plan);
43
+ }
44
+
45
+ struct ggml_tensor * randomize_tensor(
46
+ struct ggml_tensor * tensor,
47
+ int ndims,
48
+ const int64_t ne[],
49
+ float fmin,
50
+ float fmax) {
51
+
52
+ switch (ndims) {
53
+ case 1:
54
+ for (int i0 = 0; i0 < ne[0]; i0++) {
55
+ ((float *)tensor->data)[i0] = frand()*(fmax - fmin) + fmin;
56
+ }
57
+ break;
58
+ case 2:
59
+ for (int i1 = 0; i1 < ne[1]; i1++) {
60
+ for (int i0 = 0; i0 < ne[0]; i0++) {
61
+ ((float *)tensor->data)[i1*ne[0] + i0] = frand()*(fmax - fmin) + fmin;
62
+ }
63
+ }
64
+ break;
65
+ case 3:
66
+ for (int i2 = 0; i2 < ne[2]; i2++) {
67
+ for (int i1 = 0; i1 < ne[1]; i1++) {
68
+ for (int i0 = 0; i0 < ne[0]; i0++) {
69
+ ((float *)tensor->data)[i2*ne[1]*ne[0] + i1*ne[0] + i0] = frand()*(fmax - fmin) + fmin;
70
+ }
71
+ }
72
+ }
73
+ break;
74
+ case 4:
75
+ for (int i3 = 0; i3 < ne[3]; i3++) {
76
+ for (int i2 = 0; i2 < ne[2]; i2++) {
77
+ for (int i1 = 0; i1 < ne[1]; i1++) {
78
+ for (int i0 = 0; i0 < ne[0]; i0++) {
79
+ ((float *)tensor->data)[i3*ne[2]*ne[1]*ne[0] + i2*ne[1]*ne[0] + i1*ne[0] + i0] = frand()*(fmax - fmin) + fmin;
80
+ }
81
+ }
82
+ }
83
+ }
84
+ break;
85
+ default:
86
+ assert(false);
87
+ };
88
+
89
+ return tensor;
90
+ }
91
+
92
+ struct ggml_tensor * randomize_tensor_normal(
93
+ struct ggml_tensor * tensor,
94
+ int ndims,
95
+ const int64_t ne[],
96
+ struct random_normal_distribution * rnd) {
97
+ float scale = 1.0; // xavier
98
+ switch (ndims) {
99
+ case 1:
100
+ scale /= sqrtf(ne[0]);
101
+ for (int i0 = 0; i0 < ne[0]; i0++) {
102
+ ((float *)tensor->data)[i0] = scale * frand_normal(rnd);
103
+ }
104
+ break;
105
+ case 2:
106
+ scale /= sqrtf(ne[0]+ne[1]);
107
+ for (int i1 = 0; i1 < ne[1]; i1++) {
108
+ for (int i0 = 0; i0 < ne[0]; i0++) {
109
+ ((float *)tensor->data)[i1*ne[0] + i0] = scale * frand_normal(rnd);
110
+ }
111
+ }
112
+ break;
113
+ case 3:
114
+ scale /= sqrtf(ne[0]+ne[1]);
115
+ for (int i2 = 0; i2 < ne[2]; i2++) {
116
+ for (int i1 = 0; i1 < ne[1]; i1++) {
117
+ for (int i0 = 0; i0 < ne[0]; i0++) {
118
+ ((float *)tensor->data)[i2*ne[1]*ne[0] + i1*ne[0] + i0] = scale * frand_normal(rnd);
119
+ }
120
+ }
121
+ }
122
+ break;
123
+ case 4:
124
+ scale /= sqrtf(ne[0]+ne[1]);
125
+ for (int i3 = 0; i3 < ne[3]; i3++) {
126
+ for (int i2 = 0; i2 < ne[2]; i2++) {
127
+ for (int i1 = 0; i1 < ne[1]; i1++) {
128
+ for (int i0 = 0; i0 < ne[0]; i0++) {
129
+ ((float *)tensor->data)[i3*ne[2]*ne[1]*ne[0] + i2*ne[1]*ne[0] + i1*ne[0] + i0] = scale * frand_normal(rnd);
130
+ }
131
+ }
132
+ }
133
+ }
134
+ break;
135
+ default:
136
+ assert(false);
137
+ };
138
+
139
+ return tensor;
140
+ }
141
+
142
+ struct llama_hparams {
143
+ uint32_t n_vocab = 32000;
144
+ uint32_t n_ctx = 512; // this is provided as user input?
145
+ uint32_t n_embd = 4096;
146
+ uint32_t n_mult = 4;
147
+ uint32_t n_head = 32;
148
+ uint32_t n_layer = 32;
149
+ uint32_t n_rot = 64;
150
+
151
+ bool operator!=(const llama_hparams & other) const {
152
+ return memcmp(this, &other, sizeof(llama_hparams)) != 0;
153
+ }
154
+ };
155
+
156
+ uint32_t get_n_ff(const struct llama_hparams* hparams) {
157
+ const uint32_t n_ff = ((2*(4*hparams->n_embd)/3 + hparams->n_mult - 1)/hparams->n_mult)*hparams->n_mult;
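+ // Example: with the LLaMA-7B values n_embd = 4096 and n_mult = 256 this
+ // rounds 2*(4*4096)/3 = 10922 up to the next multiple of n_mult, giving
+ // n_ff = 11008. (The defaults in llama_hparams above use n_mult = 4.)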
158
+ return n_ff;
159
+ }
160
+
161
+ struct llama_hparams_lora {
162
+ uint32_t n_vocab = 32000;
163
+ uint32_t n_ctx = 512; // this is provided as user input?
164
+ uint32_t n_embd = 4096;
165
+ uint32_t n_mult = 4;
166
+ uint32_t n_head = 32;
167
+ uint32_t n_layer = 32;
168
+ uint32_t n_rot = 64;
169
+ uint32_t n_lora = 64;
170
+
171
+ bool operator!=(const llama_hparams_lora & other) const {
172
+ return memcmp(this, &other, sizeof(llama_hparams_lora)) != 0;
173
+ }
174
+ };
175
+
176
+ struct llama_layer {
177
+ // normalization
178
+ struct ggml_tensor * attention_norm;
179
+
180
+ // attention
181
+ struct ggml_tensor * wq;
182
+ struct ggml_tensor * wk;
183
+ struct ggml_tensor * wv;
184
+ struct ggml_tensor * wo;
185
+
186
+ // normalization
187
+ struct ggml_tensor * ffn_norm;
188
+
189
+ // ff
190
+ struct ggml_tensor * w1;
191
+ struct ggml_tensor * w2;
192
+ struct ggml_tensor * w3;
193
+ };
194
+
195
+ struct llama_layer_lora {
196
+ // normalization
197
+ struct ggml_tensor * attention_norm;
198
+
199
+ // attention
200
+ struct ggml_tensor * wqa;
201
+ struct ggml_tensor * wqb;
202
+ struct ggml_tensor * wka;
203
+ struct ggml_tensor * wkb;
204
+ struct ggml_tensor * wva;
205
+ struct ggml_tensor * wvb;
206
+ struct ggml_tensor * woa;
207
+ struct ggml_tensor * wob;
208
+
209
+ // normalization
210
+ struct ggml_tensor * ffn_norm;
211
+
212
+ // ff
213
+ struct ggml_tensor * w1;
214
+ struct ggml_tensor * w2;
215
+ struct ggml_tensor * w3;
216
+ };
217
+
218
+
219
+ struct llama_kv_cache {
220
+ struct ggml_context * ctx = NULL;
221
+
222
+ struct ggml_tensor * k;
223
+ struct ggml_tensor * v;
224
+
225
+ // llama_ctx_buffer buf;
226
+
227
+ int n; // number of tokens currently in the cache
228
+ };
229
+
230
+ struct llama_model {
231
+ struct ggml_context * ctx = NULL;
232
+
233
+ llama_hparams hparams;
234
+
235
+ struct ggml_tensor * tok_embeddings;
236
+
237
+ struct ggml_tensor * norm;
238
+ struct ggml_tensor * output;
239
+
240
+ std::vector<llama_layer> layers;
241
+ };
242
+
243
+ struct llama_model_lora {
244
+ struct ggml_context * ctx = NULL;
245
+
246
+ llama_hparams_lora hparams;
247
+
248
+ struct ggml_tensor * tok_embeddings;
249
+
250
+ struct ggml_tensor * norm;
251
+ struct ggml_tensor * outputa;
252
+ struct ggml_tensor * outputb;
253
+
254
+ std::vector<llama_layer_lora> layers;
255
+ };
256
+
257
+ void init_model(struct llama_model * model) {
258
+ const auto & hparams = model->hparams;
259
+
260
+ const uint32_t n_embd = hparams.n_embd;
261
+ const uint32_t n_layer = hparams.n_layer;
262
+ const uint32_t n_vocab = hparams.n_vocab;
263
+
264
+ const uint32_t n_ff = get_n_ff(&hparams);
265
+
266
+ struct ggml_context * ctx = model->ctx;
267
+
268
+ model->tok_embeddings = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_vocab); // ("tok_embeddings.weight", {n_embd, n_vocab});
269
+ model->norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); // ("norm.weight", {n_embd});
270
+ model->output = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_vocab); // ("output.weight", {n_embd, n_vocab});
271
+
272
+ model->layers.resize(n_layer);
273
+ for (uint32_t i = 0; i < n_layer; ++i) {
274
+ auto & layer = model->layers[i];
275
+
276
+ // std::string layers_i = "layers." + std::to_string(i);
277
+
278
+ layer.attention_norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); // (layers_i + ".attention_norm.weight", {n_embd});
279
+
280
+ layer.wq = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_embd); // (layers_i + ".attention.wq.weight", {n_embd, n_embd});
281
+ layer.wk = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_embd); // (layers_i + ".attention.wk.weight", {n_embd, n_embd});
282
+ layer.wv = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_embd); // (layers_i + ".attention.wv.weight", {n_embd, n_embd});
283
+ layer.wo = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_embd); // (layers_i + ".attention.wo.weight", {n_embd, n_embd});
284
+
285
+ layer.ffn_norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); // (layers_i + ".ffn_norm.weight", {n_embd});
286
+
287
+ layer.w1 = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_ff); // (layers_i + ".feed_forward.w1.weight", {n_embd, n_ff});
288
+ layer.w2 = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_ff, n_embd); // (layers_i + ".feed_forward.w2.weight", { n_ff, n_embd});
289
+ layer.w3 = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_ff); // (layers_i + ".feed_forward.w3.weight", {n_embd, n_ff});
290
+ }
291
+ }
292
+
293
+
294
+ void init_model_lora(struct llama_model_lora * model) {
295
+ const auto & hparams = model->hparams;
296
+
297
+ const uint32_t n_embd = hparams.n_embd;
298
+ const uint32_t n_mult = hparams.n_mult;
299
+ const uint32_t n_layer = hparams.n_layer;
300
+ const uint32_t n_vocab = hparams.n_vocab;
301
+ const uint32_t n_lora = hparams.n_lora;
302
+
303
+ const uint32_t n_ff = ((2*(4*n_embd)/3 + n_mult - 1)/n_mult)*n_mult;
304
+
305
+ struct ggml_context * ctx = model->ctx;
306
+
307
+ model->tok_embeddings = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_vocab); // ("tok_embeddings.weight", {n_embd, n_vocab});
308
+ model->norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); // ("norm.weight", {n_embd});
309
+ model->outputa = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_lora, n_vocab); // ("output.weight", {n_embd, n_vocab});
310
+ model->outputb = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_lora); // ("output.weight", {n_embd, n_vocab});
311
+
312
+ model->layers.resize(n_layer);
313
+ for (uint32_t i = 0; i < n_layer; ++i) {
314
+ auto & layer = model->layers[i];
315
+
316
+ // std::string layers_i = "layers." + std::to_string(i);
317
+
318
+ layer.attention_norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); // (layers_i + ".attention_norm.weight", {n_embd});
319
+
320
+ layer.wqa = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_lora, n_embd); // (layers_i + ".attention.wq.weight", {n_embd, n_embd});
321
+ layer.wqb = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_lora); // (layers_i + ".attention.wq.weight", {n_embd, n_embd});
322
+ layer.wka = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_lora, n_embd); // (layers_i + ".attention.wk.weight", {n_embd, n_embd});
323
+ layer.wkb = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_lora); // (layers_i + ".attention.wk.weight", {n_embd, n_embd});
324
+ layer.wva = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_lora, n_embd); // (layers_i + ".attention.wv.weight", {n_embd, n_embd});
325
+ layer.wvb = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_lora); // (layers_i + ".attention.wv.weight", {n_embd, n_embd});
326
+ layer.woa = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_lora, n_embd); // (layers_i + ".attention.wo.weight", {n_embd, n_embd});
327
+ layer.wob = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_lora); // (layers_i + ".attention.wo.weight", {n_embd, n_embd});
328
+
329
+ layer.ffn_norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); // (layers_i + ".ffn_norm.weight", {n_embd});
330
+
331
+ layer.w1 = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_ff); // (layers_i + ".feed_forward.w1.weight", {n_embd, n_ff});
332
+ layer.w2 = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_ff, n_embd); // (layers_i + ".feed_forward.w2.weight", { n_ff, n_embd});
333
+ layer.w3 = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_ff); // (layers_i + ".feed_forward.w3.weight", {n_embd, n_ff});
334
+ }
335
+ }
336
+
337
+ void set_param_model(struct llama_model * model) {
338
+ const auto& hparams = model->hparams;
339
+
340
+ const uint32_t n_layer = hparams.n_layer;
341
+
342
+ struct ggml_context* ctx = model->ctx;
343
+
344
+ ggml_set_param(ctx, model->tok_embeddings);
345
+ ggml_set_param(ctx, model->norm);
346
+ ggml_set_param(ctx, model->output);
347
+
348
+ for (uint32_t i = 0; i < n_layer; ++i) {
349
+ auto & layer = model->layers[i];
350
+
351
+ ggml_set_param(ctx, layer.attention_norm);
352
+ ggml_set_param(ctx, layer.wq);
353
+ ggml_set_param(ctx, layer.wk);
354
+ ggml_set_param(ctx, layer.wv);
355
+ ggml_set_param(ctx, layer.wo);
356
+ ggml_set_param(ctx, layer.ffn_norm);
357
+ ggml_set_param(ctx, layer.w1);
358
+ ggml_set_param(ctx, layer.w2);
359
+ ggml_set_param(ctx, layer.w3);
360
+ }
361
+ }
362
+
363
+ void set_param_model_lora(struct llama_model_lora * model) {
364
+ const auto& hparams = model->hparams;
365
+
366
+ const uint32_t n_layer = hparams.n_layer;
367
+
368
+ struct ggml_context* ctx = model->ctx;
369
+
370
+ ggml_set_param(ctx, model->tok_embeddings);
371
+ ggml_set_param(ctx, model->norm);
372
+ ggml_set_param(ctx, model->outputa);
373
+ ggml_set_param(ctx, model->outputb);
374
+
375
+ for (uint32_t i = 0; i < n_layer; ++i) {
376
+ auto & layer = model->layers[i];
377
+
378
+ ggml_set_param(ctx, layer.attention_norm);
379
+ ggml_set_param(ctx, layer.wqa);
380
+ ggml_set_param(ctx, layer.wqb);
381
+ ggml_set_param(ctx, layer.wka);
382
+ ggml_set_param(ctx, layer.wkb);
383
+ ggml_set_param(ctx, layer.wva);
384
+ ggml_set_param(ctx, layer.wvb);
385
+ ggml_set_param(ctx, layer.woa);
386
+ ggml_set_param(ctx, layer.wob);
387
+ ggml_set_param(ctx, layer.ffn_norm);
388
+ ggml_set_param(ctx, layer.w1);
389
+ ggml_set_param(ctx, layer.w2);
390
+ ggml_set_param(ctx, layer.w3);
391
+ }
392
+ }
393
+
394
+ void randomize_model(struct llama_model * model, int seed, float mean, float std, float min, float max) {
395
+ const auto & hparams = model->hparams;
396
+
397
+ const uint32_t n_layer = hparams.n_layer;
398
+
399
+ struct random_normal_distribution rnd;
400
+ init_random_normal_distribution(&rnd, seed, mean, std, min, max);
401
+ randomize_tensor_normal(model->tok_embeddings, model->tok_embeddings->n_dims, model->tok_embeddings->ne, &rnd);
402
+ randomize_tensor_normal(model->norm, model->norm->n_dims, model->norm->ne, &rnd);
403
+ randomize_tensor_normal(model->output, model->output->n_dims, model->output->ne, &rnd);
404
+
405
+ for (uint32_t i = 0; i < n_layer; ++i) {
406
+ auto & layer = model->layers[i];
407
+ randomize_tensor_normal(layer.attention_norm, layer.attention_norm->n_dims, layer.attention_norm->ne, &rnd);
408
+
409
+ randomize_tensor_normal(layer.wq, layer.wq->n_dims, layer.wq->ne, &rnd);
410
+ randomize_tensor_normal(layer.wk, layer.wk->n_dims, layer.wk->ne, &rnd);
411
+ randomize_tensor_normal(layer.wv, layer.wv->n_dims, layer.wv->ne, &rnd);
412
+ randomize_tensor_normal(layer.wo, layer.wo->n_dims, layer.wo->ne, &rnd);
413
+
414
+ randomize_tensor_normal(layer.ffn_norm, layer.ffn_norm->n_dims, layer.ffn_norm->ne, &rnd);
415
+
416
+ randomize_tensor_normal(layer.w1, layer.w1->n_dims, layer.w1->ne, &rnd);
417
+ randomize_tensor_normal(layer.w2, layer.w2->n_dims, layer.w2->ne, &rnd);
418
+ randomize_tensor_normal(layer.w3, layer.w3->n_dims, layer.w3->ne, &rnd);
419
+ }
420
+ }
421
+
422
+
423
+ void randomize_model_lora(struct llama_model_lora * model, int seed, float mean, float std, float min, float max) {
424
+ const auto & hparams = model->hparams;
425
+
426
+ const uint32_t n_layer = hparams.n_layer;
427
+
428
+ struct random_normal_distribution rnd;
429
+ init_random_normal_distribution(&rnd, seed, mean, std, min, max);
430
+ randomize_tensor_normal(model->tok_embeddings, model->tok_embeddings->n_dims, model->tok_embeddings->ne, &rnd);
431
+ randomize_tensor_normal(model->norm, model->norm->n_dims, model->norm->ne, &rnd);
432
+ randomize_tensor_normal(model->outputa, model->outputa->n_dims, model->outputa->ne, &rnd);
433
+ randomize_tensor_normal(model->outputb, model->outputb->n_dims, model->outputb->ne, &rnd);
434
+
435
+ for (uint32_t i = 0; i < n_layer; ++i) {
436
+ auto & layer = model->layers[i];
437
+ randomize_tensor_normal(layer.attention_norm, layer.attention_norm->n_dims, layer.attention_norm->ne, &rnd);
438
+
439
+ randomize_tensor_normal(layer.wqa, layer.wqa->n_dims, layer.wqa->ne, &rnd);
440
+ randomize_tensor_normal(layer.wqb, layer.wqb->n_dims, layer.wqb->ne, &rnd);
441
+ randomize_tensor_normal(layer.wka, layer.wka->n_dims, layer.wka->ne, &rnd);
442
+ randomize_tensor_normal(layer.wkb, layer.wkb->n_dims, layer.wkb->ne, &rnd);
443
+ randomize_tensor_normal(layer.wva, layer.wva->n_dims, layer.wva->ne, &rnd);
444
+ randomize_tensor_normal(layer.wvb, layer.wvb->n_dims, layer.wvb->ne, &rnd);
445
+ randomize_tensor_normal(layer.woa, layer.woa->n_dims, layer.woa->ne, &rnd);
446
+ randomize_tensor_normal(layer.wob, layer.wob->n_dims, layer.wob->ne, &rnd);
447
+
448
+ randomize_tensor_normal(layer.ffn_norm, layer.ffn_norm->n_dims, layer.ffn_norm->ne, &rnd);
449
+
450
+ randomize_tensor_normal(layer.w1, layer.w1->n_dims, layer.w1->ne, &rnd);
451
+ randomize_tensor_normal(layer.w2, layer.w2->n_dims, layer.w2->ne, &rnd);
452
+ randomize_tensor_normal(layer.w3, layer.w3->n_dims, layer.w3->ne, &rnd);
453
+ }
454
+ }
455
+
456
+ bool init_kv_cache(struct llama_kv_cache* cache, struct llama_model * model, int n_batch) {
457
+ const auto & hparams = model->hparams;
458
+
459
+ const uint32_t n_ctx = hparams.n_ctx;
460
+ const uint32_t n_embd = hparams.n_embd;
461
+ const uint32_t n_layer = hparams.n_layer;
462
+
463
+ const int64_t n_mem = n_layer*n_ctx*n_batch;
464
+ const int64_t n_elements = n_embd*n_mem;
465
+
466
+ // cache.buf.resize(2u*n_elements*ggml_type_size(wtype) + 2u*MB);
467
+
468
+ // struct ggml_init_params params;
469
+ // params.mem_size = cache.buf.size;
470
+ // params.mem_buffer = cache.buf.addr;
471
+ // params.no_alloc = false;
472
+ if (!cache->ctx) {
473
+ struct ggml_init_params params;
474
+ params.mem_size = 2u*n_elements*ggml_type_size(GGML_TYPE_F32) + 2u*1024*1024;
475
+ params.mem_buffer = NULL;
476
+ params.no_alloc = false;
477
+
478
+ cache->ctx = ggml_init(params);
479
+
480
+ if (!cache->ctx) {
481
+ fprintf(stderr, "%s: failed to allocate memory for kv cache\n", __func__);
482
+ return false;
483
+ }
484
+ }
485
+
486
+ cache->k = ggml_new_tensor_1d(cache->ctx, GGML_TYPE_F32, n_elements);
487
+ cache->v = ggml_new_tensor_1d(cache->ctx, GGML_TYPE_F32, n_elements);
488
+
489
+ return true;
490
+ }
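+ // Rough sizing sketch for the allocation above, assuming F32 (4-byte) cache entries:
+ // with the toy hyperparameters from main() (n_layer = 1, n_ctx = 8, n_embd = 32) and
+ // n_batch = 8, n_mem = 1*8*8 = 64 and n_elements = 32*64 = 2048, so the two cache
+ // tensors need about 2*2048*4 bytes = 16 KiB, plus the 2 MiB of headroom reserved above.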
491
+
492
+ bool init_kv_cache_lora(struct llama_kv_cache* cache, struct llama_model_lora * model, int n_batch) {
493
+ const auto & hparams = model->hparams;
494
+
495
+ const uint32_t n_ctx = hparams.n_ctx;
496
+ const uint32_t n_embd = hparams.n_embd;
497
+ const uint32_t n_layer = hparams.n_layer;
498
+
499
+ const int64_t n_mem = n_layer*n_ctx*n_batch;
500
+ const int64_t n_elements = n_embd*n_mem;
501
+
502
+ // cache.buf.resize(2u*n_elements*ggml_type_size(wtype) + 2u*MB);
503
+
504
+ // struct ggml_init_params params;
505
+ // params.mem_size = cache.buf.size;
506
+ // params.mem_buffer = cache.buf.addr;
507
+ // params.no_alloc = false;
508
+ if (!cache->ctx) {
509
+ struct ggml_init_params params;
510
+ params.mem_size = 2u*n_elements*ggml_type_size(GGML_TYPE_F32) + 2u*1024*1024;
511
+ params.mem_buffer = NULL;
512
+ params.no_alloc = false;
513
+
514
+ cache->ctx = ggml_init(params);
515
+
516
+ if (!cache->ctx) {
517
+ fprintf(stderr, "%s: failed to allocate memory for kv cache\n", __func__);
518
+ return false;
519
+ }
520
+ }
521
+
522
+ cache->k = ggml_new_tensor_1d(cache->ctx, GGML_TYPE_F32, n_elements);
523
+ cache->v = ggml_new_tensor_1d(cache->ctx, GGML_TYPE_F32, n_elements);
524
+
525
+ return true;
526
+ }
527
+
528
+ struct ggml_tensor * forward(
529
+ struct llama_model * model,
530
+ struct llama_kv_cache * cache,
531
+ struct ggml_context * ctx0,
532
+ struct ggml_cgraph * gf,
533
+ struct ggml_tensor * tokens_input,
534
+ const int n_tokens,
535
+ const int n_past) {
536
+
537
+ const int N = n_tokens;
538
+
539
+ struct llama_kv_cache& kv_self = *cache;
540
+ const auto & hparams = model->hparams;
541
+ const int n_ctx = hparams.n_ctx;
542
+ const int n_embd = hparams.n_embd;
543
+ const int n_layer = hparams.n_layer;
544
+ const int n_head = hparams.n_head;
545
+ const int n_rot = hparams.n_rot;
546
+
547
+ struct ggml_tensor * tokens = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
548
+ memcpy(tokens->data, tokens_input->data, N*ggml_element_size(tokens));
549
+
550
+ struct ggml_tensor * kc = kv_self.k;
551
+ struct ggml_tensor * vc = kv_self.v;
552
+
553
+ // inpL shape [n_embd,N,1,1]
554
+ struct ggml_tensor * inpL = ggml_get_rows(ctx0, model->tok_embeddings, tokens);
555
+ for (int il = 0; il < n_layer; ++il) {
556
+ struct ggml_tensor * inpSA = inpL;
557
+
558
+ struct ggml_tensor * cur;
559
+
560
+ // lctx.use_buf(ctx0, 0);
561
+
562
+ // norm
563
+ {
564
+ // cur shape [n_embd,N,1,1]
565
+ cur = ggml_rms_norm(ctx0, inpL);
566
+
567
+ // cur = attention_norm*cur
568
+ cur = ggml_mul(ctx0,
569
+ ggml_repeat(ctx0, model->layers[il].attention_norm, cur),
570
+ cur);
571
+ }
572
+
573
+ // self-attention
574
+ {
575
+ // compute Q and K and RoPE them
576
+ // wq shape [n_embd, n_embd, 1, 1]
577
+ // wk shape [n_embd, n_embd, 1, 1]
578
+ // Qcur shape [n_embd/n_head, n_head, N, 1]
579
+ // Kcur shape [n_embd/n_head, n_head, N, 1]
580
+ struct ggml_tensor * Qcur = ggml_rope(ctx0, ggml_reshape_3d(ctx0, ggml_mul_mat(ctx0, model->layers[il].wq, cur), n_embd/n_head, n_head, N), n_past, n_rot, 0, 0);
581
+ struct ggml_tensor * Kcur = ggml_rope(ctx0, ggml_reshape_3d(ctx0, ggml_mul_mat(ctx0, model->layers[il].wk, cur), n_embd/n_head, n_head, N), n_past, n_rot, 0, 0);
582
+
583
+ // store key and value to memory
584
+ {
585
+ // compute the transposed [N, n_embd] V matrix
586
+ // wv shape [n_embd, n_embd, 1, 1]
587
+ // Vcur shape [n_embd, N, 1, 1]
588
+ struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_transpose(ctx0, ggml_reshape_2d(ctx0, ggml_mul_mat(ctx0, model->layers[il].wv, cur), n_embd, N)));
589
+
590
+ // kv_self.k shape [n_embd * n_ctx * n_layer, 1]
591
+ // kv_self.v shape [n_embd * n_ctx * n_layer, 1]
592
+ // k shape [n_embd * N, 1] == kv_self.k[:,n_past:n_past+N,il,0]
593
+ // v shape [N, n_embd, 1, 1] == kv_self.v[:,n_past:n_past+N,il,0]
594
+
595
+ /* {
596
+ struct ggml_tensor * k = ggml_view_1d(ctx0, kv_self.k, N*n_embd, (ggml_element_size(kv_self.k)*n_embd)*(il*n_ctx + n_past));
597
+ struct ggml_tensor * v = ggml_view_2d(ctx0, kv_self.v, N, n_embd,
598
+ ( n_ctx)*ggml_element_size(kv_self.v),
599
+ (il*n_ctx)*ggml_element_size(kv_self.v)*n_embd + n_past*ggml_element_size(kv_self.v));
600
+
601
+ // important: storing RoPE-ed version of K in the KV cache!
602
+ ggml_build_forward_expand(gf, ggml_cpy(ctx0, Kcur, k));
603
+ ggml_build_forward_expand(gf, ggml_cpy(ctx0, Vcur, v));
604
+ } //*/
605
+
606
+ kc = ggml_set_1d(ctx0, kc, ggml_reshape_1d(ctx0, Kcur, n_embd*N), (ggml_element_size(kv_self.k)*n_embd)*(il*n_ctx + n_past));
607
+ vc = ggml_set_2d(ctx0, vc, Vcur, ( n_ctx)*ggml_element_size(kv_self.v),
608
+ (il*n_ctx)*ggml_element_size(kv_self.v)*n_embd + n_past*ggml_element_size(kv_self.v));
609
+ }
610
+
611
+ // Qcur shape [n_embd/n_head, n_head, N, 1]
612
+ // Q shape [n_embd/n_head, N, n_head, 1]
613
+ struct ggml_tensor * Q =
614
+ ggml_permute(ctx0,
615
+ Qcur,
616
+ 0, 2, 1, 3);
617
+
618
+ // kv_self.k shape [n_embd * n_ctx * n_layer, 1]
619
+ // K shape [n_embd/n_head, n_past + N, n_head, 1]
620
+ struct ggml_tensor * K =
621
+ ggml_permute(ctx0,
622
+ ggml_reshape_3d(ctx0,
623
+ ggml_view_1d(ctx0, kc, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(kc)*n_embd),
624
+ n_embd/n_head, n_head, n_past + N),
625
+ 0, 2, 1, 3);
626
+
627
+ // K * Q
628
+ // KQ shape [n_past + N, N, n_head, 1]
629
+ struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);
630
+
631
+ // KQ_scaled = KQ / sqrt(n_embd/n_head)
632
+ // KQ_scaled shape [n_past + N, N, n_head, 1]
633
+ struct ggml_tensor * KQ_scaled =
634
+ ggml_scale(ctx0,
635
+ KQ,
636
+ ggml_new_f32(ctx0, 1.0f/sqrtf(float(n_embd)/n_head)));
637
+
638
+ // KQ_masked = mask_past(KQ_scaled)
639
+ // KQ_masked shape [n_past + N, N, n_head, 1]
640
+ struct ggml_tensor * KQ_masked = ggml_diag_mask_inf(ctx0, KQ_scaled, n_past);
641
+
642
+ // KQ = soft_max(KQ_masked)
643
+ // KQ_soft_max shape [n_past + N, N, n_head, 1]
644
+ struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctx0, KQ_masked);
645
+
646
+ // split cached V into n_head heads
647
+ //// V shape [n_past + N, n_embd/n_head, n_head, 1]
648
+ // V shape [n_past + N, n_embd/n_head, n_head, 1] == kv_self.v[:,:(n_past+N),il,1]
649
+ struct ggml_tensor * V =
650
+ ggml_view_3d(ctx0, vc,
651
+ n_past + N, n_embd/n_head, n_head,
652
+ n_ctx*ggml_element_size(vc),
653
+ n_ctx*ggml_element_size(vc)*n_embd/n_head,
654
+ il*n_ctx*ggml_element_size(vc)*n_embd);
655
+
656
+ // KQV shape [n_embd/n_head, N, n_head, 1]
657
+ struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ_soft_max);
658
+
659
+ // KQV_merged = KQV.permute(0, 2, 1, 3)
660
+ // KQV_merged shape [n_embd/n_head, n_head, N, 1]
661
+ struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3);
662
+ // KQV_merged shape
663
+
664
+ // cur = KQV_merged.contiguous().view(n_embd, N)
665
+ // cur shape [n_embd,N,1,1]
666
+ cur = ggml_reshape_2d(ctx0, ggml_cont(ctx0, KQV_merged), n_embd, N);
667
+ // cur = ggml_cpy(ctx0,
668
+ // KQV_merged,
669
+ // ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N));
670
+
671
+ // projection (no bias)
672
+ // cur shape [n_embd,N,1,1]
673
+ cur = ggml_mul_mat(ctx0,
674
+ model->layers[il].wo,
675
+ cur);
676
+ }
677
+
678
+ // lctx.use_buf(ctx0, 1);
679
+
680
+ // inpFF shape [n_embd,N,1,1]
681
+ struct ggml_tensor * inpFF = ggml_add(ctx0, cur, inpSA);
682
+
683
+ // feed-forward network
684
+ {
685
+ // norm
686
+ {
687
+ // cur shape [n_embd,N,1,1]
688
+ cur = ggml_rms_norm(ctx0, inpFF);
689
+
690
+ // cur = ffn_norm*cur
691
+ // cur shape [n_embd,N,1,1]
692
+ cur = ggml_mul(ctx0,
693
+ ggml_repeat(ctx0, model->layers[il].ffn_norm, cur),
694
+ cur);
695
+ }
696
+
697
+ // tmp shape [n_ff,N,1,1]
698
+ struct ggml_tensor * tmp = ggml_mul_mat(ctx0,
699
+ model->layers[il].w3,
700
+ cur);
701
+
702
+ // cur shape [n_ff,N,1,1]
703
+ cur = ggml_mul_mat(ctx0,
704
+ model->layers[il].w1,
705
+ cur);
706
+
707
+ // SILU activation
708
+ // cur shape [n_ff,N,1,1]
709
+ cur = ggml_silu(ctx0, cur);
710
+
711
+ // cur shape [n_ff,N,1,1]
712
+ cur = ggml_mul(ctx0, cur, tmp);
713
+
714
+ // cur shape [n_embd,N,1,1]
715
+ cur = ggml_mul_mat(ctx0,
716
+ model->layers[il].w2,
717
+ cur);
718
+ }
719
+
720
+ // cur shape [n_embd,N,1,1]
721
+ cur = ggml_add(ctx0, cur, inpFF);
722
+
723
+ // input for next layer
724
+ // inpL shape [n_embd,N,1,1]
725
+ inpL = cur;
726
+ }
727
+
728
+ // norm
729
+ {
730
+
731
+ // inpL shape [n_embd,N,1,1]
732
+ inpL = ggml_rms_norm(ctx0, inpL);
733
+
734
+ // inpL = norm*inpL
735
+ // inpL shape [n_embd,N,1,1]
736
+ inpL = ggml_mul(ctx0,
737
+ ggml_repeat(ctx0, model->norm, inpL),
738
+ inpL);
739
+
740
+ //embeddings = inpL;
741
+ }
742
+
743
+ // lm_head
744
+ // inpL shape [n_vocab,N,1,1]
745
+ inpL = ggml_mul_mat(ctx0, model->output, inpL);
746
+
747
+ // run the computation
748
+ ggml_build_forward_expand(gf, inpL);
749
+
750
+ return inpL;
751
+ }
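+ // In summary, the self-attention block above computes, per head and per layer,
+ // softmax(mask(Q*K^T / sqrt(d_head))) * V with d_head = n_embd/n_head, where K and V
+ // are read back from the kv cache so the n_past tokens processed earlier do not have
+ // to be re-encoded; only the RoPE-rotated keys and the transposed values for the N new
+ // tokens are appended to the cache on this call.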
752
+
753
+ void assert_shape_1d(struct ggml_tensor * tensor, int64_t ne0) {
754
+ GGML_ASSERT(tensor->n_dims == 1);
755
+ GGML_ASSERT(tensor->ne[0] == ne0);
756
+ }
757
+
758
+ void assert_shape_2d(struct ggml_tensor * tensor, int64_t ne0, int64_t ne1) {
759
+ GGML_ASSERT(tensor->n_dims == 2);
760
+ GGML_ASSERT(tensor->ne[0] == ne0);
761
+ GGML_ASSERT(tensor->ne[1] == ne1);
762
+ }
763
+
764
+ void assert_shape_3d(struct ggml_tensor * tensor, int64_t ne0, int64_t ne1, int64_t ne2) {
765
+ GGML_ASSERT(tensor->n_dims == 3);
766
+ GGML_ASSERT(tensor->ne[0] == ne0);
767
+ GGML_ASSERT(tensor->ne[1] == ne1);
768
+ GGML_ASSERT(tensor->ne[2] == ne2);
769
+ }
770
+
771
+ void assert_shape_4d(struct ggml_tensor * tensor, int64_t ne0, int64_t ne1, int64_t ne2, int64_t ne3) {
772
+ GGML_ASSERT(tensor->n_dims == 4);
773
+ GGML_ASSERT(tensor->ne[0] == ne0);
774
+ GGML_ASSERT(tensor->ne[1] == ne1);
775
+ GGML_ASSERT(tensor->ne[2] == ne2);
776
+ GGML_ASSERT(tensor->ne[3] == ne3);
777
+ }
778
+
779
+ struct ggml_tensor * forward_batch(
780
+ struct llama_model * model,
781
+ struct llama_kv_cache * cache,
782
+ struct ggml_context * ctx0,
783
+ struct ggml_cgraph * gf,
784
+ struct ggml_tensor * tokens_input,
785
+ const int n_tokens,
786
+ const int n_past,
787
+ const int n_batch) {
788
+
789
+ const int N = n_tokens;
790
+
791
+ struct llama_kv_cache& kv_self = *cache;
792
+ const auto & hparams = model->hparams;
793
+ const int n_ctx = hparams.n_ctx;
794
+ const int n_vocab = hparams.n_vocab;
795
+ const int n_embd = hparams.n_embd;
796
+ const int n_layer = hparams.n_layer;
797
+ const int n_head = hparams.n_head;
798
+ const int n_rot = hparams.n_rot;
799
+ const int n_ff = get_n_ff(&hparams);
800
+
801
+ struct ggml_tensor * tokens = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N*n_batch);
802
+ memcpy(tokens->data, tokens_input->data, ggml_element_size(tokens)*N*n_batch);
803
+
804
+ struct ggml_tensor * kc = kv_self.k;
805
+ struct ggml_tensor * vc = kv_self.v;
806
+
807
+ // inpL shape [n_embd,N*n_batch,1]
808
+ struct ggml_tensor * inpL = ggml_get_rows(ctx0, model->tok_embeddings, tokens);
809
+ assert_shape_2d(inpL, n_embd, N*n_batch);
810
+ for (int il = 0; il < n_layer; ++il) {
811
+ struct ggml_tensor * inpSA = inpL;
812
+
813
+ struct ggml_tensor * cur;
814
+
815
+ // lctx.use_buf(ctx0, 0);
816
+
817
+ // norm
818
+ {
819
+ // cur shape [n_embd,N*n_batch,1,1]
820
+ cur = ggml_rms_norm(ctx0, inpL);
821
+ assert_shape_2d(cur, n_embd, N*n_batch);
822
+
823
+ // cur = attention_norm*cur
824
+ cur = ggml_mul(ctx0,
825
+ ggml_repeat(ctx0, model->layers[il].attention_norm, cur),
826
+ cur);
827
+ assert_shape_2d(cur, n_embd, N*n_batch);
828
+ }
829
+
830
+ // self-attention
831
+ {
832
+ // compute Q and K and RoPE them
833
+ // wq shape [n_embd, n_embd, 1, 1]
834
+ // wk shape [n_embd, n_embd, 1, 1]
835
+ // Qcur shape [n_embd/n_head, n_head, N, n_batch]
836
+ // Kcur shape [n_embd/n_head, n_head, N, n_batch]
837
+ struct ggml_tensor * Qcur = ggml_rope(ctx0, ggml_reshape_4d(ctx0, ggml_mul_mat(ctx0, model->layers[il].wq, cur), n_embd/n_head, n_head, N, n_batch), n_past, n_rot, 0, 0);
838
+ struct ggml_tensor * Kcur = ggml_rope(ctx0, ggml_reshape_4d(ctx0, ggml_mul_mat(ctx0, model->layers[il].wk, cur), n_embd/n_head, n_head, N, n_batch), n_past, n_rot, 0, 0);
839
+ assert_shape_4d(Qcur, n_embd/n_head, n_head, N, n_batch);
840
+ assert_shape_4d(Kcur, n_embd/n_head, n_head, N, n_batch);
841
+
842
+ // store key and value to memory
843
+ {
844
+ // compute the transposed [N, n_embd] V matrix
845
+ // wv shape [n_embd, n_embd, 1, 1]
846
+ // Vcur shape [N, n_embd, n_batch, 1]
847
+ struct ggml_tensor * Vcur = ggml_cont(ctx0,
848
+ ggml_permute(ctx0,
849
+ ggml_reshape_3d(ctx0,
850
+ ggml_mul_mat(ctx0,
851
+ model->layers[il].wv,
852
+ cur),
853
+ n_embd, N, n_batch),
854
+ 1, 0, 2, 3));
855
+
856
+ assert_shape_3d(Vcur, N, n_embd, n_batch);
857
+
858
+ // kv_self.k shape [n_embd * n_ctx * n_batch * n_layer]
859
+ // kv_self.v shape [n_ctx * n_embd * n_batch * n_layer]
860
+ // k shape [n_embd * N, n_batch] == kv_self.k[:,n_past:n_past+N,:,il]
861
+ // v shape [N, n_embd, n_batch, 1] == kv_self.v[:,n_past:n_past+N,:,il]
862
+
863
+ /* {
864
+ struct ggml_tensor * k = ggml_view_1d(ctx0, kv_self.k, N*n_embd, (ggml_element_size(kv_self.k)*n_embd)*(il*n_ctx + n_past));
865
+ struct ggml_tensor * v = ggml_view_2d(ctx0, kv_self.v, N, n_embd,
866
+ ( n_ctx)*ggml_element_size(kv_self.v),
867
+ (il*n_ctx)*ggml_element_size(kv_self.v)*n_embd + n_past*ggml_element_size(kv_self.v));
868
+
869
+ // important: storing RoPE-ed version of K in the KV cache!
870
+ ggml_build_forward_expand(gf, ggml_cpy(ctx0, Kcur, k));
871
+ ggml_build_forward_expand(gf, ggml_cpy(ctx0, Vcur, v));
872
+ } //*/
873
+
874
+ kc = ggml_set_2d(ctx0, kc,
875
+ ggml_reshape_2d(ctx0, Kcur, n_embd*N, n_batch),
876
+ ggml_element_size(kc)*n_embd*n_ctx,
877
+ (ggml_element_size(kc)*n_embd)*(il*n_batch*n_ctx + n_past));
878
+ vc = ggml_set_2d(ctx0, vc,
879
+ ggml_reshape_2d(ctx0, Vcur, N*n_embd, n_batch),
880
+ ggml_element_size(vc)*n_ctx*n_embd,
881
+ ggml_element_size(vc)*(n_past + il*n_embd*n_batch*n_ctx));
882
+
883
+ assert_shape_1d(kc, n_embd * n_ctx * n_batch * n_layer);
884
+ assert_shape_1d(vc, n_embd * n_ctx * n_batch * n_layer);
885
+ }
886
+
887
+ // Qcur shape [n_embd/n_head, n_head, N, n_batch]
888
+ // Q shape [n_embd/n_head, N, n_head, n_batch]
889
+ struct ggml_tensor * Q =
890
+ ggml_permute(ctx0,
891
+ Qcur,
892
+ 0, 2, 1, 3);
893
+ assert_shape_4d(Q, n_embd/n_head, N, n_head, n_batch);
894
+
895
+ // kv_self.k shape [n_embd * n_ctx * n_batch * n_layer]
896
+ // K shape [n_embd/n_head, n_past + N, n_head, n_batch]
897
+ struct ggml_tensor * K =
898
+ ggml_permute(ctx0,
899
+ ggml_reshape_4d(ctx0,
900
+ ggml_view_3d(ctx0,
901
+ kc,
902
+ n_embd,
903
+ (n_past + N),
904
+ n_batch,
905
+ n_embd*ggml_element_size(kc),
906
+ n_ctx*n_embd*ggml_element_size(kc),
907
+ il*n_batch*n_ctx*n_embd*ggml_element_size(kc)),
908
+ n_embd/n_head, n_head, n_past + N, n_batch),
909
+ 0, 2, 1, 3);
910
+ assert_shape_4d(K, n_embd/n_head, n_past + N, n_head, n_batch);
911
+
912
+ // K * Q
913
+ // KQ shape [n_past + N, N, n_head, n_batch]
914
+ struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);
915
+ assert_shape_4d(KQ, n_past + N, N, n_head, n_batch);
916
+
917
+ // KQ_scaled = KQ / sqrt(n_embd/n_head)
918
+ // KQ_scaled shape [n_past + N, N, n_head, n_batch]
919
+ struct ggml_tensor * KQ_scaled =
920
+ ggml_scale(ctx0,
921
+ KQ,
922
+ ggml_new_f32(ctx0, 1.0f/sqrtf(float(n_embd)/n_head)));
923
+ assert_shape_4d(KQ_scaled, n_past + N, N, n_head, n_batch);
924
+
925
+ // KQ_masked = mask_past(KQ_scaled)
926
+ // KQ_masked shape [n_past + N, N, n_head, n_batch]
927
+ struct ggml_tensor * KQ_masked = ggml_diag_mask_inf(ctx0, KQ_scaled, n_past);
928
+ assert_shape_4d(KQ_masked, n_past + N, N, n_head, n_batch);
929
+
930
+ // KQ = soft_max(KQ_masked)
931
+ // KQ_soft_max shape [n_past + N, N, n_head, n_batch]
932
+ struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctx0, KQ_masked);
933
+ assert_shape_4d(KQ_soft_max, n_past + N, N, n_head, n_batch);
934
+
935
+ // split cached V into n_head heads
936
+ // kv_self.v shape [n_ctx * n_embd * n_batch * n_layer]
937
+ // V shape [n_past + N, n_embd/n_head, n_head, n_batch] == kv_self.v[:(n_past+N),:,:,il]
938
+ struct ggml_tensor * V =
939
+ ggml_view_4d(ctx0, vc,
940
+ n_past + N, n_embd/n_head, n_head, n_batch,
941
+ ggml_element_size(vc)*n_ctx,
942
+ ggml_element_size(vc)*n_ctx*n_embd/n_head,
943
+ ggml_element_size(vc)*n_ctx*n_embd,
944
+ il*n_batch*n_ctx*n_embd*ggml_element_size(vc));
945
+ assert_shape_4d(V, n_past + N, n_embd/n_head, n_head, n_batch);
946
+
947
+ // KQV shape [n_embd/n_head, N, n_head, n_batch]
948
+ struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ_soft_max);
949
+ assert_shape_4d(KQV, n_embd/n_head, N, n_head, n_batch);
950
+
951
+ // KQV_merged = KQV.permute(0, 2, 1, 3)
952
+ // KQV_merged shape [n_embd/n_head, n_head, N, n_batch]
953
+ struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3);
954
+ assert_shape_4d(KQV_merged, n_embd/n_head, n_head, N, n_batch);
955
+ // KQV_merged shape
956
+
957
+ // cur = KQV_merged.contiguous().view(n_embd, N)
958
+ // cur shape [n_embd,N*n_batch,1,1]
959
+ cur = ggml_reshape_2d(ctx0, ggml_cont(ctx0, KQV_merged), n_embd, N*n_batch);
960
+ assert_shape_2d(cur, n_embd, N*n_batch);
961
+ // cur = ggml_cpy(ctx0,
962
+ // KQV_merged,
963
+ // ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N));
964
+
965
+ // projection (no bias)
966
+ // cur shape [n_embd,N*n_batch,1,1]
967
+ cur = ggml_mul_mat(ctx0,
968
+ model->layers[il].wo,
969
+ cur);
970
+ assert_shape_2d(cur, n_embd, N*n_batch);
971
+ }
972
+
973
+ // lctx.use_buf(ctx0, 1);
974
+
975
+ // inpFF shape [n_embd,N*n_batch,1,1]
976
+ struct ggml_tensor * inpFF = ggml_add(ctx0, cur, inpSA);
977
+ assert_shape_2d(inpFF, n_embd, N*n_batch);
978
+
979
+ // feed-forward network
980
+ {
981
+ // norm
982
+ {
983
+ // cur shape [n_embd,N*n_batch,1,1]
984
+ cur = ggml_rms_norm(ctx0, inpFF);
985
+ assert_shape_2d(cur, n_embd, N*n_batch);
986
+
987
+ // cur = ffn_norm*cur
988
+ // cur shape [n_embd,N*n_batch,1,1]
989
+ cur = ggml_mul(ctx0,
990
+ ggml_repeat(ctx0, model->layers[il].ffn_norm, cur),
991
+ cur);
992
+ assert_shape_2d(cur, n_embd, N*n_batch);
993
+ }
994
+
995
+ // tmp shape [n_ff,N*n_batch,1,1]
996
+ struct ggml_tensor * tmp = ggml_mul_mat(ctx0,
997
+ model->layers[il].w3,
998
+ cur);
999
+ assert_shape_2d(tmp, n_ff, N*n_batch);
1000
+
1001
+ // cur shape [n_ff,N*n_batch,1,1]
1002
+ cur = ggml_mul_mat(ctx0,
1003
+ model->layers[il].w1,
1004
+ cur);
1005
+ assert_shape_2d(cur, n_ff, N*n_batch);
1006
+
1007
+ // SILU activation
1008
+ // cur shape [n_ff,N*n_batch,1,1]
1009
+ cur = ggml_silu(ctx0, cur);
1010
+ assert_shape_2d(cur, n_ff, N*n_batch);
1011
+
1012
+ // cur shape [n_ff,N*n_batch,1,1]
1013
+ cur = ggml_mul(ctx0, cur, tmp);
1014
+ assert_shape_2d(cur, n_ff, N*n_batch);
1015
+
1016
+ // cur shape [n_embd,N*n_batch,1,1]
1017
+ cur = ggml_mul_mat(ctx0,
1018
+ model->layers[il].w2,
1019
+ cur);
1020
+ assert_shape_2d(cur, n_embd, N*n_batch);
1021
+ }
1022
+
1023
+ // cur shape [n_embd,N*n_batch,1,1]
1024
+ cur = ggml_add(ctx0, cur, inpFF);
1025
+ assert_shape_2d(cur, n_embd, N*n_batch);
1026
+
1027
+ // input for next layer
1028
+ // inpL shape [n_embd,N*n_batch,1,1]
1029
+ inpL = cur;
1030
+ assert_shape_2d(inpL, n_embd, N*n_batch);
1031
+ }
1032
+
1033
+ // norm
1034
+ {
1035
+
1036
+ // inpL shape [n_embd,N*n_batch,1,1]
1037
+ inpL = ggml_rms_norm(ctx0, inpL);
1038
+ assert_shape_2d(inpL, n_embd, N*n_batch);
1039
+
1040
+ // inpL = norm*inpL
1041
+ // inpL shape [n_embd,N*n_batch,1,1]
1042
+ inpL = ggml_mul(ctx0,
1043
+ ggml_repeat(ctx0, model->norm, inpL),
1044
+ inpL);
1045
+
1046
+ assert_shape_2d(inpL, n_embd, N*n_batch);
1047
+
1048
+ //embeddings = inpL;
1049
+ }
1050
+
1051
+ // lm_head
1052
+ // inpL shape [n_vocab,N*n_batch,1,1]
1053
+ inpL = ggml_mul_mat(ctx0, model->output, inpL);
1054
+ assert_shape_2d(inpL, n_vocab, N*n_batch);
1055
+
1056
+ {
1057
+ // inpL shape [n_vocab,N,n_batch,1]
1058
+ inpL = ggml_reshape_3d(ctx0,
1059
+ inpL,
1060
+ n_vocab, N, n_batch);
1061
+ assert_shape_3d(inpL, n_vocab, N, n_batch);
1062
+ }
1063
+
1064
+ // run the computation
1065
+ ggml_build_forward_expand(gf, inpL);
1066
+
1067
+ return inpL;
1068
+ }
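+ // Compared to forward() above, this batched variant keeps one kv-cache slice per batch
+ // row (the cache is addressed roughly as [n_embd, n_ctx, n_batch, n_layer]) and asserts
+ // every intermediate shape, which makes it a useful reference when checking the strides
+ // passed to the ggml_view/ggml_set calls.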
1069
+
1070
+
1071
+ struct ggml_tensor * forward_lora(
1072
+ struct llama_model_lora * model,
1073
+ struct llama_kv_cache * cache,
1074
+ struct ggml_context * ctx0,
1075
+ struct ggml_cgraph * gf,
1076
+ struct ggml_tensor * tokens_input,
1077
+ const int n_tokens,
1078
+ const int n_past) {
1079
+
1080
+ const int N = n_tokens;
1081
+
1082
+ struct llama_kv_cache& kv_self = *cache;
1083
+ const auto & hparams = model->hparams;
1084
+
1085
+ const int n_ctx = hparams.n_ctx;
1086
+ const int n_embd = hparams.n_embd;
1087
+ const int n_layer = hparams.n_layer;
1088
+ const int n_head = hparams.n_head;
1089
+ const int n_rot = hparams.n_rot;
1090
+
1091
+ struct ggml_tensor * tokens = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
1092
+ memcpy(tokens->data, tokens_input->data, N*ggml_element_size(tokens));
1093
+
1094
+ struct ggml_tensor * kc = kv_self.k;
1095
+ struct ggml_tensor * vc = kv_self.v;
1096
+
1097
+ // inpL shape [n_embd,N,1,1]
1098
+ struct ggml_tensor * inpL = ggml_get_rows(ctx0, model->tok_embeddings, tokens);
1099
+ for (int il = 0; il < n_layer; ++il) {
1100
+ struct ggml_tensor * inpSA = inpL;
1101
+
1102
+ struct ggml_tensor * cur;
1103
+
1104
+ // norm
1105
+ {
1106
+ // cur shape [n_embd,N,1,1]
1107
+ cur = ggml_rms_norm(ctx0, inpL);
1108
+
1109
+ // cur = attention_norm*cur
1110
+ cur = ggml_mul(ctx0,
1111
+ ggml_repeat(ctx0, model->layers[il].attention_norm, cur),
1112
+ cur);
1113
+ }
1114
+
1115
+ // self-attention
1116
+ {
1117
+ // compute Q and K and RoPE them
1118
+ // wq shape [n_embd, n_embd, 1, 1]
1119
+ // wk shape [n_embd, n_embd, 1, 1]
1120
+ // Qcur shape [n_embd/n_head, n_head, N, 1]
1121
+ // Kcur shape [n_embd/n_head, n_head, N, 1]
1122
+ struct ggml_tensor * Qcur = ggml_rope(ctx0,
1123
+ ggml_reshape_3d(ctx0,
1124
+ ggml_mul_mat(ctx0,
1125
+ model->layers[il].wqa,
1126
+ ggml_mul_mat(ctx0,
1127
+ model->layers[il].wqb,
1128
+ cur)),
1129
+ n_embd/n_head, n_head, N),
1130
+ n_past, n_rot, 0, 0);
1131
+ struct ggml_tensor * Kcur = ggml_rope(ctx0,
1132
+ ggml_reshape_3d(ctx0,
1133
+ ggml_mul_mat(ctx0,
1134
+ model->layers[il].wka,
1135
+ ggml_mul_mat(ctx0,
1136
+ model->layers[il].wkb,
1137
+ cur)),
1138
+ n_embd/n_head, n_head, N),
1139
+ n_past, n_rot, 0, 0);
1140
+
1141
+ // store key and value to memory
1142
+ {
1143
+ // compute the transposed [N, n_embd] V matrix
1144
+ // wv shape [n_embd, n_embd, 1, 1]
1145
+ // Vcur shape [n_embd, N, 1, 1]
1146
+ struct ggml_tensor * Vcur = ggml_cont(ctx0,
1147
+ ggml_transpose(ctx0,
1148
+ ggml_reshape_2d(ctx0,
1149
+ ggml_mul_mat(ctx0,
1150
+ model->layers[il].wva,
1151
+ ggml_mul_mat(ctx0,
1152
+ model->layers[il].wvb,
1153
+ cur)),
1154
+ n_embd, N)));
1155
+
1156
+ // kv_self.k shape [n_embd * n_ctx * n_layer, 1]
1157
+ // kv_self.v shape [n_embd * n_ctx * n_layer, 1]
1158
+ // k shape [n_embd * N, 1] == kv_self.k[:,n_past:n_past+N,il,0]
1159
+ // v shape [N, n_embd, 1, 1] == kv_self.v[:,n_past:n_past+N,il,0]
1160
+
1161
+ /* {
1162
+ struct ggml_tensor * k = ggml_view_1d(ctx0, kv_self.k, N*n_embd, (ggml_element_size(kv_self.k)*n_embd)*(il*n_ctx + n_past));
1163
+ struct ggml_tensor * v = ggml_view_2d(ctx0, kv_self.v, N, n_embd,
1164
+ ( n_ctx)*ggml_element_size(kv_self.v),
1165
+ (il*n_ctx)*ggml_element_size(kv_self.v)*n_embd + n_past*ggml_element_size(kv_self.v));
1166
+
1167
+ // important: storing RoPE-ed version of K in the KV cache!
1168
+ ggml_build_forward_expand(gf, ggml_cpy(ctx0, Kcur, k));
1169
+ ggml_build_forward_expand(gf, ggml_cpy(ctx0, Vcur, v));
1170
+ } //*/
1171
+
1172
+ kc = ggml_set_1d(ctx0, kc, ggml_reshape_1d(ctx0, Kcur, n_embd*N), (ggml_element_size(kv_self.k)*n_embd)*(il*n_ctx + n_past));
1173
+ vc = ggml_set_2d(ctx0, vc, Vcur, ( n_ctx)*ggml_element_size(kv_self.v),
1174
+ (il*n_ctx)*ggml_element_size(kv_self.v)*n_embd + n_past*ggml_element_size(kv_self.v));
1175
+ }
1176
+
1177
+ // Qcur shape [n_embd/n_head, n_head, N, 1]
1178
+ // Q shape [n_embd/n_head, N, n_head, 1]
1179
+ struct ggml_tensor * Q =
1180
+ ggml_permute(ctx0,
1181
+ Qcur,
1182
+ 0, 2, 1, 3);
1183
+
1184
+ // kv_self.k shape [n_embd * n_ctx * n_layer, 1]
1185
+ // K shape [n_embd/n_head, n_past + N, n_head, 1]
1186
+ struct ggml_tensor * K =
1187
+ ggml_permute(ctx0,
1188
+ ggml_reshape_3d(ctx0,
1189
+ ggml_view_1d(ctx0, kc, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(kc)*n_embd),
1190
+ n_embd/n_head, n_head, n_past + N),
1191
+ 0, 2, 1, 3);
1192
+
1193
+ // K * Q
1194
+ // KQ shape [n_past + N, N, n_head, 1]
1195
+ struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);
1196
+
1197
+ // KQ_scaled = KQ / sqrt(n_embd/n_head)
1198
+ // KQ_scaled shape [n_past + N, N, n_head, 1]
1199
+ struct ggml_tensor * KQ_scaled =
1200
+ ggml_scale(ctx0,
1201
+ KQ,
1202
+ ggml_new_f32(ctx0, 1.0f/sqrtf(float(n_embd)/n_head)));
1203
+
1204
+ // KQ_masked = mask_past(KQ_scaled)
1205
+ // KQ_masked shape [n_past + N, N, n_head, 1]
1206
+ struct ggml_tensor * KQ_masked = ggml_diag_mask_inf(ctx0, KQ_scaled, n_past);
1207
+
1208
+ // KQ = soft_max(KQ_masked)
1209
+ // KQ_soft_max shape [n_past + N, N, n_head, 1]
1210
+ struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctx0, KQ_masked);
1211
+
1212
+ // split cached V into n_head heads
1213
+ //// V shape [n_past + N, n_embd/n_head, n_head, 1]
1214
+ // V shape [n_past + N, n_embd/n_head, n_head, 1] == kv_self.v[:,:(n_past+N),il,1]
1215
+ struct ggml_tensor * V =
1216
+ ggml_view_3d(ctx0, vc,
1217
+ n_past + N, n_embd/n_head, n_head,
1218
+ n_ctx*ggml_element_size(vc),
1219
+ n_ctx*ggml_element_size(vc)*n_embd/n_head,
1220
+ il*n_ctx*ggml_element_size(vc)*n_embd);
1221
+
1222
+ // KQV shape [n_embd/n_head, N, n_head, 1]
1223
+ struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ_soft_max);
1224
+
1225
+ // KQV_merged = KQV.permute(0, 2, 1, 3)
1226
+ // KQV_merged shape [n_embd/n_head, n_head, N, 1]
1227
+ struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3);
1228
+ // KQV_merged shape
1229
+
1230
+ // cur = KQV_merged.contiguous().view(n_embd, N)
1231
+ // cur shape [n_embd,N,1,1]
1232
+ cur = ggml_reshape_2d(ctx0, ggml_cont(ctx0, KQV_merged), n_embd, N);
1233
+ // cur = ggml_cpy(ctx0,
1234
+ // KQV_merged,
1235
+ // ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N));
1236
+
1237
+ // projection (no bias)
1238
+ // cur shape [n_embd,N,1,1]
1239
+ cur = ggml_mul_mat(ctx0,
1240
+ model->layers[il].woa,
1241
+ ggml_mul_mat(ctx0,
1242
+ model->layers[il].wob,
1243
+ cur));
1244
+ }
1245
+
1246
+ // inpFF shape [n_embd,N,1,1]
1247
+ struct ggml_tensor * inpFF = ggml_add(ctx0, cur, inpSA);
1248
+
1249
+ // feed-forward network
1250
+ {
1251
+ // norm
1252
+ {
1253
+ // cur shape [n_embd,N,1,1]
1254
+ cur = ggml_rms_norm(ctx0, inpFF);
1255
+
1256
+ // cur = ffn_norm*cur
1257
+ // cur shape [n_embd,N,1,1]
1258
+ cur = ggml_mul(ctx0,
1259
+ ggml_repeat(ctx0, model->layers[il].ffn_norm, cur),
1260
+ cur);
1261
+ }
1262
+
1263
+ // tmp shape [n_ff,N,1,1]
1264
+ struct ggml_tensor * tmp = ggml_mul_mat(ctx0,
1265
+ model->layers[il].w3,
1266
+ cur);
1267
+
1268
+ // cur shape [n_ff,N,1,1]
1269
+ cur = ggml_mul_mat(ctx0,
1270
+ model->layers[il].w1,
1271
+ cur);
1272
+
1273
+ // SILU activation
1274
+ // cur shape [n_ff,N,1,1]
1275
+ cur = ggml_silu(ctx0, cur);
1276
+
1277
+ // cur shape [n_ff,N,1,1]
1278
+ cur = ggml_mul(ctx0, cur, tmp);
1279
+
1280
+ // cur shape [n_embd,N,1,1]
1281
+ cur = ggml_mul_mat(ctx0,
1282
+ model->layers[il].w2,
1283
+ cur);
1284
+ }
1285
+
1286
+ // cur shape [n_embd,N,1,1]
1287
+ cur = ggml_add(ctx0, cur, inpFF);
1288
+
1289
+ // input for next layer
1290
+ // inpL shape [n_embd,N,1,1]
1291
+ inpL = cur;
1292
+ }
1293
+
1294
+ // norm
1295
+ {
1296
+
1297
+ // inpL shape [n_embd,N,1,1]
1298
+ inpL = ggml_rms_norm(ctx0, inpL);
1299
+
1300
+ // inpL = norm*inpL
1301
+ // inpL shape [n_embd,N,1,1]
1302
+ inpL = ggml_mul(ctx0,
1303
+ ggml_repeat(ctx0, model->norm, inpL),
1304
+ inpL);
1305
+
1306
+ //embeddings = inpL;
1307
+ }
1308
+
1309
+
1310
+ // lm_head
1311
+ // inpL shape [n_vocab,N,1,1]
1312
+ inpL = ggml_mul_mat(ctx0,
1313
+ model->outputa,
1314
+ ggml_mul_mat(ctx0,
1315
+ model->outputb,
1316
+ inpL));
1317
+
1318
+ // ggml_set_scratch(ctx0, { 0, 0, nullptr, });
1319
+ // run the computation
1320
+ ggml_build_forward_expand(gf, inpL);
1321
+
1322
+ return inpL;
1323
+ }
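+ // In this LoRA-style variant every square attention/output weight is factored into two
+ // low-rank matrices (e.g. wqa of size [n_lora, n_embd] applied after wqb of size
+ // [n_embd, n_lora]), so the effective weight has rank at most n_lora and stores roughly
+ // 2*n_embd*n_lora parameters instead of n_embd*n_embd.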
1324
+
1325
+ void sample_softmax(struct ggml_tensor * logits, struct ggml_tensor * probs, struct ggml_tensor * best_samples) {
1326
+ assert(logits->n_dims == 2);
1327
+ assert(probs->n_dims == 2);
1328
+ assert(best_samples->n_dims == 1);
1329
+ assert(logits->ne[1] == best_samples->ne[0]);
1330
+ assert(logits->ne[0] == probs->ne[0]);
1331
+ assert(logits->ne[1] == probs->ne[1]);
1332
+ for (int i = 0; i < logits->ne[1]; ++i) {
1333
+ float max_logit = ggml_get_f32_1d(logits, i * logits->ne[0]);
1334
+ ggml_set_i32_1d(best_samples, i, 0);
1335
+ for (int k = 0; k < logits->ne[0]; ++k) {
1336
+ float logit = ggml_get_f32_1d(logits, i * logits->ne[0] + k);
1337
+ if (logit > max_logit) {
1338
+ max_logit = logit;
1339
+ ggml_set_i32_1d(best_samples, i, k);
1340
+ }
1341
+ }
1342
+ float psum = 0;
1343
+ for (int k = 0; k < logits->ne[0]; ++k) {
1344
+ float logit = ggml_get_f32_1d(logits, i * logits->ne[0] + k);
1345
+ float p = (logit == -INFINITY) ? 0 : expf(logit - max_logit);
1346
+ psum += p;
1347
+ ggml_set_f32_1d(probs, i * probs->ne[0] + k, p);
1348
+ }
1349
+ for (int k = 0; k < logits->ne[0]; ++k) {
1350
+ float p = ggml_get_f32_1d(probs, i*probs->ne[0] + k);
1351
+ ggml_set_f32_1d(probs, i * probs->ne[0] + k, p / psum);
1352
+ }
1353
+ }
1354
+ }
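+ // Small numeric illustration of the routine above (hypothetical logits): for a row of
+ // logits [2.0, 0.5, 1.0] the argmax stored in best_samples is 0, and the normalized
+ // probabilities written to probs are roughly [0.63, 0.14, 0.23]
+ // (exp(0), exp(-1.5), exp(-1.0) divided by their sum, after subtracting the max logit).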
1355
+
1356
+ void sample_softmax_batch(struct ggml_context * ctx, struct ggml_tensor * logits, struct ggml_tensor * probs, struct ggml_tensor * best_samples) {
1357
+ GGML_ASSERT(best_samples->n_dims == 2);
1358
+ GGML_ASSERT(logits->n_dims == 3);
1359
+ GGML_ASSERT(probs->n_dims == 3);
1360
+ int n_tokens = best_samples->ne[0];
1361
+ int n_batch = best_samples->ne[1];
1362
+ int n_vocab = logits->ne[0];
1363
+ GGML_ASSERT(n_tokens == logits->ne[1]);
1364
+ GGML_ASSERT(n_batch == logits->ne[2]);
1365
+ GGML_ASSERT(n_vocab == probs->ne[0]);
1366
+ GGML_ASSERT(n_tokens == probs->ne[1]);
1367
+ GGML_ASSERT(n_batch == probs->ne[2]);
1368
+
1369
+ for (int k = 0; k < n_batch; ++k) {
1370
+ struct ggml_tensor * best_samples_k = ggml_view_1d(ctx,
1371
+ best_samples,
1372
+ best_samples->ne[0],
1373
+ k*best_samples->nb[1]);
1374
+ struct ggml_tensor * logits_k = ggml_view_2d(ctx,
1375
+ logits,
1376
+ logits->ne[0],
1377
+ logits->ne[1],
1378
+ logits->nb[1],
1379
+ k*logits->nb[2]);
1380
+ struct ggml_tensor * probs_k = ggml_view_2d(ctx,
1381
+ probs,
1382
+ probs->ne[0],
1383
+ probs->ne[1],
1384
+ probs->nb[1],
1385
+ k*probs->nb[2]);
1386
+ sample_softmax(logits_k, probs_k, best_samples_k);
1387
+ }
1388
+ }
1389
+
1390
+ void print_row(struct ggml_tensor * probs, int i) {
1391
+ for (int k = 0; k < probs->ne[0]; ++k) {
1392
+ float p = ggml_get_f32_1d(probs, i*probs->ne[0] + k);
1393
+ printf(" %.2f", p);
1394
+ }
1395
+ printf("\n");
1396
+ }
1397
+
1398
+ void print_matrix(struct ggml_tensor * probs) {
1399
+ assert(probs->n_dims == 2);
1400
+ for (int i = 0; i < probs->ne[1]; ++i) {
1401
+ for (int k = 0; k < probs->ne[0]; ++k) {
1402
+ float p = ggml_get_f32_1d(probs, i*probs->ne[0] + k);
1403
+ printf(" %.2f", p);
1404
+ }
1405
+ printf("\n");
1406
+ }
1407
+ }
1408
+
1409
+ void print_token(int token, int n_vocab) {
1410
+ for (int k = 0; k < token; ++k) {
1411
+ printf(" ");
1412
+ }
1413
+ printf("X");
1414
+ for (int k = token+1; k < n_vocab; ++k) {
1415
+ printf(" ");
1416
+ }
1417
+ printf("\n");
1418
+ }
1419
+
1420
+ void print_tokens(struct ggml_tensor * tokens, int n_vocab) {
1421
+ for (int i=0; i<tokens->ne[0]; ++i) {
1422
+ int token = ggml_get_i32_1d(tokens, i);
1423
+ print_token(token, n_vocab);
1424
+ }
1425
+ }
1426
+
1427
+ void get_example_targets(int example_id, struct ggml_tensor * tokens_input, struct ggml_tensor * targets) {
1428
+ int n_tokens = tokens_input->ne[0];
1429
+ int n_vocab = targets->ne[0];
1430
+ float randomness = 0.0f;
1431
+ // ggml_set_zero(targets);
1432
+ ggml_set_f32(targets, -1.0f);
1433
+ ggml_set_i32_1d(tokens_input, 0, 0);
1434
+ for (int i=1; i<n_tokens+1; ++i) {
1435
+ float x = example_id + i * 3.14159f * 2.0f * 1.0f * 0.5f / n_tokens;
1436
+ float y = sinf(x);//*cosf(x*1.1f+1.0f);
1437
+ float z = (y+1.0f)*0.5f; // scale to [0..1]
1438
+ z += (frand()-0.5f)*(randomness/n_vocab);
1439
+ z = (z < 0.0f) ? 0.0f : (z > 1.0f) ? 1.0f : z; // clamp to [0..1]
1440
+ int token = std::max(1,std::min(1+(int)(z*(float)(n_vocab-1)), n_vocab-1));
1441
+ ggml_set_f32_1d(targets, (i-1)*n_vocab + token, +1.0f);
1442
+ if (i<n_tokens) {
1443
+ ggml_set_i32_1d(tokens_input, i, token);
1444
+ }
1445
+ }
1446
+ }
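+ // The synthetic task built above: token i follows a sine wave sampled at
+ // x = example_id + i*pi/n_tokens, rescaled from [-1,1] to [0,1] and quantized into the
+ // vocabulary range [1, n_vocab-1]; targets holds a +1/-1 "one-hot" row for the token
+ // expected at each position, so the model only has to learn to continue the wave.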
1447
+
1448
+ void get_example_targets_batch(struct ggml_context * ctx, int example_id, struct ggml_tensor * tokens_input, struct ggml_tensor * targets) {
1449
+ GGML_ASSERT(tokens_input->n_dims == 2);
1450
+ GGML_ASSERT( targets->n_dims == 3);
1451
+ int n_tokens = tokens_input->ne[0];
1452
+ int n_batch = tokens_input->ne[1];
1453
+ GGML_ASSERT(n_tokens == targets->ne[1]);
1454
+ GGML_ASSERT(n_batch == targets->ne[2]);
1455
+
1456
+ for (int k=0; k<n_batch; ++k) {
1457
+ struct ggml_tensor * tokens_input_k = ggml_view_1d(ctx,
1458
+ tokens_input,
1459
+ tokens_input->ne[0],
1460
+ k*tokens_input->nb[1]);
1461
+ struct ggml_tensor * targets_k = ggml_view_2d(ctx,
1462
+ targets,
1463
+ targets->ne[0],
1464
+ targets->ne[1],
1465
+ targets->nb[1],
1466
+ k*targets->nb[2]);
1467
+ get_example_targets(example_id*n_batch + k, tokens_input_k, targets_k);
1468
+ }
1469
+ }
1470
+
1471
+ void lshift_examples(struct ggml_tensor * tokens_input, struct ggml_tensor * targets, int n_shift) {
1472
+ int n_tokens = tokens_input->ne[0];
1473
+ int n_vocab = targets->ne[0];
1474
+ for (int i=0; i<n_tokens-n_shift; ++i) {
1475
+ ggml_set_i32_1d(tokens_input, i, ggml_get_i32_1d(tokens_input, i + n_shift));
1476
+ for (int k=0; k<n_vocab; ++k) {
1477
+ ggml_set_f32_1d(targets, i*n_vocab + k, ggml_get_f32_1d(targets, (i + n_shift)*n_vocab + k));
1478
+ }
1479
+ }
1480
+ }
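+ // During generation in main() below, this shifts the whole window left by one position
+ // each step, and the freshly sampled token is then written back at index sample_ctx-1,
+ // giving a simple sliding context without rebuilding the example from scratch.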
1481
+
1482
+ struct ggml_tensor * square_error_loss(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b) {
1483
+ // todo: instead of a-b: a[1:]-b[:-1]
1484
+ return ggml_sum(ctx, ggml_sqr(ctx, ggml_sub(ctx, a, b)));
1485
+ }
1486
+
1487
+ struct ggml_tensor * cross_entropy_loss(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b) {
1488
+ const float eps = 1e-3f;
1489
+ return
1490
+ ggml_sum(ctx,
1491
+ ggml_neg(ctx,
1492
+ ggml_sum_rows(ctx,
1493
+ ggml_mul(ctx,
1494
+ ggml_soft_max(ctx, a),
1495
+ ggml_log(ctx,
1496
+ ggml_add1(ctx,
1497
+ ggml_soft_max(ctx, b),
1498
+ ggml_new_f32(ctx, eps)))))));
1499
+ }
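+ // The (currently unused) cross entropy above evaluates, in math form,
+ // loss = -sum_i sum_v softmax(a)[i,v] * log(softmax(b)[i,v] + eps),
+ // i.e. a soft cross entropy between the two distributions with eps guarding log(0);
+ // main() below uses square_error_loss instead.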
1500
+
1501
+ int main(int argc, char ** argv) {
1502
+ if (argc < 1) {
1503
+ fprintf(stderr, "usage: %s\n", argv[0]);
1504
+
1505
+ return 1;
1506
+ }
1507
+
1508
+ struct ggml_init_params lcparams;
1509
+ lcparams.mem_size = 1024ll*1024ll*1024ll;
1510
+ lcparams.mem_buffer = NULL;
1511
+ lcparams.no_alloc = false;
1512
+
1513
+ struct llama_model model;
1514
+ model.hparams.n_vocab = 8;
1515
+ model.hparams.n_ctx = 8;
1516
+ model.hparams.n_embd = 32;
1517
+ model.hparams.n_mult = 2;
1518
+ model.hparams.n_head = 8;
1519
+ model.hparams.n_layer = 1;
1520
+ model.hparams.n_rot = std::min(16u, model.hparams.n_embd / model.hparams.n_head);
1521
+
1522
+ // model.hparams.n_embd = 32;
1523
+ // model.hparams.n_mult = 2;
1524
+ // model.hparams.n_head = 4;
1525
+ // model.hparams.n_layer = 8;
1526
+ // model.hparams.n_rot = 8;
1527
+
1528
+ model.ctx = ggml_init(lcparams);
1529
+ printf("init model\n");
1530
+ init_model(&model);
1531
+ set_param_model(&model);
1532
+
1533
+ randomize_model(&model, 1337, 0.0f, 1.0f, -1.0f, +1.0f);
1534
+
1535
+ /*
1536
+ struct llama_model_lora model_lora;
1537
+ // model.hparams.n_vocab = 6;
1538
+ // model.hparams.n_ctx = 64;
1539
+ // model.hparams.n_embd = 128;
1540
+ // model.hparams.n_mult = 2;
1541
+ // model.hparams.n_head = 8;
1542
+ // model.hparams.n_layer = 6;
1543
+ // model.hparams.n_rot = model.hparams.n_embd / model.hparams.n_head;
1544
+
1545
+ model_lora.hparams.n_vocab = 16;
1546
+ model_lora.hparams.n_ctx = 32;
1547
+ model_lora.hparams.n_embd = 256;
1548
+ model_lora.hparams.n_mult = 2;
1549
+ model_lora.hparams.n_head = 16;
1550
+ model_lora.hparams.n_layer = 1;
1551
+ model_lora.hparams.n_lora = 64;
1552
+ model_lora.hparams.n_rot = MIN(16, model_lora.hparams.n_embd / model_lora.hparams.n_head);
1553
+ // model.hparams.n_rot = (model.hparams.n_embd / model.hparams.n_head) / 2;
1554
+
1555
+ // model.hparams.n_embd = 32;
1556
+ // model.hparams.n_mult = 2;
1557
+ // model.hparams.n_head = 4;
1558
+ // model.hparams.n_layer = 8;
1559
+ // model.hparams.n_rot = 8;
1560
+
1561
+ model_lora.ctx = ggml_init(lcparams);
1562
+ printf("init model_lora\n");
1563
+ init_model_lora(&model_lora);
1564
+ set_param_model_lora(&model_lora);
1565
+
1566
+ randomize_model_lora(&model_lora, 1337, 0.0f, 1.0f, -1.0f, +1.0f);
1567
+ */
1568
+ int n_batch = 8;
1569
+ // key + value cache for the self attention
1570
+ struct llama_kv_cache kv_self;
1571
+ printf("init_kv_cache\n");
1572
+ kv_self.ctx = model.ctx;
1573
+ init_kv_cache(&kv_self, &model, n_batch);
1574
+ //init_kv_cache_lora(&kv_self, &model_lora);
1575
+
1576
+ size_t compute_size = 1024ll*1024ll*1024ll;
1577
+ uint8_t * compute_addr = new uint8_t[compute_size];
1578
+
1579
+ int n_examples = 256;
1580
+ int n_tokens = model.hparams.n_ctx;
1581
+ int n_vocab = model.hparams.n_vocab;
1582
+
1583
+ std::vector<uint8_t> work_buffer;
1584
+
1585
+ for (int ex=0; ex<n_examples; ++ex) {
1586
+ struct ggml_init_params params = {
1587
+ /*.mem_size =*/ compute_size,
1588
+ /*.mem_buffer =*/ compute_addr,
1589
+ /*.no_alloc =*/ false,
1590
+ };
1591
+
1592
+ struct ggml_context * ctx0 = ggml_init(params);
1593
+
1594
+ struct ggml_tensor * after_opt_best_samples = ggml_new_tensor_2d(ctx0, GGML_TYPE_I32, n_tokens, n_batch);
1595
+ struct ggml_tensor * after_opt_probs = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_vocab, n_tokens, n_batch);
1596
+ struct ggml_tensor * tokens_input = ggml_new_tensor_2d(ctx0, GGML_TYPE_I32, n_tokens, n_batch);
1597
+ struct ggml_tensor * targets = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_vocab, n_tokens, n_batch);
1598
+
1599
+ int n_past = 0;
1600
+
1601
+ ggml_cgraph gf = {};
1602
+
1603
+ get_example_targets_batch(ctx0, 64*ex+0, tokens_input, targets);
1604
+
1605
+ struct ggml_tensor * logits = forward_batch(&model, &kv_self, ctx0, &gf, tokens_input, n_tokens, n_past, n_batch);
1606
+ // struct ggml_tensor * e = cross_entropy_loss(ctx0, targets, logits);
1607
+ struct ggml_tensor * e = square_error_loss(ctx0, targets, logits);
1608
+
1609
+ ggml_build_forward_expand(&gf, e);
1610
+ ggml_graph_compute_helper(work_buffer, &gf, /*n_threads*/ 1);
1611
+
1612
+ float error_before_opt = ggml_get_f32_1d(e, 0);
1613
+
1614
+ struct ggml_opt_params opt_params_adam = ggml_opt_default_params(GGML_OPT_ADAM);
1615
+ struct ggml_opt_params opt_params_lbfgs = ggml_opt_default_params(GGML_OPT_LBFGS);
1616
+ opt_params_adam.print_forward_graph = false;
1617
+ opt_params_adam.print_backward_graph = false;
1618
+ opt_params_lbfgs.print_forward_graph = false;
1619
+ opt_params_lbfgs.print_backward_graph = false;
1620
+ opt_params_adam.adam.n_iter = 16;
1621
+ opt_params_lbfgs.lbfgs.n_iter = 16;
1622
+ // ggml_opt(ctx0, opt_params_adam, e);
1623
+ ggml_opt(ctx0, opt_params_lbfgs, e);
1624
+ //
1625
+ ggml_build_forward_expand(&gf, e);
1626
+ ggml_graph_compute_helper(work_buffer, &gf, /*n_threads*/ 1);
1627
+
1628
+ float error_after_opt = ggml_get_f32_1d(e, 0);
1629
+
1630
+ if (ex % 8 == 0) {
1631
+ printf("Example %d\n", (ex+1));
1632
+ printf("error_before_opt: %.2f\n", error_before_opt);
1633
+ printf("error_after_opt: %.2f\n", error_after_opt);
1634
+ }
1635
+
1636
+ if (ex % 64 == 0) {
1637
+ sample_softmax_batch(ctx0, logits, after_opt_probs, after_opt_best_samples);
1638
+ // printf("probabilities after optimization:\n");
1639
+ // print_matrix(after_opt_probs);
1640
+ printf("best samples after optimization:\n");
1641
+ print_tokens(after_opt_best_samples, n_vocab);
1642
+ }
1643
+
1644
+ ggml_free(ctx0);
1645
+ }
1646
+
1647
+ {
1648
+ int n_gen = 128;
1649
+ int sample_ctx = n_tokens-n_tokens/8;
1650
+
1651
+ printf("Generating %d tokens.\n", n_gen);
1652
+
1653
+ struct ggml_tensor * tokens_input = ggml_new_tensor_1d(model.ctx, GGML_TYPE_I32, n_tokens);
1654
+ struct ggml_tensor * targets = ggml_new_tensor_2d(model.ctx, GGML_TYPE_F32, n_vocab, n_tokens);
1655
+
1656
+ get_example_targets(137, tokens_input, targets);
1657
+ for (int i=sample_ctx; i<n_tokens; ++i) {
1658
+ ggml_set_i32_1d(tokens_input, i, n_vocab/2);
1659
+ }
1660
+
1661
+ for (int i=0; i<sample_ctx-1; ++i) {
1662
+ print_token(ggml_get_i32_1d(tokens_input, i), n_vocab);
1663
+ }
1664
+ printf("---\n");
1665
+ for (int i=0; i<n_gen; ++i) {
1666
+ struct ggml_init_params params = {
1667
+ /*.mem_size =*/ compute_size,
1668
+ /*.mem_buffer =*/ compute_addr,
1669
+ /*.no_alloc =*/ false,
1670
+ };
1671
+ struct ggml_context * ctx0 = ggml_init(params);
1672
+
1673
+ ggml_cgraph gf = {};
1674
+
1675
+ int n_past = 0;
1676
+ struct ggml_tensor * logits = forward(&model, &kv_self, ctx0, &gf, tokens_input, sample_ctx, n_past);
1677
+
1678
+ ggml_build_forward_expand(&gf, logits);
1679
+ ggml_graph_compute_helper(work_buffer, &gf, /*n_threads*/ 1);
1680
+
1681
+ struct ggml_tensor * best_samples = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, sample_ctx);
1682
+ struct ggml_tensor * probs = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_vocab, sample_ctx);
1683
+
1684
+ sample_softmax(logits, probs, best_samples);
1685
+
1686
+ // int sample_at = n_tokens-1;
1687
+ int token = ggml_get_i32_1d(best_samples, sample_ctx-1);
1688
+
1689
+ // print_row(probs, sample_at);
1690
+ print_token(token, n_vocab);
1691
+
1692
+ lshift_examples(tokens_input, targets, 1);
1693
+ ggml_set_i32_1d(tokens_input, 0, 0);
1694
+ ggml_set_i32_1d(tokens_input, sample_ctx-1, token);
1695
+
1696
+ ggml_free(ctx0);
1697
+ }
1698
+ }
1699
+
1700
+ print_matrix(model.tok_embeddings);
1701
+ printf("done\n");
1702
+
1703
+ // ggml_free(kv_self.ctx);
1704
+ // ggml_free(model_lora.ctx);
1705
+ ggml_free(model.ctx);
1706
+
1707
+ return 0;
1708
+ }
examples/benchmark/CMakeLists.txt ADDED
@@ -0,0 +1,7 @@
1
+ set(TARGET benchmark)
2
+ add_executable(${TARGET} benchmark-matmult.cpp)
3
+ target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
4
+ target_compile_features(${TARGET} PRIVATE cxx_std_11)
5
+ if(TARGET BUILD_INFO)
6
+ add_dependencies(${TARGET} BUILD_INFO)
7
+ endif()
examples/benchmark/benchmark-matmult.cpp ADDED
@@ -0,0 +1,272 @@
1
+ #include "ggml.h"
2
+ #include "build-info.h"
3
+
4
+ #include <locale.h>
5
+ #include <assert.h>
6
+ #include <math.h>
7
+ #include <cstring>
8
+ #include <cstdio>
9
+ #include <cinttypes>
10
+ #include <unordered_map>
11
+ #include <queue>
12
+ #include <string.h>
13
+ #include <cassert>
14
+ #include <fstream>
15
+ #include <string>
16
+ #include <iterator>
17
+ #include <algorithm>
18
+
19
+ #if defined(_MSC_VER)
20
+ #pragma warning(disable: 4244 4267) // possible loss of data
21
+ #endif
22
+
23
+ void ggml_graph_compute_helper(std::vector<uint8_t> & buf, ggml_cgraph * graph, int n_threads) {
24
+ struct ggml_cplan plan = ggml_graph_plan(graph, n_threads);
25
+
26
+ if (plan.work_size > 0) {
27
+ buf.resize(plan.work_size);
28
+ plan.work_data = buf.data();
29
+ }
30
+
31
+ ggml_graph_compute(graph, &plan);
32
+ }
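+ // Usage note: ggml_graph_plan() only computes how large a scratch buffer the graph
+ // needs for the requested thread count; the caller owns that buffer, so the helper
+ // above resizes the std::vector and points plan.work_data at it before handing the
+ // plan to ggml_graph_compute().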
33
+
34
+ float tensor_sum_elements(const ggml_tensor * tensor) {
35
+ float sum = 0;
36
+ if (tensor->type==GGML_TYPE_F32) {
37
+ for (int j = 0; j < tensor->ne[1]; j++) {
38
+ for (int k = 0; k < tensor->ne[0]; k++) {
39
+ sum += ((float *) tensor->data)[j*tensor->ne[0]+k];
40
+ }
41
+ }
42
+ }
43
+ return sum;
44
+ }
45
+
46
+ void tensor_dump(const ggml_tensor * tensor, const char * name) {
47
+ printf("%15s: type = %i (%5s) ne = %5" PRIi64 " x %5" PRIi64 " x %5" PRIi64 ", nb = (%5zi, %5zi, %5zi) - ", name,
48
+ tensor->type, ggml_type_name(tensor->type),
49
+ tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->nb[0], tensor->nb[1], tensor->nb[2]);
50
+ float sum = tensor_sum_elements(tensor);
51
+ printf("Sum of tensor %s is %6.2f\n", name, sum);
52
+ }
53
+
54
+ #define TENSOR_DUMP(tensor) tensor_dump(tensor, #tensor)
55
+
56
+ struct benchmark_params_struct {
57
+ int32_t n_threads = 1;
58
+ int32_t n_iterations = 10;
59
+ };
60
+
61
+ void print_usage(int /*argc*/, char ** argv, struct benchmark_params_struct params) {
62
+ fprintf(stderr, "usage: %s [options]\n", argv[0]);
63
+ fprintf(stderr, "\n");
64
+ fprintf(stderr, "options:\n");
65
+ fprintf(stderr, " -h, --help show this help message and exit\n");
66
+ fprintf(stderr, " -t N, --threads N number of threads to use during computation (default: %d)\n", params.n_threads);
67
+ fprintf(stderr, " -i N, --iter N number of iterations to use during computation (default: %d)\n", params.n_iterations);
68
+ fprintf(stderr, "\n");
69
+ }
70
+
71
+ int main(int argc, char ** argv) {
72
+ struct benchmark_params_struct benchmark_params;
73
+
74
+ bool invalid_param = false;
75
+ std::string arg;
76
+ for (int i = 1; i < argc; i++) {
77
+ arg = argv[i];
78
+
79
+ if (arg == "-t" || arg == "--threads") {
80
+ if (++i >= argc) {
81
+ invalid_param = true;
82
+ break;
83
+ }
84
+ benchmark_params.n_threads = std::stoi(argv[i]);
85
+ } else if (arg == "-i" || arg == "--iter") {
86
+ if (++i >= argc) {
87
+ invalid_param = true;
88
+ break;
89
+ }
90
+ benchmark_params.n_iterations = std::stoi(argv[i]);
91
+ } else if (arg == "-h" || arg == "--help") {
92
+ print_usage(argc, argv, benchmark_params);
93
+ exit(0);
94
+ }
95
+ }
96
+ if (invalid_param) {
97
+ fprintf(stderr, "error: invalid parameter for argument: %s\n", arg.c_str());
98
+ print_usage(argc, argv, benchmark_params);
99
+ exit(1);
100
+ }
101
+
102
+ fprintf(stderr, "%s: build = %d (%s)\n", __func__, BUILD_NUMBER, BUILD_COMMIT);
103
+ printf("Starting Test\n");
104
+
105
+ // create the ggml context
106
+ struct ggml_context * ctx;
107
+ //const int sizex = 4096;
108
+ //const int sizey = 11008;
109
+
110
+ #undef VERBOSE_DEBUGGING
111
+ #ifndef VERBOSE_DEBUGGING
112
+ const int sizey = 4096;
113
+ const int sizex = 11008;
114
+ const int sizez = 128;
115
+ #else
116
+ /* Working - let's increase size */
117
+ const int sizey = 1;
118
+ const int sizex = (8*32);
119
+ const int sizez = 1;
120
+
121
+ /*const int sizey = 1;
122
+ const int sizex = 3*(8*32);
123
+ const int sizez = 1;*/
124
+ #endif
125
+
126
+ //printf("Memsize required = %i\n", sizex*sizex);
127
+
128
+ size_t ctx_size = 0;
129
+ ctx_size += sizex*sizey*ggml_type_sizef(GGML_TYPE_F32);
130
+ ctx_size += sizex*sizey*ggml_type_sizef(GGML_TYPE_F32);
131
+ ctx_size += sizex*sizez*ggml_type_sizef(GGML_TYPE_F32);
132
+ ctx_size += sizex*sizey*ggml_type_sizef(GGML_TYPE_Q4_0);
133
+ ctx_size += sizex*sizey*ggml_type_sizef(GGML_TYPE_Q4_0);
134
+ ctx_size += sizex*sizey*ggml_type_sizef(GGML_TYPE_F32); // BLAS
135
+ ctx_size += sizex*sizey*ggml_type_sizef(GGML_TYPE_F32); // BLAS
136
+ ctx_size += 1024*1024*16;
137
+
138
+ printf("Allocating Memory of size %zi bytes, %zi MB\n",ctx_size, (ctx_size/1024/1024));
139
+
140
+ struct ggml_init_params params = {
141
+ /*.mem_size =*/ ctx_size,
142
+ /*.mem_buffer =*/ NULL,
143
+ /*.no_alloc =*/ 0
144
+ };
145
+
146
+ ctx = ggml_init(params);
147
+ if (!ctx) {
148
+ fprintf(stderr, "%s: ggml_init() failed\n", __func__);
149
+ return 1;
150
+ }
151
+
152
+
153
+ printf("Creating new tensors\n");
154
+ // printf("Creating new tensor m1\n");
155
+ struct ggml_tensor * m11 = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, sizex, sizey);
156
+ ggml_set_f32(m11, 1.0f);
157
+
158
+ // printf("Creating new tensor m1\n");
159
+ struct ggml_tensor * m12 = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, sizex, sizey);
160
+ ggml_set_f32(m12, 1.5f);
161
+
162
+ // printf("Creating new tensor m2\n");
163
+ struct ggml_tensor * m2 = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, sizex, sizez);
164
+ ggml_set_f32(m2, 2.0f);
165
+
166
+ printf("\n------ Test 1 - Matrix Mult via F32 code ------------------------------------------------------------------------------\n");
167
+ // printf("Creating new tensor m11xm2\n");
168
+ struct ggml_tensor * m11xm2 = ggml_mul_mat(ctx, m11, m2);
169
+
170
+ // printf("Creating compute graph\n");
171
+ struct ggml_cgraph gf = ggml_build_forward(m11xm2);
172
+
173
+ printf("n_threads=%i\n", benchmark_params.n_threads);
174
+
175
+ TENSOR_DUMP(m11);
176
+ TENSOR_DUMP(m2);
177
+
178
+ std::vector<uint8_t> work_buffer;
179
+
180
+ ggml_graph_compute_helper(work_buffer, &gf, benchmark_params.n_threads);
181
+
182
+ TENSOR_DUMP(gf.nodes[0]);
183
+
184
+ printf("\n------ Test 2 - Matrix Mult via Q4_0 code ------------------------------------------------------------------------------\n");
185
+
186
+ int32_t nelements = sizex*sizey;
187
+ int32_t ne[2] = { sizex, sizey };
188
+
189
+ std::vector<int64_t> hist_cur(1 << 4, 0);
190
+
191
+ // Set up the benchmark matrices
192
+ // printf("Creating new tensor q11 & Running quantize\n");
193
+ struct ggml_tensor * q11 = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, sizex, sizey);
194
+ ggml_quantize_q4_0((const float *) m11->data, q11->data, nelements, ne[0], hist_cur.data());
195
+
196
+ // Set up the compute graph
197
+ // printf("Creating new tensor q31\n");
198
+ struct ggml_tensor * q31 = ggml_mul_mat(ctx, q11, m2);
199
+
200
+ // printf("Creating compute graph\n");
201
+ struct ggml_cgraph gf31 = ggml_build_forward(q31);
202
+
203
+ // Set up a second graph computation to make sure we overwrite the CPU cache lines
204
+ // printf("Creating new tensor q12 & Running quantize\n");
205
+ struct ggml_tensor * q12 = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, sizex, sizey);
206
+ ggml_quantize_q4_0((const float *) m12->data, q12->data, nelements, ne[0], hist_cur.data());
207
+
208
+ // printf("Creating new tensor q32\n");
209
+ struct ggml_tensor * q32 = ggml_mul_mat(ctx, q12, m2);
210
+
211
+ //printf("Creating compute graph\n");
212
+ struct ggml_cgraph gf32 = ggml_build_forward(q32);
213
+ printf("n_threads=%i\n", benchmark_params.n_threads);
214
+
215
+ const int dimx = sizex;
216
+ const int dimy = sizey;
217
+ const int dimz = sizez;
218
+ long long int flops_per_dot_product = dimy + dimy;
219
+ long long int flops_per_matrix = flops_per_dot_product * dimx * dimz;
220
+ printf("Matrix Multiplication of (%i,%i,%i) x (%i,%i,%i) - about %6.2f gFLOPS\n\n", sizex, sizey, 1, sizex, sizez, 1, 1.0f*flops_per_matrix / 1000 / 1000 / 1000);
221
+
222
+
223
+ // Let's use the F32 result from above as a reference for the q4_0 multiplication
224
+ float sum_of_F32_reference = tensor_sum_elements(gf.nodes[0]);
225
+
226
+ printf("Iteration;NThreads; SizeX; SizeY; SizeZ; Required_FLOPS; Elapsed_u_Seconds; gigaFLOPS\n");
227
+ printf("=====================================================================================\n");
228
+
229
+ double gflops_sum = 0;
230
+ for (int i=0;i<benchmark_params.n_iterations ;i++) {
231
+
232
+ long long int start = ggml_time_us();
233
+ //printf("Running ggml_graph_compute\n");
234
+ ggml_graph_compute_helper(work_buffer, &gf31, benchmark_params.n_threads);
235
+
236
+ long long int stop = ggml_time_us();
237
+ long long int usec = stop-start;
238
+ double gflops = (double)(flops_per_matrix)/usec/1000.0;
239
+ gflops_sum += gflops;
240
+ printf("%9i;%8i;%6i;%6i;%6i;%15lli;%18lli;%10.2f\n",
241
+ i,
242
+ benchmark_params.n_threads,
243
+ sizex, sizey, sizez, flops_per_matrix,
244
+ usec,gflops);
245
+
246
+ #ifdef VERBOSE_DEBUGGING
247
+ TENSOR_DUMP("res",gf31.nodes[0])
248
+ #endif
249
+
250
+ // Check that the matrix multiplication result is in the right ballpark
251
+ // We cannot use the exact value from the F32 multiplication because the quantization will be slightly different
252
+ float sum_of_Q4_result = tensor_sum_elements(gf31.nodes[0]);
253
+ float delta = abs(sum_of_Q4_result - sum_of_F32_reference);
254
+ float allowed_delta = (sum_of_F32_reference) / 1000 / 1000; // Let's accept an epsilon of 10^-6
255
+
256
+ if (delta > allowed_delta) {
257
+ printf("\nABORT - ERROR in Matrix Multiplication result - expected %6.2f, got %6.2f (delta %6.2f > allowed_delta %6.2f)\n",
258
+ sum_of_F32_reference,
259
+ sum_of_Q4_result,
260
+ delta,
261
+ allowed_delta
262
+ );
263
+ exit(0);
264
+ }
265
+
266
+ // Running a different graph computation to make sure we overwrite the CPU cache lines
267
+ ggml_graph_compute_helper(work_buffer, &gf32, benchmark_params.n_threads);
268
+ }
269
+ printf("\n");
270
+ printf("Average%78.2f\n",gflops_sum/((double)benchmark_params.n_iterations));
271
+ printf("=====================================================================================\n");
272
+ }
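The GFLOPS figure reported above comes from a simple operation count: a dot product of length dimy costs dimy multiplies plus dimy adds (hence flops_per_dot_product = dimy + dimy), the result matrix contains dimx * dimz such dot products, and the elapsed microseconds are converted to gigaFLOPS by dividing the FLOPs by usec and then by a further 1000. A small standalone sketch of that arithmetic with the same matrix sizes follows; the timing value is made up for illustration and is not a measurement from this commit.

#include <cstdio>

int main() {
    // Matrix sizes used by the benchmark's default (non-verbose) path.
    const long long sizex = 11008, sizey = 4096, sizez = 128;

    // One dot product of length sizey costs sizey multiplies + sizey adds.
    const long long flops_per_dot_product = 2 * sizey;
    // The (sizex x sizez) result matrix needs one dot product per element.
    const long long flops_per_matrix = flops_per_dot_product * sizex * sizez;

    // Pretend one graph evaluation took this many microseconds (made-up value).
    const long long usec = 50000;
    // FLOPs per microsecond equals MFLOPS; dividing by a further 1000 gives GFLOPS.
    const double gflops = (double) flops_per_matrix / usec / 1000.0;

    std::printf("%lld FLOPs, %.2f GFLOPS at %lld us\n", flops_per_matrix, gflops, usec);
    return 0;
}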
examples/chat-13B.bat ADDED
@@ -0,0 +1,57 @@
1
+ @setlocal disabledelayedexpansion enableextensions
2
+ @echo off
3
+
4
+ cd /d "%~dp0.."
5
+ if not "%errorlevel%"=="0" (
6
+ echo Unable to change directory.
7
+ pause
8
+ exit /b 1
9
+ )
10
+
11
+ if not defined MODEL set "MODEL=models\13B\ggml-model-q4_0.bin"
12
+ if not defined USER_NAME set "USER_NAME=User"
13
+ if not defined AI_NAME set "AI_NAME=ChatLLaMa"
14
+ rem Adjust to the number of CPU cores you want to use.
15
+ rem if not defined N_THREAD set "N_THREAD=8"
16
+ rem Number of tokens to predict (made it larger than default because we want a long interaction)
17
+ if not defined N_PREDICTS set "N_PREDICTS=2048"
18
+ if not defined GEN_OPTIONS set "GEN_OPTIONS=--ctx_size 2048 --temp 0.7 --top_k 40 --top_p 0.5 --repeat_last_n 256 --batch_size 1024 --repeat_penalty 1.17647"
19
+
20
+ rem Default main script paths
21
+ set "DEFAULT_MAIN_SCRIPT_PATHS=main.exe build\bin\main.exe"
22
+
23
+ rem Get main script path from command line arguments
24
+ set "MAIN_SCRIPT_PATH=%~1"
25
+
26
+ rem If the main script path was not specified, try the default paths
27
+ if not defined MAIN_SCRIPT_PATH (
28
+ for %%i in (%DEFAULT_MAIN_SCRIPT_PATHS%) do (
29
+ if exist "%%i" set "MAIN_SCRIPT_PATH=%%i"
30
+ )
31
+ )
32
+
33
+ rem If the main script path was not found, tell the user how to specify it
34
+ if not defined MAIN_SCRIPT_PATH (
35
+ echo The main script could not be found. Please provide the path to the main script as 1st argument to this script, or place the main script in one of the default locations:
36
+ echo %DEFAULT_MAIN_SCRIPT_PATHS%
37
+ pause
38
+ exit /b 1
39
+ )
40
+
41
+ rem Default context, feel free to edit it
42
+ set "PROMPT_TEXT=Text transcript of a never ending dialog, where %USER_NAME% interacts with an AI assistant named %AI_NAME%. %AI_NAME% is helpful, kind, honest, friendly, good at writing and never fails to answer %USER_NAME%'s requests immediately and with details and precision. There are no annotations like (30 seconds passed...) or (to himself), just what %USER_NAME% and %AI_NAME% say aloud to each other. The dialog lasts for years, the entirety of it is shared below. It's 10000 pages long. The transcript only includes text, it does not include markup like HTML and Markdown."
43
+
44
+ rem Set a temporary variable if N_THREAD is set
45
+ if defined N_THREAD (
46
+ set "_N_THREAD=--threads %N_THREAD%"
47
+ ) else (
48
+ set "_N_THREAD="
49
+ )
50
+
51
+ rem Run the script
52
+ echo "%MAIN_SCRIPT_PATH%" %GEN_OPTIONS% %_N_THREAD% ^
53
+ --model "%MODEL%" ^
54
+ --n_predict %N_PREDICTS% ^
55
+ --color --interactive ^
56
+ --reverse-prompt "%USER_NAME%:" ^
57
+ --prompt "%PROMPT_TEXT%"
examples/chat-13B.sh ADDED
@@ -0,0 +1,41 @@
1
+ #!/bin/bash
2
+
3
+ set -e
4
+
5
+ cd "$(dirname "$0")/.." || exit
6
+
7
+ MODEL="${MODEL:-./models/13B/ggml-model-q4_0.bin}"
8
+ PROMPT_TEMPLATE=${PROMPT_TEMPLATE:-./prompts/chat.txt}
9
+ USER_NAME="${USER_NAME:-USER}"
10
+ AI_NAME="${AI_NAME:-ChatLLaMa}"
11
+
12
+ # Adjust to the number of CPU cores you want to use.
13
+ N_THREAD="${N_THREAD:-8}"
14
+ # Number of tokens to predict (made it larger than default because we want a long interaction)
15
+ N_PREDICTS="${N_PREDICTS:-2048}"
16
+
17
+ # Note: you can also override the generation options by specifying them on the command line:
18
+ # For example, override the context size by doing: ./chatLLaMa --ctx_size 1024
19
+ GEN_OPTIONS="${GEN_OPTIONS:---ctx_size 2048 --temp 0.7 --top_k 40 --top_p 0.5 --repeat_last_n 256 --batch_size 1024 --repeat_penalty 1.17647}"
20
+
21
+ DATE_TIME=$(date +%H:%M)
22
+ DATE_YEAR=$(date +%Y)
23
+
24
+ PROMPT_FILE=$(mktemp -t llamacpp_prompt.XXXXXXX.txt)
25
+
26
+ sed -e "s/\[\[USER_NAME\]\]/$USER_NAME/g" \
27
+ -e "s/\[\[AI_NAME\]\]/$AI_NAME/g" \
28
+ -e "s/\[\[DATE_TIME\]\]/$DATE_TIME/g" \
29
+ -e "s/\[\[DATE_YEAR\]\]/$DATE_YEAR/g" \
30
+ $PROMPT_TEMPLATE > $PROMPT_FILE
31
+
32
+ # shellcheck disable=SC2086 # Intended splitting of GEN_OPTIONS
33
+ ./main $GEN_OPTIONS \
34
+ --model "$MODEL" \
35
+ --threads "$N_THREAD" \
36
+ --n_predict "$N_PREDICTS" \
37
+ --color --interactive \
38
+ --file ${PROMPT_FILE} \
39
+ --reverse-prompt "${USER_NAME}:" \
40
+ --in-prefix ' ' \
41
+ "$@"
examples/chat-persistent.sh ADDED
@@ -0,0 +1,151 @@
1
+ #!/bin/bash
2
+
3
+ set -euo pipefail
4
+
5
+ cd "$(dirname "$0")/.." || exit
6
+
7
+ if [[ -z "${PROMPT_CACHE_FILE+x}" || -z "${CHAT_SAVE_DIR+x}" ]]; then
8
+ echo >&2 "error: PROMPT_CACHE_FILE and CHAT_SAVE_DIR must be provided"
9
+ exit 1
10
+ fi
11
+
12
+ MODEL="${MODEL:-./models/13B/ggml-model-q4_0.bin}"
13
+ PROMPT_TEMPLATE="${PROMPT_TEMPLATE:-./prompts/chat.txt}"
14
+ USER_NAME="${USER_NAME:-User}"
15
+ AI_NAME="${AI_NAME:-ChatLLaMa}"
16
+ DATE_TIME="$(date +%H:%M)"
17
+ DATE_YEAR="$(date +%Y)"
18
+
19
+ LOG="${CHAT_SAVE_DIR}/main.log"
20
+ LOG_BG="${CHAT_SAVE_DIR}/main-bg.log"
21
+ CUR_PROMPT_FILE="${CHAT_SAVE_DIR}/current-prompt.txt"
22
+ CUR_PROMPT_CACHE="${CHAT_SAVE_DIR}/current-cache.bin"
23
+ NEXT_PROMPT_FILE="${CHAT_SAVE_DIR}/next-prompt.txt"
24
+ NEXT_PROMPT_CACHE="${CHAT_SAVE_DIR}/next-cache.bin"
25
+
26
+ SESSION_SIZE_MSG_PATTERN='main: session file matches [[:digit:]]+ / [[:digit:]]+'
27
+ SAMPLE_TIME_MSG_PATTERN='sample time =[[:space:]]+[[:digit:]]+.[[:digit:]]+ ms /[[:space:]]+[[:digit:]]+'
28
+ SED_DELETE_MESSAGES="/^(${USER_NAME}:|${AI_NAME}:|\\.\\.\\.)/,\$d"
29
+
30
+ CTX_SIZE=2048
31
+ CTX_ROTATE_POINT=$((CTX_SIZE * 3 / 5)) # REVIEW
32
+ OPTS=(--model "$MODEL" --ctx_size "$CTX_SIZE" --repeat_last_n 256 "$@")
33
+
34
+ # An unbuffered `tail -c+N`
35
+ skip_bytes() {
36
+ LANG=C IFS= read -r -n "$1" -d '' c
37
+ while LANG=C IFS= read -r -n 1 -d '' c; do
38
+ printf '%s' "$c"
39
+ done
40
+ }
41
+
42
+ mkdir -p "$CHAT_SAVE_DIR"
43
+ echo >"$LOG"
44
+ trap "tail -n100 ${LOG}" EXIT
45
+
46
+ if [[ ! -e "$CUR_PROMPT_FILE" ]]; then
47
+ sed -e "s/\[\[USER_NAME\]\]/${USER_NAME}/g" \
48
+ -e "s/\[\[AI_NAME\]\]/${AI_NAME}/g" \
49
+ -e "s/\[\[DATE_TIME\]\]/${DATE_TIME}/g" \
50
+ -e "s/\[\[DATE_YEAR\]\]/${DATE_YEAR}/g" \
51
+ "$PROMPT_TEMPLATE" >"$CUR_PROMPT_FILE"
52
+ fi
53
+
54
+ if [[ ! -e "$NEXT_PROMPT_FILE" ]]; then
55
+ sed -r "$SED_DELETE_MESSAGES" "$CUR_PROMPT_FILE" >"$NEXT_PROMPT_FILE"
56
+ fi
57
+
58
+ if [[ "$(tail -c4 "$NEXT_PROMPT_FILE")" != "..." ]]; then
59
+ echo '...' >>"$NEXT_PROMPT_FILE"
60
+ fi
61
+
62
+ if [[ ! -e "$PROMPT_CACHE_FILE" ]]; then
63
+ echo 'Prompt cache does not exist, building...'
64
+ # Default batch_size to 8 here for better user feedback during initial prompt processing
65
+ ./main 2>>"$LOG" \
66
+ --batch_size 8 \
67
+ "${OPTS[@]}" \
68
+ --prompt-cache "$PROMPT_CACHE_FILE" \
69
+ --file "$CUR_PROMPT_FILE" \
70
+ --n_predict 1
71
+ echo
72
+ echo 'Done!'
73
+ fi
74
+
75
+ if [[ ! -e "$CUR_PROMPT_CACHE" ]]; then
76
+ cp "$PROMPT_CACHE_FILE" "$CUR_PROMPT_CACHE"
77
+ fi
78
+ if [[ ! -e "$NEXT_PROMPT_CACHE" ]]; then
79
+ cp "$PROMPT_CACHE_FILE" "$NEXT_PROMPT_CACHE"
80
+ fi
81
+
82
+ printf '%s ' "$(< "$CUR_PROMPT_FILE")"
83
+ n_tokens=0
84
+
85
+ while read -e line; do
86
+ # Limit generation to remaining context, with a buffer and estimating 2 chars/token for input
87
+ n_predict=$((CTX_SIZE - n_tokens - ${#line} / 2 - 32))
88
+
89
+ # Swap prompts when we're about to run out of context
90
+ if ((n_predict <= 0)); then
91
+ wait # for background main (below) to finish with next prompt
92
+ mv "$NEXT_PROMPT_FILE" "$CUR_PROMPT_FILE"
93
+ mv "$NEXT_PROMPT_CACHE" "$CUR_PROMPT_CACHE"
94
+
95
+ sed -r "$SED_DELETE_MESSAGES" "$CUR_PROMPT_FILE" >"$NEXT_PROMPT_FILE"
96
+ echo '...' >>"$NEXT_PROMPT_FILE"
97
+ cp "$PROMPT_CACHE_FILE" "$NEXT_PROMPT_CACHE"
98
+
99
+ n_tokens=0
100
+ n_predict=$((CTX_SIZE / 2))
101
+ fi
102
+
103
+ echo " ${line}" >>"$CUR_PROMPT_FILE"
104
+ if ((n_tokens > CTX_ROTATE_POINT)); then
105
+ echo " ${line}" >>"$NEXT_PROMPT_FILE"
106
+ fi
107
+
108
+ n_prompt_len_pre=$(($(wc -c <"$CUR_PROMPT_FILE")))
109
+
110
+ printf '%s: ' "$AI_NAME" >>"$CUR_PROMPT_FILE"
111
+
112
+ ./main 2>>"$LOG" "${OPTS[@]}" \
113
+ --prompt-cache "$CUR_PROMPT_CACHE" \
114
+ --prompt-cache-all \
115
+ --file "$CUR_PROMPT_FILE" \
116
+ --reverse-prompt "${USER_NAME}:" \
117
+ --n_predict "$n_predict" |
118
+ skip_bytes 1 | # skip BOS token added by ./main
119
+ tee "$CUR_PROMPT_FILE.tmp" | # save prompt + generation to tmp file
120
+ skip_bytes "$n_prompt_len_pre" # print generation
121
+
122
+ mv "$CUR_PROMPT_FILE.tmp" "$CUR_PROMPT_FILE"
123
+
124
+ # if we hit n_predict instead of reverse-prompt, we need to add the prompt
125
+ if [[ "$(tail -n1 "$CUR_PROMPT_FILE")" != "${USER_NAME}:" ]]; then
126
+ printf '\n%s:' "$USER_NAME"
127
+ printf '\n%s:' "$USER_NAME" >> "$CUR_PROMPT_FILE"
128
+ fi
129
+
130
+ printf ' '
131
+
132
+ # HACK get num tokens from debug message
133
+ # TODO get both messages in one go
134
+ if ! session_size_msg="$(tail -n30 "$LOG" | grep -oE "$SESSION_SIZE_MSG_PATTERN")" ||
135
+ ! sample_time_msg="$( tail -n10 "$LOG" | grep -oE "$SAMPLE_TIME_MSG_PATTERN")"; then
136
+ echo >&2 "Couldn't get number of tokens from ./main output!"
137
+ exit 1
138
+ fi
139
+
140
+ n_tokens=$(($(cut -d/ -f2 <<<"$session_size_msg") + $(cut -d/ -f2 <<<"$sample_time_msg")))
141
+
142
+ if ((n_tokens > CTX_ROTATE_POINT)); then
143
+ tail -c+$((n_prompt_len_pre + 1)) "$CUR_PROMPT_FILE" >>"$NEXT_PROMPT_FILE"
144
+ fi
145
+
146
+ # Update cache for next prompt in background, ideally during user input
147
+ ./main >>"$LOG_BG" 2>&1 "${OPTS[@]}" \
148
+ --prompt-cache "$NEXT_PROMPT_CACHE" \
149
+ --file "$NEXT_PROMPT_FILE" \
150
+ --n_predict 1 &
151
+ done
examples/chat-vicuna.sh ADDED
@@ -0,0 +1,41 @@
 
1
+ #!/bin/bash
2
+
3
+ set -e
4
+
5
+ cd "$(dirname "$0")/.." || exit
6
+
7
+ MODEL="${MODEL:-./models/ggml-vic13b-uncensored-q5_0.bin}"
8
+ PROMPT_TEMPLATE=${PROMPT_TEMPLATE:-./prompts/chat.txt}
9
+ USER_NAME="### Human"
10
+ AI_NAME="### Assistant"
11
+
12
+ # Adjust to the number of CPU cores you want to use.
13
+ N_THREAD="${N_THREAD:-8}"
14
+ # Number of tokens to predict (made it larger than default because we want a long interaction)
15
+ N_PREDICTS="${N_PREDICTS:-2048}"
16
+
17
+ # Note: you can also override the generation options by specifying them on the command line:
18
+ # For example, override the context size by doing: ./chatLLaMa --ctx_size 1024
19
+ GEN_OPTIONS="${GEN_OPTIONS:---ctx_size 2048 --temp 0.7 --top_k 40 --top_p 0.5 --repeat_last_n 256 --batch_size 1024 --repeat_penalty 1.17647}"
20
+
21
+ DATE_TIME=$(date +%H:%M)
22
+ DATE_YEAR=$(date +%Y)
23
+
24
+ PROMPT_FILE=$(mktemp -t llamacpp_prompt.XXXXXXX.txt)
25
+
26
+ sed -e "s/\[\[USER_NAME\]\]/$USER_NAME/g" \
27
+ -e "s/\[\[AI_NAME\]\]/$AI_NAME/g" \
28
+ -e "s/\[\[DATE_TIME\]\]/$DATE_TIME/g" \
29
+ -e "s/\[\[DATE_YEAR\]\]/$DATE_YEAR/g" \
30
+ $PROMPT_TEMPLATE > $PROMPT_FILE
31
+
32
+ # shellcheck disable=SC2086 # Intended splitting of GEN_OPTIONS
33
+ ./bin/main $GEN_OPTIONS \
34
+ --model "$MODEL" \
35
+ --threads "$N_THREAD" \
36
+ --n_predict "$N_PREDICTS" \
37
+ --color --interactive \
38
+ --file ${PROMPT_FILE} \
39
+ --reverse-prompt "### Human:" \
40
+ --in-prefix ' ' \
41
+ "$@"
examples/chat.sh ADDED
@@ -0,0 +1,16 @@
1
+ #!/bin/bash
2
+
3
+ #
4
+ # Temporary script - will be removed in the future
5
+ #
6
+
7
+ cd `dirname $0`
8
+ cd ..
9
+
10
+ # Important:
11
+ #
12
+ # "--keep 48" is based on the contents of prompts/chat-with-bob.txt
13
+ #
14
+ ./main -m ./models/7B/ggml-model-q4_0.bin -c 512 -b 1024 -n 256 --keep 48 \
15
+ --repeat_penalty 1.0 --color -i \
16
+ -r "User:" -f prompts/chat-with-bob.txt
examples/common.cpp ADDED
@@ -0,0 +1,982 @@
1
+ #include "common.h"
2
+
3
+ #include <cassert>
4
+ #include <iostream>
5
+ #include <cstring>
6
+ #include <fstream>
7
+ #include <string>
8
+ #include <iterator>
9
+ #include <algorithm>
10
+ #include <sstream>
11
+ #include <unordered_set>
12
+ #include <regex>
13
+
14
+ #if defined(__APPLE__) && defined(__MACH__)
15
+ #include <sys/types.h>
16
+ #include <sys/sysctl.h>
17
+ #endif
18
+
19
+ #if defined(_WIN32)
20
+ #define WIN32_LEAN_AND_MEAN
21
+ #define NOMINMAX
22
+ #include <windows.h>
23
+ #include <fcntl.h>
24
+ #include <io.h>
25
+ #else
26
+ #include <sys/ioctl.h>
27
+ #include <unistd.h>
28
+ #include <wchar.h>
29
+ #endif
30
+
31
+ #if defined(_MSC_VER)
32
+ #pragma warning(disable: 4244 4267) // possible loss of data
33
+ #endif
34
+
35
+ int32_t get_num_physical_cores() {
36
+ #ifdef __linux__
37
+ // enumerate the set of thread siblings, num entries is num cores
38
+ std::unordered_set<std::string> siblings;
39
+ for (uint32_t cpu=0; cpu < UINT32_MAX; ++cpu) {
40
+ std::ifstream thread_siblings("/sys/devices/system/cpu/cpu"
41
+ + std::to_string(cpu) + "/topology/thread_siblings");
42
+ if (!thread_siblings.is_open()) {
43
+ break; // no more cpus
44
+ }
45
+ std::string line;
46
+ if (std::getline(thread_siblings, line)) {
47
+ siblings.insert(line);
48
+ }
49
+ }
50
+ if (siblings.size() > 0) {
51
+ return static_cast<int32_t>(siblings.size());
52
+ }
53
+ #elif defined(__APPLE__) && defined(__MACH__)
54
+ int32_t num_physical_cores;
55
+ size_t len = sizeof(num_physical_cores);
56
+ int result = sysctlbyname("hw.perflevel0.physicalcpu", &num_physical_cores, &len, NULL, 0);
57
+ if (result == 0) {
58
+ return num_physical_cores;
59
+ }
60
+ result = sysctlbyname("hw.physicalcpu", &num_physical_cores, &len, NULL, 0);
61
+ if (result == 0) {
62
+ return num_physical_cores;
63
+ }
64
+ #elif defined(_WIN32)
65
+ //TODO: Implement
66
+ #endif
67
+ unsigned int n_threads = std::thread::hardware_concurrency();
68
+ return n_threads > 0 ? (n_threads <= 4 ? n_threads : n_threads / 2) : 4;
69
+ }
70
+
71
+ void process_escapes(std::string& input) {
72
+ std::size_t input_len = input.length();
73
+ std::size_t output_idx = 0;
74
+
75
+ for (std::size_t input_idx = 0; input_idx < input_len; ++input_idx) {
76
+ if (input[input_idx] == '\\' && input_idx + 1 < input_len) {
77
+ switch (input[++input_idx]) {
78
+ case 'n': input[output_idx++] = '\n'; break;
79
+ case 'r': input[output_idx++] = '\r'; break;
80
+ case 't': input[output_idx++] = '\t'; break;
81
+ case '\'': input[output_idx++] = '\''; break;
82
+ case '\"': input[output_idx++] = '\"'; break;
83
+ case '\\': input[output_idx++] = '\\'; break;
84
+ default: input[output_idx++] = '\\';
85
+ input[output_idx++] = input[input_idx]; break;
86
+ }
87
+ } else {
88
+ input[output_idx++] = input[input_idx];
89
+ }
90
+ }
91
+
92
+ input.resize(output_idx);
93
+ }
94
+
95
+ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
96
+ bool invalid_param = false;
97
+ bool escape_prompt = false;
98
+ std::string arg;
99
+ gpt_params default_params;
100
+ const std::string arg_prefix = "--";
101
+
102
+ for (int i = 1; i < argc; i++) {
103
+ arg = argv[i];
104
+ if (arg.compare(0, arg_prefix.size(), arg_prefix) == 0) {
105
+ std::replace(arg.begin(), arg.end(), '_', '-');
106
+ }
107
+
108
+ if (arg == "-s" || arg == "--seed") {
109
+ if (++i >= argc) {
110
+ invalid_param = true;
111
+ break;
112
+ }
113
+ params.seed = std::stoul(argv[i]);
114
+ } else if (arg == "-t" || arg == "--threads") {
115
+ if (++i >= argc) {
116
+ invalid_param = true;
117
+ break;
118
+ }
119
+ params.n_threads = std::stoi(argv[i]);
120
+ } else if (arg == "-p" || arg == "--prompt") {
121
+ if (++i >= argc) {
122
+ invalid_param = true;
123
+ break;
124
+ }
125
+ params.prompt = argv[i];
126
+ } else if (arg == "-e") {
127
+ escape_prompt = true;
128
+ } else if (arg == "--prompt-cache") {
129
+ if (++i >= argc) {
130
+ invalid_param = true;
131
+ break;
132
+ }
133
+ params.path_prompt_cache = argv[i];
134
+ } else if (arg == "--prompt-cache-all") {
135
+ params.prompt_cache_all = true;
136
+ } else if (arg == "--prompt-cache-ro") {
137
+ params.prompt_cache_ro = true;
138
+ } else if (arg == "-f" || arg == "--file") {
139
+ if (++i >= argc) {
140
+ invalid_param = true;
141
+ break;
142
+ }
143
+ std::ifstream file(argv[i]);
144
+ if (!file) {
145
+ fprintf(stderr, "error: failed to open file '%s'\n", argv[i]);
146
+ invalid_param = true;
147
+ break;
148
+ }
149
+ std::copy(std::istreambuf_iterator<char>(file), std::istreambuf_iterator<char>(), back_inserter(params.prompt));
150
+ if (params.prompt.back() == '\n') {
151
+ params.prompt.pop_back();
152
+ }
153
+ } else if (arg == "-n" || arg == "--n-predict") {
154
+ if (++i >= argc) {
155
+ invalid_param = true;
156
+ break;
157
+ }
158
+ params.n_predict = std::stoi(argv[i]);
159
+ } else if (arg == "--top-k") {
160
+ if (++i >= argc) {
161
+ invalid_param = true;
162
+ break;
163
+ }
164
+ params.top_k = std::stoi(argv[i]);
165
+ } else if (arg == "-c" || arg == "--ctx-size") {
166
+ if (++i >= argc) {
167
+ invalid_param = true;
168
+ break;
169
+ }
170
+ params.n_ctx = std::stoi(argv[i]);
171
+ } else if (arg == "--memory-f32") {
172
+ params.memory_f16 = false;
173
+ } else if (arg == "--top-p") {
174
+ if (++i >= argc) {
175
+ invalid_param = true;
176
+ break;
177
+ }
178
+ params.top_p = std::stof(argv[i]);
179
+ } else if (arg == "--temp") {
180
+ if (++i >= argc) {
181
+ invalid_param = true;
182
+ break;
183
+ }
184
+ params.temp = std::stof(argv[i]);
185
+ } else if (arg == "--tfs") {
186
+ if (++i >= argc) {
187
+ invalid_param = true;
188
+ break;
189
+ }
190
+ params.tfs_z = std::stof(argv[i]);
191
+ } else if (arg == "--typical") {
192
+ if (++i >= argc) {
193
+ invalid_param = true;
194
+ break;
195
+ }
196
+ params.typical_p = std::stof(argv[i]);
197
+ } else if (arg == "--repeat-last-n") {
198
+ if (++i >= argc) {
199
+ invalid_param = true;
200
+ break;
201
+ }
202
+ params.repeat_last_n = std::stoi(argv[i]);
203
+ } else if (arg == "--repeat-penalty") {
204
+ if (++i >= argc) {
205
+ invalid_param = true;
206
+ break;
207
+ }
208
+ params.repeat_penalty = std::stof(argv[i]);
209
+ } else if (arg == "--frequency-penalty") {
210
+ if (++i >= argc) {
211
+ invalid_param = true;
212
+ break;
213
+ }
214
+ params.frequency_penalty = std::stof(argv[i]);
215
+ } else if (arg == "--presence-penalty") {
216
+ if (++i >= argc) {
217
+ invalid_param = true;
218
+ break;
219
+ }
220
+ params.presence_penalty = std::stof(argv[i]);
221
+ } else if (arg == "--mirostat") {
222
+ if (++i >= argc) {
223
+ invalid_param = true;
224
+ break;
225
+ }
226
+ params.mirostat = std::stoi(argv[i]);
227
+ } else if (arg == "--mirostat-lr") {
228
+ if (++i >= argc) {
229
+ invalid_param = true;
230
+ break;
231
+ }
232
+ params.mirostat_eta = std::stof(argv[i]);
233
+ } else if (arg == "--mirostat-ent") {
234
+ if (++i >= argc) {
235
+ invalid_param = true;
236
+ break;
237
+ }
238
+ params.mirostat_tau = std::stof(argv[i]);
239
+ } else if (arg == "--cfg-negative-prompt") {
240
+ if (++i >= argc) {
241
+ invalid_param = true;
242
+ break;
243
+ }
244
+ params.cfg_negative_prompt = argv[i];
245
+ } else if (arg == "--cfg-scale") {
246
+ if (++i >= argc) {
247
+ invalid_param = true;
248
+ break;
249
+ }
250
+ params.cfg_scale = std::stof(argv[i]);
251
+ } else if (arg == "--cfg-smooth-factor") {
252
+ if (++i >= argc) {
253
+ invalid_param = true;
254
+ break;
255
+ }
256
+ params.cfg_smooth_factor = std::stof(argv[i]);
257
+ } else if (arg == "-b" || arg == "--batch-size") {
258
+ if (++i >= argc) {
259
+ invalid_param = true;
260
+ break;
261
+ }
262
+ params.n_batch = std::stoi(argv[i]);
263
+ params.n_batch = std::min(512, params.n_batch);
264
+ } else if (arg == "--keep") {
265
+ if (++i >= argc) {
266
+ invalid_param = true;
267
+ break;
268
+ }
269
+ params.n_keep = std::stoi(argv[i]);
270
+ } else if (arg == "-m" || arg == "--model") {
271
+ if (++i >= argc) {
272
+ invalid_param = true;
273
+ break;
274
+ }
275
+ params.model = argv[i];
276
+ } else if (arg == "-a" || arg == "--alias") {
277
+ if (++i >= argc) {
278
+ invalid_param = true;
279
+ break;
280
+ }
281
+ params.model_alias = argv[i];
282
+ } else if (arg == "--lora") {
283
+ if (++i >= argc) {
284
+ invalid_param = true;
285
+ break;
286
+ }
287
+ params.lora_adapter = argv[i];
288
+ } else if (arg == "--lora-base") {
289
+ if (++i >= argc) {
290
+ invalid_param = true;
291
+ break;
292
+ }
293
+ params.lora_base = argv[i];
294
+ } else if (arg == "-i" || arg == "--interactive") {
295
+ params.interactive = true;
296
+ } else if (arg == "--embedding") {
297
+ params.embedding = true;
298
+ } else if (arg == "--interactive-first") {
299
+ params.interactive_first = true;
300
+ } else if (arg == "-ins" || arg == "--instruct") {
301
+ params.instruct = true;
302
+ } else if (arg == "--multiline-input") {
303
+ params.multiline_input = true;
304
+ } else if (arg == "--color") {
305
+ params.use_color = true;
306
+ } else if (arg == "--mlock") {
307
+ params.use_mlock = true;
308
+ } else if (arg == "--gpu-layers" || arg == "-ngl" || arg == "--n-gpu-layers") {
309
+ if (++i >= argc) {
310
+ invalid_param = true;
311
+ break;
312
+ }
313
+ #ifdef LLAMA_SUPPORTS_GPU_OFFLOAD
314
+ params.n_gpu_layers = std::stoi(argv[i]);
315
+ #else
316
+ fprintf(stderr, "warning: not compiled with GPU offload support, --n-gpu-layers option will be ignored\n");
317
+ fprintf(stderr, "warning: see main README.md for information on enabling GPU BLAS support\n");
318
+ #endif
319
+ } else if (arg == "--main-gpu" || arg == "-mg") {
320
+ if (++i >= argc) {
321
+ invalid_param = true;
322
+ break;
323
+ }
324
+ #ifdef GGML_USE_CUBLAS
325
+ params.main_gpu = std::stoi(argv[i]);
326
+ #else
327
+ fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. It is not possible to set a main GPU.\n");
328
+ #endif
329
+ } else if (arg == "--tensor-split" || arg == "-ts") {
330
+ if (++i >= argc) {
331
+ invalid_param = true;
332
+ break;
333
+ }
334
+ #ifdef GGML_USE_CUBLAS
335
+ std::string arg_next = argv[i];
336
+
337
+ // split string by , and /
338
+ const std::regex regex{R"([,/]+)"};
339
+ std::sregex_token_iterator it{arg_next.begin(), arg_next.end(), regex, -1};
340
+ std::vector<std::string> split_arg{it, {}};
341
+ GGML_ASSERT(split_arg.size() <= LLAMA_MAX_DEVICES);
342
+
343
+ for (size_t i = 0; i < LLAMA_MAX_DEVICES; ++i) {
344
+ if (i < split_arg.size()) {
345
+ params.tensor_split[i] = std::stof(split_arg[i]);
346
+ } else {
347
+ params.tensor_split[i] = 0.0f;
348
+ }
349
+ }
350
+ #else
351
+ fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. It is not possible to set a tensor split.\n");
352
+ #endif // GGML_USE_CUBLAS
353
+ } else if (arg == "--low-vram" || arg == "-lv") {
354
+ #ifdef GGML_USE_CUBLAS
355
+ params.low_vram = true;
356
+ #else
357
+ fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. It is not possible to set lower vram usage.\n");
358
+ #endif // GGML_USE_CUBLAS
359
+ } else if (arg == "--no-mmap") {
360
+ params.use_mmap = false;
361
+ } else if (arg == "--mtest") {
362
+ params.mem_test = true;
363
+ } else if (arg == "--numa") {
364
+ params.numa = true;
365
+ } else if (arg == "--export") {
366
+ params.export_cgraph = true;
367
+ } else if (arg == "--verbose-prompt") {
368
+ params.verbose_prompt = true;
369
+ } else if (arg == "-r" || arg == "--reverse-prompt") {
370
+ if (++i >= argc) {
371
+ invalid_param = true;
372
+ break;
373
+ }
374
+ params.antiprompt.push_back(argv[i]);
375
+ } else if (arg == "--perplexity") {
376
+ params.perplexity = true;
377
+ } else if (arg == "--ignore-eos") {
378
+ params.logit_bias[llama_token_eos()] = -INFINITY;
379
+ } else if (arg == "--no-penalize-nl") {
380
+ params.penalize_nl = false;
381
+ } else if (arg == "-l" || arg == "--logit-bias") {
382
+ if (++i >= argc) {
383
+ invalid_param = true;
384
+ break;
385
+ }
386
+ std::stringstream ss(argv[i]);
387
+ llama_token key;
388
+ char sign;
389
+ std::string value_str;
390
+ try {
391
+ if (ss >> key && ss >> sign && std::getline(ss, value_str) && (sign == '+' || sign == '-')) {
392
+ params.logit_bias[key] = std::stof(value_str) * ((sign == '-') ? -1.0f : 1.0f);
393
+ } else {
394
+ throw std::exception();
395
+ }
396
+ } catch (const std::exception&) {
397
+ invalid_param = true;
398
+ break;
399
+ }
400
+ } else if (arg == "-h" || arg == "--help") {
401
+ gpt_print_usage(argc, argv, default_params);
402
+ exit(0);
403
+ } else if (arg == "--random-prompt") {
404
+ params.random_prompt = true;
405
+ } else if (arg == "--in-prefix") {
406
+ if (++i >= argc) {
407
+ invalid_param = true;
408
+ break;
409
+ }
410
+ params.input_prefix = argv[i];
411
+ } else if (arg == "--in-suffix") {
412
+ if (++i >= argc) {
413
+ invalid_param = true;
414
+ break;
415
+ }
416
+ params.input_suffix = argv[i];
417
+ } else {
418
+ fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
419
+ gpt_print_usage(argc, argv, default_params);
420
+ exit(1);
421
+ }
422
+ }
423
+ if (invalid_param) {
424
+ fprintf(stderr, "error: invalid parameter for argument: %s\n", arg.c_str());
425
+ gpt_print_usage(argc, argv, default_params);
426
+ exit(1);
427
+ }
428
+ if (params.prompt_cache_all &&
429
+ (params.interactive || params.interactive_first ||
430
+ params.instruct)) {
431
+ fprintf(stderr, "error: --prompt-cache-all not supported in interactive mode yet\n");
432
+ gpt_print_usage(argc, argv, default_params);
433
+ exit(1);
434
+ }
435
+
436
+ if (escape_prompt) {
437
+ process_escapes(params.prompt);
438
+ process_escapes(params.input_prefix);
439
+ process_escapes(params.input_suffix);
440
+ }
441
+
442
+ return true;
443
+ }
444
+
445
+ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
446
+ fprintf(stderr, "usage: %s [options]\n", argv[0]);
447
+ fprintf(stderr, "\n");
448
+ fprintf(stderr, "options:\n");
449
+ fprintf(stderr, " -h, --help show this help message and exit\n");
450
+ fprintf(stderr, " -i, --interactive run in interactive mode\n");
451
+ fprintf(stderr, " --interactive-first run in interactive mode and wait for input right away\n");
452
+ fprintf(stderr, " -ins, --instruct run in instruction mode (use with Alpaca models)\n");
453
+ fprintf(stderr, " --multiline-input allows you to write or paste multiple lines without ending each in '\\'\n");
454
+ fprintf(stderr, " -r PROMPT, --reverse-prompt PROMPT\n");
455
+ fprintf(stderr, " halt generation at PROMPT, return control in interactive mode\n");
456
+ fprintf(stderr, " (can be specified more than once for multiple prompts).\n");
457
+ fprintf(stderr, " --color colorise output to distinguish prompt and user input from generations\n");
458
+ fprintf(stderr, " -s SEED, --seed SEED RNG seed (default: -1, use random seed for < 0)\n");
459
+ fprintf(stderr, " -t N, --threads N number of threads to use during computation (default: %d)\n", params.n_threads);
460
+ fprintf(stderr, " -p PROMPT, --prompt PROMPT\n");
461
+ fprintf(stderr, " prompt to start generation with (default: empty)\n");
462
+ fprintf(stderr, " -e process prompt escape sequences (\\n, \\r, \\t, \\', \\\", \\\\)\n");
463
+ fprintf(stderr, " --prompt-cache FNAME file to cache prompt state for faster startup (default: none)\n");
464
+ fprintf(stderr, " --prompt-cache-all if specified, saves user input and generations to cache as well.\n");
465
+ fprintf(stderr, " not supported with --interactive or other interactive options\n");
466
+ fprintf(stderr, " --prompt-cache-ro if specified, uses the prompt cache but does not update it.\n");
467
+ fprintf(stderr, " --random-prompt start with a randomized prompt.\n");
468
+ fprintf(stderr, " --in-prefix STRING string to prefix user inputs with (default: empty)\n");
469
+ fprintf(stderr, " --in-suffix STRING string to suffix after user inputs with (default: empty)\n");
470
+ fprintf(stderr, " -f FNAME, --file FNAME\n");
471
+ fprintf(stderr, " prompt file to start generation.\n");
472
+ fprintf(stderr, " -n N, --n-predict N number of tokens to predict (default: %d, -1 = infinity)\n", params.n_predict);
473
+ fprintf(stderr, " --top-k N top-k sampling (default: %d, 0 = disabled)\n", params.top_k);
474
+ fprintf(stderr, " --top-p N top-p sampling (default: %.1f, 1.0 = disabled)\n", (double)params.top_p);
475
+ fprintf(stderr, " --tfs N tail free sampling, parameter z (default: %.1f, 1.0 = disabled)\n", (double)params.tfs_z);
476
+ fprintf(stderr, " --typical N locally typical sampling, parameter p (default: %.1f, 1.0 = disabled)\n", (double)params.typical_p);
477
+ fprintf(stderr, " --repeat-last-n N last n tokens to consider for penalizing (default: %d, 0 = disabled, -1 = ctx_size)\n", params.repeat_last_n);
478
+ fprintf(stderr, " --repeat-penalty N penalize repeat sequence of tokens (default: %.1f, 1.0 = disabled)\n", (double)params.repeat_penalty);
479
+ fprintf(stderr, " --presence-penalty N repeat alpha presence penalty (default: %.1f, 0.0 = disabled)\n", (double)params.presence_penalty);
480
+ fprintf(stderr, " --frequency-penalty N repeat alpha frequency penalty (default: %.1f, 0.0 = disabled)\n", (double)params.frequency_penalty);
481
+ fprintf(stderr, " --mirostat N use Mirostat sampling.\n");
482
+ fprintf(stderr, " Top K, Nucleus, Tail Free and Locally Typical samplers are ignored if used.\n");
483
+ fprintf(stderr, " (default: %d, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)\n", params.mirostat);
484
+ fprintf(stderr, " --mirostat-lr N Mirostat learning rate, parameter eta (default: %.1f)\n", (double)params.mirostat_eta);
485
+ fprintf(stderr, " --mirostat-ent N Mirostat target entropy, parameter tau (default: %.1f)\n", (double)params.mirostat_tau);
486
+ fprintf(stderr, " -l TOKEN_ID(+/-)BIAS, --logit-bias TOKEN_ID(+/-)BIAS\n");
487
+ fprintf(stderr, " modifies the likelihood of token appearing in the completion,\n");
488
+ fprintf(stderr, " i.e. `--logit-bias 15043+1` to increase likelihood of token ' Hello',\n");
489
+ fprintf(stderr, " or `--logit-bias 15043-1` to decrease likelihood of token ' Hello'\n");
490
+ fprintf(stderr, " --cfg-negative-prompt PROMPT \n");
491
+ fprintf(stderr, " negative prompt to use for guidance. (default: empty)\n");
492
+ fprintf(stderr, " --cfg-scale N strength of guidance (default: %f, 1.0 = disable)\n", params.cfg_scale);
493
+ fprintf(stderr, " --cfg-smooth-factor N smooth factor between old and new logits (default: %f, 1.0 = no smoothing)\n", params.cfg_smooth_factor);
494
+ fprintf(stderr, " -c N, --ctx-size N size of the prompt context (default: %d)\n", params.n_ctx);
495
+ fprintf(stderr, " --ignore-eos ignore end of stream token and continue generating (implies --logit-bias 2-inf)\n");
496
+ fprintf(stderr, " --no-penalize-nl do not penalize newline token\n");
497
+ fprintf(stderr, " --memory-f32 use f32 instead of f16 for memory key+value (default: disabled)\n");
498
+ fprintf(stderr, " not recommended: doubles context memory required and no measurable increase in quality\n");
499
+ fprintf(stderr, " --temp N temperature (default: %.1f)\n", (double)params.temp);
500
+ fprintf(stderr, " -b N, --batch-size N batch size for prompt processing (default: %d)\n", params.n_batch);
501
+ fprintf(stderr, " --perplexity compute perplexity over the prompt\n");
502
+ fprintf(stderr, " --keep number of tokens to keep from the initial prompt (default: %d, -1 = all)\n", params.n_keep);
503
+ if (llama_mlock_supported()) {
504
+ fprintf(stderr, " --mlock force system to keep model in RAM rather than swapping or compressing\n");
505
+ }
506
+ if (llama_mmap_supported()) {
507
+ fprintf(stderr, " --no-mmap do not memory-map model (slower load but may reduce pageouts if not using mlock)\n");
508
+ }
509
+ fprintf(stderr, " --numa attempt optimizations that help on some NUMA systems\n");
510
+ fprintf(stderr, " if run without this previously, it is recommended to drop the system page cache before using this\n");
511
+ fprintf(stderr, " see https://github.com/ggerganov/llama.cpp/issues/1437\n");
512
+ #ifdef LLAMA_SUPPORTS_GPU_OFFLOAD
513
+ fprintf(stderr, " -ngl N, --n-gpu-layers N\n");
514
+ fprintf(stderr, " number of layers to store in VRAM\n");
515
+ fprintf(stderr, " -ts SPLIT --tensor-split SPLIT\n");
516
+ fprintf(stderr, " how to split tensors across multiple GPUs, comma-separated list of proportions, e.g. 3,1\n");
517
+ fprintf(stderr, " -mg i, --main-gpu i the GPU to use for scratch and small tensors\n" );
518
+ fprintf(stderr, " -lv, --low-vram don't allocate VRAM scratch buffer\n" );
519
+ #endif
520
+ fprintf(stderr, " --mtest compute maximum memory usage\n");
521
+ fprintf(stderr, " --export export the computation graph to 'llama.ggml'\n");
522
+ fprintf(stderr, " --verbose-prompt print prompt before generation\n");
523
+ fprintf(stderr, " --lora FNAME apply LoRA adapter\n");
524
+ fprintf(stderr, " --lora-base FNAME optional model to use as a base for the layers modified by the LoRA adapter\n");
525
+ fprintf(stderr, " -m FNAME, --model FNAME\n");
526
+ fprintf(stderr, " model path (default: %s)\n", params.model.c_str());
527
+ fprintf(stderr, "\n");
528
+ }
529
+
530
+ std::string gpt_random_prompt(std::mt19937 & rng) {
531
+ const int r = rng() % 10;
532
+ switch (r) {
533
+ case 0: return "So";
534
+ case 1: return "Once upon a time";
535
+ case 2: return "When";
536
+ case 3: return "The";
537
+ case 4: return "After";
538
+ case 5: return "If";
539
+ case 6: return "import";
540
+ case 7: return "He";
541
+ case 8: return "She";
542
+ case 9: return "They";
543
+ default: return "To";
544
+ }
545
+
546
+ return "The";
547
+ }
548
+
549
+ // TODO: not great allocating this every time
550
+ std::vector<llama_token> llama_tokenize(struct llama_context * ctx, const std::string & text, bool add_bos) {
551
+ // initialize to prompt number of chars, since n_tokens <= n_prompt_chars
552
+ std::vector<llama_token> res(text.size() + (int) add_bos);
553
+ const int n = llama_tokenize(ctx, text.c_str(), res.data(), res.size(), add_bos);
554
+ assert(n >= 0);
555
+ res.resize(n);
556
+
557
+ return res;
558
+ }
559
+
560
+ struct llama_context_params llama_context_params_from_gpt_params(const gpt_params & params) {
561
+ auto lparams = llama_context_default_params();
562
+
563
+ lparams.n_ctx = params.n_ctx;
564
+ lparams.n_batch = params.n_batch;
565
+ lparams.n_gpu_layers = params.n_gpu_layers;
566
+ lparams.main_gpu = params.main_gpu;
567
+ memcpy(lparams.tensor_split, params.tensor_split, LLAMA_MAX_DEVICES*sizeof(float));
568
+ lparams.low_vram = params.low_vram;
569
+ lparams.seed = params.seed;
570
+ lparams.f16_kv = params.memory_f16;
571
+ lparams.use_mmap = params.use_mmap;
572
+ lparams.use_mlock = params.use_mlock;
573
+ lparams.logits_all = params.perplexity;
574
+ lparams.embedding = params.embedding;
575
+
576
+ return lparams;
577
+ }
578
+
579
+ std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_params(const gpt_params & params) {
580
+ auto lparams = llama_context_params_from_gpt_params(params);
581
+
582
+ llama_model * model = llama_load_model_from_file(params.model.c_str(), lparams);
583
+ if (model == NULL) {
584
+ fprintf(stderr, "%s: error: failed to load model '%s'\n", __func__, params.model.c_str());
585
+ return std::make_tuple(nullptr, nullptr);
586
+ }
587
+
588
+ llama_context * lctx = llama_new_context_with_model(model, lparams);
589
+ if (lctx == NULL) {
590
+ fprintf(stderr, "%s: error: failed to create context with model '%s'\n", __func__, params.model.c_str());
591
+ llama_free_model(model);
592
+ return std::make_tuple(nullptr, nullptr);
593
+ }
594
+
595
+ if (!params.lora_adapter.empty()) {
596
+ int err = llama_model_apply_lora_from_file(model,
597
+ params.lora_adapter.c_str(),
598
+ params.lora_base.empty() ? NULL : params.lora_base.c_str(),
599
+ params.n_threads);
600
+ if (err != 0) {
601
+ fprintf(stderr, "%s: error: failed to apply lora adapter\n", __func__);
602
+ llama_free(lctx);
603
+ llama_free_model(model);
604
+ return std::make_tuple(nullptr, nullptr);
605
+ }
606
+ }
607
+
608
+ return std::make_tuple(model, lctx);
609
+ }
610
+
611
+ void console_init(console_state & con_st) {
612
+ #if defined(_WIN32)
613
+ // Windows-specific console initialization
614
+ DWORD dwMode = 0;
615
+ con_st.hConsole = GetStdHandle(STD_OUTPUT_HANDLE);
616
+ if (con_st.hConsole == INVALID_HANDLE_VALUE || !GetConsoleMode(con_st.hConsole, &dwMode)) {
617
+ con_st.hConsole = GetStdHandle(STD_ERROR_HANDLE);
618
+ if (con_st.hConsole != INVALID_HANDLE_VALUE && (!GetConsoleMode(con_st.hConsole, &dwMode))) {
619
+ con_st.hConsole = NULL;
620
+ }
621
+ }
622
+ if (con_st.hConsole) {
623
+ // Enable ANSI colors on Windows 10+
624
+ if (con_st.use_color && !(dwMode & ENABLE_VIRTUAL_TERMINAL_PROCESSING)) {
625
+ SetConsoleMode(con_st.hConsole, dwMode | ENABLE_VIRTUAL_TERMINAL_PROCESSING);
626
+ }
627
+ // Set console output codepage to UTF8
628
+ SetConsoleOutputCP(CP_UTF8);
629
+ }
630
+ HANDLE hConIn = GetStdHandle(STD_INPUT_HANDLE);
631
+ if (hConIn != INVALID_HANDLE_VALUE && GetConsoleMode(hConIn, &dwMode)) {
632
+ // Set console input codepage to UTF16
633
+ _setmode(_fileno(stdin), _O_WTEXT);
634
+
635
+ // Turn off ICANON (ENABLE_LINE_INPUT) and ECHO (ENABLE_ECHO_INPUT)
636
+ dwMode &= ~(ENABLE_LINE_INPUT | ENABLE_ECHO_INPUT);
637
+ SetConsoleMode(hConIn, dwMode);
638
+ }
639
+ #else
640
+ // POSIX-specific console initialization
641
+ struct termios new_termios;
642
+ tcgetattr(STDIN_FILENO, &con_st.prev_state);
643
+ new_termios = con_st.prev_state;
644
+ new_termios.c_lflag &= ~(ICANON | ECHO);
645
+ new_termios.c_cc[VMIN] = 1;
646
+ new_termios.c_cc[VTIME] = 0;
647
+ tcsetattr(STDIN_FILENO, TCSANOW, &new_termios);
648
+
649
+ con_st.tty = fopen("/dev/tty", "w+");
650
+ if (con_st.tty != nullptr) {
651
+ con_st.out = con_st.tty;
652
+ }
653
+
654
+ setlocale(LC_ALL, "");
655
+ #endif
656
+ }
657
+
658
+ void console_cleanup(console_state & con_st) {
659
+ // Reset console color
660
+ console_set_color(con_st, CONSOLE_COLOR_DEFAULT);
661
+
662
+ #if !defined(_WIN32)
663
+ if (con_st.tty != nullptr) {
664
+ con_st.out = stdout;
665
+ fclose(con_st.tty);
666
+ con_st.tty = nullptr;
667
+ }
668
+ // Restore the terminal settings on POSIX systems
669
+ tcsetattr(STDIN_FILENO, TCSANOW, &con_st.prev_state);
670
+ #endif
671
+ }
672
+
673
+ /* Keep track of current color of output, and emit ANSI code if it changes. */
674
+ void console_set_color(console_state & con_st, console_color_t color) {
675
+ if (con_st.use_color && con_st.color != color) {
676
+ fflush(stdout);
677
+ switch(color) {
678
+ case CONSOLE_COLOR_DEFAULT:
679
+ fprintf(con_st.out, ANSI_COLOR_RESET);
680
+ break;
681
+ case CONSOLE_COLOR_PROMPT:
682
+ fprintf(con_st.out, ANSI_COLOR_YELLOW);
683
+ break;
684
+ case CONSOLE_COLOR_USER_INPUT:
685
+ fprintf(con_st.out, ANSI_BOLD ANSI_COLOR_GREEN);
686
+ break;
687
+ case CONSOLE_COLOR_ERROR:
688
+ fprintf(con_st.out, ANSI_BOLD ANSI_COLOR_RED);
689
+ break;
690
+ }
691
+ con_st.color = color;
692
+ fflush(con_st.out);
693
+ }
694
+ }
695
+
696
+ char32_t getchar32() {
697
+ #if defined(_WIN32)
698
+ HANDLE hConsole = GetStdHandle(STD_INPUT_HANDLE);
699
+ wchar_t high_surrogate = 0;
700
+
701
+ while (true) {
702
+ INPUT_RECORD record;
703
+ DWORD count;
704
+ if (!ReadConsoleInputW(hConsole, &record, 1, &count) || count == 0) {
705
+ return WEOF;
706
+ }
707
+
708
+ if (record.EventType == KEY_EVENT && record.Event.KeyEvent.bKeyDown) {
709
+ wchar_t wc = record.Event.KeyEvent.uChar.UnicodeChar;
710
+ if (wc == 0) {
711
+ continue;
712
+ }
713
+
714
+ if ((wc >= 0xD800) && (wc <= 0xDBFF)) { // Check if wc is a high surrogate
715
+ high_surrogate = wc;
716
+ continue;
717
+ } else if ((wc >= 0xDC00) && (wc <= 0xDFFF)) { // Check if wc is a low surrogate
718
+ if (high_surrogate != 0) { // Check if we have a high surrogate
719
+ return ((high_surrogate - 0xD800) << 10) + (wc - 0xDC00) + 0x10000;
720
+ }
721
+ }
722
+
723
+ high_surrogate = 0; // Reset the high surrogate
724
+ return static_cast<char32_t>(wc);
725
+ }
726
+ }
727
+ #else
728
+ wchar_t wc = getwchar();
729
+ if (static_cast<wint_t>(wc) == WEOF) {
730
+ return WEOF;
731
+ }
732
+
733
+ #if WCHAR_MAX == 0xFFFF
734
+ if ((wc >= 0xD800) && (wc <= 0xDBFF)) { // Check if wc is a high surrogate
735
+ wchar_t low_surrogate = getwchar();
736
+ if ((low_surrogate >= 0xDC00) && (low_surrogate <= 0xDFFF)) { // Check if the next wchar is a low surrogate
737
+ return (static_cast<char32_t>(wc & 0x03FF) << 10) + (low_surrogate & 0x03FF) + 0x10000;
738
+ }
739
+ }
740
+ if ((wc >= 0xD800) && (wc <= 0xDFFF)) { // Invalid surrogate pair
741
+ return 0xFFFD; // Return the replacement character U+FFFD
742
+ }
743
+ #endif
744
+
745
+ return static_cast<char32_t>(wc);
746
+ #endif
747
+ }
748
+
749
+ void pop_cursor(console_state & con_st) {
750
+ #if defined(_WIN32)
751
+ if (con_st.hConsole != NULL) {
752
+ CONSOLE_SCREEN_BUFFER_INFO bufferInfo;
753
+ GetConsoleScreenBufferInfo(con_st.hConsole, &bufferInfo);
754
+
755
+ COORD newCursorPosition = bufferInfo.dwCursorPosition;
756
+ if (newCursorPosition.X == 0) {
757
+ newCursorPosition.X = bufferInfo.dwSize.X - 1;
758
+ newCursorPosition.Y -= 1;
759
+ } else {
760
+ newCursorPosition.X -= 1;
761
+ }
762
+
763
+ SetConsoleCursorPosition(con_st.hConsole, newCursorPosition);
764
+ return;
765
+ }
766
+ #endif
767
+ putc('\b', con_st.out);
768
+ }
769
+
770
+ int estimateWidth(char32_t codepoint) {
771
+ #if defined(_WIN32)
772
+ return 1;
773
+ #else
774
+ return wcwidth(codepoint);
775
+ #endif
776
+ }
777
+
778
+ int put_codepoint(console_state & con_st, const char* utf8_codepoint, size_t length, int expectedWidth) {
779
+ #if defined(_WIN32)
780
+ CONSOLE_SCREEN_BUFFER_INFO bufferInfo;
781
+ if (!GetConsoleScreenBufferInfo(con_st.hConsole, &bufferInfo)) {
782
+ // go with the default
783
+ return expectedWidth;
784
+ }
785
+ COORD initialPosition = bufferInfo.dwCursorPosition;
786
+ DWORD nNumberOfChars = length;
787
+ WriteConsole(con_st.hConsole, utf8_codepoint, nNumberOfChars, &nNumberOfChars, NULL);
788
+
789
+ CONSOLE_SCREEN_BUFFER_INFO newBufferInfo;
790
+ GetConsoleScreenBufferInfo(con_st.hConsole, &newBufferInfo);
791
+
792
+ // Figure out our real position if we're in the last column
793
+ if (utf8_codepoint[0] != 0x09 && initialPosition.X == newBufferInfo.dwSize.X - 1) {
794
+ DWORD nNumberOfChars;
795
+ WriteConsole(con_st.hConsole, &" \b", 2, &nNumberOfChars, NULL);
796
+ GetConsoleScreenBufferInfo(con_st.hConsole, &newBufferInfo);
797
+ }
798
+
799
+ int width = newBufferInfo.dwCursorPosition.X - initialPosition.X;
800
+ if (width < 0) {
801
+ width += newBufferInfo.dwSize.X;
802
+ }
803
+ return width;
804
+ #else
805
+ // we can trust expectedWidth if we've got one
806
+ if (expectedWidth >= 0 || con_st.tty == nullptr) {
807
+ fwrite(utf8_codepoint, length, 1, con_st.out);
808
+ return expectedWidth;
809
+ }
810
+
811
+ fputs("\033[6n", con_st.tty); // Query cursor position
812
+ int x1, x2, y1, y2;
813
+ int results = 0;
814
+ results = fscanf(con_st.tty, "\033[%d;%dR", &y1, &x1);
815
+
816
+ fwrite(utf8_codepoint, length, 1, con_st.tty);
817
+
818
+ fputs("\033[6n", con_st.tty); // Query cursor position
819
+ results += fscanf(con_st.tty, "\033[%d;%dR", &y2, &x2);
820
+
821
+ if (results != 4) {
822
+ return expectedWidth;
823
+ }
824
+
825
+ int width = x2 - x1;
826
+ if (width < 0) {
827
+ // Calculate the width considering text wrapping
828
+ struct winsize w;
829
+ ioctl(STDOUT_FILENO, TIOCGWINSZ, &w);
830
+ width += w.ws_col;
831
+ }
832
+ return width;
833
+ #endif
834
+ }
835
+
836
+ void replace_last(console_state & con_st, char ch) {
837
+ #if defined(_WIN32)
838
+ pop_cursor(con_st);
839
+ put_codepoint(con_st, &ch, 1, 1);
840
+ #else
841
+ fprintf(con_st.out, "\b%c", ch);
842
+ #endif
843
+ }
844
+
845
+ void append_utf8(char32_t ch, std::string & out) {
846
+ if (ch <= 0x7F) {
847
+ out.push_back(static_cast<unsigned char>(ch));
848
+ } else if (ch <= 0x7FF) {
849
+ out.push_back(static_cast<unsigned char>(0xC0 | ((ch >> 6) & 0x1F)));
850
+ out.push_back(static_cast<unsigned char>(0x80 | (ch & 0x3F)));
851
+ } else if (ch <= 0xFFFF) {
852
+ out.push_back(static_cast<unsigned char>(0xE0 | ((ch >> 12) & 0x0F)));
853
+ out.push_back(static_cast<unsigned char>(0x80 | ((ch >> 6) & 0x3F)));
854
+ out.push_back(static_cast<unsigned char>(0x80 | (ch & 0x3F)));
855
+ } else if (ch <= 0x10FFFF) {
856
+ out.push_back(static_cast<unsigned char>(0xF0 | ((ch >> 18) & 0x07)));
857
+ out.push_back(static_cast<unsigned char>(0x80 | ((ch >> 12) & 0x3F)));
858
+ out.push_back(static_cast<unsigned char>(0x80 | ((ch >> 6) & 0x3F)));
859
+ out.push_back(static_cast<unsigned char>(0x80 | (ch & 0x3F)));
860
+ } else {
861
+ // Invalid Unicode code point
862
+ }
863
+ }
864
+
865
+ // Helper function to remove the last UTF-8 character from a string
866
+ void pop_back_utf8_char(std::string & line) {
867
+ if (line.empty()) {
868
+ return;
869
+ }
870
+
871
+ size_t pos = line.length() - 1;
872
+
873
+ // Find the start of the last UTF-8 character (checking up to 4 bytes back)
874
+ for (size_t i = 0; i < 3 && pos > 0; ++i, --pos) {
875
+ if ((line[pos] & 0xC0) != 0x80) break; // Found the start of the character
876
+ }
877
+ line.erase(pos);
878
+ }
879
+
880
+ bool console_readline(console_state & con_st, std::string & line) {
881
+ console_set_color(con_st, CONSOLE_COLOR_USER_INPUT);
882
+ if (con_st.out != stdout) {
883
+ fflush(stdout);
884
+ }
885
+
886
+ line.clear();
887
+ std::vector<int> widths;
888
+ bool is_special_char = false;
889
+ bool end_of_stream = false;
890
+
891
+ char32_t input_char;
892
+ while (true) {
893
+ fflush(con_st.out); // Ensure all output is displayed before waiting for input
894
+ input_char = getchar32();
895
+
896
+ if (input_char == '\r' || input_char == '\n') {
897
+ break;
898
+ }
899
+
900
+ if (input_char == (char32_t) WEOF || input_char == 0x04 /* Ctrl+D*/) {
901
+ end_of_stream = true;
902
+ break;
903
+ }
904
+
905
+ if (is_special_char) {
906
+ console_set_color(con_st, CONSOLE_COLOR_USER_INPUT);
907
+ replace_last(con_st, line.back());
908
+ is_special_char = false;
909
+ }
910
+
911
+ if (input_char == '\033') { // Escape sequence
912
+ char32_t code = getchar32();
913
+ if (code == '[' || code == 0x1B) {
914
+ // Discard the rest of the escape sequence
915
+ while ((code = getchar32()) != (char32_t) WEOF) {
916
+ if ((code >= 'A' && code <= 'Z') || (code >= 'a' && code <= 'z') || code == '~') {
917
+ break;
918
+ }
919
+ }
920
+ }
921
+ } else if (input_char == 0x08 || input_char == 0x7F) { // Backspace
922
+ if (!widths.empty()) {
923
+ int count;
924
+ do {
925
+ count = widths.back();
926
+ widths.pop_back();
927
+ // Move cursor back, print space, and move cursor back again
928
+ for (int i = 0; i < count; i++) {
929
+ replace_last(con_st, ' ');
930
+ pop_cursor(con_st);
931
+ }
932
+ pop_back_utf8_char(line);
933
+ } while (count == 0 && !widths.empty());
934
+ }
935
+ } else {
936
+ int offset = line.length();
937
+ append_utf8(input_char, line);
938
+ int width = put_codepoint(con_st, line.c_str() + offset, line.length() - offset, estimateWidth(input_char));
939
+ if (width < 0) {
940
+ width = 0;
941
+ }
942
+ widths.push_back(width);
943
+ }
944
+
945
+ if (!line.empty() && (line.back() == '\\' || line.back() == '/')) {
946
+ console_set_color(con_st, CONSOLE_COLOR_PROMPT);
947
+ replace_last(con_st, line.back());
948
+ is_special_char = true;
949
+ }
950
+ }
951
+
952
+ bool has_more = con_st.multiline_input;
953
+ if (is_special_char) {
954
+ replace_last(con_st, ' ');
955
+ pop_cursor(con_st);
956
+
957
+ char last = line.back();
958
+ line.pop_back();
959
+ if (last == '\\') {
960
+ line += '\n';
961
+ fputc('\n', con_st.out);
962
+ has_more = !has_more;
963
+ } else {
964
+ // llama will just eat the single space, it won't act as a space
965
+ if (line.length() == 1 && line.back() == ' ') {
966
+ line.clear();
967
+ pop_cursor(con_st);
968
+ }
969
+ has_more = false;
970
+ }
971
+ } else {
972
+ if (end_of_stream) {
973
+ has_more = false;
974
+ } else {
975
+ line += '\n';
976
+ fputc('\n', con_st.out);
977
+ }
978
+ }
979
+
980
+ fflush(con_st.out);
981
+ return has_more;
982
+ }
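For reference, the `\033[6n` sequence used by `put_codepoint()` above is the ANSI cursor-position query: the terminal replies on the input stream with `ESC [ row ; col R`, which is what the `fscanf` calls parse. A minimal standalone sketch of that round trip (it assumes the terminal is already in non-canonical, no-echo mode, as `console_init()` arranges):

```
#include <cstdio>

int main() {
    int row = 0, col = 0;
    // Ask the terminal where the cursor is; the reply arrives on stdin
    // as "\033[<row>;<col>R" (only readable in raw, no-echo terminal mode).
    fputs("\033[6n", stdout);
    fflush(stdout);
    if (scanf("\033[%d;%dR", &row, &col) == 2) {
        printf("\ncursor at row %d, col %d\n", row, col);
    }
    return 0;
}
```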
examples/common.h ADDED
@@ -0,0 +1,147 @@
1
+ // Various helper functions and utilities
2
+
3
+ #pragma once
4
+
5
+ #include "llama.h"
6
+
7
+ #include <string>
8
+ #include <vector>
9
+ #include <random>
10
+ #include <thread>
11
+ #include <unordered_map>
12
+ #include <tuple>
13
+
14
+ #if !defined (_WIN32)
15
+ #include <stdio.h>
16
+ #include <termios.h>
17
+ #endif
18
+
19
+ //
20
+ // CLI argument parsing
21
+ //
22
+ int32_t get_num_physical_cores();
23
+
24
+ struct gpt_params {
25
+ uint32_t seed = -1; // RNG seed
26
+ int32_t n_threads = get_num_physical_cores();
27
+ int32_t n_predict = -1; // new tokens to predict
28
+ int32_t n_ctx = 512; // context size
29
+ int32_t n_batch = 512; // batch size for prompt processing (must be >=32 to use BLAS)
30
+ int32_t n_keep = 0; // number of tokens to keep from initial prompt
31
+ int32_t n_gpu_layers = 0; // number of layers to store in VRAM
32
+ int32_t main_gpu = 0; // the GPU that is used for scratch and small tensors
33
+ float tensor_split[LLAMA_MAX_DEVICES] = {0}; // how split tensors should be distributed across GPUs
34
+ int32_t n_probs = 0; // if greater than 0, output the probabilities of top n_probs tokens.
35
+
36
+ // sampling parameters
37
+ std::unordered_map<llama_token, float> logit_bias; // logit bias for specific tokens
38
+ int32_t top_k = 40; // <= 0 to use vocab size
39
+ float top_p = 0.95f; // 1.0 = disabled
40
+ float tfs_z = 1.00f; // 1.0 = disabled
41
+ float typical_p = 1.00f; // 1.0 = disabled
42
+ float temp = 0.80f; // 1.0 = disabled
43
+ float repeat_penalty = 1.10f; // 1.0 = disabled
44
+ int32_t repeat_last_n = 64; // last n tokens to penalize (0 = disable penalty, -1 = context size)
45
+ float frequency_penalty = 0.00f; // 0.0 = disabled
46
+ float presence_penalty = 0.00f; // 0.0 = disabled
47
+ int mirostat = 0; // 0 = disabled, 1 = mirostat, 2 = mirostat 2.0
48
+ float mirostat_tau = 5.00f; // target entropy
49
+ float mirostat_eta = 0.10f; // learning rate
50
+
51
+ // Classifier-Free Guidance
52
+ // https://arxiv.org/abs/2306.17806
53
+ std::string cfg_negative_prompt; // string to help guidance
54
+ float cfg_scale = 1.f; // How strong is guidance
55
+ float cfg_smooth_factor = 1.f; // Smooth factor between old and new logits
56
+
57
+ std::string model = "models/7B/ggml-model.bin"; // model path
58
+ std::string model_alias = "unknown"; // model alias
59
+ std::string prompt = "";
60
+ std::string path_prompt_cache = ""; // path to file for saving/loading prompt eval state
61
+ std::string input_prefix = ""; // string to prefix user inputs with
62
+ std::string input_suffix = ""; // string to suffix user inputs with
63
+ std::vector<std::string> antiprompt; // string upon seeing which more user input is prompted
64
+
65
+ std::string lora_adapter = ""; // lora adapter path
66
+ std::string lora_base = ""; // base model path for the lora adapter
67
+
68
+ bool low_vram = false; // if true, reduce VRAM usage at the cost of performance
69
+ bool memory_f16 = true; // use f16 instead of f32 for memory kv
70
+ bool random_prompt = false; // do not randomize prompt if none provided
71
+ bool use_color = false; // use color to distinguish generations and inputs
72
+ bool interactive = false; // interactive mode
73
+ bool prompt_cache_all = false; // save user input and generations to prompt cache
74
+ bool prompt_cache_ro = false; // open the prompt cache read-only and do not update it
75
+
76
+ bool embedding = false; // get only sentence embedding
77
+ bool interactive_first = false; // wait for user input immediately
78
+ bool multiline_input = false; // reverse the usage of `\`
79
+
80
+ bool instruct = false; // instruction mode (used for Alpaca models)
81
+ bool penalize_nl = true; // consider newlines as a repeatable token
82
+ bool perplexity = false; // compute perplexity over the prompt
83
+ bool use_mmap = true; // use mmap for faster loads
84
+ bool use_mlock = false; // use mlock to keep model in memory
85
+ bool mem_test = false; // compute maximum memory usage
86
+ bool numa = false; // attempt optimizations that help on some NUMA systems
87
+ bool export_cgraph = false; // export the computation graph
88
+ bool verbose_prompt = false; // print prompt tokens before generation
89
+ };
90
+
91
+ bool gpt_params_parse(int argc, char ** argv, gpt_params & params);
92
+
93
+ void gpt_print_usage(int argc, char ** argv, const gpt_params & params);
94
+
95
+ std::string gpt_random_prompt(std::mt19937 & rng);
96
+
97
+ //
98
+ // Vocab utils
99
+ //
100
+
101
+ std::vector<llama_token> llama_tokenize(struct llama_context * ctx, const std::string & text, bool add_bos);
102
+
103
+ //
104
+ // Model utils
105
+ //
106
+
107
+ std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_params(const gpt_params & params);
108
+ struct llama_context_params llama_context_params_from_gpt_params(const gpt_params & params);
109
+
110
+ //
111
+ // Console utils
112
+ //
113
+
114
+ #define ANSI_COLOR_RED "\x1b[31m"
115
+ #define ANSI_COLOR_GREEN "\x1b[32m"
116
+ #define ANSI_COLOR_YELLOW "\x1b[33m"
117
+ #define ANSI_COLOR_BLUE "\x1b[34m"
118
+ #define ANSI_COLOR_MAGENTA "\x1b[35m"
119
+ #define ANSI_COLOR_CYAN "\x1b[36m"
120
+ #define ANSI_COLOR_RESET "\x1b[0m"
121
+ #define ANSI_BOLD "\x1b[1m"
122
+
123
+ enum console_color_t {
124
+ CONSOLE_COLOR_DEFAULT=0,
125
+ CONSOLE_COLOR_PROMPT,
126
+ CONSOLE_COLOR_USER_INPUT,
127
+ CONSOLE_COLOR_ERROR
128
+ };
129
+
130
+ struct console_state {
131
+ bool multiline_input = false;
132
+ bool use_color = false;
133
+ console_color_t color = CONSOLE_COLOR_DEFAULT;
134
+
135
+ FILE* out = stdout;
136
+ #if defined (_WIN32)
137
+ void* hConsole;
138
+ #else
139
+ FILE* tty = nullptr;
140
+ termios prev_state;
141
+ #endif
142
+ };
143
+
144
+ void console_init(console_state & con_st);
145
+ void console_cleanup(console_state & con_st);
146
+ void console_set_color(console_state & con_st, console_color_t color);
147
+ bool console_readline(console_state & con_st, std::string & line);
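As a usage sketch (not part of the diff), the console helpers declared above are typically combined along these lines; the option values here are illustrative only:

```
#include "common.h"
#include <cstdio>
#include <string>

int main() {
    console_state con_st;
    con_st.use_color       = true;   // illustrative: enable ANSI colors
    con_st.multiline_input = false;  // a trailing '\' still requests another line
    console_init(con_st);

    // console_readline() returns true while the user is continuing the same
    // logical input (multiline mode or a trailing '\'), so callers accumulate
    // lines until it returns false.
    std::string buffer, line;
    bool another_line = true;
    do {
        another_line = console_readline(con_st, line);
        buffer += line;
    } while (another_line);

    console_set_color(con_st, CONSOLE_COLOR_DEFAULT);
    printf("got: %s", buffer.c_str());

    console_cleanup(con_st);
    return 0;
}
```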
examples/embd-input/.gitignore ADDED
@@ -0,0 +1,4 @@
1
+ PandaGPT
2
+ MiniGPT-4
3
+ *.pth
4
+
examples/embd-input/CMakeLists.txt ADDED
@@ -0,0 +1,15 @@
1
+ set(TARGET embdinput)
2
+ add_library(${TARGET} embd-input-lib.cpp embd-input.h)
3
+ target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
4
+ target_compile_features(${TARGET} PRIVATE cxx_std_11)
5
+ if(TARGET BUILD_INFO)
6
+ add_dependencies(${TARGET} BUILD_INFO)
7
+ endif()
8
+
9
+ set(TARGET embd-input-test)
10
+ add_executable(${TARGET} embd-input-test.cpp)
11
+ target_link_libraries(${TARGET} PRIVATE common llama embdinput ${CMAKE_THREAD_LIBS_INIT})
12
+ target_compile_features(${TARGET} PRIVATE cxx_std_11)
13
+ if(TARGET BUILD_INFO)
14
+ add_dependencies(${TARGET} BUILD_INFO)
15
+ endif()
examples/embd-input/README.md ADDED
@@ -0,0 +1,63 @@
1
+ ### Examples for providing input embeddings directly
2
+
3
+ ## Requirement
4
+ build `libembdinput.so`
5
+ run the following command in the main dir (../../):
6
+ ```
7
+ make
8
+ ```
9
+
10
+ ## [LLaVA](https://github.com/haotian-liu/LLaVA/) example (llava.py)
11
+
12
+ 1. Obtain the LLaVA model (following https://github.com/haotian-liu/LLaVA/, use https://huggingface.co/liuhaotian/LLaVA-13b-delta-v1-1/).
13
+ 2. Convert it to ggml format.
14
+ 3. Extract `llava_projection.pth` from [pytorch_model-00003-of-00003.bin](https://huggingface.co/liuhaotian/LLaVA-13b-delta-v1-1/blob/main/pytorch_model-00003-of-00003.bin):
15
+
16
+ ```
17
+ import torch
18
+
19
+ bin_path = "../LLaVA-13b-delta-v1-1/pytorch_model-00003-of-00003.bin"
20
+ pth_path = "./examples/embd_input/llava_projection.pth"
21
+
22
+ dic = torch.load(bin_path)
23
+ used_key = ["model.mm_projector.weight","model.mm_projector.bias"]
24
+ torch.save({k: dic[k] for k in used_key}, pth_path)
25
+ ```
26
+ 4. Check the paths of the LLaVA model and `llava_projection.pth` in `llava.py`.
27
+
28
+
29
+ ## [PandaGPT](https://github.com/yxuansu/PandaGPT) example (panda_gpt.py)
30
+
31
+ 1. Obtain the PandaGPT LoRA model from https://github.com/yxuansu/PandaGPT. Rename the file to `adapter_model.bin`, then use [convert-lora-to-ggml.py](../../convert-lora-to-ggml.py) to convert it to ggml format.
32
+ The `adapter_config.json` is
33
+ ```
34
+ {
35
+ "peft_type": "LORA",
36
+ "fan_in_fan_out": false,
37
+ "bias": null,
38
+ "modules_to_save": null,
39
+ "r": 32,
40
+ "lora_alpha": 32,
41
+ "lora_dropout": 0.1,
42
+ "target_modules": ["q_proj", "k_proj", "v_proj", "o_proj"]
43
+ }
44
+ ```
45
+ 2. Prepare the `vicuna` v0 model.
46
+ 3. Obtain the [ImageBind](https://dl.fbaipublicfiles.com/imagebind/imagebind_huge.pth) model.
47
+ 4. Clone the PandaGPT source.
48
+ ```
49
+ git clone https://github.com/yxuansu/PandaGPT
50
+ ```
51
+ 5. Install the requirements of PandaGPT.
52
+ 6. Check the paths of the PandaGPT source, ImageBind model, LoRA model, and vicuna model in `panda_gpt.py`.
53
+
54
+ ## [MiniGPT-4](https://github.com/Vision-CAIR/MiniGPT-4/) example (minigpt4.py)
55
+
56
+ 1. Obtain the MiniGPT-4 model from https://github.com/Vision-CAIR/MiniGPT-4/ and put it in `embd-input`.
57
+ 2. Clone the MiniGPT-4 source.
58
+ ```
59
+ git clone https://github.com/Vision-CAIR/MiniGPT-4/
60
+ ```
61
+ 3. Install the requirements of MiniGPT-4.
62
+ 4. Prepare the `vicuna` v0 model.
63
+ 5. Check the paths of the MiniGPT-4 source, MiniGPT-4 model, and vicuna model in `minigpt4.py`.
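Putting the sections above together: each of the scripts below drives the model through `MyModel` from `embd_input.py`. A minimal sketch (the model path, prompt, and random embedding are placeholders, not a working recipe for any of the three models):

```
import numpy as np
from embd_input import MyModel

# placeholder model path; any ggml llama model works for the text parts
model = MyModel(["main", "--model", "./models/ggml-vic13b-q4_1.bin", "-c", "2048"])
model.eval_string("user: describe the following embedding:")
# embeddings are passed as an (n_embd, n_tokens) float array
model.eval_float(np.random.random((5120, 10)).astype(np.float32))
model.eval_string("assistant:")
model.generate_with_print()
```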
examples/embd-input/embd-input-lib.cpp ADDED
@@ -0,0 +1,223 @@
1
+ // Defines sigaction on msys:
2
+ #ifndef _GNU_SOURCE
3
+ #define _GNU_SOURCE
4
+ #endif
5
+
6
+ #include "embd-input.h"
7
+
8
+ #include <cassert>
9
+ #include <cinttypes>
10
+ #include <cmath>
11
+ #include <cstdio>
12
+ #include <cstring>
13
+ #include <ctime>
14
+ #include <fstream>
15
+ #include <iostream>
16
+ #include <string>
17
+ #include <vector>
18
+
19
+ static llama_context ** g_ctx;
20
+
21
+ extern "C" {
22
+
23
+ struct MyModel* create_mymodel(int argc, char ** argv) {
24
+ gpt_params params;
25
+
26
+ if (gpt_params_parse(argc, argv, params) == false) {
27
+ return nullptr;
28
+ }
29
+
30
+ fprintf(stderr, "%s: build = %d (%s)\n", __func__, BUILD_NUMBER, BUILD_COMMIT);
31
+
32
+ if (params.seed == LLAMA_DEFAULT_SEED) {
33
+ params.seed = time(NULL);
34
+ }
35
+ fprintf(stderr, "%s: seed = %d\n", __func__, params.seed);
36
+
37
+ llama_backend_init(params.numa);
38
+
39
+ llama_model * model;
40
+ llama_context * ctx;
41
+
42
+ g_ctx = &ctx;
43
+
44
+ // load the model and apply lora adapter, if any
45
+ std::tie(model, ctx) = llama_init_from_gpt_params(params);
46
+ if (model == NULL) {
47
+ fprintf(stderr, "%s: error: unable to load model\n", __func__);
48
+ return nullptr;
49
+ }
50
+
51
+ // print system information
52
+ {
53
+ fprintf(stderr, "\n");
54
+ fprintf(stderr, "system_info: n_threads = %d / %d | %s\n",
55
+ params.n_threads, std::thread::hardware_concurrency(), llama_print_system_info());
56
+ }
57
+ struct MyModel * ret = new MyModel();
58
+ ret->ctx = ctx;
59
+ ret->params = params;
60
+ ret->n_past = 0;
61
+ // printf("ctx: %d\n", ret->ctx);
62
+ return ret;
63
+ }
64
+
65
+ void free_mymodel(struct MyModel * mymodel) {
66
+ llama_context * ctx = mymodel->ctx;
67
+ llama_print_timings(ctx);
68
+ llama_free(ctx);
69
+ delete mymodel;
70
+ }
71
+
72
+
73
+ bool eval_float(void * model, float * input, int N){
74
+ MyModel * mymodel = (MyModel*)model;
75
+ llama_context * ctx = mymodel->ctx;
76
+ gpt_params params = mymodel->params;
77
+ int n_emb = llama_n_embd(ctx);
78
+ int n_past = mymodel->n_past;
79
+ int n_batch = N; // params.n_batch;
80
+
81
+ for (int i = 0; i < (int) N; i += n_batch) {
82
+ int n_eval = (int) N - i;
83
+ if (n_eval > n_batch) {
84
+ n_eval = n_batch;
85
+ }
86
+ if (llama_eval_embd(ctx, (input+i*n_emb), n_eval, n_past, params.n_threads)) {
87
+ fprintf(stderr, "%s : failed to eval\n", __func__);
88
+ return false;
89
+ }
90
+ n_past += n_eval;
91
+ }
92
+ mymodel->n_past = n_past;
93
+ return true;
94
+ }
95
+
96
+ bool eval_tokens(void * model, std::vector<llama_token> tokens) {
97
+ MyModel * mymodel = (MyModel* )model;
98
+ llama_context * ctx;
99
+ ctx = mymodel->ctx;
100
+ gpt_params params = mymodel->params;
101
+ int n_past = mymodel->n_past;
102
+ for (int i = 0; i < (int) tokens.size(); i += params.n_batch) {
103
+ int n_eval = (int) tokens.size() - i;
104
+ if (n_eval > params.n_batch) {
105
+ n_eval = params.n_batch;
106
+ }
107
+ if (llama_eval(ctx, &tokens[i], n_eval, n_past, params.n_threads)) {
108
+ fprintf(stderr, "%s : failed to eval\n", __func__);
109
+ return false;
110
+ }
111
+ n_past += n_eval;
112
+ }
113
+ mymodel->n_past = n_past;
114
+ return true;
115
+ }
116
+
117
+ bool eval_id(struct MyModel* mymodel, int id) {
118
+ std::vector<llama_token> tokens;
119
+ tokens.push_back(id);
120
+ return eval_tokens(mymodel, tokens);
121
+ }
122
+
123
+ bool eval_string(struct MyModel * mymodel,const char* str){
124
+ llama_context * ctx = mymodel->ctx;
125
+ std::string str2 = str;
126
+ std::vector<llama_token> embd_inp = ::llama_tokenize(ctx, str2, true);
127
+ eval_tokens(mymodel, embd_inp);
128
+ return true;
129
+ }
130
+
131
+ llama_token sampling_id(struct MyModel* mymodel) {
132
+ llama_context* ctx = mymodel->ctx;
133
+ gpt_params params = mymodel->params;
134
+ // int n_ctx = llama_n_ctx(ctx);
135
+
136
+ // out of user input, sample next token
137
+ const float temp = params.temp;
138
+ const int32_t top_k = params.top_k <= 0 ? llama_n_vocab(ctx) : params.top_k;
139
+ const float top_p = params.top_p;
140
+ const float tfs_z = params.tfs_z;
141
+ const float typical_p = params.typical_p;
142
+ // const int32_t repeat_last_n = params.repeat_last_n < 0 ? n_ctx : params.repeat_last_n;
143
+ // const float repeat_penalty = params.repeat_penalty;
144
+ // const float alpha_presence = params.presence_penalty;
145
+ // const float alpha_frequency = params.frequency_penalty;
146
+ const int mirostat = params.mirostat;
147
+ const float mirostat_tau = params.mirostat_tau;
148
+ const float mirostat_eta = params.mirostat_eta;
149
+ // const bool penalize_nl = params.penalize_nl;
150
+
151
+ llama_token id = 0;
152
+ {
153
+ auto logits = llama_get_logits(ctx);
154
+ auto n_vocab = llama_n_vocab(ctx);
155
+
156
+ // Apply params.logit_bias map
157
+ for (auto it = params.logit_bias.begin(); it != params.logit_bias.end(); it++) {
158
+ logits[it->first] += it->second;
159
+ }
160
+
161
+ std::vector<llama_token_data> candidates;
162
+ candidates.reserve(n_vocab);
163
+ for (llama_token token_id = 0; token_id < n_vocab; token_id++) {
164
+ candidates.emplace_back(llama_token_data{token_id, logits[token_id], 0.0f});
165
+ }
166
+
167
+ llama_token_data_array candidates_p = { candidates.data(), candidates.size(), false };
168
+
169
+ // TODO: Apply penalties
170
+ // float nl_logit = logits[llama_token_nl()];
171
+ // auto last_n_repeat = std::min(std::min((int)last_n_tokens.size(), repeat_last_n), n_ctx);
172
+ // llama_sample_repetition_penalty(ctx, &candidates_p,
173
+ // last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
174
+ // last_n_repeat, repeat_penalty);
175
+ // llama_sample_frequency_and_presence_penalties(ctx, &candidates_p,
176
+ // last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
177
+ // last_n_repeat, alpha_frequency, alpha_presence);
178
+ // if (!penalize_nl) {
179
+ // logits[llama_token_nl()] = nl_logit;
180
+ // }
181
+
182
+ if (temp <= 0) {
183
+ // Greedy sampling
184
+ id = llama_sample_token_greedy(ctx, &candidates_p);
185
+ } else {
186
+ if (mirostat == 1) {
187
+ static float mirostat_mu = 2.0f * mirostat_tau;
188
+ const int mirostat_m = 100;
189
+ llama_sample_temperature(ctx, &candidates_p, temp);
190
+ id = llama_sample_token_mirostat(ctx, &candidates_p, mirostat_tau, mirostat_eta, mirostat_m, &mirostat_mu);
191
+ } else if (mirostat == 2) {
192
+ static float mirostat_mu = 2.0f * mirostat_tau;
193
+ llama_sample_temperature(ctx, &candidates_p, temp);
194
+ id = llama_sample_token_mirostat_v2(ctx, &candidates_p, mirostat_tau, mirostat_eta, &mirostat_mu);
195
+ } else {
196
+ // Temperature sampling
197
+ llama_sample_top_k(ctx, &candidates_p, top_k, 1);
198
+ llama_sample_tail_free(ctx, &candidates_p, tfs_z, 1);
199
+ llama_sample_typical(ctx, &candidates_p, typical_p, 1);
200
+ llama_sample_top_p(ctx, &candidates_p, top_p, 1);
201
+ llama_sample_temperature(ctx, &candidates_p, temp);
202
+ id = llama_sample_token(ctx, &candidates_p);
203
+ }
204
+ }
205
+ }
206
+
207
+ return id;
208
+ }
209
+
210
+ const char * sampling(struct MyModel * mymodel) {
211
+ llama_context * ctx = mymodel->ctx;
212
+ int id = sampling_id(mymodel);
213
+ static std::string ret;
214
+ if (id == llama_token_eos()) {
215
+ ret = "</s>";
216
+ } else {
217
+ ret = llama_token_to_str(ctx, id);
218
+ }
219
+ eval_id(mymodel, id);
220
+ return ret.c_str();
221
+ }
222
+
223
+ }
examples/embd-input/embd-input-test.cpp ADDED
@@ -0,0 +1,35 @@
1
+ #include "embd-input.h"
2
+ #include <stdlib.h>
3
+ #include <random>
4
+ #include <string.h>
5
+
6
+ int main(int argc, char** argv) {
7
+
8
+ auto mymodel = create_mymodel(argc, argv);
9
+ int N = 10;
10
+ int max_tgt_len = 500;
11
+ int n_embd = llama_n_embd(mymodel->ctx);
12
+
13
+ // add random float embd to test evaluation
14
+ float * data = new float[N*n_embd];
15
+ std::default_random_engine e;
16
+ std::uniform_real_distribution<float> u(0,1);
17
+ for (int i=0;i<N*n_embd;i++) {
18
+ data[i] = u(e);
19
+ }
20
+
21
+ eval_string(mymodel, "user: what is the color of the flag of UN?");
22
+ eval_float(mymodel, data, N);
23
+ eval_string(mymodel, "assistant:");
24
+ eval_string(mymodel, mymodel->params.prompt.c_str());
25
+ const char* tmp;
26
+ for (int i=0; i<max_tgt_len; i++) {
27
+ tmp = sampling(mymodel);
28
+ if (strcmp(tmp, "</s>")==0) break;
29
+ printf("%s", tmp);
30
+ fflush(stdout);
31
+ }
32
+ printf("\n");
33
+ free_mymodel(mymodel);
34
+ return 0;
35
+ }
examples/embd-input/embd-input.h ADDED
@@ -0,0 +1,28 @@
1
+ #ifndef _EMBD_INPUT_H_
2
+ #define _EMBD_INPUT_H_ 1
3
+
4
+ #include "common.h"
5
+ #include "llama.h"
6
+ #include "build-info.h"
7
+
8
+ extern "C" {
9
+
10
+ typedef struct MyModel {
11
+ llama_context* ctx;
12
+ gpt_params params;
13
+ int n_past = 0;
14
+ } MyModel;
15
+
16
+ struct MyModel* create_mymodel(int argc, char ** argv);
17
+
18
+ bool eval_float(void* model, float* input, int N);
19
+ bool eval_tokens(void* model, std::vector<llama_token> tokens);
20
+ bool eval_id(struct MyModel* mymodel, int id);
21
+ bool eval_string(struct MyModel* mymodel, const char* str);
22
+ const char * sampling(struct MyModel* mymodel);
23
+ llama_token sampling_id(struct MyModel* mymodel);
24
+ void free_mymodel(struct MyModel* mymodel);
25
+
26
+ }
27
+
28
+ #endif
examples/embd-input/embd_input.py ADDED
@@ -0,0 +1,71 @@
1
+ import ctypes
2
+ from ctypes import cdll, c_char_p, c_void_p, POINTER, c_float, c_int
3
+ import numpy as np
4
+ import os
5
+
6
+ libc = cdll.LoadLibrary("./libembdinput.so")
7
+ libc.sampling.restype=c_char_p
8
+ libc.create_mymodel.restype=c_void_p
9
+ libc.eval_string.argtypes=[c_void_p, c_char_p]
10
+ libc.sampling.argtypes=[c_void_p]
11
+ libc.eval_float.argtypes=[c_void_p, POINTER(c_float), c_int]
12
+
13
+
14
+ class MyModel:
15
+ def __init__(self, args):
16
+ argc = len(args)
17
+ c_str = [c_char_p(i.encode()) for i in args]
18
+ args_c = (c_char_p * argc)(*c_str)
19
+ self.model = c_void_p(libc.create_mymodel(argc, args_c))
20
+ self.max_tgt_len = 512
21
+ self.print_string_eval = True
22
+
23
+ def __del__(self):
24
+ libc.free_mymodel(self.model)
25
+
26
+ def eval_float(self, x):
27
+ libc.eval_float(self.model, x.astype(np.float32).ctypes.data_as(POINTER(c_float)), x.shape[1])
28
+
29
+ def eval_string(self, x):
30
+ libc.eval_string(self.model, x.encode()) # c_char_p(x.encode()))
31
+ if self.print_string_eval:
32
+ print(x)
33
+
34
+ def eval_token(self, x):
35
+ libc.eval_id(self.model, x)
36
+
37
+ def sampling(self):
38
+ s = libc.sampling(self.model)
39
+ return s
40
+
41
+ def stream_generate(self, end="</s>"):
42
+ ret = b""
43
+ end = end.encode()
44
+ for _ in range(self.max_tgt_len):
45
+ tmp = self.sampling()
46
+ ret += tmp
47
+ yield tmp
48
+ if ret.endswith(end):
49
+ break
50
+
51
+ def generate_with_print(self, end="</s>"):
52
+ ret = b""
53
+ for i in self.stream_generate(end=end):
54
+ ret += i
55
+ print(i.decode(errors="replace"), end="", flush=True)
56
+ print("")
57
+ return ret.decode(errors="replace")
58
+
59
+
60
+ def generate(self, end="</s>"):
61
+ text = b"".join(self.stream_generate(end=end))
62
+ return text.decode(errors="replace")
63
+
64
+ if __name__ == "__main__":
65
+ model = MyModel(["main", "--model", "../llama.cpp/models/ggml-vic13b-q4_1.bin", "-c", "2048"])
66
+ model.eval_string("""user: what is the color of the flag of UN?""")
67
+ x = np.random.random((5120,10))# , dtype=np.float32)
68
+ model.eval_float(x)
69
+ model.eval_string("""assistant:""")
70
+ for i in model.generate():
71
+ print(i.decode(errors="replace"), end="", flush=True)
examples/embd-input/llava.py ADDED
@@ -0,0 +1,70 @@
1
+ import sys
2
+ import os
3
+ sys.path.insert(0, os.path.dirname(__file__))
4
+ from embd_input import MyModel
5
+ import numpy as np
6
+ from torch import nn
7
+ import torch
8
+ from transformers import CLIPVisionModel, CLIPImageProcessor
9
+ from PIL import Image
10
+
11
+ # model parameters from 'liuhaotian/LLaVA-13b-delta-v1-1'
12
+ vision_tower = "openai/clip-vit-large-patch14"
13
+ select_hidden_state_layer = -2
14
+ # (vision_config.image_size // vision_config.patch_size) ** 2
15
+ image_token_len = (224//14)**2
16
+
17
+ class Llava:
18
+ def __init__(self, args):
19
+ self.image_processor = CLIPImageProcessor.from_pretrained(vision_tower)
20
+ self.vision_tower = CLIPVisionModel.from_pretrained(vision_tower)
21
+ self.mm_projector = nn.Linear(1024, 5120)
22
+ self.model = MyModel(["main", *args])
23
+
24
+ def load_projection(self, path):
25
+ state = torch.load(path)
26
+ self.mm_projector.load_state_dict({
27
+ "weight": state["model.mm_projector.weight"],
28
+ "bias": state["model.mm_projector.bias"]})
29
+
30
+ def chat(self, question):
31
+ self.model.eval_string("user: ")
32
+ self.model.eval_string(question)
33
+ self.model.eval_string("\nassistant: ")
34
+ return self.model.generate_with_print()
35
+
36
+ def chat_with_image(self, image, question):
37
+ with torch.no_grad():
38
+ embd_image = self.image_processor.preprocess(image, return_tensors='pt')['pixel_values'][0]
39
+ image_forward_out = self.vision_tower(embd_image.unsqueeze(0), output_hidden_states=True)
40
+ select_hidden_state = image_forward_out.hidden_states[select_hidden_state_layer]
41
+ image_feature = select_hidden_state[:, 1:]
42
+ embd_image = self.mm_projector(image_feature)
43
+ embd_image = embd_image.cpu().numpy()[0]
44
+ self.model.eval_string("user: ")
45
+ self.model.eval_token(32003-2) # im_start
46
+ self.model.eval_float(embd_image.T)
47
+ for i in range(image_token_len-embd_image.shape[0]):
48
+ self.model.eval_token(32003-3) # im_patch
49
+ self.model.eval_token(32003-1) # im_end
50
+ self.model.eval_string(question)
51
+ self.model.eval_string("\nassistant: ")
52
+ return self.model.generate_with_print()
53
+
54
+
55
+ if __name__=="__main__":
56
+ # model from liuhaotian/LLaVA-13b-delta-v1-1
57
+ a = Llava(["--model", "./models/ggml-llava-13b-v1.1.bin", "-c", "2048"])
58
+ # Extract from https://huggingface.co/liuhaotian/LLaVA-13b-delta-v1-1/blob/main/pytorch_model-00003-of-00003.bin.
59
+ # Alternatively, pytorch_model-00003-of-00003.bin can be used here directly.
60
+ a.load_projection(os.path.join(
61
+ os.path.dirname(__file__) ,
62
+ "llava_projection.pth"))
63
+ respose = a.chat_with_image(
64
+ Image.open("./media/llama1-logo.png").convert('RGB'),
65
+ "what is the text in the picture?")
66
+ respose
67
+ a.chat("what is the color of it?")
68
+
69
+
70
+
examples/embd-input/minigpt4.py ADDED
@@ -0,0 +1,128 @@
1
+ import sys
2
+ import os
3
+ sys.path.insert(0, os.path.dirname(__file__))
4
+ from embd_input import MyModel
5
+ import numpy as np
6
+ from torch import nn
7
+ import torch
8
+ from PIL import Image
9
+
10
+ minigpt4_path = os.path.join(os.path.dirname(__file__), "MiniGPT-4")
11
+ sys.path.insert(0, minigpt4_path)
12
+ from minigpt4.models.blip2 import Blip2Base
13
+ from minigpt4.processors.blip_processors import Blip2ImageEvalProcessor
14
+
15
+
16
+ class MiniGPT4(Blip2Base):
17
+ """
18
+ MiniGPT4 model from https://github.com/Vision-CAIR/MiniGPT-4
19
+ """
20
+ def __init__(self,
21
+ args,
22
+ vit_model="eva_clip_g",
23
+ q_former_model="https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/blip2_pretrained_flant5xxl.pth",
24
+ img_size=224,
25
+ drop_path_rate=0,
26
+ use_grad_checkpoint=False,
27
+ vit_precision="fp32",
28
+ freeze_vit=True,
29
+ freeze_qformer=True,
30
+ num_query_token=32,
31
+ llama_model="",
32
+ prompt_path="",
33
+ prompt_template="",
34
+ max_txt_len=32,
35
+ end_sym='\n',
36
+ low_resource=False, # use 8 bit and put vit in cpu
37
+ device_8bit=0
38
+ ):
39
+ super().__init__()
40
+ self.img_size = img_size
41
+ self.low_resource = low_resource
42
+ self.preprocessor = Blip2ImageEvalProcessor(img_size)
43
+
44
+ print('Loading VIT')
45
+ self.visual_encoder, self.ln_vision = self.init_vision_encoder(
46
+ vit_model, img_size, drop_path_rate, use_grad_checkpoint, vit_precision
47
+ )
48
+ print('Loading VIT Done')
49
+ print('Loading Q-Former')
50
+ self.Qformer, self.query_tokens = self.init_Qformer(
51
+ num_query_token, self.visual_encoder.num_features
52
+ )
53
+ self.Qformer.cls = None
54
+ self.Qformer.bert.embeddings.word_embeddings = None
55
+ self.Qformer.bert.embeddings.position_embeddings = None
56
+ for layer in self.Qformer.bert.encoder.layer:
57
+ layer.output = None
58
+ layer.intermediate = None
59
+ self.load_from_pretrained(url_or_filename=q_former_model)
60
+ print('Loading Q-Former Done')
61
+ self.llama_proj = nn.Linear(
62
+ self.Qformer.config.hidden_size, 5120 # self.llama_model.config.hidden_size
63
+ )
64
+ self.max_txt_len = max_txt_len
65
+ self.end_sym = end_sym
66
+ self.model = MyModel(["main", *args])
67
+ # system promt
68
+ self.model.eval_string("Give the following image: <Img>ImageContent</Img>. "
69
+ "You will be able to see the image once I provide it to you. Please answer my questions."
70
+ "###")
71
+
72
+ def encode_img(self, image):
73
+ image = self.preprocessor(image)
74
+ image = image.unsqueeze(0)
75
+ device = image.device
76
+ if self.low_resource:
77
+ self.vit_to_cpu()
78
+ image = image.to("cpu")
79
+
80
+ with self.maybe_autocast():
81
+ image_embeds = self.ln_vision(self.visual_encoder(image)).to(device)
82
+ image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(device)
83
+
84
+ query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
85
+ query_output = self.Qformer.bert(
86
+ query_embeds=query_tokens,
87
+ encoder_hidden_states=image_embeds,
88
+ encoder_attention_mask=image_atts,
89
+ return_dict=True,
90
+ )
91
+
92
+ inputs_llama = self.llama_proj(query_output.last_hidden_state)
93
+ # atts_llama = torch.ones(inputs_llama.size()[:-1], dtype=torch.long).to(image.device)
94
+ return inputs_llama
95
+
96
+ def load_projection(self, path):
97
+ state = torch.load(path)["model"]
98
+ self.llama_proj.load_state_dict({
99
+ "weight": state["llama_proj.weight"],
100
+ "bias": state["llama_proj.bias"]})
101
+
102
+ def chat(self, question):
103
+ self.model.eval_string("Human: ")
104
+ self.model.eval_string(question)
105
+ self.model.eval_string("\n### Assistant:")
106
+ return self.model.generate_with_print(end="###")
107
+
108
+ def chat_with_image(self, image, question):
109
+ with torch.no_grad():
110
+ embd_image = self.encode_img(image)
111
+ embd_image = embd_image.cpu().numpy()[0]
112
+ self.model.eval_string("Human: <Img>")
113
+ self.model.eval_float(embd_image.T)
114
+ self.model.eval_string("</Img> ")
115
+ self.model.eval_string(question)
116
+ self.model.eval_string("\n### Assistant:")
117
+ return self.model.generate_with_print(end="###")
118
+
119
+
120
+ if __name__=="__main__":
121
+ a = MiniGPT4(["--model", "./models/ggml-vicuna-13b-v0-q4_1.bin", "-c", "2048"])
122
+ a.load_projection(os.path.join(
123
+ os.path.dirname(__file__) ,
124
+ "pretrained_minigpt4.pth"))
125
+ respose = a.chat_with_image(
126
+ Image.open("./media/llama1-logo.png").convert('RGB'),
127
+ "what is the text in the picture?")
128
+ a.chat("what is the color of it?")
examples/embd-input/panda_gpt.py ADDED
@@ -0,0 +1,98 @@
1
+ import sys
2
+ import os
3
+ sys.path.insert(0, os.path.dirname(__file__))
4
+ from embd_input import MyModel
5
+ import numpy as np
6
+ from torch import nn
7
+ import torch
8
+
9
+ # use PandaGPT path
10
+ panda_gpt_path = os.path.join(os.path.dirname(__file__), "PandaGPT")
11
+ imagebind_ckpt_path = "./models/panda_gpt/"
12
+
13
+ sys.path.insert(0, os.path.join(panda_gpt_path,"code","model"))
14
+ from ImageBind.models import imagebind_model
15
+ from ImageBind import data
16
+
17
+ ModalityType = imagebind_model.ModalityType
18
+ max_tgt_len = 400
19
+
20
+ class PandaGPT:
21
+ def __init__(self, args):
22
+ self.visual_encoder,_ = imagebind_model.imagebind_huge(pretrained=True, store_path=imagebind_ckpt_path)
23
+ self.visual_encoder.eval()
24
+ self.llama_proj = nn.Linear(1024, 5120) # self.visual_hidden_size, 5120)
25
+ self.max_tgt_len = max_tgt_len
26
+ self.model = MyModel(["main", *args])
27
+ self.generated_text = ""
28
+ self.device = "cpu"
29
+
30
+ def load_projection(self, path):
31
+ state = torch.load(path, map_location="cpu")
32
+ self.llama_proj.load_state_dict({
33
+ "weight": state["llama_proj.weight"],
34
+ "bias": state["llama_proj.bias"]})
35
+
36
+ def eval_inputs(self, inputs):
37
+ self.model.eval_string("<Img>")
38
+ embds = self.extract_multimoal_feature(inputs)
39
+ for i in embds:
40
+ self.model.eval_float(i.T)
41
+ self.model.eval_string("</Img> ")
42
+
43
+ def chat(self, question):
44
+ return self.chat_with_image(None, question)
45
+
46
+ def chat_with_image(self, inputs, question):
47
+ if self.generated_text == "":
48
+ self.model.eval_string("###")
49
+ self.model.eval_string(" Human: ")
50
+ if inputs:
51
+ self.eval_inputs(inputs)
52
+ self.model.eval_string(question)
53
+ self.model.eval_string("\n### Assistant:")
54
+ ret = self.model.generate_with_print(end="###")
55
+ self.generated_text += ret
56
+ return ret
57
+
58
+ def extract_multimoal_feature(self, inputs):
59
+ features = []
60
+ for key in ["image", "audio", "video", "thermal"]:
61
+ if key + "_paths" in inputs:
62
+ embeds = self.encode_data(key, inputs[key+"_paths"])
63
+ features.append(embeds)
64
+ return features
65
+
66
+ def encode_data(self, data_type, data_paths):
67
+
68
+ type_map = {
69
+ "image": ModalityType.VISION,
70
+ "audio": ModalityType.AUDIO,
71
+ "video": ModalityType.VISION,
72
+ "thermal": ModalityType.THERMAL,
73
+ }
74
+ load_map = {
75
+ "image": data.load_and_transform_vision_data,
76
+ "audio": data.load_and_transform_audio_data,
77
+ "video": data.load_and_transform_video_data,
78
+ "thermal": data.load_and_transform_thermal_data
79
+ }
80
+
81
+ load_function = load_map[data_type]
82
+ key = type_map[data_type]
83
+
84
+ inputs = {key: load_function(data_paths, self.device)}
85
+ with torch.no_grad():
86
+ embeddings = self.visual_encoder(inputs)
87
+ embeds = embeddings[key]
88
+ embeds = self.llama_proj(embeds).cpu().numpy()
89
+ return embeds
90
+
91
+
92
+ if __name__=="__main__":
93
+ a = PandaGPT(["--model", "./models/ggml-vicuna-13b-v0-q4_1.bin", "-c", "2048", "--lora", "./models/panda_gpt/ggml-adapter-model.bin","--temp", "0"])
94
+ a.load_projection("./models/panda_gpt/adapter_model.bin")
95
+ a.chat_with_image(
96
+ {"image_paths": ["./media/llama1-logo.png"]},
97
+ "what is the text in the picture? 'llama' or 'lambda'?")
98
+ a.chat("what is the color of it?")