- baby-llama
- benchmark
- convert-llama2c-to-ggml
- embd-input
- embedding
- gguf
- gptneox-wip
- jeopardy
- llama-bench
- main
- metal
- perplexity
- quantize
- quantize-stats
- save-load-state
- server
- simple
- train-text-from-scratch
- alpaca.sh
- chat-13B.bat
- chat-13B.sh
- chat-persistent.sh
- chat-vicuna.sh
- chat.sh
- CMakeLists.txt
- gpt4all.sh
- json-schema-to-grammar.py
- llama.vim
- llama2-13b.sh
- llama2.sh
- llm.vim
- make-ggml.py
- Miku.sh
- reason-act.sh
- server-llama2-13B.sh