Mirror of https://github.com/LostRuins/koboldcpp.git
synced 2025-09-11 01:24:36 +00:00

Commit 8e5fd6f509: Merge branch 'upstream' into concedo_experimental

# Conflicts:
#	.gitignore
#	README.md
#	docs/backend/BLIS.md
#	docs/backend/SYCL.md
#	docs/development/llama-star/idea-arch.key
#	docs/development/llama-star/idea-arch.pdf
#	docs/development/token_generation_performance_tips.md
#	src/llama.cpp
#	tests/test-tokenizer-0.cpp
#	tests/test-tokenizer-1-bpe.cpp
#	tests/test-tokenizer-1-spm.cpp
#	tests/test-tokenizer-random.py

28 changed files with 352 additions and 2091 deletions
AUTHORS (782 deletions)

@@ -1,782 +0,0 @@
# date: Wed Jun 26 19:36:34 EEST 2024
# this file is auto-generated by scripts/gen-authors.sh

0cc4m <picard12@live.de>
0xspringtime <110655352+0xspringtime@users.noreply.github.com>
20kdc <asdd2808@gmail.com>
2f38b454 <dxf@protonmail.com>
3ooabkhxtn <31479382+3ooabkhxtn@users.noreply.github.com>
44670 <44670@users.noreply.github.com>
AN Long <aisk@users.noreply.github.com>
AT <manyoso@users.noreply.github.com>
Aarni Koskela <akx@iki.fi>
Aaron Miller <apage43@ninjawhale.com>
Aaryaman Vasishta <aaryaman.vasishta@amd.com>
Abheek Gulati <abheekg@hotmail.com>
Abhilash Majumder <30946547+abhilash1910@users.noreply.github.com>
Abhishek Gopinath K <31348521+overtunned@users.noreply.github.com>
Adithya Balaji <adithya.b94@gmail.com>
AdithyanI <adithyan.i4internet@gmail.com>
Adrian <smith.adriane@gmail.com>
Adrian Hesketh <a-h@users.noreply.github.com>
Ahmet Zeer <ahmed.zeer@std.yildiz.edu.tr>
AidanBeltonS <87009434+AidanBeltonS@users.noreply.github.com>
Aisuko <urakiny@gmail.com>
Akarshan Biswas <akarshanbiswas@fedoraproject.org>
Albert Jin <albert.jin@gmail.com>
Alberto <57916483+albbus-stack@users.noreply.github.com>
Alex <awhill19@icloud.com>
Alex Azarov <alex@azarov.by>
Alex Azarov <alexander.azarov@mapbox.com>
Alex Klinkhamer <from.github.com.917@grencez.dev>
Alex Klinkhamer <git@grencez.dev>
Alex Nguyen <tiendung@users.noreply.github.com>
Alex Petenchea <alex.petenchea@gmail.com>
Alex Renda <alexrenda@users.noreply.github.com>
Alex von Gluck IV <kallisti5@unixzen.com>
Alexey Parfenov <zxed@alkatrazstudio.net>
Ali Chraghi <63465728+alichraghi@users.noreply.github.com>
Ali Nehzat <ali.nehzat@thanks.dev>
Ali Tariq <ali.tariq@10xengineers.ai>
Alon <alonfaraj@gmail.com>
AlpinDale <52078762+AlpinDale@users.noreply.github.com>
Amir <amir_zia@outlook.com>
AmirAli Mirian <37371367+amiralimi@users.noreply.github.com>
Ananta Bastola <anantarajbastola@gmail.com>
Anas Ahouzi <112881240+aahouzi@users.noreply.github.com>
András Salamon <ott2@users.noreply.github.com>
Andrei <abetlen@gmail.com>
Andrew Canis <andrew.canis@gmail.com>
Andrew Downing <andrew2085@gmail.com>
Andrew Duffy <a10y@users.noreply.github.com>
Andrew Godfrey <AndrewGodfrey@users.noreply.github.com>
Andy Tai <andy-tai@users.noreply.github.com>
Arik Poznanski <arikpoz@users.noreply.github.com>
Artem <guinmoon@gmail.com>
Artem Zinnatullin <ceo@abstractny.gay>
Artyom Lebedev <vagran.ast@gmail.com>
Asbjørn Olling <asbjornolling@gmail.com>
Ásgeir Bjarni Ingvarsson <asgeir@fundinn.org>
Ashish <1856117+ashishdatta@users.noreply.github.com>
Ashok Gelal <401055+ashokgelal@users.noreply.github.com>
Ashraful Islam <ashraful.meche@gmail.com>
Atsushi Tatsuma <yoshoku@outlook.com>
Austin <77757836+teleprint-me@users.noreply.github.com>
AustinMroz <austinmroz@utexas.edu>
BADR <contact@pythops.com>
Bach Le <bach@bullno1.com>
Bailey Chittle <39804642+bachittle@users.noreply.github.com>
BarfingLemurs <128182951+BarfingLemurs@users.noreply.github.com>
Bartowski <ckealty1182@gmail.com>
Behnam M <58621210+ibehnam@users.noreply.github.com>
Ben Ashbaugh <ben.ashbaugh@intel.com>
Ben Garney <bengarney@users.noreply.github.com>
Ben Siraphob <bensiraphob@gmail.com>
Ben Williams <ben@719ben.com>
Benjamin Findley <39356821+Kartoffelsaft@users.noreply.github.com>
Benjamin Lecaillon <84293038+blecaillon@users.noreply.github.com>
Bernat Vadell <hounter.caza@gmail.com>
Bingan <70050083+binganao@users.noreply.github.com>
Bodo Graumann <mail@bodograumann.de>
Bono Lv <lvscar@users.noreply.github.com>
Borislav Stanimirov <b.stanimirov@abv.bg>
Branden Butler <bwtbutler@hotmail.com>
Brian <mofosyne@gmail.com>
Bruce MacDonald <brucewmacdonald@gmail.com>
Bryan Honof <bryanhonof@gmail.com>
CJ Pais <cj@cjpais.com>
CRD716 <crd716@gmail.com>
Calvin Laurenson <calvin@laurenson.dev>
Cameron <csteele@steelecameron.com>
Cameron Kaiser <classilla@users.noreply.github.com>
Carolinabanana <140120812+Carolinabanana@users.noreply.github.com>
Casey Primozic <casey@cprimozic.net>
Casey Primozic <me@ameo.link>
CausalLM <148736309+CausalLM@users.noreply.github.com>
Cebtenzzre <cebtenzzre@gmail.com>
Chad Brewbaker <crb002@gmail.com>
Chao Jiang <jc19chaoj@zoho.com>
Cheng Shao <terrorjack@type.dance>
Chris Elrod <elrodc@gmail.com>
Chris Kuehl <ckuehl@ckuehl.me>
Christian Demsar <christian@github.email.demsar.us>
Christian Demsar <crasm@git.vczf.us>
Christian Falch <875252+chrfalch@users.noreply.github.com>
Christian Kögler <ck3d@gmx.de>
Christian Zhou-Zheng <59622928+christianazinn@users.noreply.github.com>
Clark Saben <76020733+csaben@users.noreply.github.com>
Clint Herron <hanclinto@gmail.com>
CrispStrobe <154636388+CrispStrobe@users.noreply.github.com>
Cuong Trinh Manh <nguoithichkhampha@gmail.com>
DAN™ <dranger003@gmail.com>
Damian Stewart <d@damianstewart.com>
Dane Madsen <dane_madsen@hotmail.com>
DaniAndTheWeb <57776841+DaniAndTheWeb@users.noreply.github.com>
Daniel Bevenius <daniel.bevenius@gmail.com>
Daniel Drake <drake@endlessos.org>
Daniel Hiltgen <dhiltgen@users.noreply.github.com>
Daniel Illescas Romero <illescas.daniel@protonmail.com>
Daniele <57776841+daniandtheweb@users.noreply.github.com>
DannyDaemonic <DannyDaemonic@gmail.com>
Dat Quoc Nguyen <2412555+datquocnguyen@users.noreply.github.com>
Dave <dave-fl@users.noreply.github.com>
Dave Airlie <airlied@gmail.com>
Dave Airlie <airlied@redhat.com>
Dave Della Costa <ddellacosta+github@gmail.com>
David Friehs <david@friehs.info>
David Kennedy <dakennedyd@gmail.com>
David Pflug <david@pflug.email>
David Renshaw <dwrenshaw@gmail.com>
David Sommers <12738+databyte@users.noreply.github.com>
David Yang <davidyang6us@gmail.com>
Dawid Potocki <github@dawidpotocki.com>
Dawid Wysocki <62249621+TortillaZHawaii@users.noreply.github.com>
Dean <Dean.Sinaean@gmail.com>
Deins <deinsegle@gmail.com>
Deven Mistry <31466137+deven367@users.noreply.github.com>
Didzis Gosko <didzis@users.noreply.github.com>
Djip007 <djip.perois@free.fr>
Don Mahurin <dmahurin@users.noreply.github.com>
DooWoong Lee (David) <manics99@naver.com>
Doomsdayrs <38189170+Doomsdayrs@users.noreply.github.com>
Douglas Hanley <thesecretaryofwar@gmail.com>
Dr. Tom Murphy VII Ph.D <499244+tom7@users.noreply.github.com>
Ebey Abraham <ebey97@gmail.com>
Ed Lee <edilee@mozilla.com>
Ed Lepedus <ed.lepedus@googlemail.com>
Eddie-Wang <wangjinheng1120@163.com>
Edward Taylor <edeetee@gmail.com>
Elaine <elaine.zosa@gmail.com>
Elbios <141279586+Elbios@users.noreply.github.com>
Elton Kola <eltonkola@gmail.com>
Engininja2 <139037756+Engininja2@users.noreply.github.com>
Equim <sayaka@ekyu.moe>
Eric Sommerlade <es0m@users.noreply.github.com>
Eric Zhang <34133756+EZForever@users.noreply.github.com>
Erik Garrison <erik.garrison@gmail.com>
Erik Scholz <Green-Sky@users.noreply.github.com>
Ettore Di Giacinto <mudler@users.noreply.github.com>
Evan Jones <evan.q.jones@gmail.com>
Evan Miller <emmiller@gmail.com>
Eve <139727413+netrunnereve@users.noreply.github.com>
Evgeny Kurnevsky <kurnevsky@gmail.com>
Ewout ter Hoeven <E.M.terHoeven@student.tudelft.nl>
ExtReMLapin <3909752+ExtReMLapin@users.noreply.github.com>
FK <sozforex@gmail.com>
Fabian <cmdrf@users.noreply.github.com>
Fabio R. Sluzala <Fabio3rs@users.noreply.github.com>
Faez Shakil <faez.shakil@gmail.com>
FantasyGmm <16450052+FantasyGmm@users.noreply.github.com>
Fattire <528174+fat-tire@users.noreply.github.com>
Felix <stenbackfelix@gmail.com>
Finn Voorhees <finnvoorhees@gmail.com>
Firat <firatkiral@gmail.com>
Folko-Ven <71110216+Folko-Ven@users.noreply.github.com>
Foul-Tarnished <107711110+Foul-Tarnished@users.noreply.github.com>
Francisco Melo <43780565+francis2tm@users.noreply.github.com>
Frank Mai <thxcode0824@gmail.com>
FrankHB <frankhb1989@gmail.com>
Fred Douglas <43351173+fredlas@users.noreply.github.com>
Frederik Vogel <Schaltfehler@users.noreply.github.com>
Gabe Goodhart <gabe.l.hart@gmail.com>
GainLee <perfecter.gen@gmail.com>
Galunid <karolek1231456@gmail.com>
Gary Linscott <glinscott@gmail.com>
Gary Mulder <gjmulder@gmail.com>
Gavin Zhao <gavinzhaojw@protonmail.com>
Genkagaku.GPT <hlhr202@163.com>
Georgi Gerganov <ggerganov@gmail.com>
Gilad S <giladgd@users.noreply.github.com>
Giuseppe Scrivano <giuseppe@scrivano.org>
GiviMAD <GiviMAD@users.noreply.github.com>
Govlzkoy <gotope@users.noreply.github.com>
Guillaume "Vermeille" Sanchez <Guillaume.V.Sanchez@gmail.com>
Guillaume Wenzek <gwenzek@users.noreply.github.com>
Guoteng <32697156+SolenoidWGT@users.noreply.github.com>
Gustavo Rocha Dias <91472747+gustrd@users.noreply.github.com>
Haggai Nuchi <h.nuchi@gmail.com>
Halalaluyafail3 <55773281+Halalaluyafail3@users.noreply.github.com>
Hamdoud Hakem <90524568+hamdoudhakem@users.noreply.github.com>
HanishKVC <hanishkvc@gmail.com>
Haohui Mai <ricetons@gmail.com>
Haoxiang Fei <tonyfettes@tonyfettes.com>
Harald Fernengel <harald.fernengel@here.com>
Hatsune Miku <129688334+at8u@users.noreply.github.com>
HatsuneMikuUwU33 <173229399+HatsuneMikuUwU33@users.noreply.github.com>
Henk Poley <HenkPoley@gmail.com>
Henri Vasserman <henv@hot.ee>
Henrik Forstén <henrik.forsten@gmail.com>
Herman Semenov <GermanAizek@yandex.ru>
Hesen Peng <hesen.peng@gmail.com>
Hoang Nguyen <hugo53@users.noreply.github.com>
Hong Bo PENG <penghb@cn.ibm.com>
Hongyu Ouyang <96765450+casavaca@users.noreply.github.com>
Howard Su <howard0su@gmail.com>
Hua Jiang <allenhjiang@outlook.com>
Huawei Lin <huaweilin.cs@gmail.com>
Hugo Roussel <hugo.rous@gmail.com>
Ian Bull <irbull@eclipsesource.com>
Ian Bull <irbull@gmail.com>
Ian Scrivener <github@zilogy.asia>
Ido S <ido.pluto@gmail.com>
IgnacioFDM <ignaciofdm@gmail.com>
Igor Okulist <okigan@gmail.com>
Ikko Eltociear Ashimine <eltociear@gmail.com>
Ilya Kurdyukov <59548320+ilyakurdyukov@users.noreply.github.com>
Ionoclast Laboratories <brigham@ionoclast.com>
Isaac McFadyen <isaac@imcf.me>
IsaacDynamo <61521674+IsaacDynamo@users.noreply.github.com>
Ivan Komarov <Ivan.Komarov@dfyz.info>
Ivan Stepanov <ivanstepanovftw@gmail.com>
JH23X <165871467+JH23X@users.noreply.github.com>
Jack Mousseau <jmousseau@users.noreply.github.com>
JackJollimore <130917767+JackJollimore@users.noreply.github.com>
Jaemin Son <woalsdnd@gmail.com>
Jag Chadha <jagtesh@gmail.com>
Jakub N <jakubniemczyk97@gmail.com>
James A Capozzoli <157492257+jac-jim@users.noreply.github.com>
James Reynolds <magnusviri@users.noreply.github.com>
Jan Boon <jan.boon@kaetemi.be>
Jan Boon <kaetemi@gmail.com>
Jan Ploski <jpl@plosquare.com>
Jannis Schönleber <joennlae@gmail.com>
Jared Van Bortel <cebtenzzre@gmail.com>
Jared Van Bortel <jared@nomic.ai>
Jason McCartney <jmac@theroot.org>
Jean-Christophe Hoelt <hoelt@fovea.cc>
Jean-Michaël Celerier <jeanmichael.celerier+github@gmail.com>
Jed Fox <git@jedfox.com>
Jeffrey Quesnelle <emozilla@nousresearch.com>
Jesse Jojo Johnson <williamsaintgeorge@gmail.com>
Jeximo <jeximo@gmail.com>
Jhen-Jie Hong <iainst0409@gmail.com>
Jiahao Li <liplus17@163.com>
Jian Liao <jianliao@users.noreply.github.com>
JidongZhang-THU <1119708529@qq.com>
Jinwoo Jeong <33892306+williamjeong2@users.noreply.github.com>
Jiří Podivín <66251151+jpodivin@users.noreply.github.com>
Jiří Sejkora <Sejseloid@gmail.com>
Joan Fontanals <jfontanalsmartinez@gmail.com>
Joan Fontanals <joan.fontanals.martinez@jina.ai>
Johan <JohanAR@users.noreply.github.com>
Johannes Gäßler <johannesg@5d6.de>
Johannes Rudolph <johannes.rudolph@gmail.com>
John <78893154+cmp-nct@users.noreply.github.com>
John Balis <phobossystems@gmail.com>
John Smith <67539080+kingsidelee@users.noreply.github.com>
JohnnyB <jboero@users.noreply.github.com>
Jonas Wunderlich <32615971+jonas-w@users.noreply.github.com>
Jorge A <161275481+jorgealias@users.noreply.github.com>
Jose Maldonado <63384398+yukiteruamano@users.noreply.github.com>
Joseph Stahl <1269177+josephst@users.noreply.github.com>
Josh Ramer <josh.ramer@icloud.com>
Joyce <joycebrum@google.com>
Juan Calderon-Perez <835733+gaby@users.noreply.github.com>
Judd <foldl@users.noreply.github.com>
Julius Arkenberg <arki05@users.noreply.github.com>
Jun Jie <71215065+junnjiee16@users.noreply.github.com>
Junyang Lin <justinlin930319@hotmail.com>
Juraj Bednar <juraj@bednar.io>
Justin Parker <jparkerweb@gmail.com>
Justin Suess <justin.suess@westpoint.edu>
Justina Cho <justcho5@gmail.com>
Justine Tunney <jtunney@gmail.com>
Justine Tunney <jtunney@mozilla.com>
Juuso Alasuutari <juuso.alasuutari@gmail.com>
KASR <karim.asrih@gmail.com>
Kamil Tomšík <info@tomsik.cz>
Karsten Weiss <knweiss@gmail.com>
Karthick <j.karthic2004@gmail.com>
Karthik Kumar Viswanathan <195178+guilt@users.noreply.github.com>
Karthik Sethuraman <k.seth1993@gmail.com>
Kasumi <90275229+kasumi-1@users.noreply.github.com>
Kawrakow <48489457+ikawrakow@users.noreply.github.com>
Keiichi Tabata <keiichi.tabata@outlook.com>
Kenvix ⭐ <kenvixzure@live.com>
Kerfuffle <44031344+KerfuffleV2@users.noreply.github.com>
Kevin Gibbons <bakkot@gmail.com>
Kevin Ji <1146876+kevinji@users.noreply.github.com>
Kevin Kwok <antimatter15@gmail.com>
Kevin Lo <kevlo@kevlo.org>
Kolen Cheung <ickc@users.noreply.github.com>
Konstantin Herud <konstantin.herud@denkbares.com>
Konstantin Zhuravlyov <konstantin.zhuravlyov@amd.com>
Kunshang Ji <kunshang.ji@intel.com>
Kyle Liang <liangmanlai@gmail.com>
Kyle Mistele <kyle@mistele.com>
Kylin <56434533+KyL0N@users.noreply.github.com>
Lars Grammel <lars.grammel@gmail.com>
Laura <Tijntje_7@msn.com>
Lee <44310445+lx200916@users.noreply.github.com>
Lee Drake <b.lee.drake@gmail.com>
Leng Yue <lengyue@lengyue.me>
Leon Knauer <git@leonknauer.com>
LeonEricsson <70749762+LeonEricsson@users.noreply.github.com>
Leonardo Neumann <leonardo@neumann.dev.br>
Li Tan <tanliboy@gmail.com>
Linwei Wang <wanix1988@gmail.com>
LoganDark <github@logandark.mozmail.com>
LostRuins <39025047+LostRuins@users.noreply.github.com>
Luciano <lucianostrika44@gmail.com>
Luo Tian <lt@basecity.com>
Lyle Dean <dean@lyle.dev>
M. Yusuf Sarıgöz <yusufsarigoz@gmail.com>
Maarten ter Huurne <maarten@treewalker.org>
Mack Straight <eiz@users.noreply.github.com>
Maël Kerbiriou <m431.kerbiriou@gmail.com>
MaggotHATE <clay1326@gmail.com>
Manuel <44313466+makuche@users.noreply.github.com>
Marc Köhlbrugge <subscriptions@marckohlbrugge.com>
Marco Matthies <71844+marcom@users.noreply.github.com>
Marcus Dunn <51931484+MarcusDunn@users.noreply.github.com>
Marian Cepok <marian.cepok@gmail.com>
Mark Fairbairn <thebaron88@gmail.com>
Marko Tasic <mtasic85@gmail.com>
Markus Tavenrath <mtavenrath@users.noreply.github.com>
Martin Delille <martin@delille.org>
Martin Krasser <krasserm@googlemail.com>
Martin Schwaighofer <mschwaig@users.noreply.github.com>
Marvin Gießing <marvin.giessing@gmail.com>
Masaya, Kato <62578291+msy-kato@users.noreply.github.com>
MasterYi1024 <39848311+MasterYi1024@users.noreply.github.com>
Mateusz Charytoniuk <mateusz.charytoniuk@protonmail.com>
Matheus C. França <matheus-catarino@hotmail.com>
Matheus Gabriel Alves Silva <matheusgasource@gmail.com>
Mathieu Nayrolles <MathieuNls@users.noreply.github.com>
Mathijs de Bruin <mathijs@mathijsfietst.nl>
Matt Clayton <156335168+mattjcly@users.noreply.github.com>
Matt Pulver <matt.pulver@heavy.ai>
Matteo Boschini <12133566+mbosc@users.noreply.github.com>
Mattheus Chediak <shammcity00@gmail.com>
Matthew Tejo <matthew.tejo@gmail.com>
Matvey Soloviev <blackhole89@gmail.com>
Max Krasnyansky <max.krasnyansky@gmail.com>
Max Krasnyansky <quic_maxk@quicinc.com>
Maxime <672982+maximegmd@users.noreply.github.com>
Maximilian Winter <maximilian.winter.91@gmail.com>
Meng Zhang <meng@tabbyml.com>
Meng, Hengyu <hengyu.meng@intel.com>
Merrick Christensen <merrick.christensen@gmail.com>
Michael Coppola <m18coppola@gmail.com>
Michael Hueschen <m@mhueschen.dev>
Michael Kesper <mkesper@schokokeks.org>
Michael Klimenko <mklimenko29@gmail.com>
Michael Podvitskiy <podvitskiymichael@gmail.com>
Michael Potter <NanoTekGuy@Gmail.com>
Michael de Gans <michael.john.degans@gmail.com>
Michaël de Vries <vriesdemichael@gmail.com>
Mihai <mihai.chirculescu@yahoo.com>
Mike <ytianhui2004@gmail.com>
Mikko Juola <mikjuo@gmail.com>
Minsoo Cheong <54794500+mscheong01@users.noreply.github.com>
Mirko185 <mirkosig@gmail.com>
Mirror Azure <54669636+MirrorAzure@users.noreply.github.com>
Miwa / Ensan <63481257+ensan-hcl@users.noreply.github.com>
Mohammadreza Hendiani <hendiani.mohammadreza@gmail.com>
Mohammadreza Hendiani <mohammad.r.hendiani@gmail.com>
Murilo Santana <mvrilo@gmail.com>
Musab Gultekin <musabgultekin@users.noreply.github.com>
Nam D. Tran <42194884+namtranase@users.noreply.github.com>
Nathan Epstein <nate2@umbc.edu>
NawafAlansari <72708095+NawafAlansari@users.noreply.github.com>
Nebula <infinitewormhole@gmail.com>
Neo Zhang <14088817+arthw@users.noreply.github.com>
Neo Zhang <zhang.jianyu@outlook.com>
Neo Zhang Jianyu <jianyu.zhang@intel.com>
Neuman Vong <neuman.vong@gmail.com>
Nexesenex <124105151+Nexesenex@users.noreply.github.com>
Niall Coates <1349685+Niall-@users.noreply.github.com>
Nicolai Weitkemper <kontakt@nicolaiweitkemper.de>
Nicolás Pérez <nicolas_perez@brown.edu>
Nigel Bosch <pnigelb@gmail.com>
Niklas Korz <niklas@niklaskorz.de>
Nikolas <127742645+nneubacher@users.noreply.github.com>
Nindaleth <Nindaleth@users.noreply.github.com>
Oleksandr Nikitin <oleksandr@tvori.info>
Oleksii Maryshchenko <oleksii.maryshchenko@gmail.com>
Olivier Chafik <ochafik@users.noreply.github.com>
Ondřej Čertík <ondrej@certik.us>
Ouadie EL FAROUKI <ouadie.elfarouki@codeplay.com>
Patrice Ferlet <metal3d@gmail.com>
Paul Tsochantaris <ptsochantaris@icloud.com>
Pavol Rusnak <pavol@rusnak.io>
Pedro Cuenca <pedro@huggingface.co>
Peter Sugihara <peter@campsh.com>
Phil H <5756783+phiharri@users.noreply.github.com>
Philip Taron <philip.taron@gmail.com>
Phillip Kravtsov <phillip@kravtsov.net>
Pierre Alexandre SCHEMBRI <pa.schembri@gmail.com>
Pierrick Hymbert <pierrick.hymbert@gmail.com>
Przemysław Pawełczyk <przemoc@gmail.com>
Qin Yue Chen <71813199+chenqiny@users.noreply.github.com>
Qingyou Meng <meng.qingyou@gmail.com>
Qu Zongfu <43257352+yancaoweidaode@users.noreply.github.com>
RJ Adriaansen <adriaansen@eshcc.eur.nl>
Radoslav Gerganov <rgerganov@gmail.com>
Radosław Gryta <radek.gryta@gmail.com>
Rahul Vivek Nair <68507071+RahulVivekNair@users.noreply.github.com>
Raj Hammeer Singh Hada <hammeerraj@gmail.com>
Ralph Soika <ralph.soika@imixs.com>
Rand Xie <randxiexyy29@gmail.com>
Randall Fitzgerald <randall@dasaku.net>
Reinforce-II <fate@eastal.com>
Ren Xuancheng <jklj077@users.noreply.github.com>
Rene Leonhardt <65483435+reneleonhardt@users.noreply.github.com>
RhinoDevel <RhinoDevel@users.noreply.github.com>
Riceball LEE <snowyu.lee@gmail.com>
Richard Kiss <him@richardkiss.com>
Richard Roberson <richardr1126@gmail.com>
Rick G <26732651+TheFlipbook@users.noreply.github.com>
Rickard Edén <rickardeden@gmail.com>
Rickard Hallerbäck <rickard.hallerback@gmail.com>
Rickey Bowers Jr <bitRAKE@gmail.com>
Riley Stewart <ristew@users.noreply.github.com>
Rinne <AsakusaRinne@gmail.com>
Rinne <liu_yaohui1998@126.com>
Robert Brisita <986796+rbrisita@users.noreply.github.com>
Robert Sung-wook Shin <edp1096@users.noreply.github.com>
Robey Holderith <robey@flaminglunchbox.net>
Robyn <robyngraf@users.noreply.github.com>
Roger Meier <r.meier@siemens.com>
Roland <14355895+rbur0425@users.noreply.github.com>
Romain D <90720+Artefact2@users.noreply.github.com>
Romain Neutron <romain@neutron.io>
Roman Parykin <donderom@gmail.com>
Ron Evans <ron@hybridgroup.com>
Ron Jailall <rojailal@gmail.com>
Ronny Brendel <ronnybrendel@gmail.com>
Ronsor <ronsor@ronsor.pw>
Rowan Hart <rowanbhart@gmail.com>
Rune <43761327+Rune-AI@users.noreply.github.com>
Ryan Landay <rlanday@gmail.com>
Ryder Wishart <ryderwishart@gmail.com>
Ryuei <louixs@users.noreply.github.com>
Rőczey Barnabás <31726601+An0nie@users.noreply.github.com>
SakuraUmi <yukinon244@gmail.com>
Salvador E. Tropea <stropea@inti.gob.ar>
Sam Spilsbury <smspillaz@gmail.com>
Sami Farin <3876865+Safari77@users.noreply.github.com>
Samuel Maynard <samwmaynard@gmail.com>
Sang-Kil Park <sang.park@42dot.ai>
Seb C <47074056+Sebby37@users.noreply.github.com>
Sebastián A <sebastian.aedo29@gmail.com>
SebastianApel <13675545+SebastianApel@users.noreply.github.com>
Senemu <10880819+Senemu@users.noreply.github.com>
Sergey Alirzaev <zl29ah@gmail.com>
Sergio López <slp@sinrega.org>
Sertaç Özercan <852750+sozercan@users.noreply.github.com>
SeungWon Jeong <65549245+redlion0929@users.noreply.github.com>
ShadovvBeast <ShadovvBeast@gmail.com>
Shakhar Dasgupta <shakhardasgupta@gmail.com>
Shangning Xu <32517059+xushangning@users.noreply.github.com>
Shijie <821898965@qq.com>
Shintarou Okada <kokuzen@gmail.com>
Shouzheng Liu <61452103+lshzh-ww@users.noreply.github.com>
Shouzheng Liu <lshzh.hi@gmail.com>
Shuichi Tsutsumi <shuichi0526@gmail.com>
Sigbjørn Skjæret <sigbjorn.skjaeret@scala.com>
Simon Willison <swillison@gmail.com>
Siwen Yu <yusiwen@gmail.com>
Sky Yan <skyan83@gmail.com>
Slaren <2141330+slaren@users.noreply.github.com>
Slava Primenko <primenko.s@gmail.com>
SoftwareRenderer <138734813+SoftwareRenderer@users.noreply.github.com>
Someone <sergei.kozlukov@aalto.fi>
Someone Serge <sergei.kozlukov@aalto.fi>
Sourab Mangrulkar <13534540+pacman100@users.noreply.github.com>
Spencer Sutton <spencersutton@users.noreply.github.com>
Srihari-mcw <96763064+Srihari-mcw@users.noreply.github.com>
Srinivas Billa <nivibilla@gmail.com>
Stefan Sydow <stefan@sydow.email>
Steffen Röcker <sroecker@gmail.com>
Stephan Walter <stephan@walter.name>
Stephen Nichols <snichols@users.noreply.github.com>
Steve Grubb <ausearch.1@gmail.com>
Steven Prichard <spprichard20@gmail.com>
Steven Roussey <sroussey@gmail.com>
Steward Garcia <57494570+FSSRepo@users.noreply.github.com>
Suaj Carrot <72162667+SuajCarrot@users.noreply.github.com>
SuperUserNameMan <yoann@terminajones.com>
Tai Duc Nguyen <taiducnguyen.drexel@gmail.com>
Taikono-Himazin <kazu@po.harenet.ne.jp>
Tameem <113388789+AhmadTameem@users.noreply.github.com>
Tamotsu Takahashi <ttakah+github@gmail.com>
Thái Hoàng Tâm <75922889+RoyalHeart@users.noreply.github.com>
Thatcher Chamberlin <j.thatcher.c@gmail.com>
Theia Vogel <theia@vgel.me>
Thérence <13496987+Royalphax@users.noreply.github.com>
Thibault Terrasson <thibault.terrasson@gmail.com>
Thomas Klausner <wiz@gatalith.at>
Tim Miller <drasticactions@users.noreply.github.com>
Timmy Knight <r2d2fish@gmail.com>
Timothy Cronin <40186632+4imothy@users.noreply.github.com>
Ting Lou <ting.lou@gmail.com>
Ting Sun <suntcrick@gmail.com>
Tobias Lütke <tobi@shopify.com>
Tom C <tom.corelis@gmail.com>
Tom Jobbins <784313+TheBloke@users.noreply.github.com>
Tomas <tom.tomas.36478119@gmail.com>
Tomáš Pazdiora <tomas.pazdiora@gmail.com>
Tristan Druyen <tristan@vault81.mozmail.com>
Tristan Ross <rosscomputerguy@protonmail.com>
Tungsten842 <886724vf@anonaddy.me>
Tungsten842 <quantmint@protonmail.com>
Tushar <ditsuke@protonmail.com>
UEXTM.com <84163508+uextm@users.noreply.github.com>
Ulrich Drepper <drepper@gmail.com>
Uzo Nweke <uzoechi@gmail.com>
Vaibhav Srivastav <vaibhavs10@gmail.com>
Val Kharitonov <mail@kharvd.com>
Valentin Konovalov <valle.ketsujin@gmail.com>
Valentyn Bezshapkin <61702053+valentynbez@users.noreply.github.com>
Victor Nogueira <felladrin@gmail.com>
Victor Z. Peng <ziliangdotme@gmail.com>
Vlad <spitfireage@gmail.com>
Vladimir <bogdad@gmail.com>
Vladimir Malyutin <first-leon@yandex.ru>
Vladimir Zorin <vladimir@deviant.guru>
Volodymyr Vitvitskyi <72226+signalpillar@users.noreply.github.com>
WangHaoranRobin <56047610+WangHaoranRobin@users.noreply.github.com>
Weird Constructor <weirdconstructor@gmail.com>
Welby Seely <welbyseely@gmail.com>
Wentai Zhang <rchardx@gmail.com>
WillCorticesAI <150854901+WillCorticesAI@users.noreply.github.com>
William Tambellini <william.tambellini@gmail.com>
Willy Tarreau <w@1wt.eu>
Wouter <9594229+DifferentialityDevelopment@users.noreply.github.com>
Wu Jian Ping <wujjpp@hotmail.com>
Wu Jian Ping <wujp@greatld.com>
Xiake Sun <xiake.sun@intel.com>
Xiang (Kevin) Li <kevinli020508@gmail.com>
Xiao-Yong Jin <jinxiaoyong@gmail.com>
XiaotaoChen <chenxiaotao1234@gmail.com>
Xiaoyi Chen <cxychina@gmail.com>
Xingchen Song(宋星辰) <xingchensong1996@163.com>
Xuan Son Nguyen <thichthat@gmail.com>
Yann Follet <131855179+YannFollet@users.noreply.github.com>
Yaroslav <yaroslav.yashin@me.com>
Yazan Agha-Schrader <mountaiin@icloud.com>
Yiming Cui <conandiy@vip.qq.com>
Yishuo Wang <MeouSker77@outlook.com>
Yueh-Po Peng <94939112+y10ab1@users.noreply.github.com>
Yui <dev@sleepyyui.com>
Yusuf Kağan Hanoğlu <hanoglu@yahoo.com>
Yuval Peled <31162840+Yuval-Peled@users.noreply.github.com>
ZHAOKAI WANG <sanxianwei@163.com>
Zane Shannon <z@zcs.me>
Zay <95888118+isaiahbjork@users.noreply.github.com>
Zenix <zenixls2@gmail.com>
Zhang Peiyuan <a1286225768@gmail.com>
Zheng.Deng <32841220+dengzheng-cloud@users.noreply.github.com>
ZhouYuChen <zhouyuchen@naver.com>
Ziad Ben Hadj-Alouane <zied.benhadjalouane@gmail.com>
Ziang Wu <97337387+ZiangWu-77@users.noreply.github.com>
Zsapi <martin1.zsapka@gmail.com>
a-n-n-a-l-e-e <150648636+a-n-n-a-l-e-e@users.noreply.github.com>
adel boussaken <netdur@gmail.com>
afrideva <95653597+afrideva@users.noreply.github.com>
agray3 <agray3@users.noreply.github.com>
akawrykow <142945436+akawrykow@users.noreply.github.com>
alexpinel <93524949+alexpinel@users.noreply.github.com>
alonfaraj <alonfaraj@gmail.com>
alwqx <kenan3015@gmail.com>
amd-lalithnc <lalithnc@amd.com>
andrijdavid <david@geek.mg>
anon998 <131767832+anon998@users.noreply.github.com>
anzz1 <anzz1@live.com>
apaz <aarpazdera@gmail.com>
apcameron <37645737+apcameron@users.noreply.github.com>
arch-btw <57669023+arch-btw@users.noreply.github.com>
arcrank <arcrank@gmail.com>
arlo-phoenix <140345165+arlo-phoenix@users.noreply.github.com>
at8u <129688334+at8u@users.noreply.github.com>
automaticcat <daogiatuank54@gmail.com>
bandoti <141645996+bandoti@users.noreply.github.com>
beiller <beiller@gmail.com>
bhubbb <79117352+bhubbb@users.noreply.github.com>
bmwl <brian.marshall@tolko.com>
bobqianic <129547291+bobqianic@users.noreply.github.com>
bryanSwk <93190252+bryanSwk@users.noreply.github.com>
bsilvereagle <bsilvereagle@users.noreply.github.com>
bssrdf <merlintiger@hotmail.com>
byte-6174 <88070277+byte-6174@users.noreply.github.com>
cebtenzzre <cebtenzzre@gmail.com>
chaihahaha <chai836275709@gmail.com>
chiranko <96988916+chiranko@users.noreply.github.com>
clibdev <52199778+clibdev@users.noreply.github.com>
clyang <clyang@clyang.net>
cocktailpeanut <121128867+cocktailpeanut@users.noreply.github.com>
coezbek <c.oezbek@gmail.com>
comex <comexk@gmail.com>
compilade <113953597+compilade@users.noreply.github.com>
compilade <git@compilade.net>
cpumaxx <163466046+cpumaxx@users.noreply.github.com>
crasm <crasm@git.vczf.net>
crasm <crasm@git.vczf.us>
daboe01 <daboe01@googlemail.com>
david raistrick <keen99@users.noreply.github.com>
ddh0 <dylanhalladay02@icloud.com>
ddpasa <112642920+ddpasa@users.noreply.github.com>
deepdiffuser <112834445+deepdiffuser@users.noreply.github.com>
divinity76 <divinity76@gmail.com>
dm4 <sunrisedm4@gmail.com>
dotpy314 <33351922+dotpy314@users.noreply.github.com>
drbh <david.richard.holtz@gmail.com>
ds5t5 <145942675+ds5t5@users.noreply.github.com>
dylan <canardleteer@users.noreply.github.com>
eastriver <lee@eastriver.dev>
ebraminio <ebraminio@gmail.com>
eiery <19350831+eiery@users.noreply.github.com>
eric8607242 <e0928021388@gmail.com>
fairydreaming <166155368+fairydreaming@users.noreply.github.com>
fraxy-v <65565042+fraxy-v@users.noreply.github.com>
github-actions[bot] <github-actions[bot]@users.noreply.github.com>
gliptic <gliptic@users.noreply.github.com>
goerch <jhr.walter@t-online.de>
grahameth <96447521+grahameth@users.noreply.github.com>
gwjr <502526+gwjr@users.noreply.github.com>
h-h-h-h <13482553+h-h-h-h@users.noreply.github.com>
hankcs <cnhankmc@gmail.com>
hoangmit <hoangmit@users.noreply.github.com>
hongbo.mo <352280764@qq.com>
hopkins385 <98618192+hopkins385@users.noreply.github.com>
howlger <eclipse@voormann.de>
howlger <github@voormann.de>
hutli <6594598+hutli@users.noreply.github.com>
hutli <hutli@hutli.hu>
hutli <jensstaermose@hotmail.com>
hxer7963 <hxer7963@gmail.com>
hydai <z54981220@gmail.com>
iSma <ismail.senhaji@gmail.com>
iacore <74560659+iacore@users.noreply.github.com>
igarnier <igarnier@protonmail.com>
intelmatt <61025942+intelmatt@users.noreply.github.com>
iohub <rickyang.pro@gmail.com>
jacobi petrucciani <8117202+jpetrucciani@users.noreply.github.com>
jaime-m-p <167997752+jaime-m-p@users.noreply.github.com>
jameswu2014 <545426914@qq.com>
jiez <373447296@qq.com>
jneem <joeneeman@gmail.com>
joecryptotoo <80373433+joecryptotoo@users.noreply.github.com>
johnson442 <56517414+johnson442@users.noreply.github.com>
jojorne <jojorne@users.noreply.github.com>
jon-chuang <9093549+jon-chuang@users.noreply.github.com>
jp-x-g <jpxg-dev@protonmail.com>
jukofyork <69222624+jukofyork@users.noreply.github.com>
junchao-loongson <68935141+junchao-loongson@users.noreply.github.com>
jwj7140 <32943891+jwj7140@users.noreply.github.com>
k.h.lai <adrian.k.h.lai@outlook.com>
kaizau <kaizau@users.noreply.github.com>
kalomaze <66376113+kalomaze@users.noreply.github.com>
kang <tpdns9032100@gmail.com>
katsu560 <118887472+katsu560@users.noreply.github.com>
kchro3 <62481661+kchro3@users.noreply.github.com>
khimaros <me@khimaros.com>
kiltyj <kiltyj@gmail.com>
klosax <131523366+klosax@users.noreply.github.com>
kunal-vaishnavi <115581922+kunal-vaishnavi@users.noreply.github.com>
kunnis <kunnis@users.noreply.github.com>
kuronekosaiko <EvanChanJ@163.com>
kuvaus <22169537+kuvaus@users.noreply.github.com>
kwin1412 <42286931+kwin1412@users.noreply.github.com>
l3utterfly <gc.pthzfoldr@gmail.com>
ldwang <ftgreat@163.com>
le.chang <cljs118@126.com>
leejet <leejet714@gmail.com>
limitedAtonement <limitedAtonement@users.noreply.github.com>
liuwei-git <14815172+liuwei-git@users.noreply.github.com>
lon <114724657+longregen@users.noreply.github.com>
loonerin <132926317+loonerin@users.noreply.github.com>
luoyu-intel <yu.luo@intel.com>
m3ndax <adrian.goessl@outlook.com>
maddes8cht <55592906+maddes8cht@users.noreply.github.com>
makomk <makosoft@googlemail.com>
manikbhandari <mbbhandarimanik2@gmail.com>
maor-ps <154728172+maor-ps@users.noreply.github.com>
mdrokz <mohammadmunshi@gmail.com>
mgroeber9110 <45620825+mgroeber9110@users.noreply.github.com>
minarchist <minarchist@users.noreply.github.com>
mj-shifu <77107165+mj-shifu@users.noreply.github.com>
mmyjona <jonathan.gonse@gmail.com>
momonga <115213907+mmnga@users.noreply.github.com>
moritzbrantner <31051084+moritzbrantner@users.noreply.github.com>
mzcu <milos.cubrilo@gmail.com>
nanahi <130121847+na-na-hi@users.noreply.github.com>
ngc92 <7938269+ngc92@users.noreply.github.com>
nhamanasu <45545786+nhamanasu@users.noreply.github.com>
niansa/tuxifan <anton-sa@web.de>
niansa/tuxifan <tuxifan@posteo.de>
nickp27 <nb.porter@gmail.com>
ningshanwutuobang <ningshanwutuobang@gmail.com>
nold <Nold360@users.noreply.github.com>
nopperl <54780682+nopperl@users.noreply.github.com>
nusu-github <29514220+nusu-github@users.noreply.github.com>
olexiyb <olexiyb@gmail.com>
omahs <73983677+omahs@users.noreply.github.com>
oobabooga <112222186+oobabooga@users.noreply.github.com>
opparco <parco.opaai@gmail.com>
ostix360 <55257054+ostix360@users.noreply.github.com>
pengxin99 <pengxin.yuan@intel.com>
perserk <perserk@gmail.com>
pmysl <piotr.myslinski@outlook.com>
postmasters <namnguyen@google.com>
pudepiedj <pudepiedj@gmail.com>
qingfengfenga <41416092+qingfengfenga@users.noreply.github.com>
qouoq <qouoq@fastmail.com>
qunash <anzoria@gmail.com>
rabidcopy <rabidcopy@yahoo.com>
rankaiyx <rankaiyx@rankaiyx.com>
rhjdvsgsgks <26178113+rhjdvsgsgks@users.noreply.github.com>
rhuddleston <ryan.huddleston@percona.com>
rimoliga <53384203+rimoliga@users.noreply.github.com>
runfuture <runfuture@users.noreply.github.com>
sandyiscool <sandyiscool@gmail.com>
sasha0552 <admin@sasha0552.org>
semidark <me@semidark.net>
sharpHL <132747147+sharpHL@users.noreply.github.com>
shibe2 <shibe@tuta.io>
singularity <12184989+singularity-s0@users.noreply.github.com>
sjinzh <sjinzh@gmail.com>
sjxx <63994076+ylsdamxssjxxdd@users.noreply.github.com>
slaren <2141330+slaren@users.noreply.github.com>
slaren <slarengh@gmail.com>
snadampal <87143774+snadampal@users.noreply.github.com>
staviq <staviq@gmail.com>
stduhpf <stephduh@live.fr>
strawberrymelonpanda <152940198+strawberrymelonpanda@users.noreply.github.com>
swittk <switt1995@gmail.com>
takov751 <40316768+takov751@users.noreply.github.com>
tarcey <cey.tarik@gmail.com>
texmex76 <40733439+texmex76@users.noreply.github.com>
thement <40525767+thement@users.noreply.github.com>
tjohnman <tjohnman@users.noreply.github.com>
tslmy <tslmy@users.noreply.github.com>
ubik2 <ubik2@users.noreply.github.com>
uint256_t <konndennsa@gmail.com>
uint256_t <maekawatoshiki1017@gmail.com>
unbounded <haakon@likedan.net>
valiray <133289098+valiray@users.noreply.github.com>
vik <vikhyatk@gmail.com>
viric <viric@viric.name>
vodkaslime <646329483@qq.com>
vvhg1 <94630311+vvhg1@users.noreply.github.com>
vxiiduu <73044267+vxiiduu@users.noreply.github.com>
wbpxre150 <100937007+wbpxre150@users.noreply.github.com>
whoreson <139810751+whoreson@users.noreply.github.com>
woachk <24752637+woachk@users.noreply.github.com>
wonjun Jang <strutive07@gmail.com>
woodx <124784234+woodx9@users.noreply.github.com>
wzy <32936898+Freed-Wu@users.noreply.github.com>
xaedes <xaedes@gmail.com>
xaedes <xaedes@googlemail.com>
xloem <0xloem@gmail.com>
yangli2 <yangli2@gmail.com>
yuiseki <yuiseki@gmail.com>
zakkor <edward.partenie@gmail.com>
zhangkaihuo <zhangkaihuo@gmail.com>
zhouwg <6889919+zhouwg@users.noreply.github.com>
zhouwg <zhouwg2000@gmail.com>
zrm <trustiosity.zrm@gmail.com>
Ștefan-Gabriel Muscalu <legraphista@users.noreply.github.com>
源文雨 <41315874+fumiama@users.noreply.github.com>
Нияз Гарифзянов <112617865+garrnizon@users.noreply.github.com>
@@ -2593,51 +2593,35 @@ std::vector<llama_token> llama_tokenize(
     }
 }
 
 std::string llama_token_to_piece(const struct llama_context * ctx, llama_token token, bool special) {
-    std::vector<char> result(8, 0);
-    const int n_tokens = llama_token_to_piece(llama_get_model(ctx), token, result.data(), result.size(), special);
-    if (n_tokens < 0) {
-        result.resize(-n_tokens);
-        int check = llama_token_to_piece(llama_get_model(ctx), token, result.data(), result.size(), special);
-        GGML_ASSERT(check == -n_tokens);
-    } else {
-        result.resize(n_tokens);
-    }
-
-    return std::string(result.data(), result.size());
-}
-
-std::string llama_detokenize_spm(llama_context * ctx, const std::vector<llama_token> & tokens) {
-    const llama_token bos_id = llama_token_bos(llama_get_model(ctx));
-
-    std::string piece;
-    std::string result;
-
-    for (size_t i = 0; i < tokens.size(); ++i) {
-        piece = llama_token_to_piece(ctx, tokens[i]);
-
-        // remove the leading space of the first non-BOS token
-        if (((tokens[0] == bos_id && i == 1) || (tokens[0] != bos_id && i == 0)) && piece[0] == ' ') {
-            piece = piece.substr(1);
-        }
-
-        result += piece;
-    }
-
-    return result;
-}
-
-std::string llama_detokenize_bpe(llama_context * ctx, const std::vector<llama_token> & tokens) {
-    std::string piece;
-    std::string result;
-
-    for (size_t i = 0; i < tokens.size(); ++i) {
-        piece = llama_token_to_piece(ctx, tokens[i]);
-
-        result += piece;
-    }
-
-    // NOTE: the original tokenizer decodes bytes after collecting the pieces.
-    return result;
+    std::string piece;
+    piece.resize(piece.capacity());  // using string internal cache, 15 bytes + '\n'
+    const int n_chars = llama_token_to_piece(llama_get_model(ctx), token, &piece[0], piece.size(), 0, special);
+    if (n_chars < 0) {
+        piece.resize(-n_chars);
+        int check = llama_token_to_piece(llama_get_model(ctx), token, &piece[0], piece.size(), 0, special);
+        GGML_ASSERT(check == -n_chars);
+    }
+    else {
+        piece.resize(n_chars);
+    }
+
+    return piece;
+}
+
+std::string llama_detokenize(llama_context * ctx, const std::vector<llama_token> & tokens, bool special) {
+    std::string text;
+    text.resize(std::max(text.capacity(), tokens.size()));
+    int32_t n_chars = llama_detokenize(llama_get_model(ctx), tokens.data(), (int32_t)tokens.size(), &text[0], (int32_t)text.size(), false, special);
+    if (n_chars < 0) {
+        text.resize(-n_chars);
+        n_chars = llama_detokenize(llama_get_model(ctx), tokens.data(), (int32_t)tokens.size(), &text[0], (int32_t)text.size(), false, special);
+        GGML_ASSERT(n_chars <= (int32_t)text.size());  // whitespace trimming is performed after per-token detokenization
+    }
+
+    text.resize(n_chars);
+
+    // NOTE: the original tokenizer decodes bytes after collecting the pieces.
+    return text;
 }
 
 bool llama_should_add_bos_token(const llama_model * model) {
@@ -367,21 +367,13 @@ std::string llama_token_to_piece(
         llama_token   token,
         bool          special = true);
 
-// TODO: these should be moved in llama.h C-style API under single `llama_detokenize` function
-// that takes into account the tokenizer type and decides how to handle the leading space
-//
 // detokenizes a vector of tokens into a string
 // should work similar to Python's `tokenizer.decode`
-// removes the leading space from the first non-BOS token
-std::string llama_detokenize_spm(
-        llama_context * ctx,
-        const std::vector<llama_token> & tokens);
-
-// detokenizes a vector of tokens into a string
-// should work similar to Python's `tokenizer.decode`
-std::string llama_detokenize_bpe(
+// optionally renders special/control tokens
+std::string llama_detokenize(
         llama_context * ctx,
-        const std::vector<llama_token> & tokens);
+        const std::vector<llama_token> & tokens,
+        bool   special = true);
 
 // Uses the value from the model metadata if possible, otherwise
 // defaults to true when model type is SPM, otherwise false.
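Taken together, the two hunks above replace the SPM/BPE-specific detokenizers with a single `llama_detokenize` helper and route `llama_token_to_piece` through the updated C API. Below is a minimal caller-side sketch of the new helpers; it is hypothetical usage, not part of the diff, and assumes an initialized `llama_context * ctx` plus the `llama_tokenize` helper declared in the same `common.h` (exact parameter names may differ between revisions):

```cpp
// Hypothetical round-trip sketch using the common.h helpers shown above.
#include <cstdio>
#include <string>
#include <vector>
#include "common.h"

static void round_trip(llama_context * ctx) {
    // tokenize the prompt, letting the helper add special tokens (e.g. BOS)
    std::vector<llama_token> toks = llama_tokenize(ctx, "Hello world", /*add_special=*/true);

    // render each token via the resize-and-retry wrapper shown above
    for (const llama_token t : toks) {
        printf("%d -> '%s'\n", t, llama_token_to_piece(ctx, t).c_str());
    }

    // one tokenizer-agnostic call now replaces llama_detokenize_spm/_bpe
    printf("text: '%s'\n", llama_detokenize(ctx, toks, /*special=*/false).c_str());
}
```

Both underlying C calls return the negated required size when the destination buffer is too small, which is why the wrappers above resize once and retry.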
@@ -1,119 +0,0 @@
## Add a new model architecture to `llama.cpp`

Adding a model requires a few steps:

1. Convert the model to GGUF
2. Define the model architecture in `llama.cpp`
3. Build the GGML graph implementation

After following these steps, you can open a PR.

Also, it is important to check that the examples and main ggml backends (CUDA, METAL, CPU) are working with the new architecture, especially:
- [main](../examples/main)
- [imatrix](../examples/imatrix)
- [quantize](../examples/quantize)
- [server](../examples/server)

### 1. Convert the model to GGUF

This step is done in Python with a `convert` script using the [gguf](https://pypi.org/project/gguf/) library.
Depending on the model architecture, you can use either [convert_hf_to_gguf.py](../convert_hf_to_gguf.py) or [examples/convert_legacy_llama.py](../examples/convert_legacy_llama.py) (for `llama/llama2` models in `.pth` format).

The convert script reads the model configuration, tokenizer, and tensor names and data, and converts them to GGUF metadata and tensors.

The required steps to implement for an HF model are:

1. Register the model with the `Model.register` annotation in a new `Model` subclass, for example:

```python
@Model.register("MyModelForCausalLM")
class MyModel(Model):
    model_arch = gguf.MODEL_ARCH.GROK
```

2. Define the layout of the GGUF tensors in [constants.py](../gguf-py/gguf/constants.py)

Add an enum entry in `MODEL_ARCH`, the model's human-friendly name in `MODEL_ARCH_NAMES` and the GGUF tensor names in `MODEL_TENSORS`.

Example for the `falcon` model:

```python
MODEL_ARCH.FALCON: [
    MODEL_TENSOR.TOKEN_EMBD,
    MODEL_TENSOR.OUTPUT_NORM,
    MODEL_TENSOR.OUTPUT,
    MODEL_TENSOR.ATTN_NORM,
    MODEL_TENSOR.ATTN_NORM_2,
    MODEL_TENSOR.ATTN_QKV,
    MODEL_TENSOR.ATTN_OUT,
    MODEL_TENSOR.FFN_DOWN,
    MODEL_TENSOR.FFN_UP,
]
```

3. Map the original tensor names to their standardized equivalents in GGUF

As a general rule, before adding a new tensor name to GGUF, be sure an equivalent naming does not already exist.

Once you have found the GGUF tensor name equivalent, add it to the [tensor_mapping.py](../gguf-py/gguf/tensor_mapping.py) file.

If the tensor name belongs to a repeated layer/block, substitute the block index with the keyword `bid`.

Example for the normalization tensor in attention layers:

```python
block_mappings_cfg: dict[MODEL_TENSOR, tuple[str, ...]] = {
    # Attention norm
    MODEL_TENSOR.ATTN_NORM: (
        "gpt_neox.layers.{bid}.input_layernorm",  # gptneox
        "transformer.h.{bid}.ln_1",               # gpt2 gpt-j refact qwen
        "transformer.blocks.{bid}.norm_1",        # mpt
        ...
    )
}
```

`transformer.blocks.{bid}.norm_1` will be mapped to `blk.{bid}.attn_norm` in GGUF.

Depending on the model configuration, tokenizer, code and tensor layout, you will have to override:
- `Model#set_gguf_parameters`
- `Model#set_vocab`
- `Model#write_tensors`

NOTE: Tensor names must end with the `.weight` suffix; that is the convention, and several tools like `quantize` expect this suffix on the weight tensors.

### 2. Define the model architecture in `llama.cpp`

The model params and tensor layout must be defined in `llama.cpp` (a sketch of what steps 1 and 2 can look like follows the note below):
1. Define a new `llm_arch`
2. Define the tensors layout in `LLM_TENSOR_NAMES`
3. Add any non-standard metadata in `llm_load_hparams`
4. Create the tensors for inference in `llm_load_tensors`
5. If the model has a RoPE operation, add the rope type in `llama_rope_type`

NOTE: The dimensions in `ggml` are typically in the reverse order of the `pytorch` dimensions.
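The following is a hypothetical sketch of steps 1 and 2; all identifiers (in particular `LLM_ARCH_MYMODEL` and the enums reproduced here) are illustrative, and the real tables in `src/llama.cpp` are far larger:

```cpp
// Hypothetical sketch, not the actual llama.cpp source.
#include <map>
#include <string>

enum llm_arch {
    LLM_ARCH_LLAMA,
    // ... existing architectures ...
    LLM_ARCH_MYMODEL,   // step 1: the new architecture entry
    LLM_ARCH_UNKNOWN,
};

enum llm_tensor {
    LLM_TENSOR_TOKEN_EMBD,
    LLM_TENSOR_OUTPUT_NORM,
    LLM_TENSOR_OUTPUT,
    LLM_TENSOR_ATTN_NORM,
    LLM_TENSOR_ATTN_QKV,
    LLM_TENSOR_ATTN_OUT,
    LLM_TENSOR_FFN_DOWN,
    LLM_TENSOR_FFN_UP,
};

// step 2: the tensor layout keyed by architecture; the "bid" placeholder from
// the Python side corresponds to the per-block index in the "blk.*" names
static const std::map<llm_arch, std::map<llm_tensor, std::string>> LLM_TENSOR_NAMES = {
    { LLM_ARCH_MYMODEL, {
        { LLM_TENSOR_TOKEN_EMBD,  "token_embd" },
        { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
        { LLM_TENSOR_OUTPUT,      "output" },
        { LLM_TENSOR_ATTN_NORM,   "blk.%d.attn_norm" },
        { LLM_TENSOR_ATTN_QKV,    "blk.%d.attn_qkv" },
        { LLM_TENSOR_ATTN_OUT,    "blk.%d.attn_output" },
        { LLM_TENSOR_FFN_DOWN,    "blk.%d.ffn_down" },
        { LLM_TENSOR_FFN_UP,      "blk.%d.ffn_up" },
    } },
};
```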
### 3. Build the GGML graph implementation

This is the most fun part: you have to provide the inference graph implementation of the new model architecture in `llama_build_graph`.

Have a look at existing implementations like `build_llama`, `build_dbrx` or `build_bert`.

When implementing a new graph, please note that the underlying `ggml` backends might not support all of its operations; support for missing backend operations can be added in another PR.

Note: to debug the inference graph, you can use [llama-eval-callback](../examples/eval-callback).
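The graph builders are too large to reproduce here, but the underlying pattern is small: create tensors in a `ggml` context, chain operations into a compute graph, and evaluate it. Here is a minimal, self-contained toy sketch of that pattern; it is not an actual llama.cpp builder, and it assumes the single-context `ggml.h` API of this era:

```cpp
#include <stdio.h>
#include "ggml.h"

// Toy graph: y = rms_norm(W @ x). This is the same build/compute pattern the
// build_* functions use, at a vastly smaller scale (no KV cache, RoPE, etc.).
int main() {
    struct ggml_init_params params = {
        /*.mem_size   =*/ 16 * 1024 * 1024,
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ false,
    };
    struct ggml_context * ctx = ggml_init(params);

    struct ggml_tensor * W = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 8, 8); // weight matrix
    struct ggml_tensor * x = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 8);    // input vector
    ggml_set_f32(W, 0.1f);
    ggml_set_f32(x, 1.0f);

    struct ggml_tensor * cur = ggml_mul_mat(ctx, W, x);   // matrix-vector product
    cur = ggml_rms_norm(ctx, cur, 1e-5f);                 // normalization, as in most layers

    struct ggml_cgraph * gf = ggml_new_graph(ctx);
    ggml_build_forward_expand(gf, cur);
    ggml_graph_compute_with_ctx(ctx, gf, /*n_threads=*/1);

    printf("out[0] = %f\n", ggml_get_f32_1d(cur, 0));
    ggml_free(ctx);
    return 0;
}
```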
## GGUF specification

https://github.com/ggerganov/ggml/blob/master/docs/gguf.md

## Resources

- YaRN RoPE scaling https://github.com/ggerganov/llama.cpp/pull/2268
- support Baichuan serial models https://github.com/ggerganov/llama.cpp/pull/3009
- support attention bias https://github.com/ggerganov/llama.cpp/pull/4283
- Mixtral support https://github.com/ggerganov/llama.cpp/pull/4406
- BERT embeddings https://github.com/ggerganov/llama.cpp/pull/5423
- Grok-1 support https://github.com/ggerganov/llama.cpp/pull/6204
- Command R Plus support https://github.com/ggerganov/llama.cpp/pull/6491
- support arch DBRX https://github.com/ggerganov/llama.cpp/pull/6515
- How to convert HuggingFace model to GGUF format https://github.com/ggerganov/llama.cpp/discussions/2948
@@ -1,104 +0,0 @@
# Debugging Tests Tips

## How to run & debug a specific test without anything else, to keep the feedback loop short?

There is a script called `debug-test.sh` in the `scripts` folder that takes a regex and an optional test number as its parameters.

For example, running the following command will output an interactive list from which you can select a test. It takes this form:

`debug-test.sh [OPTION]... <test_regex> <test_number>`

It will then build & run the test in the debugger for you.

To just execute a test and get back a PASS or FAIL message run:

```bash
./scripts/debug-test.sh test-tokenizer
```

To test in GDB use the `-g` flag to enable gdb test mode.

```bash
./scripts/debug-test.sh -g test-tokenizer

# Once in the debugger, i.e. at the chevrons prompt, setting a breakpoint could be as follows:
>>> b main
```

To speed up the testing loop, if you know your test number you can just run it similar to below:

```bash
./scripts/debug-test.sh test 23
```

For further reference use `debug-test.sh -h` to print help.

### How does the script work?

If you want to be able to use the concepts contained in the script separately, the important ones are briefly outlined below.

#### Step 1: Reset and Setup folder context

From the base of this repository, let's create `build-ci-debug` as our build context.

```bash
rm -rf build-ci-debug && mkdir build-ci-debug && cd build-ci-debug
```

#### Step 2: Setup Build Environment and Compile Test Binaries

Set up and trigger a build in debug mode. You may adapt the arguments as needed, but in this case these are sane defaults.

```bash
cmake -DCMAKE_BUILD_TYPE=Debug -DLLAMA_CUDA=1 -DLLAMA_FATAL_WARNINGS=ON ..
make -j
```

#### Step 3: Find all tests available that match the REGEX

The output of this command will give you the command & arguments needed to run GDB.

* `-R test-tokenizer` : looks for all the test files named `test-tokenizer*` (R=Regex)
* `-N` : "show-only" disables test execution & shows test commands that you can feed to GDB.
* `-V` : Verbose Mode

```bash
ctest -R "test-tokenizer" -V -N
```

This may return output similar to below (focusing on key lines to pay attention to):

```bash
...
1: Test command: ~/llama.cpp/build-ci-debug/bin/test-tokenizer-0 "~/llama.cpp/tests/../models/ggml-vocab-llama-spm.gguf"
1: Working Directory: .
Labels: main
  Test #1: test-tokenizer-0-llama-spm
...
4: Test command: ~/llama.cpp/build-ci-debug/bin/test-tokenizer-0 "~/llama.cpp/tests/../models/ggml-vocab-falcon.gguf"
4: Working Directory: .
Labels: main
  Test #4: test-tokenizer-0-falcon
...
```

#### Step 4: Identify Test Command for Debugging

So for test #1 above we can tell these two pieces of relevant information:
* Test Binary: `~/llama.cpp/build-ci-debug/bin/test-tokenizer-0`
* Test GGUF Model: `~/llama.cpp/tests/../models/ggml-vocab-llama-spm.gguf`

#### Step 5: Run GDB on test command

Based on the ctest 'test command' report above, we can then run a gdb session via this command:

```bash
gdb --args ${Test Binary} ${Test GGUF Model}
```

Example:

```bash
gdb --args ~/llama.cpp/build-ci-debug/bin/test-tokenizer-0 "~/llama.cpp/tests/../models/ggml-vocab-llama-spm.gguf"
```
@ -229,7 +229,7 @@ private func tokenize(text: String, add_bos: Bool) -> [llama_token] {

 private func token_to_piece(token: llama_token, buffer: inout [CChar]) -> String? {
     var result = [CChar](repeating: 0, count: 8)
-    let nTokens = llama_token_to_piece(model, token, &result, Int32(result.count), false)
+    let nTokens = llama_token_to_piece(model, token, &result, Int32(result.count), 0, false)
     if nTokens < 0 {
         let actualTokensCount = -Int(nTokens)
         result = .init(repeating: 0, count: actualTokensCount)
@ -238,6 +238,7 @@ private func token_to_piece(token: llama_token, buffer: inout [CChar]) -> String
             token,
             &result,
             Int32(result.count),
+            0,
             false
         )
         assert(check == actualTokensCount)
@ -322,7 +322,7 @@ actor LlamaContext {
         defer {
             result.deallocate()
         }
-        let nTokens = llama_token_to_piece(model, token, result, 8, false)
+        let nTokens = llama_token_to_piece(model, token, result, 8, 0, false)

         if nTokens < 0 {
             let newResult = UnsafeMutablePointer<Int8>.allocate(capacity: Int(-nTokens))
@ -330,7 +330,7 @@ actor LlamaContext {
         defer {
             newResult.deallocate()
         }
-        let nNewTokens = llama_token_to_piece(model, token, newResult, -nTokens, false)
+        let nNewTokens = llama_token_to_piece(model, token, newResult, -nTokens, 0, false)
         let bufferPointer = UnsafeBufferPointer(start: newResult, count: Int(nNewTokens))
         return Array(bufferPointer)
     } else {
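The extra `0` threaded through each call site above is the new `lstrip` argument of `llama_token_to_piece` (see the updated `llama.h` declaration further down, which documents it as the number of leading spaces the caller may skip); passing `0` preserves the previous behavior of not skipping any leading spaces.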
@ -4,7 +4,89 @@ You can also use the [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-

 Note: It is synced from llama.cpp `main` every 6 hours.

-## Llama 2 7B
+Example usage:
+
+```bash
+# obtain the official LLaMA model weights and place them in ./models
+ls ./models
+llama-2-7b tokenizer_checklist.chk tokenizer.model
+
+# [Optional] for models using BPE tokenizers
+ls ./models
+<folder containing weights and tokenizer json> vocab.json
+
+# [Optional] for PyTorch .bin models like Mistral-7B
+ls ./models
+<folder containing weights and tokenizer json>
+
+# install Python dependencies
+python3 -m pip install -r requirements.txt
+
+# convert the model to ggml FP16 format
+python3 convert_hf_to_gguf.py models/mymodel/
+
+# quantize the model to 4-bits (using Q4_K_M method)
+./llama-quantize ./models/mymodel/ggml-model-f16.gguf ./models/mymodel/ggml-model-Q4_K_M.gguf Q4_K_M
+
+# update the gguf filetype to current version if older version is now unsupported
+./llama-quantize ./models/mymodel/ggml-model-Q4_K_M.gguf ./models/mymodel/ggml-model-Q4_K_M-v2.gguf COPY
+```
+
+Run the quantized model:
+
+```bash
+# start inference on a gguf model
+./llama-cli -m ./models/mymodel/ggml-model-Q4_K_M.gguf -n 128
+```
+
+When running the larger models, make sure you have enough disk space to store all the intermediate files.
+
+## Memory/Disk Requirements
+
+As the models are currently fully loaded into memory, you will need adequate disk space to save them and sufficient RAM to load them. At the moment, memory and disk requirements are the same.
+
+| Model | Original size | Quantized size (Q4_0) |
+|------:|--------------:|----------------------:|
+|    7B |         13 GB |                3.9 GB |
+|   13B |         24 GB |                7.8 GB |
+|   30B |         60 GB |               19.5 GB |
+|   65B |        120 GB |               38.5 GB |
+
+## Quantization
+
+Several quantization methods are supported. They differ in the resulting model disk size and inference speed.
+
+*(outdated)*
+
+| Model | Measure      |    F16 |   Q4_0 |   Q4_1 |   Q5_0 |   Q5_1 |   Q8_0 |
+|------:|--------------|-------:|-------:|-------:|-------:|-------:|-------:|
+|    7B | perplexity   | 5.9066 | 6.1565 | 6.0912 | 5.9862 | 5.9481 | 5.9070 |
+|    7B | file size    |  13.0G |   3.5G |   3.9G |   4.3G |   4.7G |   6.7G |
+|    7B | ms/tok @ 4th |    127 |     55 |     54 |     76 |     83 |     72 |
+|    7B | ms/tok @ 8th |    122 |     43 |     45 |     52 |     56 |     67 |
+|    7B | bits/weight  |   16.0 |    4.5 |    5.0 |    5.5 |    6.0 |    8.5 |
+|   13B | perplexity   | 5.2543 | 5.3860 | 5.3608 | 5.2856 | 5.2706 | 5.2548 |
+|   13B | file size    |  25.0G |   6.8G |   7.6G |   8.3G |   9.1G |    13G |
+|   13B | ms/tok @ 4th |      - |    103 |    105 |    148 |    160 |    131 |
+|   13B | ms/tok @ 8th |      - |     73 |     82 |     98 |    105 |    128 |
+|   13B | bits/weight  |   16.0 |    4.5 |    5.0 |    5.5 |    6.0 |    8.5 |
+
+- [k-quants](https://github.com/ggerganov/llama.cpp/pull/1684)
+- recent k-quants improvements and new i-quants
+  - [#2707](https://github.com/ggerganov/llama.cpp/pull/2707)
+  - [#2807](https://github.com/ggerganov/llama.cpp/pull/2807)
+  - [#4773 - 2-bit i-quants (inference)](https://github.com/ggerganov/llama.cpp/pull/4773)
+  - [#4856 - 2-bit i-quants (inference)](https://github.com/ggerganov/llama.cpp/pull/4856)
+  - [#4861 - importance matrix](https://github.com/ggerganov/llama.cpp/pull/4861)
+  - [#4872 - MoE models](https://github.com/ggerganov/llama.cpp/pull/4872)
+  - [#4897 - 2-bit quantization](https://github.com/ggerganov/llama.cpp/pull/4897)
+  - [#4930 - imatrix for all k-quants](https://github.com/ggerganov/llama.cpp/pull/4930)
+  - [#4951 - imatrix on the GPU](https://github.com/ggerganov/llama.cpp/pull/4957)
+  - [#4969 - imatrix for legacy quants](https://github.com/ggerganov/llama.cpp/pull/4969)
+  - [#4996 - k-quants tuning](https://github.com/ggerganov/llama.cpp/pull/4996)
+  - [#5060 - Q3_K_XS](https://github.com/ggerganov/llama.cpp/pull/5060)
+  - [#5196 - 3-bit i-quants](https://github.com/ggerganov/llama.cpp/pull/5196)
+  - [quantization tuning](https://github.com/ggerganov/llama.cpp/pull/5320), [another one](https://github.com/ggerganov/llama.cpp/pull/5334), and [another one](https://github.com/ggerganov/llama.cpp/pull/5361)
+
+**Llama 2 7B**

 | Quantization | Bits per Weight (BPW) |
 |--------------|-----------------------|
@ -18,7 +100,8 @@ Note: It is synced from llama.cpp `main` every 6 hours.
 | Q5_K_M       | 5.68                  |
 | Q6_K         | 6.56                  |

-## Llama 2 13B
+**Llama 2 13B**

 Quantization | Bits per Weight (BPW)
 -- | --
 Q2_K | 3.34
@ -31,7 +114,7 @@ Q5_K_S | 5.51
 Q5_K_M | 5.67
 Q6_K | 6.56

-# Llama 2 70B
+**Llama 2 70B**

 Quantization | Bits per Weight (BPW)
 -- | --
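A rough size sanity check for these tables (a back-of-the-envelope estimate, not a measured value): file size ≈ n_params × BPW / 8 bytes. For a 7B model at Q4_0's 4.5 bits/weight this gives 7×10^9 × 4.5 / 8 ≈ 3.9 GB, matching the Q4_0 column in the Memory/Disk table above; at Q6_K's 6.56 BPW the same estimate gives ≈ 5.7 GB.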
@ -906,6 +906,7 @@ extern "C" {
    /// @param tokens The tokens pointer must be large enough to hold the resulting tokens.
    /// @return Returns the number of tokens on success, no more than n_tokens_max
    /// @return Returns a negative number on failure - the number of tokens that would have been returned
+   /// @param add_special Allow to add BOS and EOS tokens if model is configured to do so.
    /// @param parse_special Allow tokenizing special and/or control tokens which otherwise are not exposed and treated
    ///                      as plaintext. Does not insert a leading space.
    LLAMA_API int32_t llama_tokenize(
@ -920,15 +921,31 @@ extern "C" {
    // Token Id -> Piece.
    // Uses the vocabulary in the provided context.
    // Does not write null terminator to the buffer.
-   // User code is responsible to remove the leading whitespace of the first non-BOS token when decoding multiple tokens.
+   // User can skip up to 'lstrip' leading spaces before copying (useful when encoding/decoding multiple tokens with 'add_space_prefix')
    // @param special If true, special tokens are rendered in the output.
    LLAMA_API int32_t llama_token_to_piece(
            const struct llama_model * model,
                         llama_token   token,
                                char * buf,
                             int32_t   length,
+                            int32_t   lstrip,
                                bool   special);

+   /// @details Convert the provided tokens into text (inverse of llama_tokenize()).
+   /// @param text The char pointer must be large enough to hold the resulting text.
+   /// @return Returns the number of chars/bytes on success, no more than text_len_max.
+   /// @return Returns a negative number on failure - the number of chars/bytes that would have been returned.
+   /// @param remove_special Allow to remove BOS and EOS tokens if model is configured to do so.
+   /// @param unparse_special If true, special tokens are rendered in the output.
+   LLAMA_API int32_t llama_detokenize(
+           const struct llama_model * model,
+                  const llama_token * tokens,
+                            int32_t   n_tokens,
+                               char * text,
+                            int32_t   text_len_max,
+                               bool   remove_special,
+                               bool   unparse_special);

    /// Apply chat template. Inspired by hf apply_chat_template() on python.
    /// Both "model" and "custom_template" are optional, but at least one is required. "custom_template" has higher precedence than "model"
    /// NOTE: This function does not use a jinja parser. It only support a pre-defined list of template. See more: https://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template
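Both functions use the same negative-return convention as `llama_tokenize`: if the buffer is too small, the call returns minus the required size. A minimal C++ sketch of how a caller might drive the two declarations above (the helper names `piece_of` and `detok` are mine, and a loaded `llama_model *` is assumed):

```cpp
// Sketch only: assumes llama.h from this revision and an already-loaded `model`.
#include "llama.h"

#include <string>
#include <vector>

// Convert one token to its text piece, retrying with a larger buffer when the
// first call reports (as a negative count) how many bytes it actually needs.
static std::string piece_of(const llama_model * model, llama_token token) {
    std::string piece(8, '\0');
    int32_t n = llama_token_to_piece(model, token, &piece[0], (int32_t) piece.size(),
                                     /*lstrip =*/ 0, /*special =*/ false);
    if (n < 0) {
        piece.resize(-n);
        n = llama_token_to_piece(model, token, &piece[0], (int32_t) piece.size(), 0, false);
    }
    piece.resize(n);
    return piece;
}

// Detokenize a whole sequence in one call, using the same retry pattern.
static std::string detok(const llama_model * model, const std::vector<llama_token> & toks) {
    std::string text(256, '\0');
    int32_t n = llama_detokenize(model, toks.data(), (int32_t) toks.size(),
                                 &text[0], (int32_t) text.size(),
                                 /*remove_special =*/ false, /*unparse_special =*/ false);
    if (n < 0) {
        text.resize(-n);
        n = llama_detokenize(model, toks.data(), (int32_t) toks.size(),
                             &text[0], (int32_t) text.size(), false, false);
    }
    text.resize(n);
    return text;
}
```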
34 koboldcpp.py

@ -1775,7 +1775,7 @@ def show_new_gui():

     tabs = ctk.CTkFrame(root, corner_radius = 0, width=windowwidth, height=windowheight-50)
     tabs.grid(row=0, stick="nsew")
-    tabnames= ["Quick Launch", "Hardware", "Tokens", "Model Files", "Network", "Horde Worker","Image Gen","Audio"]
+    tabnames= ["Quick Launch", "Hardware", "Tokens", "Model Files", "Network", "Horde Worker","Image Gen","Audio","Extra"]
     navbuttons = {}
     navbuttonframe = ctk.CTkFrame(tabs, width=100, height=int(tabs.cget("height")))
     navbuttonframe.grid(row=0, column=0, padx=2,pady=2)

@ -2464,7 +2464,37 @@ def show_new_gui():

     # audio tab
     audio_tab = tabcontent["Audio"]
-    makefileentry(audio_tab, "Whisper Model:", "Select Whisper .bin Model File", whisper_model_var, 1, width=280, filetypes=[("*.bin","*.bin")], tooltiptxt="Select a Whisper .bin model file on disk to be loaded.")
+    makefileentry(audio_tab, "Whisper Model (Speech-To-Text):", "Select Whisper .bin Model File", whisper_model_var, 1, width=280, filetypes=[("*.bin","*.bin")], tooltiptxt="Select a Whisper .bin model file on disk to be loaded.")
+
+    def unpack_to_dir():
+        from tkinter.filedialog import askdirectory
+        from tkinter import messagebox
+        import shutil
+        destpath = askdirectory(title='Select an empty folder to unpack KoboldCpp')
+        if not destpath:
+            return
+        srcpath = os.path.abspath(os.path.dirname(__file__))
+        if os.path.isdir(srcpath) and os.path.isdir(destpath) and not os.listdir(destpath):
+            try:
+                messagebox.showinfo("Unpack Starting", f"KoboldCpp will be extracted to {destpath}\nThis process may take several seconds to complete.")
+                for item in os.listdir(srcpath):
+                    s = os.path.join(srcpath, item)
+                    d = os.path.join(destpath, item)
+                    if os.path.isdir(s):
+                        shutil.copytree(s, d, False, None)
+                    else:
+                        shutil.copy2(s, d)
+                messagebox.showinfo("KoboldCpp Unpack Success", f"KoboldCpp extracted to {destpath}")
+            except Exception as e:
+                messagebox.showerror("Error", f"An error occurred while unpacking: {e}")
+        else:
+            messagebox.showwarning("Invalid Selection", "The folder is not empty or invalid. Please select an empty folder.")
+
+    # extra tab
+    extra_tab = tabcontent["Extra"]
+    makelabel(extra_tab, "Unpack KoboldCpp to a local directory to modify its files.", 1, 0)
+    makelabel(extra_tab, "You can also launch via koboldcpp.py for faster startup.", 2, 0)
+    ctk.CTkButton(extra_tab , text = "Unpack KoboldCpp To Folder", command = unpack_to_dir ).grid(row=3,column=0, stick="w", padx= 8, pady=2)

     # launch
     def guilaunch():
Binary file not shown.
@ -1,112 +0,0 @@
[deleted test fixture: tokenizer input strings separated by __ggml_vocab_test__ markers, including whitespace-only cases (unrecoverable in this rendering), "ied 4 ½ months", "Führer", "Hello world"/"Hello World" variants, "this is 🦙.cpp", "w048 7tuijk dsdfhu", Bulgarian, Khmer and emoji strings, digit runs "3" through "333333333", "Cửa Việt", "discards", and one long combined stress-test line]
@ -1,46 +0,0 @@
[deleted test fixture: the model-specific expected token IDs for the tokenizer inputs above]
Binary file not shown.
@ -1,112 +0,0 @@
[deleted test fixture: the same tokenizer input strings as above, duplicated for a second vocab]
@ -1,46 +0,0 @@
[deleted test fixture: the expected token IDs for the second vocab]
@ -1,112 +0,0 @@
[deleted test fixture: the same tokenizer input strings again, duplicated for a third vocab]
@ -1,46 +0,0 @@
[deleted test fixture: the expected token IDs for the third vocab]
@ -1,2 +0,0 @@
-r ./requirements-convert_legacy_llama.txt
torch~=2.2.1

@ -1,2 +0,0 @@
-r ./requirements-convert_legacy_llama.txt
torch~=2.2.1

@ -1 +0,0 @@
-r ./requirements-convert_legacy_llama.txt
@ -1,203 +0,0 @@
#!/bin/bash

PROG=${0##*/}
build_dir="build-ci-debug"

# Print Color Commands
red=$(tput setaf 1)
green=$(tput setaf 2)
yellow=$(tput setaf 3)
blue=$(tput setaf 4)
magenta=$(tput setaf 5)
cyan=$(tput setaf 6)
normal=$(tput sgr0)


# Print Help Message
####################

print_full_help() {
  cat << EOF
Usage: $PROG [OPTION]... <test_regex> (test_number)
Debug specific ctest program.

Options:
  -h, --help            display this help and exit
  -g                    run in gdb mode

Arguments:
  <test_regex>     (Mandatory) Supply one regex to the script to filter tests
  (test_number)    (Optional) Test number to run a specific test

Example:
  $PROG test-tokenizer
  $PROG test-tokenizer 3
EOF
}

abort() {
  echo "Error: $1" >&2
  cat << EOF >&2
Usage: $PROG [OPTION]... <test_regex> (test_number)
Debug specific ctest program.
Refer to --help for full instructions.
EOF
  exit 1
}


# Dependency Sanity Check
#########################

check_dependency() {
  command -v "$1" >/dev/null 2>&1 || {
    abort "$1 is required but not found. Please install it and try again."
  }
}

check_dependency ctest
check_dependency cmake


# Step 0: Check the args
########################

if [ x"$1" = x"-h" ] || [ x"$1" = x"--help" ]; then
  print_full_help >&2
  exit 0
fi

# Parse command-line options
gdb_mode=false
while getopts "g" opt; do
  case $opt in
    g)
      gdb_mode=true
      echo "gdb mode enabled"
      ;;
  esac
done

# Shift the option parameters
shift $((OPTIND - 1))

# Positional Argument Processing : <test_regex>
if [ -z "${1}" ]; then
  abort "Test regex is required"
else
  test_suite=${1:-}
fi

# Positional Argument Processing : (test_number)
test_number=${2:-}


# Step 1: Reset and Setup folder context
########################################

## Sanity check that we are actually in a git repo
repo_root=$(git rev-parse --show-toplevel)
if [ ! -d "$repo_root" ]; then
  abort "Not in a Git repository."
fi

## Reset folder to root context of git repo, then create and enter the build directory
pushd "$repo_root"
rm -rf "$build_dir" && mkdir "$build_dir" || abort "Failed to make $build_dir"


# Step 2: Setup Build Environment and Compile Test Binaries
###########################################################

# Note: test-eval-callback requires -DLLAMA_CURL
cmake -B "./$build_dir" -DCMAKE_BUILD_TYPE=Debug -DGGML_CUDA=1 -DLLAMA_CURL=1 || abort "Failed to build environment"
pushd "$build_dir"
make -j || abort "Failed to compile"
popd > /dev/null || exit 1


# Step 3: Find all available tests that match REGEX
####################################################

# Ctest Gather Tests
# `-R test-tokenizer` : looks for all the test files named `test-tokenizer*` (R=Regex)
# `-N` : "show-only" disables test execution & shows test commands that you can feed to GDB.
# `-V` : Verbose Mode
printf "\n\nGathering tests that fit REGEX: ${test_suite} ...\n"
pushd "$build_dir"
tests=($(ctest -R ${test_suite} -V -N | grep -E " +Test +#[0-9]+*" | cut -d':' -f2 | awk '{$1=$1};1'))
if [ ${#tests[@]} -eq 0 ]; then
  abort "No tests available... check your compilation process..."
fi
popd > /dev/null || exit 1


# Step 4: Identify Test Command for Debugging
#############################################

# Select test number
if [ -z "$test_number" ]; then
  # List out available tests
  printf "Which test would you like to debug?\n"
  id=0
  for s in "${tests[@]}"
  do
    echo "Test# ${id}"
    echo "  $s"
    ((id++))
  done

  # Prompt user which test they wanted to run
  printf "\nRun test#? "
  read test_number

else
  printf "\nUser Already Requested #${test_number}\n"

fi

# Grab all tests commands
pushd "$build_dir"
sIFS=$IFS # Save Initial IFS (Internal Field Separator)
IFS=$'\n' # Change IFS so we split ctest output by newline rather than by spaces
test_args=($(ctest -R ${test_suite} -V -N | grep "Test command" | cut -d':' -f3 | awk '{$1=$1};1' )) # Get test args
IFS=$sIFS # Reset IFS (Internal Field Separator)
popd > /dev/null || exit 1

# Grab specific test command
single_test_name="${tests[test_number]}"
single_test_command="${test_args[test_number]}"


# Step 5: Execute or GDB Debug
##############################

printf "${magenta}Running Test #${test_number}: ${single_test_name}${normal}\n"
printf "${cyan}single_test_command: ${single_test_command}${normal}\n"

if [ "$gdb_mode" = "true" ]; then
  # Execute debugger
  pushd "$repo_root" || exit 1
  eval "gdb --args ${single_test_command}"
  popd > /dev/null || exit 1

else
  # Execute Test
  pushd "$repo_root" || exit 1
  eval "${single_test_command}"
  exit_code=$?
  popd > /dev/null || exit 1

  # Print Result
  printf "${blue}Ran Test #${test_number}: ${single_test_name}${normal}\n"
  printf "${yellow}Command: ${single_test_command}${normal}\n"
  if [ $exit_code -eq 0 ]; then
    printf "${green}TEST PASS${normal}\n"
  else
    printf "${red}TEST FAIL${normal}\n"
  fi

fi

# Return to the directory from which the user ran the command.
popd > /dev/null || exit 1
@ -1,194 +0,0 @@
import array
import unicodedata
import requests


MAX_CODEPOINTS = 0x110000

UNICODE_DATA_URL = "https://www.unicode.org/Public/UCD/latest/ucd/UnicodeData.txt"


# see https://www.unicode.org/L2/L1999/UnicodeData.html
def unicode_data_iter():
    res = requests.get(UNICODE_DATA_URL)
    res.raise_for_status()
    data = res.content.decode()

    prev = []

    for line in data.splitlines():
        # e.g.: 0000;<control>;Cc;0;BN;;;;;N;NULL;;;;
        line = line.split(";")

        cpt = int(line[0], base=16)
        assert cpt < MAX_CODEPOINTS

        cpt_lower = int(line[-2] or "0", base=16)
        assert cpt_lower < MAX_CODEPOINTS

        cpt_upper = int(line[-3] or "0", base=16)
        assert cpt_upper < MAX_CODEPOINTS

        categ = line[2].strip()
        assert len(categ) == 2

        bidir = line[4].strip()
        assert 1 <= len(bidir) <= 3  # bidirectional classes are 1-3 chars (e.g. L, BN, NSM)

        name = line[1]
        if name.endswith(", First>"):
            prev = (cpt, cpt_lower, cpt_upper, categ, bidir)
            continue
        if name.endswith(", Last>"):
            assert prev[1:] == (0, 0, categ, bidir)
            for c in range(prev[0], cpt):
                yield (c, cpt_lower, cpt_upper, categ, bidir)

        yield (cpt, cpt_lower, cpt_upper, categ, bidir)


# see definition in unicode.h
CODEPOINT_FLAG_UNDEFINED   = 0x0001  #
CODEPOINT_FLAG_NUMBER      = 0x0002  # \p{N}
CODEPOINT_FLAG_LETTER      = 0x0004  # \p{L}
CODEPOINT_FLAG_SEPARATOR   = 0x0008  # \p{Z}
CODEPOINT_FLAG_MARK        = 0x0010  # \p{M}
CODEPOINT_FLAG_PUNCTUATION = 0x0020  # \p{P}
CODEPOINT_FLAG_SYMBOL      = 0x0040  # \p{S}
CODEPOINT_FLAG_CONTROL     = 0x0080  # \p{C}

UNICODE_CATEGORY_TO_FLAG = {
    "Cn": CODEPOINT_FLAG_UNDEFINED,    # Undefined
    "Cc": CODEPOINT_FLAG_CONTROL,      # Control
    "Cf": CODEPOINT_FLAG_CONTROL,      # Format
    "Co": CODEPOINT_FLAG_CONTROL,      # Private Use
    "Cs": CODEPOINT_FLAG_CONTROL,      # Surrogate
    "Ll": CODEPOINT_FLAG_LETTER,       # Lowercase Letter
    "Lm": CODEPOINT_FLAG_LETTER,       # Modifier Letter
    "Lo": CODEPOINT_FLAG_LETTER,       # Other Letter
    "Lt": CODEPOINT_FLAG_LETTER,       # Titlecase Letter
    "Lu": CODEPOINT_FLAG_LETTER,       # Uppercase Letter
    "L&": CODEPOINT_FLAG_LETTER,       # Cased Letter
    "Mc": CODEPOINT_FLAG_MARK,         # Spacing Mark
    "Me": CODEPOINT_FLAG_MARK,         # Enclosing Mark
    "Mn": CODEPOINT_FLAG_MARK,         # Nonspacing Mark
    "Nd": CODEPOINT_FLAG_NUMBER,       # Decimal Number
    "Nl": CODEPOINT_FLAG_NUMBER,       # Letter Number
    "No": CODEPOINT_FLAG_NUMBER,       # Other Number
    "Pc": CODEPOINT_FLAG_PUNCTUATION,  # Connector Punctuation
    "Pd": CODEPOINT_FLAG_PUNCTUATION,  # Dash Punctuation
    "Pe": CODEPOINT_FLAG_PUNCTUATION,  # Close Punctuation
    "Pf": CODEPOINT_FLAG_PUNCTUATION,  # Final Punctuation
    "Pi": CODEPOINT_FLAG_PUNCTUATION,  # Initial Punctuation
    "Po": CODEPOINT_FLAG_PUNCTUATION,  # Other Punctuation
    "Ps": CODEPOINT_FLAG_PUNCTUATION,  # Open Punctuation
    "Sc": CODEPOINT_FLAG_SYMBOL,       # Currency Symbol
    "Sk": CODEPOINT_FLAG_SYMBOL,       # Modifier Symbol
    "Sm": CODEPOINT_FLAG_SYMBOL,       # Math Symbol
    "So": CODEPOINT_FLAG_SYMBOL,       # Other Symbol
    "Zl": CODEPOINT_FLAG_SEPARATOR,    # Line Separator
    "Zp": CODEPOINT_FLAG_SEPARATOR,    # Paragraph Separator
    "Zs": CODEPOINT_FLAG_SEPARATOR,    # Space Separator
}


codepoint_flags = array.array('H', [CODEPOINT_FLAG_UNDEFINED]) * MAX_CODEPOINTS
table_whitespace = []
table_lowercase = []
table_uppercase = []
table_nfd = []

for (cpt, cpt_lower, cpt_upper, categ, bidir) in unicode_data_iter():
    # convert codepoint to unicode character
    char = chr(cpt)

    # codepoint category flags
    codepoint_flags[cpt] = UNICODE_CATEGORY_TO_FLAG[categ]

    # lowercase conversion
    if cpt_lower:
        table_lowercase.append((cpt, cpt_lower))

    # uppercase conversion
    if cpt_upper:
        table_uppercase.append((cpt, cpt_upper))

    # NFD normalization
    norm = ord(unicodedata.normalize('NFD', char)[0])
    if cpt != norm:
        table_nfd.append((cpt, norm))


# whitespaces, see "<White_Space>" https://www.unicode.org/Public/UCD/latest/ucd/PropList.txt
table_whitespace.extend(range(0x0009, 0x000D + 1))
table_whitespace.extend(range(0x2000, 0x200A + 1))
table_whitespace.extend([0x0020, 0x0085, 0x00A0, 0x1680, 0x2028, 0x2029, 0x202F, 0x205F, 0x3000])


# sort by codepoint
table_whitespace.sort()
table_lowercase.sort()
table_uppercase.sort()
table_nfd.sort()


# group ranges with same flags
ranges_flags = [(0, codepoint_flags[0])]  # start, flags
for codepoint, flags in enumerate(codepoint_flags):
    if flags != ranges_flags[-1][1]:
        ranges_flags.append((codepoint, flags))
ranges_flags.append((MAX_CODEPOINTS, 0x0000))


# group ranges with same nfd
ranges_nfd = [(0, 0, 0)]  # start, last, nfd
for codepoint, norm in table_nfd:
    start = ranges_nfd[-1][0]
    if ranges_nfd[-1] != (start, codepoint - 1, norm):
        ranges_nfd.append(None)
        start = codepoint
    ranges_nfd[-1] = (start, codepoint, norm)


# Generate 'unicode-data.cpp':
#   python ./scripts//gen-unicode-data.py > unicode-data.cpp

def out(line=""):
    print(line, end='\n')  # noqa

out("""\
// generated with scripts/gen-unicode-data.py

#include "unicode-data.h"

#include <cstdint>
#include <vector>
#include <unordered_map>
#include <unordered_set>
""")

out("const std::vector<std::pair<uint32_t, uint16_t>> unicode_ranges_flags = {  // start, flags // last=next_start-1")
for codepoint, flags in ranges_flags:
    out("{0x%06X, 0x%04X}," % (codepoint, flags))
out("};\n")

out("const std::unordered_set<uint32_t> unicode_set_whitespace = {")
for codepoint in table_whitespace:
    out("0x%06X," % codepoint)
out("};\n")

out("const std::unordered_map<uint32_t, uint32_t> unicode_map_lowercase = {")
for tuple in table_lowercase:
    out("{0x%06X, 0x%06X}," % tuple)
out("};\n")

out("const std::unordered_map<uint32_t, uint32_t> unicode_map_uppercase = {")
for tuple in table_uppercase:
    out("{0x%06X, 0x%06X}," % tuple)
out("};\n")

out("const std::vector<range_nfd> unicode_ranges_nfd = {  // start, last, nfd")
for triple in ranges_nfd:
    out("{0x%06X, 0x%06X, 0x%06X}," % triple)
out("};\n")
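The flags table emitted above is range-compressed: each entry stores only its start, and its range implicitly ends where the next entry begins (hence the `last=next_start-1` comment), so a consumer can resolve any codepoint's flags with one binary search. A minimal C++ sketch under that assumption (the `lookup_flags` helper is illustrative, not the actual unicode.cpp code):

```cpp
// Sketch: resolving a codepoint's flags from the (start, flags) ranges
// generated above, where each range ends at the next range's start minus one.
#include <algorithm>
#include <cstdint>
#include <utility>
#include <vector>

extern const std::vector<std::pair<uint32_t, uint16_t>> unicode_ranges_flags;

static uint16_t lookup_flags(uint32_t cpt) {
    // find the first range whose start is strictly greater than cpt ...
    auto it = std::upper_bound(unicode_ranges_flags.begin(), unicode_ranges_flags.end(),
                               std::make_pair(cpt, (uint16_t) 0xFFFF));
    // ... so the previous range is the one that contains cpt
    return it == unicode_ranges_flags.begin() ? 0 : (it - 1)->second;
}
```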
@ -1,16 +0,0 @@
# CMake equivalent of `xxd -i ${INPUT} ${OUTPUT}`
# Usage: cmake -DINPUT=examples/server/public/index.html -DOUTPUT=examples/server/index.html.hpp -P scripts/xxd.cmake

SET(INPUT "" CACHE STRING "Input File")
SET(OUTPUT "" CACHE STRING "Output File")

get_filename_component(filename "${INPUT}" NAME)
string(REGEX REPLACE "\\.|-" "_" name "${filename}")

file(READ "${INPUT}" hex_data HEX)
string(REGEX REPLACE "([0-9a-f][0-9a-f])" "0x\\1," hex_sequence "${hex_data}")

string(LENGTH ${hex_data} hex_len)
math(EXPR len "${hex_len} / 2")

file(WRITE "${OUTPUT}" "unsigned char ${name}[] = {${hex_sequence}};\nunsigned int ${name}_len = ${len};\n")
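For context, here is a minimal sketch of consuming a header this script writes; it assumes INPUT=index.html, for which the mangling rule above ("." and "-" become "_") yields the symbol `index_html`:

```cpp
// Sketch: using a header produced by the xxd.cmake script above.
// For INPUT=index.html the generated header defines:
//     unsigned char index_html[];  unsigned int index_html_len;
#include <cstdio>

#include "index.html.hpp"

int main() {
    std::fwrite(index_html, 1, index_html_len, stdout);  // dump the embedded file
    return 0;
}
```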
@ -1 +0,0 @@
../ggml/include/ggml-metal.h
254
src/llama.cpp
254
src/llama.cpp
|
@ -2023,18 +2023,19 @@ using llama_mlocks = std::vector<std::unique_ptr<llama_mlock>>;
|
||||||
|
|
||||||
// NOTE: avoid ever using this except for building the token_to_piece caches
|
// NOTE: avoid ever using this except for building the token_to_piece caches
|
||||||
static std::string llama_token_to_piece(const struct llama_model * model, llama_token token, bool special) {
|
static std::string llama_token_to_piece(const struct llama_model * model, llama_token token, bool special) {
|
||||||
std::vector<char> result(8, 0);
|
std::string piece;
|
||||||
const int n_tokens = llama_token_to_piece(model, token, result.data(), result.size(), special);
|
piece.resize(piece.capacity()); // using string internal cache
|
||||||
if (n_tokens < 0) {
|
const int n_chars = llama_token_to_piece(model, token, &piece[0], piece.size(), 0, special);
|
||||||
result.resize(-n_tokens);
|
if (n_chars < 0) {
|
||||||
int check = llama_token_to_piece(model, token, result.data(), result.size(), special);
|
piece.resize(-n_chars);
|
||||||
GGML_ASSERT(check == -n_tokens);
|
int check = llama_token_to_piece(model, token, &piece[0], piece.size(), 0, special);
|
||||||
|
GGML_ASSERT(check == -n_chars);
|
||||||
}
|
}
|
||||||
else {
|
else {
|
||||||
result.resize(n_tokens);
|
piece.resize(n_chars);
|
||||||
}
|
}
|
||||||
|
|
||||||
return std::string(result.data(), result.size());
|
return piece;
|
||||||
}
|
}
|
||||||
|
|
||||||
static ggml_backend_buffer_type_t llama_default_buffer_type_cpu(bool host_buffer) {
|
static ggml_backend_buffer_type_t llama_default_buffer_type_cpu(bool host_buffer) {
|
||||||
|
@ -2614,10 +2615,11 @@ struct llama_vocab {
|
||||||
id special_eot_id = -1; // TODO: move above after "eos_id", and here add "file separator" token
|
id special_eot_id = -1; // TODO: move above after "eos_id", and here add "file separator" token
|
||||||
|
|
||||||
// tokenizer flags
|
// tokenizer flags
|
||||||
bool tokenizer_add_space_prefix = true;
|
bool tokenizer_add_space_prefix = false;
|
||||||
bool tokenizer_add_bos = false;
|
bool tokenizer_add_bos = false;
|
||||||
bool tokenizer_add_eos = false;
|
bool tokenizer_add_eos = false;
|
||||||
bool tokenizer_ignore_merges = false;
|
bool tokenizer_ignore_merges = false;
|
||||||
|
bool tokenizer_clean_spaces = false; // clean_up_tokenization_spaces
|
||||||
bool tokenizer_remove_extra_whitespaces = false;
|
bool tokenizer_remove_extra_whitespaces = false;
|
||||||
bool tokenizer_escape_whitespaces = true;
|
bool tokenizer_escape_whitespaces = true;
|
||||||
bool tokenizer_treat_whitespace_as_suffix = false;
|
bool tokenizer_treat_whitespace_as_suffix = false;
|
||||||
|
@ -3297,6 +3299,8 @@ static void llama_kv_cache_seq_add(
|
||||||
|
|
||||||
if (p0 < 0) p0 = 0;
|
if (p0 < 0) p0 = 0;
|
||||||
if (p1 < 0) p1 = std::numeric_limits<llama_pos>::max();
|
if (p1 < 0) p1 = std::numeric_limits<llama_pos>::max();
|
||||||
|
// If there is no range then return early to avoid looping over the cache.
|
||||||
|
if (p0 == p1) return;
|
||||||
|
|
||||||
if (cache.recurrent) {
|
if (cache.recurrent) {
|
||||||
// for Mamba-like models, only the pos needs to be shifted
|
// for Mamba-like models, only the pos needs to be shifted
|
||||||
|
@ -3341,6 +3345,8 @@ static void llama_kv_cache_seq_div(
|
||||||
int d) {
|
int d) {
|
||||||
if (p0 < 0) p0 = 0;
|
if (p0 < 0) p0 = 0;
|
||||||
if (p1 < 0) p1 = std::numeric_limits<llama_pos>::max();
|
if (p1 < 0) p1 = std::numeric_limits<llama_pos>::max();
|
||||||
|
// If there is no range then return early to avoid looping over the cache.
|
||||||
|
if (p0 == p1) return;
|
||||||
|
|
||||||
if (cache.recurrent) {
|
if (cache.recurrent) {
|
||||||
// for Mamba-like models, only the pos needs to be changed
|
// for Mamba-like models, only the pos needs to be changed
|
||||||
|
@ -5284,11 +5290,6 @@ static void llm_load_vocab(
|
||||||
vocab.special_pad_id = -1;
|
vocab.special_pad_id = -1;
|
||||||
vocab.special_cls_id = -1;
|
vocab.special_cls_id = -1;
|
||||||
vocab.special_mask_id = -1;
|
vocab.special_mask_id = -1;
|
||||||
|
|
||||||
const int add_space_prefix_keyidx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_ADD_PREFIX).c_str());
|
|
||||||
if (add_space_prefix_keyidx != -1) {
|
|
||||||
vocab.tokenizer_add_space_prefix = gguf_get_val_bool(ctx, add_space_prefix_keyidx);
|
|
||||||
} // The default value of add_space_prefix is true.
|
|
||||||
} else if (tokenizer_model == "bert") {
|
} else if (tokenizer_model == "bert") {
|
||||||
vocab.type = LLAMA_VOCAB_TYPE_WPM;
|
vocab.type = LLAMA_VOCAB_TYPE_WPM;
|
||||||
|
|
||||||
|
@ -5300,15 +5301,9 @@ static void llm_load_vocab(
|
||||||
vocab.special_pad_id = 0;
|
vocab.special_pad_id = 0;
|
||||||
             vocab.special_cls_id  = 101;
             vocab.special_mask_id = 103;
-            vocab.tokenizer_add_space_prefix = false;
         } else if (tokenizer_model == "gpt2") {
             vocab.type = LLAMA_VOCAB_TYPE_BPE;
 
-            const int add_space_prefix_keyidx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_ADD_PREFIX).c_str());
-            if (add_space_prefix_keyidx != -1) {
-                vocab.tokenizer_add_space_prefix = gguf_get_val_bool(ctx, add_space_prefix_keyidx);
-            }
-
             // read bpe merges and populate bpe ranks
             const int merges_keyidx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_MERGES).c_str());
             if (merges_keyidx == -1) {
@@ -5396,6 +5391,8 @@ static void llm_load_vocab(
 
         // for now, only BPE models have pre-tokenizers
         if (vocab.type == LLAMA_VOCAB_TYPE_BPE) {
+            vocab.tokenizer_add_space_prefix = false;
+            vocab.tokenizer_clean_spaces = true;
            if (tokenizer_pre.empty()) {
                 LLAMA_LOG_WARN("%s: missing pre-tokenizer type, using: 'default'\n", __func__);
                 LLAMA_LOG_WARN("%s: \n", __func__);
@@ -5417,9 +5414,11 @@ static void llm_load_vocab(
             } else if (
                     tokenizer_pre == "deepseek-llm") {
                 vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_LLM;
+                vocab.tokenizer_clean_spaces = false;
             } else if (
                     tokenizer_pre == "deepseek-coder") {
                 vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_CODER;
+                vocab.tokenizer_clean_spaces = false;
             } else if (
                     tokenizer_pre == "falcon") {
                 vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_FALCON;
@@ -5431,6 +5430,7 @@ static void llm_load_vocab(
                 vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_STARCODER;
             } else if (
                     tokenizer_pre == "gpt-2" ||
+                    tokenizer_pre == "phi-2" ||
                     tokenizer_pre == "jina-es" ||
                     tokenizer_pre == "jina-de" ||
                     tokenizer_pre == "jina-v2-es" ||
@@ -5446,6 +5446,7 @@ static void llm_load_vocab(
             } else if (
                     tokenizer_pre == "qwen2") {
                 vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_QWEN2;
+                vocab.tokenizer_clean_spaces = false;
             } else if (
                     tokenizer_pre == "stablelm2") {
                 vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_STABLELM2;
@@ -5461,9 +5462,11 @@ static void llm_load_vocab(
             } else if (
                     tokenizer_pre == "poro-chat") {
                 vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_PORO;
+                vocab.tokenizer_clean_spaces = false;
             } else if (
                     tokenizer_pre == "viking") {
                 vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_VIKING;
+                vocab.tokenizer_clean_spaces = false;
             } else if (
                     tokenizer_pre == "jais") {
                 vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_JAIS;
@@ -5472,10 +5475,14 @@ static void llm_load_vocab(
             }
         } else if (vocab.type == LLAMA_VOCAB_TYPE_SPM) {
             vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
+            vocab.tokenizer_add_space_prefix = true;
+            vocab.tokenizer_clean_spaces = false;
             vocab.tokenizer_add_bos = true;
             vocab.tokenizer_add_eos = false;
         } else if (vocab.type == LLAMA_VOCAB_TYPE_WPM) {
             vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
+            vocab.tokenizer_add_space_prefix = false;
+            vocab.tokenizer_clean_spaces = true;
             vocab.tokenizer_add_bos = true;
             vocab.tokenizer_add_eos = false;
         } else if (vocab.type == LLAMA_VOCAB_TYPE_UGM) {
@@ -5485,6 +5492,11 @@ static void llm_load_vocab(
         } else {
             vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
         }
+
+        const int add_space_prefix_keyidx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_ADD_PREFIX).c_str());
+        if (add_space_prefix_keyidx != -1) {
+            vocab.tokenizer_add_space_prefix = gguf_get_val_bool(ctx, add_space_prefix_keyidx);
+        }
     }
 
     const int token_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_LIST).c_str());
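
Note: the hunks above move the GGUF ADD_PREFIX handling out of the gpt2 branch (so it can override any vocab type) and introduce the tokenizer_clean_spaces flag. The resulting defaults, restated as a minimal sketch with illustrative names, not the llama.cpp API:

    #include <string>

    struct tok_flags {
        bool add_space_prefix;  // prepend ' ' to the first raw-text fragment
        bool clean_spaces;      // merge spaces around punctuation on detokenize
    };

    // defaults as set by llm_load_vocab() above; several BPE pre-tokenizers
    // (deepseek-*, qwen2, poro-chat, viking, ...) then flip clean_spaces to
    // false, and LLM_KV_TOKENIZER_ADD_PREFIX can override add_space_prefix
    static tok_flags default_flags(const std::string & vocab_type) {
        if (vocab_type == "SPM") return { /*add_space_prefix=*/true,  /*clean_spaces=*/false };
        if (vocab_type == "WPM") return { /*add_space_prefix=*/false, /*clean_spaces=*/true  };
        if (vocab_type == "BPE") return { /*add_space_prefix=*/false, /*clean_spaces=*/true  };
        return { false, false };
    }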
@@ -16400,7 +16412,7 @@ static std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab &
     // tokenizer.encode('', add_special_tokens=True) returns [1]
     // tokenizer.encode('', add_special_tokens=False) returns []
 
-    bool is_prev_special = false;
+    bool is_prev_special = true;  // prefix with space if first token
 
     if (add_special && vocab.tokenizer_add_bos) {
         GGML_ASSERT(vocab.special_bos_id != -1);
@@ -16412,11 +16424,10 @@ static std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab &
                 if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
                     auto raw_text = fragment.raw_text.substr(fragment.offset, fragment.length);
 
-                    if (vocab.tokenizer_add_space_prefix) {
-                        if (!output.size() || is_prev_special) {  // prefix with space if first token
-                            raw_text = " " + raw_text;
-                        }
+                    // prefix with space if previous is special
+                    if (vocab.tokenizer_add_space_prefix && is_prev_special) {
+                        raw_text = " " + raw_text;
                     }
 
 #ifdef PRETOKENIZERDEBUG
                     LLAMA_LOG_WARN("TT: (%ld %ld %ld) '%s'\n", raw_text.length(), fragment.offset, fragment.length, raw_text.c_str());
@@ -16424,6 +16435,7 @@ static std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab &
                     llm_tokenizer_spm tokenizer(vocab);
                     llama_escape_whitespace(raw_text);
                     tokenizer.tokenize(raw_text, output);
+                    is_prev_special = false;
                 } else { // if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN)
                     output.push_back(fragment.token);
                     is_prev_special = true;
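
Note: initializing is_prev_special to true subsumes the old !output.size() check: the first fragment, and any fragment that follows a special token, gets the space prefix, while text after ordinary tokens does not. A standalone restatement (illustrative names, not the llama.cpp API):

    #include <string>

    static std::string maybe_space_prefix(std::string raw_text, bool add_space_prefix, bool & is_prev_special) {
        if (add_space_prefix && is_prev_special) {
            raw_text = " " + raw_text;  // starts true, so the first fragment is prefixed too
        }
        is_prev_special = false;        // tokenizing raw text clears the flag;
        return raw_text;                // pushing a special token sets it again
    }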
@@ -21247,7 +21259,7 @@ static std::string llama_decode_text(const std::string & text) {
 }
 
 // does not write null-terminator to buf
-int32_t llama_token_to_piece(const struct llama_model * model, llama_token token, char * buf, int32_t length, bool special) {
+int32_t llama_token_to_piece(const struct llama_model * model, llama_token token, char * buf, int32_t length, int32_t lstrip, bool special) {
 
     if(OldBPETokenizerMode)
     {
@@ -21255,83 +21267,64 @@ int32_t llama_token_to_piece(const struct llama_model * model, llama_token token
     }
 
     // ref: https://github.com/ggerganov/llama.cpp/pull/7587#discussion_r1620983843
-    if (!special && llama_is_control_token(model->vocab, token)) {
+    static const int attr_special = LLAMA_TOKEN_ATTR_UNKNOWN | LLAMA_TOKEN_ATTR_CONTROL;
+    const llama_token_attr attr = llama_token_get_attr(model, token);
+    if (!special && (attr & attr_special)) {
         return 0;
     }
 
+    // copy piece chars to output text buffer
+    // skip up to 'lstrip' leading spaces before copying
+    auto _try_copy = [=] (const char * token, size_t size) -> int32_t {
+        for (int32_t i = 0; i < lstrip && size && *token == ' '; ++i) {
+            token++;
+            size--;
+        }
+        if (length < (int32_t)size) {
+            return (int32_t) -size;
+        }
+        memcpy(buf, token, size);
+        return (int32_t) size;
+    };
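
Note: the new lstrip parameter, consumed by _try_copy above, drops up to lstrip leading spaces from the piece before copying it out. A standalone restatement (illustrative names, not the llama.cpp API):

    #include <cstring>

    static int32_t copy_lstrip(char * dst, int32_t cap, const char * piece, size_t size, int32_t lstrip) {
        for (int32_t i = 0; i < lstrip && size && *piece == ' '; ++i) {
            piece++;
            size--;
        }
        if (cap < (int32_t) size) {
            return (int32_t) -size;  // not enough room: report the required size, negated
        }
        memcpy(dst, piece, size);
        return (int32_t) size;
    }

    // e.g. copy_lstrip(buf, 16, " hello", 6, /*lstrip=*/1) writes "hello" and returns 5;
    // llama_detokenize() below uses this to strip the SPM space prefix from the first token.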
 
     // if we have a cache - use it
     {
         const auto & cache = model->vocab.cache_token_to_piece;
 
         if (!cache.empty()) {
-            const auto & res = cache.at(token);
-            if (length < (int) res.size()) {
-                return -(int) res.size();
-            }
-            memcpy(buf, res.c_str(), res.size());
-            return res.size();
+            const auto & result = cache.at(token);
+            return _try_copy(result.data(), result.size());
         }
     }
 
     if (0 <= token && token < llama_n_vocab(model)) {
+        const std::string & token_text = model->vocab.id_to_token[token].text;
         switch (llama_vocab_get_type(model->vocab)) {
             case LLAMA_VOCAB_TYPE_WPM:
             case LLAMA_VOCAB_TYPE_SPM:
             case LLAMA_VOCAB_TYPE_UGM: {
                 // NOTE: we accept all unsupported token types,
                 // suppressing them like CONTROL tokens.
-                if (llama_is_normal_token(model->vocab, token)) {
-                    std::string result = model->vocab.id_to_token[token].text;
+                if (attr & (attr_special | LLAMA_TOKEN_ATTR_USER_DEFINED)) {
+                    return _try_copy(token_text.data(), token_text.size());
+                } else if (attr & LLAMA_TOKEN_ATTR_NORMAL) {
+                    std::string result = token_text;
                     llama_unescape_whitespace(result);
-                    if (length < (int) result.length()) {
-                        return -(int) result.length();
-                    }
-                    memcpy(buf, result.c_str(), result.length());
-                    return result.length();
-                } else if (
-                        (llama_is_user_defined_token(model->vocab, token)) ||
-                        (llama_is_control_token     (model->vocab, token) && special)) {
-                    std::string result = model->vocab.id_to_token[token].text;
-                    if (length < (int) result.length()) {
-                        return -(int) result.length();
-                    }
-                    memcpy(buf, result.c_str(), result.length());
-                    return result.length();
-                } else if (llama_is_unknown_token(model->vocab, token)) { // NOLINT
-                    if (length < 3) {
-                        return -3;
-                    }
-                    memcpy(buf, "\xe2\x96\x85", 3);
-                    return 3;
-                } else if (llama_is_byte_token(model->vocab, token)) {
-                    if (length < 1) {
-                        return -1;
-                    }
-                    buf[0] = llama_token_to_byte(model->vocab, token);
-                    return 1;
+                    return _try_copy(result.data(), result.size());
+                } else if (attr & LLAMA_TOKEN_ATTR_BYTE) {
+                    char byte = (char) llama_token_to_byte(model->vocab, token);
+                    return _try_copy((char*) &byte, 1);
                 }
                 break;
             }
             case LLAMA_VOCAB_TYPE_BPE: {
                 // NOTE: we accept all unsupported token types,
                 // suppressing them like CONTROL tokens.
-                if (llama_is_normal_token(model->vocab, token)) {
-                    std::string result = model->vocab.id_to_token[token].text;
-                    result = llama_decode_text(result);
-                    if (length < (int) result.length()) {
-                        return -(int) result.length();
-                    }
-                    memcpy(buf, result.c_str(), result.length());
-                    return result.length();
-                } else if (
-                        (llama_is_user_defined_token(model->vocab, token)) ||
-                        (llama_is_control_token     (model->vocab, token) && special)) {
-                    std::string result = model->vocab.id_to_token[token].text;
-                    if (length < (int) result.length()) {
-                        return -(int) result.length();
-                    }
-                    memcpy(buf, result.c_str(), result.length());
-                    return result.length();
+                if (attr & (attr_special | LLAMA_TOKEN_ATTR_USER_DEFINED)) {
+                    return _try_copy(token_text.data(), token_text.size());
+                } else if (attr & LLAMA_TOKEN_ATTR_NORMAL) {
+                    std::string result = llama_decode_text(token_text);
+                    return _try_copy(result.data(), result.size());
                 }
                 break;
             }
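
Note: with _try_copy in place, llama_token_to_piece() consistently reports a too-small buffer by returning the negative of the required size. A caller-side sketch of the usual grow-and-retry pattern, using the signature added above (wrapper name and initial size are illustrative):

    #include "llama.h"
    #include <string>
    #include <vector>

    static std::string piece_of(const llama_model * model, llama_token token, bool special) {
        std::vector<char> buf(8);  // small first guess
        int32_t n = llama_token_to_piece(model, token, buf.data(), (int32_t) buf.size(), /*lstrip=*/0, special);
        if (n < 0) {
            buf.resize((size_t) -n);  // negative return encodes the required size
            n = llama_token_to_piece(model, token, buf.data(), (int32_t) buf.size(), /*lstrip=*/0, special);
        }
        return std::string(buf.data(), (size_t) n);
    }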
@@ -21342,6 +21335,113 @@ int32_t llama_token_to_piece(const struct llama_model * model, llama_token token
     return 0;
 }
 
+int32_t llama_detokenize(
+        const struct llama_model * model,
+        const llama_token * tokens,
+        int32_t n_tokens,
+        char * text,
+        int32_t text_len_max,
+        bool remove_special,
+        bool unparse_special) {
+    int32_t avail = text_len_max;
+    int32_t total = 0;
+
+    // remove the leading space
+    bool remove_space = model->vocab.tokenizer_add_space_prefix;
+
+    if (remove_special && model->vocab.tokenizer_add_bos) {
+        if (n_tokens > 0 && tokens[0] == model->vocab.special_bos_id) {
+            remove_space = false;
+            n_tokens--;
+            tokens++;
+        }
+    }
+
+    if (remove_special && model->vocab.tokenizer_add_eos) {
+        if (n_tokens > 0 && tokens[n_tokens-1] == model->vocab.special_eos_id) {
+            n_tokens--;
+        }
+    }
+
+    for (int32_t i = 0; i < n_tokens; ++i) {
+        GGML_ASSERT(avail >= 0);
+        int32_t n_chars = llama_token_to_piece(model, tokens[i], text, avail, remove_space, unparse_special);
+        remove_space = false;
+        if (n_chars < 0) {
+            avail = 0;
+            total -= n_chars;
+        } else if (n_chars > 0) {
+            avail -= n_chars;
+            text += n_chars;
+            total += n_chars;
+        }
+    }
+
+    if (total > text_len_max) {
+        return -total;
+    }
+
+    if (model->vocab.tokenizer_clean_spaces) {
+        text -= total;  // restart text
+
+        // first pass: characters ?!.,  //TODO: where do these characters come from?
+        const int32_t total1 = total;
+        total = total ? 1 : 0;
+        for (int32_t i = 1; i < total1; ++i) {
+            const char x = text[i];
+            if (text[i - 1] == ' ') {
+                if (x == '?' || x == '!' || x == '.' || x == ',') {  // " ?", " !", " .", " ,"
+                    total--;  // remove space
+                }
+            }
+            text[total++] = x;
+        }
+
+        // second pass: strip single apostrophe between spaces
+        const int32_t total2 = total;
+        total = total ? 1 : 0;
+        for (int32_t i = 1; i < total2; ++i) {
+            const char x = text[i];
+            if (x == '\'' && i + 1 < total2 && text[i - 1] == ' ' && text[i + 1] == ' ') {  // " ' "
+                total--;           // remove prev space
+                text[++i] = '\0';  // remove next space
+            }
+            text[total++] = x;
+        }
+
+        // third pass: apostrophe contractions  //NOTE: this makes sense?
+        const int32_t total3 = total;
+        total = total ? 1 : 0;
+        for (int32_t i = 1; i < total3; ++i) {
+            const char x = text[i];
+            if (text[i - 1] == ' ') {
+                if (x == '\'' && i + 1 < total3) {
+                    const char x1 = text[i + 1];
+                    if (x1 == 't' || x1 == 'd') {  // " 't", " 'd"
+                        //total--;  // remove space
+                    } else if (x1 == 's' || x1 == 'm') {  // " 's", " 'm"
+                        total--;  // remove space
+                    } else if (i + 2 < total3) {
+                        const char x2 = text[i + 2];
+                        if ((x1 == 'l' && x2 == 'l')) {  // " 'll"
+                            //total--;  // remove space
+                        } else if ((x1 == 'r' && x2 == 'e') || (x1 == 'v' && x2 == 'e')) {  // " 're", " 've"
+                            total--;  // remove space
+                        } else {
+                            //total--;  // remove space
+                        }
+                    } else {
+                        //total--;  // remove space
+                    }
+                }
+            }
+            text[total++] = x;
+        }
+    }
+
+    return total <= text_len_max ? total : -total;
+}
+
 // trim whitespace from the beginning and end of a string
 static std::string trim(const std::string & str) {
     size_t start = 0;
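
Note: the new llama_detokenize() follows the same convention as llama_token_to_piece(): it returns the number of bytes written, or the negative of the required length when text_len_max is too small. A hedged usage sketch against the signature added above (wrapper name and sizing heuristic are illustrative):

    #include "llama.h"
    #include <algorithm>
    #include <string>
    #include <vector>

    static std::string detokenize(const llama_model * model, const std::vector<llama_token> & toks) {
        std::string text(std::max<size_t>(16, toks.size() * 4), '\0');  // heuristic first guess
        int32_t n = llama_detokenize(model, toks.data(), (int32_t) toks.size(),
                                     &text[0], (int32_t) text.size(),
                                     /*remove_special=*/true, /*unparse_special=*/false);
        if (n < 0) {
            text.resize((size_t) -n);  // negative return encodes the required length
            n = llama_detokenize(model, toks.data(), (int32_t) toks.size(),
                                 &text[0], (int32_t) text.size(), true, false);
        }
        text.resize((size_t) n);
        return text;
    }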
@@ -242,8 +242,7 @@ static std::vector<size_t> unicode_regex_split_custom_gpt2(const std::string & t
     };
 
     auto _get_flags = [&] (const size_t pos) -> codepoint_flags {
-        static const codepoint_flags undef(codepoint_flags::UNDEFINED);
-        return (offset_ini <= pos && pos < offset_end) ? unicode_cpt_flags(cpts[pos]) : undef;
+        return (offset_ini <= pos && pos < offset_end) ? unicode_cpt_flags(cpts[pos]) : codepoint_flags{};
     };
 
     size_t _prev_end = offset_ini;
@@ -305,9 +304,9 @@ static std::vector<size_t> unicode_regex_split_custom_gpt2(const std::string & t
                 continue;
             }
             // regex: <space>?[^\s\p{L}\p{N}]+
-            if (!(flags2.is_whitespace || flags2.is_letter || flags2.is_number || flags2.is_undefined)) {
+            if (!(flags2.is_whitespace | flags2.is_letter | flags2.is_number) && flags2.as_uint()) {
                 pos += (cpt == ' ');
-                while (!(flags2.is_whitespace || flags2.is_letter || flags2.is_number || flags2.is_undefined)) {
+                while (!(flags2.is_whitespace | flags2.is_letter | flags2.is_number) && flags2.as_uint()) {
                     flags2 = _get_flags(++pos);
                 }
                 _add_token(pos);
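
Note: the predicate rewrite does two things: bitwise '|' over the single-bit flag fields avoids short-circuit branches, and 'flags2.as_uint()' stands in for the old '!is_undefined' test, since _get_flags() now returns a zero-valued codepoint_flags{} for undefined or out-of-range positions. A restatement with a hypothetical stand-in struct (not the real codepoint_flags layout):

    #include <cstdint>

    struct cpt_flags {  // hypothetical miniature of codepoint_flags
        uint16_t is_whitespace : 1;
        uint16_t is_letter     : 1;
        uint16_t is_number     : 1;
        uint16_t rest          : 13;
        uint16_t as_uint() const {
            return (uint16_t) (is_whitespace | is_letter << 1 | is_number << 2 | rest << 3);
        }
    };

    static bool collect_old(cpt_flags f, bool is_undefined) {
        return !(f.is_whitespace || f.is_letter || f.is_number || is_undefined);
    }

    static bool collect_new(cpt_flags f) {
        return !(f.is_whitespace | f.is_letter | f.is_number) && f.as_uint();  // as_uint() == 0 <=> undefined
    }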
@@ -361,8 +360,7 @@ static std::vector<size_t> unicode_regex_split_custom_llama3(const std::string &
     };
 
     auto _get_flags = [&] (const size_t pos) -> codepoint_flags {
-        static const codepoint_flags undef(codepoint_flags::UNDEFINED);
-        return (offset_ini <= pos && pos < offset_end) ? unicode_cpt_flags(cpts[pos]) : undef;
+        return (offset_ini <= pos && pos < offset_end) ? unicode_cpt_flags(cpts[pos]) : codepoint_flags{};
     };
 
     size_t _prev_end = offset_ini;
@@ -404,8 +402,8 @@ static std::vector<size_t> unicode_regex_split_custom_llama3(const std::string &
             }
         }
 
-        // regex: [^\r\n\p{L}\p{N}]?\p{L}+ //####FIXME: the first \p{L} is correct?
-        if (!(cpt == '\r' || cpt == '\n' || /*flags.is_letter |*/ flags.is_number)) {
+        // regex: [^\r\n\p{L}\p{N}]?\p{L}+
+        if (!(cpt == '\r' || cpt == '\n' || flags.is_number)) {
             if (flags.is_letter || _get_flags(pos+1).is_letter) { // one or more letters
                 pos++;
                 while (_get_flags(pos).is_letter) {
@@ -431,9 +429,9 @@ static std::vector<size_t> unicode_regex_split_custom_llama3(const std::string &
 
         // regex: <space>?[^\s\p{L}\p{N}]+[\r\n]*
         auto flags2 = (cpt == ' ' ? _get_flags(pos+1) : flags);
-        if (!(flags2.is_whitespace || flags2.is_letter || flags2.is_number || flags2.is_undefined)) {
+        if (!(flags2.is_whitespace | flags2.is_letter | flags2.is_number) && flags.as_uint()) {
             pos += (cpt == ' ');
-            while (!(flags2.is_whitespace || flags2.is_letter || flags2.is_number || flags2.is_undefined)) {
+            while (!(flags2.is_whitespace | flags2.is_letter | flags2.is_number) && flags2.as_uint()) {
                 flags2 = _get_flags(++pos);
             }
             uint32_t cpt2 = _get_cpt(pos);
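
Note: the llama3 splitter gets the same sentinel change (with one quirk carried over from the source: its first condition tests flags.as_uint() where the gpt2 variant tests flags2.as_uint()). The zero-valued default also guarantees that scanning loops stop at the window edge, since no flag bit is set past it. A self-contained miniature of that boundary behavior (names are hypothetical):

    #include <cstddef>
    #include <vector>

    struct flags_lite { bool is_letter = false; };  // hypothetical miniature of codepoint_flags

    static size_t letter_run_end(const std::vector<flags_lite> & flags, size_t pos, size_t end) {
        auto get = [&](size_t p) { return p < end ? flags[p] : flags_lite{}; };  // zero-valued past the window
        while (get(pos).is_letter) {
            ++pos;
        }
        return pos;
    }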
@@ -1,46 +0,0 @@
-import time
-import argparse
-
-from transformers import AutoTokenizer
-
-parser = argparse.ArgumentParser()
-parser.add_argument("dir_tokenizer", help="directory containing 'tokenizer.model' file")
-parser.add_argument("--fname-tok", help="path to a text file to tokenize", required=True)
-args = parser.parse_args()
-
-dir_tokenizer = args.dir_tokenizer
-fname_tok = args.fname_tok
-
-tokenizer = AutoTokenizer.from_pretrained(dir_tokenizer)
-
-print('tokenizing file: ', fname_tok) # noqa: NP100
-fname_out = fname_tok + '.tok'
-with open(fname_tok, 'r', encoding='utf-8') as f:
-    lines = f.readlines()
-    s = ''.join(lines)
-    t_start = time.time()
-    res = tokenizer.encode(s, add_special_tokens=False)
-    t_end = time.time()
-    print('\nmain : tokenized in', "{:.3f}".format(1000.0 * (t_end - t_start)), 'ms (py)') # noqa: NP100
-    with open(fname_out, 'w', encoding='utf-8') as f:
-        for x in res:
-            # LLaMA v3 for some reason strips the space for these tokens (and others)
-            # if x == 662:
-            #     f.write(str(x) + ' \' ' + tokenizer.decode(x) + '\'\n')
-            # elif x == 1174:
-            #     f.write(str(x) + ' \' ' + tokenizer.decode(x) + '\'\n')
-            # elif x == 2564:
-            #     f.write(str(x) + ' \' ' + tokenizer.decode(x) + '\'\n')
-            # elif x == 758:
-            #     f.write(str(x) + ' \' ' + tokenizer.decode(x) + '\'\n')
-            # elif x == 949:
-            #     f.write(str(x) + ' \' ' + tokenizer.decode(x) + '\'\n')
-            # elif x == 5354:
-            #     f.write(str(x) + ' \' ' + tokenizer.decode(x) + '\'\n')
-            # else:
-            #     f.write(str(x) + ' \'' + tokenizer.decode(x) + '\'\n')
-            # f.write(str(x) + ' \'' + tokenizer.decode(x).strip() + '\'\n')
-            f.write(str(x) + '\n')
-    print('len(res): ', len(res)) # noqa: NP100
-    print('len(lines): ', len(lines)) # noqa: NP100
-print('results written to: ', fname_out) # noqa: NP100