mirror of https://github.com/LostRuins/koboldcpp.git
synced 2025-09-10 00:54:41 +00:00

initial whisper integration

parent 4ed9ba7352
commit f24aef8792

10 changed files with 16204 additions and 16 deletions

Makefile (30 lines changed)
@@ -1,5 +1,5 @@
default: koboldcpp_default koboldcpp_failsafe koboldcpp_openblas koboldcpp_noavx2 koboldcpp_clblast koboldcpp_clblast_noavx2 koboldcpp_cublas koboldcpp_hipblas koboldcpp_vulkan koboldcpp_vulkan_noavx2
-tools: quantize_gpt2 quantize_gptj quantize_gguf quantize_neox quantize_mpt quantize_clip sdmain gguf-split
+tools: quantize_gpt2 quantize_gptj quantize_gguf quantize_neox quantize_mpt quantize_clip whispermain sdmain gguf-split
dev: koboldcpp_openblas
dev2: koboldcpp_clblast

@@ -512,6 +512,10 @@ sdcpp_default.o: otherarch/sdcpp/sdtype_adapter.cpp otherarch/sdcpp/stable-diffu
sdcpp_cublas.o: otherarch/sdcpp/sdtype_adapter.cpp otherarch/sdcpp/stable-diffusion.h otherarch/sdcpp/stable-diffusion.cpp otherarch/sdcpp/util.cpp otherarch/sdcpp/upscaler.cpp otherarch/sdcpp/model.cpp otherarch/sdcpp/thirdparty/zip.c
	$(CXX) $(CXXFLAGS) $(CUBLAS_FLAGS) $(HIPFLAGS) -c $< -o $@

+#whisper objects
+whispercpp_default.o: otherarch/whispercpp/whisper_adapter.cpp
+	$(CXX) $(CXXFLAGS) -c $< -o $@
+
# idiotic "for easier compilation"
GPTTYPE_ADAPTER = gpttype_adapter.cpp otherarch/llama_v2.cpp otherarch/llama_v3.cpp llama.cpp otherarch/utils.cpp otherarch/gptj_v1.cpp otherarch/gptj_v2.cpp otherarch/gptj_v3.cpp otherarch/gpt2_v1.cpp otherarch/gpt2_v2.cpp otherarch/gpt2_v3.cpp otherarch/rwkv_v2.cpp otherarch/rwkv_v3.cpp otherarch/neox_v2.cpp otherarch/neox_v3.cpp otherarch/mpt_v3.cpp ggml.h ggml-cuda.h llama.h otherarch/llama-util.h
gpttype_adapter_failsafe.o: $(GPTTYPE_ADAPTER)

@@ -530,7 +534,7 @@ gpttype_adapter_vulkan_noavx2.o: $(GPTTYPE_ADAPTER)
	$(CXX) $(CXXFLAGS) $(FAILSAFE_FLAGS) $(VULKAN_FLAGS) -c $< -o $@

clean:
-	rm -vf *.o main sdmain quantize_gguf quantize_clip quantize_gpt2 quantize_gptj quantize_neox quantize_mpt quantize-stats perplexity embedding benchmark-matmult save-load-state gguf imatrix imatrix.exe gguf.exe main.exe quantize_clip.exe quantize_gguf.exe quantize_gptj.exe quantize_gpt2.exe quantize_neox.exe quantize_mpt.exe koboldcpp_default.dll koboldcpp_openblas.dll koboldcpp_failsafe.dll koboldcpp_noavx2.dll koboldcpp_clblast.dll koboldcpp_clblast_noavx2.dll koboldcpp_cublas.dll koboldcpp_hipblas.dll koboldcpp_vulkan.dll koboldcpp_vulkan_noavx2.dll koboldcpp_default.so koboldcpp_openblas.so koboldcpp_failsafe.so koboldcpp_noavx2.so koboldcpp_clblast.so koboldcpp_clblast_noavx2.so koboldcpp_cublas.so koboldcpp_hipblas.so koboldcpp_vulkan.so koboldcpp_vulkan_noavx2.so
+	rm -vf *.o main sdmain whispermain quantize_gguf quantize_clip quantize_gpt2 quantize_gptj quantize_neox quantize_mpt quantize-stats perplexity embedding benchmark-matmult save-load-state gguf imatrix imatrix.exe gguf.exe main.exe quantize_clip.exe quantize_gguf.exe quantize_gptj.exe quantize_gpt2.exe quantize_neox.exe quantize_mpt.exe koboldcpp_default.dll koboldcpp_openblas.dll koboldcpp_failsafe.dll koboldcpp_noavx2.dll koboldcpp_clblast.dll koboldcpp_clblast_noavx2.dll koboldcpp_cublas.dll koboldcpp_hipblas.dll koboldcpp_vulkan.dll koboldcpp_vulkan_noavx2.dll koboldcpp_default.so koboldcpp_openblas.so koboldcpp_failsafe.so koboldcpp_noavx2.so koboldcpp_clblast.so koboldcpp_clblast_noavx2.so koboldcpp_cublas.so koboldcpp_hipblas.so koboldcpp_vulkan.so koboldcpp_vulkan_noavx2.so
	rm -vrf ggml-cuda/*.o

# useful tools

@@ -539,6 +543,8 @@ main: examples/main/main.cpp build-info.h ggml.o llama.o console.o llavaclip_def
	@echo '==== Run ./main -h for help. ===='
sdmain: otherarch/sdcpp/util.cpp otherarch/sdcpp/main.cpp otherarch/sdcpp/stable-diffusion.cpp otherarch/sdcpp/upscaler.cpp otherarch/sdcpp/model.cpp otherarch/sdcpp/thirdparty/zip.c build-info.h ggml.o llama.o console.o ggml-backend_default.o $(OBJS_FULL) $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
+whispermain: otherarch/whispercpp/main.cpp otherarch/whispercpp/whisper.cpp build-info.h ggml.o llama.o console.o ggml-backend_default.o $(OBJS_FULL) $(OBJS)
+	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
imatrix: examples/imatrix/imatrix.cpp build-info.h ggml.o llama.o console.o llavaclip_default.o llava.o ggml-backend_default.o $(OBJS_FULL) $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
gguf: examples/gguf/gguf.cpp build-info.h ggml.o llama.o llavaclip_default.o llava.o ggml-backend_default.o $(OBJS_FULL) $(OBJS)

@@ -548,11 +554,11 @@ gguf-split: examples/gguf-split/gguf-split.cpp ggml.o llama.o build-info.h llava

#generated libraries
-koboldcpp_default: ggml.o ggml_v3.o ggml_v2.o ggml_v1.o expose.o gpttype_adapter.o sdcpp_default.o llavaclip_default.o llava.o ggml-backend_default.o $(OBJS_FULL) $(OBJS)
+koboldcpp_default: ggml.o ggml_v3.o ggml_v2.o ggml_v1.o expose.o gpttype_adapter.o sdcpp_default.o whispercpp_default.o llavaclip_default.o llava.o ggml-backend_default.o $(OBJS_FULL) $(OBJS)
	$(DEFAULT_BUILD)

ifdef OPENBLAS_BUILD
-koboldcpp_openblas: ggml_v4_openblas.o ggml_v3_openblas.o ggml_v2_openblas.o ggml_v1.o expose.o gpttype_adapter.o sdcpp_default.o llavaclip_default.o llava.o ggml-backend_default.o $(OBJS_FULL) $(OBJS)
+koboldcpp_openblas: ggml_v4_openblas.o ggml_v3_openblas.o ggml_v2_openblas.o ggml_v1.o expose.o gpttype_adapter.o sdcpp_default.o whispercpp_default.o llavaclip_default.o llava.o ggml-backend_default.o $(OBJS_FULL) $(OBJS)
	$(OPENBLAS_BUILD)
else
koboldcpp_openblas:

@@ -560,7 +566,7 @@ koboldcpp_openblas:
endif

ifdef FAILSAFE_BUILD
-koboldcpp_failsafe: ggml_v4_failsafe.o ggml_v3_failsafe.o ggml_v2_failsafe.o ggml_v1_failsafe.o expose.o gpttype_adapter_failsafe.o sdcpp_default.o llavaclip_default.o llava.o ggml-backend_default.o $(OBJS_FAILSAFE) $(OBJS)
+koboldcpp_failsafe: ggml_v4_failsafe.o ggml_v3_failsafe.o ggml_v2_failsafe.o ggml_v1_failsafe.o expose.o gpttype_adapter_failsafe.o sdcpp_default.o whispercpp_default.o llavaclip_default.o llava.o ggml-backend_default.o $(OBJS_FAILSAFE) $(OBJS)
	$(FAILSAFE_BUILD)
else
koboldcpp_failsafe:

@@ -568,7 +574,7 @@ koboldcpp_failsafe:
endif

ifdef NOAVX2_BUILD
-koboldcpp_noavx2: ggml_v4_noavx2.o ggml_v3_noavx2.o ggml_v2_noavx2.o ggml_v1_failsafe.o expose.o gpttype_adapter_failsafe.o sdcpp_default.o llavaclip_default.o llava.o ggml-backend_default.o $(OBJS_SIMPLE) $(OBJS)
+koboldcpp_noavx2: ggml_v4_noavx2.o ggml_v3_noavx2.o ggml_v2_noavx2.o ggml_v1_failsafe.o expose.o gpttype_adapter_failsafe.o sdcpp_default.o whispercpp_default.o llavaclip_default.o llava.o ggml-backend_default.o $(OBJS_SIMPLE) $(OBJS)
	$(NOAVX2_BUILD)
else
koboldcpp_noavx2:

@@ -576,10 +582,10 @@ koboldcpp_noavx2:
endif

ifdef CLBLAST_BUILD
-koboldcpp_clblast: ggml_v4_clblast.o ggml_v3_clblast.o ggml_v2_clblast.o ggml_v1.o expose.o gpttype_adapter_clblast.o ggml-opencl.o ggml_v3-opencl.o ggml_v2-opencl.o ggml_v2-opencl-legacy.o sdcpp_default.o llavaclip_default.o llava.o ggml-backend_default.o $(OBJS_FULL) $(OBJS)
+koboldcpp_clblast: ggml_v4_clblast.o ggml_v3_clblast.o ggml_v2_clblast.o ggml_v1.o expose.o gpttype_adapter_clblast.o ggml-opencl.o ggml_v3-opencl.o ggml_v2-opencl.o ggml_v2-opencl-legacy.o sdcpp_default.o whispercpp_default.o llavaclip_default.o llava.o ggml-backend_default.o $(OBJS_FULL) $(OBJS)
	$(CLBLAST_BUILD)
ifdef NOAVX2_BUILD
-koboldcpp_clblast_noavx2: ggml_v4_clblast_noavx2.o ggml_v3_clblast_noavx2.o ggml_v2_clblast_noavx2.o ggml_v1_failsafe.o expose.o gpttype_adapter_clblast_noavx2.o ggml-opencl.o ggml_v3-opencl.o ggml_v2-opencl.o ggml_v2-opencl-legacy.o sdcpp_default.o llavaclip_default.o llava.o ggml-backend_default.o $(OBJS_SIMPLE) $(OBJS)
+koboldcpp_clblast_noavx2: ggml_v4_clblast_noavx2.o ggml_v3_clblast_noavx2.o ggml_v2_clblast_noavx2.o ggml_v1_failsafe.o expose.o gpttype_adapter_clblast_noavx2.o ggml-opencl.o ggml_v3-opencl.o ggml_v2-opencl.o ggml_v2-opencl-legacy.o sdcpp_default.o whispercpp_default.o llavaclip_default.o llava.o ggml-backend_default.o $(OBJS_SIMPLE) $(OBJS)
	$(CLBLAST_BUILD)
else
koboldcpp_clblast_noavx2:

@@ -593,7 +599,7 @@ koboldcpp_clblast_noavx2:
endif

ifdef CUBLAS_BUILD
-koboldcpp_cublas: ggml_v4_cublas.o ggml_v3_cublas.o ggml_v2_cublas.o ggml_v1.o expose.o gpttype_adapter_cublas.o sdcpp_cublas.o llavaclip_cublas.o llava.o ggml-backend_cublas.o $(CUBLAS_OBJS) $(OBJS_FULL) $(OBJS)
+koboldcpp_cublas: ggml_v4_cublas.o ggml_v3_cublas.o ggml_v2_cublas.o ggml_v1.o expose.o gpttype_adapter_cublas.o sdcpp_cublas.o whispercpp_default.o llavaclip_cublas.o llava.o ggml-backend_cublas.o $(CUBLAS_OBJS) $(OBJS_FULL) $(OBJS)
	$(CUBLAS_BUILD)
else
koboldcpp_cublas:

@@ -601,7 +607,7 @@ koboldcpp_cublas:
endif

ifdef HIPBLAS_BUILD
-koboldcpp_hipblas: ggml_v4_cublas.o ggml_v3_cublas.o ggml_v2_cublas.o ggml_v1.o expose.o gpttype_adapter_cublas.o sdcpp_cublas.o llavaclip_cublas.o llava.o ggml-backend_cublas.o $(HIP_OBJS) $(OBJS_FULL) $(OBJS)
+koboldcpp_hipblas: ggml_v4_cublas.o ggml_v3_cublas.o ggml_v2_cublas.o ggml_v1.o expose.o gpttype_adapter_cublas.o sdcpp_cublas.o whispercpp_default.o llavaclip_cublas.o llava.o ggml-backend_cublas.o $(HIP_OBJS) $(OBJS_FULL) $(OBJS)
	$(HIPBLAS_BUILD)
else
koboldcpp_hipblas:

@@ -609,10 +615,10 @@ koboldcpp_hipblas:
endif

ifdef VULKAN_BUILD
-koboldcpp_vulkan: ggml_v4_vulkan.o ggml_v3.o ggml_v2.o ggml_v1.o expose.o gpttype_adapter_vulkan.o ggml-vulkan.o sdcpp_default.o llavaclip_default.o llava.o ggml-backend_vulkan.o $(OBJS_FULL) $(OBJS)
+koboldcpp_vulkan: ggml_v4_vulkan.o ggml_v3.o ggml_v2.o ggml_v1.o expose.o gpttype_adapter_vulkan.o ggml-vulkan.o sdcpp_default.o whispercpp_default.o llavaclip_default.o llava.o ggml-backend_vulkan.o $(OBJS_FULL) $(OBJS)
	$(VULKAN_BUILD)
ifdef NOAVX2_BUILD
-koboldcpp_vulkan_noavx2: ggml_v4_vulkan_noavx2.o ggml_v3_noavx2.o ggml_v2_noavx2.o ggml_v1_failsafe.o expose.o gpttype_adapter_vulkan_noavx2.o ggml-vulkan.o sdcpp_default.o llavaclip_default.o llava.o ggml-backend_vulkan.o $(OBJS_SIMPLE) $(OBJS)
+koboldcpp_vulkan_noavx2: ggml_v4_vulkan_noavx2.o ggml_v3_noavx2.o ggml_v2_noavx2.o ggml_v1_failsafe.o expose.o gpttype_adapter_vulkan_noavx2.o ggml-vulkan.o sdcpp_default.o whispercpp_default.o llavaclip_default.o llava.o ggml-backend_vulkan.o $(OBJS_SIMPLE) $(OBJS)
	$(VULKAN_BUILD)
else
koboldcpp_vulkan_noavx2:
expose.cpp

@@ -221,6 +221,15 @@ extern "C"
        return sdtype_generate(inputs);
    }

+    bool whisper_load_model(const whisper_load_model_inputs inputs)
+    {
+        return whispertype_load_model(inputs);
+    }
+    whisper_generation_outputs whisper_generate(const whisper_generation_inputs inputs)
+    {
+        return whispertype_generate(inputs);
+    }
+
    const char * new_token(int idx) {
        if (generated_tokens.size() <= idx || idx < 0) return nullptr;
expose.h (20 lines changed)

@@ -142,6 +142,26 @@ struct sd_generation_outputs
    int status = -1;
    const char * data = "";
};
+struct whisper_load_model_inputs
+{
+    const char * model_filename;
+    const char * executable_path;
+    const int clblast_info = 0;
+    const int cublas_info = 0;
+    const char * vulkan_info;
+    const int debugmode = 0;
+};
+struct whisper_generation_inputs
+{
+    const char * prompt;
+    const char * audio_data;
+    const bool quiet = false;
+};
+struct whisper_generation_outputs
+{
+    int status = -1;
+    const char * text = "";
+};

extern std::string executable_path;
extern std::string lora_filename;
koboldcpp.py (80 lines changed)
@@ -134,6 +134,23 @@ class sd_generation_outputs(ctypes.Structure):
    _fields_ = [("status", ctypes.c_int),
                ("data", ctypes.c_char_p)]

+class whisper_load_model_inputs(ctypes.Structure):
+    _fields_ = [("model_filename", ctypes.c_char_p),
+                ("executable_path", ctypes.c_char_p),
+                ("clblast_info", ctypes.c_int),
+                ("cublas_info", ctypes.c_int),
+                ("vulkan_info", ctypes.c_char_p),
+                ("debugmode", ctypes.c_int)]
+
+class whisper_generation_inputs(ctypes.Structure):
+    _fields_ = [("prompt", ctypes.c_char_p),
+                ("audio_data", ctypes.c_char_p),
+                ("quiet", ctypes.c_bool)]
+
+class whisper_generation_outputs(ctypes.Structure):
+    _fields_ = [("status", ctypes.c_int),
+                ("data", ctypes.c_char_p)]

handle = None

def getdirpath():
@@ -304,6 +321,10 @@ def init_library():
    handle.sd_load_model.restype = ctypes.c_bool
    handle.sd_generate.argtypes = [sd_generation_inputs]
    handle.sd_generate.restype = sd_generation_outputs
+    handle.whisper_load_model.argtypes = [whisper_load_model_inputs]
+    handle.whisper_load_model.restype = ctypes.c_bool
+    handle.whisper_generate.argtypes = [whisper_generation_inputs]
+    handle.whisper_generate.restype = whisper_generation_outputs

def set_backend_props(inputs):
    clblastids = 0
@@ -612,6 +633,32 @@ def sd_generate(genparams):
    outstr = ret.data.decode("UTF-8","ignore")
    return outstr

+def whisper_load_model(model_filename):
+    global args
+    inputs = whisper_load_model_inputs()
+    inputs.debugmode = args.debugmode
+    inputs.executable_path = (getdirpath()+"/").encode("UTF-8")
+    inputs.model_filename = model_filename.encode("UTF-8")
+    inputs = set_backend_props(inputs)
+    ret = handle.whisper_load_model(inputs)
+    return ret
+
+def whisper_generate(genparams):
+    global args
+    is_quiet = True if args.quiet else False
+    prompt = genparams.get("prompt", "")
+    audio_data = genparams.get("audio_data", "")
+    inputs = whisper_generation_inputs()
+    inputs.prompt = prompt.encode("UTF-8")
+    inputs.audio_data = audio_data.encode("UTF-8")
+    inputs.quiet = is_quiet
+    ret = handle.whisper_generate(inputs)
+    outstr = ""
+    if ret.status==1:
+        outstr = ret.data.decode("UTF-8","ignore")
+    return outstr

def utfprint(str):
    maxlen = 99999
    strlength = len(str)
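A minimal usage sketch for the whisper_generate() helper added in the hunk above, assuming the shared library has already been loaded via init_library() and a model via whisper_load_model(); the WAV path is only an example. The native adapter decodes audio_data as a base64-encoded 16-bit PCM WAV sampled at 16 kHz:

import base64

def transcribe_wav_file(path):
    # read a 16 kHz, 16-bit mono/stereo WAV and base64-encode it,
    # matching what read_wav() in whisper_adapter.cpp expects
    with open(path, "rb") as f:
        b64wav = base64.b64encode(f.read()).decode("UTF-8")
    genparams = {"prompt": "", "audio_data": b64wav}
    # returns the transcription text, or "" if the native call failed
    return whisper_generate(genparams)

# example: print(transcribe_wav_file("sample.wav")), after a Whisper model
# has been loaded (e.g. by launching with --whispermodel)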
@@ -1547,7 +1594,7 @@ def show_new_gui():
        root.quit()
    if args.model_param and args.model_param!="" and args.model_param.lower().endswith('.kcpps'):
        loadconfigfile(args.model_param)
-    if not args.model_param and not args.sdmodel:
+    if not args.model_param and not args.sdmodel and not args.whispermodel:
        global exitcounter
        exitcounter = 999
        print("\nNo ggml model or kcpps file was selected. Exiting.")

@@ -2568,13 +2615,13 @@ def show_new_gui():
    if nextstate==0:
        exitcounter = 999
        print("Exiting by user request.")
-        time.sleep(3)
+        time.sleep(1)
        sys.exit(0)
    else:
        # processing vars
        export_vars()

-        if not args.model_param and not args.sdmodel:
+        if not args.model_param and not args.sdmodel and not args.whispermodel:
            exitcounter = 999
            print("\nNo text or image model file was selected. Exiting.")
            time.sleep(3)
@@ -3050,7 +3097,7 @@ def main(launch_args,start_server=True):
    if not args.model_param:
        args.model_param = args.model

-    if not args.model_param and not args.sdmodel:
+    if not args.model_param and not args.sdmodel and not args.whispermodel:
        #give them a chance to pick a file
        print("For command line arguments, please refer to --help")
        print("***")
@@ -3280,6 +3327,28 @@ def main(launch_args,start_server=True):
        time.sleep(3)
        sys.exit(3)

+    #handle whisper model
+    if args.whispermodel and args.whispermodel!="":
+        whispermodel = args.whispermodel
+        if not whispermodel or not os.path.exists(whispermodel):
+            print(f"Cannot find whisper model file: {whispermodel}")
+            if args.ignoremissing:
+                print(f"Ignoring missing whisper model file...")
+                args.whispermodel = None
+            else:
+                exitcounter = 999
+                time.sleep(3)
+                sys.exit(2)
+        else:
+            whispermodel = os.path.abspath(whispermodel)
+            loadok = whisper_load_model(whispermodel)
+            print("Load Whisper Model OK: " + str(loadok))
+            if not loadok:
+                exitcounter = 999
+                print("Could not load whisper model: " + whispermodel)
+                time.sleep(3)
+                sys.exit(3)

    #load embedded lite
    try:
        basepath = os.path.abspath(os.path.dirname(__file__))
@@ -3537,6 +3606,9 @@ if __name__ == '__main__':
    sdparsergrouplora.add_argument("--sdlora", metavar=('[filename]'), help="Specify a stable diffusion LORA safetensors model to be applied. Cannot be used with quant models.", default="")
    sdparsergroup.add_argument("--sdloramult", metavar=('[amount]'), help="Multiplier for the LORA model to be applied.", type=float, default=1.0)

+    whisperparsergroup = parser.add_argument_group('Whisper Transcription Commands')
+    whisperparsergroup.add_argument("--whispermodel", metavar=('[filename]'), help="Specify a Whisper bin model to enable Speech-To-Text transcription.", default="")
+
    deprecatedgroup = parser.add_argument_group('Deprecated Commands, DO NOT USE!')
    deprecatedgroup.add_argument("--hordeconfig", help=argparse.SUPPRESS, nargs='+')
    deprecatedgroup.add_argument("--sdconfig", help=argparse.SUPPRESS, nargs='+')
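For reference, a hypothetical way to exercise the new --whispermodel flag through the parser defined above; the model filename is only an example and the remaining arguments are assumed to keep their defaults:

# command line:  python koboldcpp.py --model model.gguf --whispermodel ggml-base.en.bin
# programmatic:  reuse the argparse parser built in __main__
args = parser.parse_args(["--whispermodel", "ggml-base.en.bin"])
print(args.whispermodel)  # -> ggml-base.en.bin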
model_adapter.h

@@ -82,6 +82,9 @@ std::vector<int> gpttype_get_token_arr(const std::string & input);
bool sdtype_load_model(const sd_load_model_inputs inputs);
sd_generation_outputs sdtype_generate(const sd_generation_inputs inputs);

+bool whispertype_load_model(const whisper_load_model_inputs inputs);
+whisper_generation_outputs whispertype_generate(const whisper_generation_inputs inputs);
+
void timer_start();
double timer_check();
void print_tok_vec(std::vector<int> &embd);
otherarch/whispercpp/dr_wav.h (new file, 6434 lines): diff suppressed because it is too large.
otherarch/whispercpp/main.cpp (new file, 1374 lines): diff suppressed because it is too large.
otherarch/whispercpp/whisper.cpp (new file, 7346 lines): diff suppressed because it is too large.
otherarch/whispercpp/whisper.h (new file, 681 lines)

@@ -0,0 +1,681 @@
#ifndef WHISPER_H
#define WHISPER_H

#include "ggml.h"

#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>
#include <string>

#ifdef __GNUC__
#    define WHISPER_DEPRECATED(func, hint) func __attribute__((deprecated(hint)))
#elif defined(_MSC_VER)
#    define WHISPER_DEPRECATED(func, hint) __declspec(deprecated(hint)) func
#else
#    define WHISPER_DEPRECATED(func, hint) func
#endif

#ifdef WHISPER_SHARED
#    ifdef _WIN32
#        ifdef WHISPER_BUILD
#            define WHISPER_API __declspec(dllexport)
#        else
#            define WHISPER_API __declspec(dllimport)
#        endif
#    else
#        define WHISPER_API __attribute__ ((visibility ("default")))
#    endif
#else
#    define WHISPER_API
#endif

#define WHISPER_SAMPLE_RATE 16000
#define WHISPER_N_FFT       400
#define WHISPER_HOP_LENGTH  160
#define WHISPER_CHUNK_SIZE  30

#ifdef __cplusplus
extern "C" {
#endif

//
// C interface
//
// The following interface is thread-safe as long as the sample whisper_context is not used by multiple threads
// concurrently.
//
// Basic usage:
//
//     #include "whisper.h"
//
//     ...
//
//     whisper_context_params cparams = whisper_context_default_params();
//
//     struct whisper_context * ctx = whisper_init_from_file_with_params("/path/to/ggml-base.en.bin", cparams);
//
//     if (whisper_full(ctx, wparams, pcmf32.data(), pcmf32.size()) != 0) {
//         fprintf(stderr, "failed to process audio\n");
//         return 7;
//     }
//
//     const int n_segments = whisper_full_n_segments(ctx);
//     for (int i = 0; i < n_segments; ++i) {
//         const char * text = whisper_full_get_segment_text(ctx, i);
//         printf("%s", text);
//     }
//
//     whisper_free(ctx);
//
//     ...
//
// This is a demonstration of the most straightforward usage of the library.
// "pcmf32" contains the RAW audio data in 32-bit floating point format.
//
// The interface also allows for more fine-grained control over the computation, but it requires a deeper
// understanding of how the model works.
//

struct whisper_context;
struct whisper_state;
struct whisper_full_params;

typedef int32_t whisper_pos;
typedef int32_t whisper_token;
typedef int32_t whisper_seq_id;

enum whisper_alignment_heads_preset {
    WHISPER_AHEADS_NONE,
    WHISPER_AHEADS_N_TOP_MOST, // All heads from the N-top-most text-layers
    WHISPER_AHEADS_CUSTOM,
    WHISPER_AHEADS_TINY_EN,
    WHISPER_AHEADS_TINY,
    WHISPER_AHEADS_BASE_EN,
    WHISPER_AHEADS_BASE,
    WHISPER_AHEADS_SMALL_EN,
    WHISPER_AHEADS_SMALL,
    WHISPER_AHEADS_MEDIUM_EN,
    WHISPER_AHEADS_MEDIUM,
    WHISPER_AHEADS_LARGE_V1,
    WHISPER_AHEADS_LARGE_V2,
    WHISPER_AHEADS_LARGE_V3,
};

typedef struct whisper_ahead {
    int n_text_layer;
    int n_head;
} whisper_ahead;

typedef struct whisper_aheads {
    size_t n_heads;
    const whisper_ahead * heads;
} whisper_aheads;

struct whisper_context_params {
    bool use_gpu;
    bool flash_attn;
    int  gpu_device; // CUDA device

    // [EXPERIMENTAL] Token-level timestamps with DTW
    bool dtw_token_timestamps;
    enum whisper_alignment_heads_preset dtw_aheads_preset;

    int dtw_n_top;
    struct whisper_aheads dtw_aheads;

    size_t dtw_mem_size; // TODO: remove
};

typedef struct whisper_token_data {
    whisper_token id;  // token id
    whisper_token tid; // forced timestamp token id

    float p;           // probability of the token
    float plog;        // log probability of the token
    float pt;          // probability of the timestamp token
    float ptsum;       // sum of probabilities of all timestamp tokens

    // token-level timestamp data
    // do not use if you haven't computed token-level timestamps
    int64_t t0;        // start time of the token
    int64_t t1;        // end time of the token

    // [EXPERIMENTAL] Token-level timestamps with DTW
    // do not use if you haven't computed token-level timestamps with dtw
    // Roughly corresponds to the moment in audio in which the token was output
    int64_t t_dtw;

    float vlen;        // voice length of the token
} whisper_token_data;

typedef struct whisper_model_loader {
    void * context;

    size_t (*read)(void * ctx, void * output, size_t read_size);
    bool   (*eof)(void * ctx);
    void   (*close)(void * ctx);
} whisper_model_loader;

// grammar element type
enum whisper_gretype {
    // end of rule definition
    WHISPER_GRETYPE_END            = 0,

    // start of alternate definition for rule
    WHISPER_GRETYPE_ALT            = 1,

    // non-terminal element: reference to rule
    WHISPER_GRETYPE_RULE_REF       = 2,

    // terminal element: character (code point)
    WHISPER_GRETYPE_CHAR           = 3,

    // inverse char(s) ([^a], [^a-b] [^abc])
    WHISPER_GRETYPE_CHAR_NOT       = 4,

    // modifies a preceding WHISPER_GRETYPE_CHAR or LLAMA_GRETYPE_CHAR_ALT to
    // be an inclusive range ([a-z])
    WHISPER_GRETYPE_CHAR_RNG_UPPER = 5,

    // modifies a preceding WHISPER_GRETYPE_CHAR or
    // WHISPER_GRETYPE_CHAR_RNG_UPPER to add an alternate char to match ([ab], [a-zA])
    WHISPER_GRETYPE_CHAR_ALT       = 6,
};

typedef struct whisper_grammar_element {
    enum whisper_gretype type;
    uint32_t             value; // Unicode code point or rule ID
} whisper_grammar_element;

// Various functions for loading a ggml whisper model.
// Allocate (almost) all memory needed for the model.
// Return NULL on failure
WHISPER_API struct whisper_context * whisper_init_from_file_with_params  (const char * path_model, struct whisper_context_params params);
WHISPER_API struct whisper_context * whisper_init_from_buffer_with_params(void * buffer, size_t buffer_size, struct whisper_context_params params);
WHISPER_API struct whisper_context * whisper_init_with_params            (struct whisper_model_loader * loader, struct whisper_context_params params);

// These are the same as the above, but the internal state of the context is not allocated automatically
// It is the responsibility of the caller to allocate the state using whisper_init_state() (#523)
WHISPER_API struct whisper_context * whisper_init_from_file_with_params_no_state  (const char * path_model, struct whisper_context_params params);
WHISPER_API struct whisper_context * whisper_init_from_buffer_with_params_no_state(void * buffer, size_t buffer_size, struct whisper_context_params params);
WHISPER_API struct whisper_context * whisper_init_with_params_no_state            (struct whisper_model_loader * loader, struct whisper_context_params params);

WHISPER_DEPRECATED(
    WHISPER_API struct whisper_context * whisper_init_from_file(const char * path_model),
    "use whisper_init_from_file_with_params instead"
);
WHISPER_DEPRECATED(
    WHISPER_API struct whisper_context * whisper_init_from_buffer(void * buffer, size_t buffer_size),
    "use whisper_init_from_buffer_with_params instead"
);
WHISPER_DEPRECATED(
    WHISPER_API struct whisper_context * whisper_init(struct whisper_model_loader * loader),
    "use whisper_init_with_params instead"
);
WHISPER_DEPRECATED(
    WHISPER_API struct whisper_context * whisper_init_from_file_no_state(const char * path_model),
    "use whisper_init_from_file_with_params_no_state instead"
);
WHISPER_DEPRECATED(
    WHISPER_API struct whisper_context * whisper_init_from_buffer_no_state(void * buffer, size_t buffer_size),
    "use whisper_init_from_buffer_with_params_no_state instead"
);
WHISPER_DEPRECATED(
    WHISPER_API struct whisper_context * whisper_init_no_state(struct whisper_model_loader * loader),
    "use whisper_init_with_params_no_state instead"
);

WHISPER_API struct whisper_state * whisper_init_state(struct whisper_context * ctx);

// Given a context, enable use of OpenVINO for encode inference.
// model_path: Optional path to OpenVINO encoder IR model. If set to nullptr,
//             the path will be generated from the ggml model path that was passed
//             in to whisper_init_from_file. For example, if 'path_model' was
//             "/path/to/ggml-base.en.bin", then OpenVINO IR model path will be
//             assumed to be "/path/to/ggml-base.en-encoder-openvino.xml".
// device: OpenVINO device to run inference on ("CPU", "GPU", etc.)
// cache_dir: Optional cache directory that can speed up init time, especially for
//            GPU, by caching compiled 'blobs' there.
//            Set to nullptr if not used.
// Returns 0 on success. If OpenVINO is not enabled in build, this simply returns 1.
WHISPER_API int whisper_ctx_init_openvino_encoder(
        struct whisper_context * ctx,
        const char * model_path,
        const char * device,
        const char * cache_dir);

// Frees all allocated memory
WHISPER_API void whisper_free      (struct whisper_context * ctx);
WHISPER_API void whisper_free_state(struct whisper_state * state);
WHISPER_API void whisper_free_params(struct whisper_full_params * params);
WHISPER_API void whisper_free_context_params(struct whisper_context_params * params);

// Convert RAW PCM audio to log mel spectrogram.
// The resulting spectrogram is stored inside the default state of the provided whisper context.
// Returns 0 on success
WHISPER_API int whisper_pcm_to_mel(
        struct whisper_context * ctx,
        const float * samples,
        int n_samples,
        int n_threads);

WHISPER_API int whisper_pcm_to_mel_with_state(
        struct whisper_context * ctx,
        struct whisper_state * state,
        const float * samples,
        int n_samples,
        int n_threads);

// Convert RAW PCM audio to log mel spectrogram but applies a Phase Vocoder to speed up the audio x2.
// The resulting spectrogram is stored inside the default state of the provided whisper context.
// Returns 0 on success
WHISPER_API int whisper_pcm_to_mel_phase_vocoder(
        struct whisper_context * ctx,
        const float * samples,
        int n_samples,
        int n_threads);

WHISPER_API int whisper_pcm_to_mel_phase_vocoder_with_state(
        struct whisper_context * ctx,
        struct whisper_state * state,
        const float * samples,
        int n_samples,
        int n_threads);

// This can be used to set a custom log mel spectrogram inside the default state of the provided whisper context.
// Use this instead of whisper_pcm_to_mel() if you want to provide your own log mel spectrogram.
// n_mel must be 80
// Returns 0 on success
WHISPER_API int whisper_set_mel(
        struct whisper_context * ctx,
        const float * data,
        int n_len,
        int n_mel);

WHISPER_API int whisper_set_mel_with_state(
        struct whisper_context * ctx,
        struct whisper_state * state,
        const float * data,
        int n_len,
        int n_mel);

// Run the Whisper encoder on the log mel spectrogram stored inside the default state in the provided whisper context.
// Make sure to call whisper_pcm_to_mel() or whisper_set_mel() first.
// offset can be used to specify the offset of the first frame in the spectrogram.
// Returns 0 on success
WHISPER_API int whisper_encode(
        struct whisper_context * ctx,
        int offset,
        int n_threads);

WHISPER_API int whisper_encode_with_state(
        struct whisper_context * ctx,
        struct whisper_state * state,
        int offset,
        int n_threads);

// Run the Whisper decoder to obtain the logits and probabilities for the next token.
// Make sure to call whisper_encode() first.
// tokens + n_tokens is the provided context for the decoder.
// n_past is the number of tokens to use from previous decoder calls.
// Returns 0 on success
// TODO: add support for multiple decoders
WHISPER_API int whisper_decode(
        struct whisper_context * ctx,
        const whisper_token * tokens,
        int n_tokens,
        int n_past,
        int n_threads);

WHISPER_API int whisper_decode_with_state(
        struct whisper_context * ctx,
        struct whisper_state * state,
        const whisper_token * tokens,
        int n_tokens,
        int n_past,
        int n_threads);

// Convert the provided text into tokens.
// The tokens pointer must be large enough to hold the resulting tokens.
// Returns the number of tokens on success, no more than n_max_tokens
// Returns a negative number on failure - the number of tokens that would have been returned
// TODO: not sure if correct
WHISPER_API int whisper_tokenize(
        struct whisper_context * ctx,
        const char * text,
        whisper_token * tokens,
        int n_max_tokens);

// Return the number of tokens in the provided text
// Equivalent to: -whisper_tokenize(ctx, text, NULL, 0)
int whisper_token_count(struct whisper_context * ctx, const char * text);

// Largest language id (i.e. number of available languages - 1)
WHISPER_API int whisper_lang_max_id();

// Return the id of the specified language, returns -1 if not found
// Examples:
//   "de" -> 2
//   "german" -> 2
WHISPER_API int whisper_lang_id(const char * lang);

// Return the short string of the specified language id (e.g. 2 -> "de"), returns nullptr if not found
WHISPER_API const char * whisper_lang_str(int id);

// Return the short string of the specified language name (e.g. 2 -> "german"), returns nullptr if not found
WHISPER_API const char * whisper_lang_str_full(int id);

// Use mel data at offset_ms to try and auto-detect the spoken language
// Make sure to call whisper_pcm_to_mel() or whisper_set_mel() first
// Returns the top language id or negative on failure
// If not null, fills the lang_probs array with the probabilities of all languages
// The array must be whisper_lang_max_id() + 1 in size
// ref: https://github.com/openai/whisper/blob/main/whisper/decoding.py#L18-L69
WHISPER_API int whisper_lang_auto_detect(
        struct whisper_context * ctx,
        int offset_ms,
        int n_threads,
        float * lang_probs);

WHISPER_API int whisper_lang_auto_detect_with_state(
        struct whisper_context * ctx,
        struct whisper_state * state,
        int offset_ms,
        int n_threads,
        float * lang_probs);

WHISPER_API int whisper_n_len           (struct whisper_context * ctx); // mel length
WHISPER_API int whisper_n_len_from_state(struct whisper_state * state); // mel length
WHISPER_API int whisper_n_vocab         (struct whisper_context * ctx);
WHISPER_API int whisper_n_text_ctx      (struct whisper_context * ctx);
WHISPER_API int whisper_n_audio_ctx     (struct whisper_context * ctx);
WHISPER_API int whisper_is_multilingual (struct whisper_context * ctx);

WHISPER_API int whisper_model_n_vocab      (struct whisper_context * ctx);
WHISPER_API int whisper_model_n_audio_ctx  (struct whisper_context * ctx);
WHISPER_API int whisper_model_n_audio_state(struct whisper_context * ctx);
WHISPER_API int whisper_model_n_audio_head (struct whisper_context * ctx);
WHISPER_API int whisper_model_n_audio_layer(struct whisper_context * ctx);
WHISPER_API int whisper_model_n_text_ctx   (struct whisper_context * ctx);
WHISPER_API int whisper_model_n_text_state (struct whisper_context * ctx);
WHISPER_API int whisper_model_n_text_head  (struct whisper_context * ctx);
WHISPER_API int whisper_model_n_text_layer (struct whisper_context * ctx);
WHISPER_API int whisper_model_n_mels       (struct whisper_context * ctx);
WHISPER_API int whisper_model_ftype        (struct whisper_context * ctx);
WHISPER_API int whisper_model_type         (struct whisper_context * ctx);

// Token logits obtained from the last call to whisper_decode()
// The logits for the last token are stored in the last row
// Rows: n_tokens
// Cols: n_vocab
WHISPER_API float * whisper_get_logits           (struct whisper_context * ctx);
WHISPER_API float * whisper_get_logits_from_state(struct whisper_state * state);

// Token Id -> String. Uses the vocabulary in the provided context
WHISPER_API const char * whisper_token_to_str(struct whisper_context * ctx, whisper_token token);
WHISPER_API const char * whisper_model_type_readable(struct whisper_context * ctx);


// Special tokens
WHISPER_API whisper_token whisper_token_eot (struct whisper_context * ctx);
WHISPER_API whisper_token whisper_token_sot (struct whisper_context * ctx);
WHISPER_API whisper_token whisper_token_solm(struct whisper_context * ctx);
WHISPER_API whisper_token whisper_token_prev(struct whisper_context * ctx);
WHISPER_API whisper_token whisper_token_nosp(struct whisper_context * ctx);
WHISPER_API whisper_token whisper_token_not (struct whisper_context * ctx);
WHISPER_API whisper_token whisper_token_beg (struct whisper_context * ctx);
WHISPER_API whisper_token whisper_token_lang(struct whisper_context * ctx, int lang_id);

// Task tokens
WHISPER_API whisper_token whisper_token_translate (struct whisper_context * ctx);
WHISPER_API whisper_token whisper_token_transcribe(struct whisper_context * ctx);

// Performance information from the default state.
WHISPER_API void whisper_print_timings(struct whisper_context * ctx);
WHISPER_API void whisper_reset_timings(struct whisper_context * ctx);

// Print system information
WHISPER_API const char * whisper_print_system_info(void);

////////////////////////////////////////////////////////////////////////////

// Available sampling strategies
enum whisper_sampling_strategy {
    WHISPER_SAMPLING_GREEDY,      // similar to OpenAI's GreedyDecoder
    WHISPER_SAMPLING_BEAM_SEARCH, // similar to OpenAI's BeamSearchDecoder
};

// Text segment callback
// Called on every newly generated text segment
// Use the whisper_full_...() functions to obtain the text segments
typedef void (*whisper_new_segment_callback)(struct whisper_context * ctx, struct whisper_state * state, int n_new, void * user_data);

// Progress callback
typedef void (*whisper_progress_callback)(struct whisper_context * ctx, struct whisper_state * state, int progress, void * user_data);

// Encoder begin callback
// If not NULL, called before the encoder starts
// If it returns false, the computation is aborted
typedef bool (*whisper_encoder_begin_callback)(struct whisper_context * ctx, struct whisper_state * state, void * user_data);

// Logits filter callback
// Can be used to modify the logits before sampling
// If not NULL, called after applying temperature to logits
typedef void (*whisper_logits_filter_callback)(
        struct whisper_context * ctx,
        struct whisper_state * state,
        const whisper_token_data * tokens,
        int n_tokens,
        float * logits,
        void * user_data);

// Parameters for the whisper_full() function
// If you change the order or add new parameters, make sure to update the default values in whisper.cpp:
// whisper_full_default_params()
struct whisper_full_params {
    enum whisper_sampling_strategy strategy;

    int n_threads;
    int n_max_text_ctx;     // max tokens to use from past text as prompt for the decoder
    int offset_ms;          // start offset in ms
    int duration_ms;        // audio duration to process in ms

    bool translate;
    bool no_context;        // do not use past transcription (if any) as initial prompt for the decoder
    bool no_timestamps;     // do not generate timestamps
    bool single_segment;    // force single segment output (useful for streaming)
    bool print_special;     // print special tokens (e.g. <SOT>, <EOT>, <BEG>, etc.)
    bool print_progress;    // print progress information
    bool print_realtime;    // print results from within whisper.cpp (avoid it, use callback instead)
    bool print_timestamps;  // print timestamps for each text segment when printing realtime

    // [EXPERIMENTAL] token-level timestamps
    bool  token_timestamps; // enable token-level timestamps
    float thold_pt;         // timestamp token probability threshold (~0.01)
    float thold_ptsum;      // timestamp token sum probability threshold (~0.01)
    int   max_len;          // max segment length in characters
    bool  split_on_word;    // split on word rather than on token (when used with max_len)
    int   max_tokens;       // max tokens per segment (0 = no limit)

    // [EXPERIMENTAL] speed-up techniques
    // note: these can significantly reduce the quality of the output
    bool speed_up;          // speed-up the audio by 2x using Phase Vocoder
    bool debug_mode;        // enable debug_mode provides extra info (eg. Dump log_mel)
    int  audio_ctx;         // overwrite the audio context size (0 = use default)

    // [EXPERIMENTAL] [TDRZ] tinydiarize
    bool tdrz_enable;       // enable tinydiarize speaker turn detection

    // A regular expression that matches tokens to suppress
    const char * suppress_regex;

    // tokens to provide to the whisper decoder as initial prompt
    // these are prepended to any existing text context from a previous call
    // use whisper_tokenize() to convert text to tokens
    // maximum of whisper_n_text_ctx()/2 tokens are used (typically 224)
    const char * initial_prompt;
    const whisper_token * prompt_tokens;
    int prompt_n_tokens;

    // for auto-detection, set to nullptr, "" or "auto"
    const char * language;
    bool detect_language;

    // common decoding parameters:
    bool suppress_blank;             // ref: https://github.com/openai/whisper/blob/f82bc59f5ea234d4b97fb2860842ed38519f7e65/whisper/decoding.py#L89
    bool suppress_non_speech_tokens; // ref: https://github.com/openai/whisper/blob/7858aa9c08d98f75575035ecd6481f462d66ca27/whisper/tokenizer.py#L224-L253

    float temperature;      // initial decoding temperature, ref: https://ai.stackexchange.com/a/32478
    float max_initial_ts;   // ref: https://github.com/openai/whisper/blob/f82bc59f5ea234d4b97fb2860842ed38519f7e65/whisper/decoding.py#L97
    float length_penalty;   // ref: https://github.com/openai/whisper/blob/f82bc59f5ea234d4b97fb2860842ed38519f7e65/whisper/transcribe.py#L267

    // fallback parameters
    // ref: https://github.com/openai/whisper/blob/f82bc59f5ea234d4b97fb2860842ed38519f7e65/whisper/transcribe.py#L274-L278
    float temperature_inc;
    float entropy_thold;    // similar to OpenAI's "compression_ratio_threshold"
    float logprob_thold;
    float no_speech_thold;  // TODO: not implemented

    struct {
        int best_of;    // ref: https://github.com/openai/whisper/blob/f82bc59f5ea234d4b97fb2860842ed38519f7e65/whisper/transcribe.py#L264
    } greedy;

    struct {
        int beam_size;  // ref: https://github.com/openai/whisper/blob/f82bc59f5ea234d4b97fb2860842ed38519f7e65/whisper/transcribe.py#L265

        float patience; // TODO: not implemented, ref: https://arxiv.org/pdf/2204.05424.pdf
    } beam_search;

    // called for every newly generated text segment
    whisper_new_segment_callback new_segment_callback;
    void * new_segment_callback_user_data;

    // called on each progress update
    whisper_progress_callback progress_callback;
    void * progress_callback_user_data;

    // called each time before the encoder starts
    whisper_encoder_begin_callback encoder_begin_callback;
    void * encoder_begin_callback_user_data;

    // called each time before ggml computation starts
    ggml_abort_callback abort_callback;
    void * abort_callback_user_data;

    // called by each decoder to filter obtained logits
    whisper_logits_filter_callback logits_filter_callback;
    void * logits_filter_callback_user_data;

    const whisper_grammar_element ** grammar_rules;
    size_t                           n_grammar_rules;
    size_t                           i_start_rule;
    float                            grammar_penalty;
};

// NOTE: this function allocates memory, and it is the responsibility of the caller to free the pointer - see whisper_free_context_params & whisper_free_params()
WHISPER_API struct whisper_context_params * whisper_context_default_params_by_ref();
WHISPER_API struct whisper_context_params whisper_context_default_params(void);
WHISPER_API struct whisper_full_params * whisper_full_default_params_by_ref(enum whisper_sampling_strategy strategy);
WHISPER_API struct whisper_full_params whisper_full_default_params(enum whisper_sampling_strategy strategy);

// Run the entire model: PCM -> log mel spectrogram -> encoder -> decoder -> text
// Not thread safe for same context
// Uses the specified decoding strategy to obtain the text.
WHISPER_API int whisper_full(
        struct whisper_context * ctx,
        struct whisper_full_params params,
        const float * samples,
        int n_samples);

WHISPER_API int whisper_full_with_state(
        struct whisper_context * ctx,
        struct whisper_state * state,
        struct whisper_full_params params,
        const float * samples,
        int n_samples);

// Split the input audio in chunks and process each chunk separately using whisper_full_with_state()
// Result is stored in the default state of the context
// Not thread safe if executed in parallel on the same context.
// It seems this approach can offer some speedup in some cases.
// However, the transcription accuracy can be worse at the beginning and end of each chunk.
WHISPER_API int whisper_full_parallel(
        struct whisper_context * ctx,
        struct whisper_full_params params,
        const float * samples,
        int n_samples,
        int n_processors);

// Number of generated text segments
// A segment can be a few words, a sentence, or even a paragraph.
WHISPER_API int whisper_full_n_segments           (struct whisper_context * ctx);
WHISPER_API int whisper_full_n_segments_from_state(struct whisper_state * state);

// Language id associated with the context's default state
WHISPER_API int whisper_full_lang_id(struct whisper_context * ctx);

// Language id associated with the provided state
WHISPER_API int whisper_full_lang_id_from_state(struct whisper_state * state);

// Get the start and end time of the specified segment
WHISPER_API int64_t whisper_full_get_segment_t0           (struct whisper_context * ctx, int i_segment);
WHISPER_API int64_t whisper_full_get_segment_t0_from_state(struct whisper_state * state, int i_segment);

WHISPER_API int64_t whisper_full_get_segment_t1           (struct whisper_context * ctx, int i_segment);
WHISPER_API int64_t whisper_full_get_segment_t1_from_state(struct whisper_state * state, int i_segment);

// Get whether the next segment is predicted as a speaker turn
WHISPER_API bool whisper_full_get_segment_speaker_turn_next(struct whisper_context * ctx, int i_segment);
WHISPER_API bool whisper_full_get_segment_speaker_turn_next_from_state(struct whisper_state * state, int i_segment);

// Get the text of the specified segment
WHISPER_API const char * whisper_full_get_segment_text           (struct whisper_context * ctx, int i_segment);
WHISPER_API const char * whisper_full_get_segment_text_from_state(struct whisper_state * state, int i_segment);

// Get number of tokens in the specified segment
WHISPER_API int whisper_full_n_tokens           (struct whisper_context * ctx, int i_segment);
WHISPER_API int whisper_full_n_tokens_from_state(struct whisper_state * state, int i_segment);

// Get the token text of the specified token in the specified segment
WHISPER_API const char * whisper_full_get_token_text           (struct whisper_context * ctx, int i_segment, int i_token);
WHISPER_API const char * whisper_full_get_token_text_from_state(struct whisper_context * ctx, struct whisper_state * state, int i_segment, int i_token);

WHISPER_API whisper_token whisper_full_get_token_id           (struct whisper_context * ctx, int i_segment, int i_token);
WHISPER_API whisper_token whisper_full_get_token_id_from_state(struct whisper_state * state, int i_segment, int i_token);

// Get token data for the specified token in the specified segment
// This contains probabilities, timestamps, etc.
WHISPER_API whisper_token_data whisper_full_get_token_data           (struct whisper_context * ctx, int i_segment, int i_token);
WHISPER_API whisper_token_data whisper_full_get_token_data_from_state(struct whisper_state * state, int i_segment, int i_token);

// Get the probability of the specified token in the specified segment
WHISPER_API float whisper_full_get_token_p           (struct whisper_context * ctx, int i_segment, int i_token);
WHISPER_API float whisper_full_get_token_p_from_state(struct whisper_state * state, int i_segment, int i_token);

////////////////////////////////////////////////////////////////////////////

// Temporary helpers needed for exposing ggml interface

WHISPER_API int          whisper_bench_memcpy          (int n_threads);
WHISPER_API const char * whisper_bench_memcpy_str      (int n_threads);
WHISPER_API int          whisper_bench_ggml_mul_mat    (int n_threads);
WHISPER_API const char * whisper_bench_ggml_mul_mat_str(int n_threads);

// Control logging output; default behavior is to print to stderr

WHISPER_API void whisper_log_set(ggml_log_callback log_callback, void * user_data);

#ifdef __cplusplus
}
#endif

#endif

//don't want to modify common.h, throw functions here
// convert timestamp to string, 6000 -> 01:00.000
std::string to_timestamp(int64_t t, bool comma = false);
// given a timestamp get the sample
int timestamp_to_sample(int64_t t, int n_samples, int whisper_sample_rate);
// check if file exists using ifstream
bool is_file_exist(const char *fileName);
otherarch/whispercpp/whisper_adapter.cpp (new file, 243 lines)

@@ -0,0 +1,243 @@
#include "model_adapter.h"
#include "otherarch/utils.h"

#include "whisper.cpp"

#define DR_WAV_IMPLEMENTATION
#include "dr_wav.h"

#include <cmath>
#include <fstream>
#include <cstdio>
#include <regex>
#include <string>
#include <thread>
#include <vector>
#include <cstring>
#include <mutex>

#define COMMON_SAMPLE_RATE 16000

#if defined(_MSC_VER)
#pragma warning(disable: 4244 4267) // possible loss of data
#endif

static int whisperdebugmode = 0;
static whisper_context * whisper_ctx = nullptr;
static std::string whisper_output_text = "";


static bool is_wav_buffer(const std::string buf) {
    // RIFF ref: https://en.wikipedia.org/wiki/Resource_Interchange_File_Format
    // WAV ref: https://www.mmsp.ece.mcgill.ca/Documents/AudioFormats/WAVE/WAVE.html
    if (buf.size() < 12 || buf.substr(0, 4) != "RIFF" || buf.substr(8, 4) != "WAVE") {
        return false;
    }
    uint32_t chunk_size = *reinterpret_cast<const uint32_t*>(buf.data() + 4);
    if (chunk_size + 8 != buf.size()) {
        return false;
    }
    return true;
}

static bool read_wav(const std::string & b64data, std::vector<float>& pcmf32, std::vector<std::vector<float>>& pcmf32s, bool stereo)
{
    drwav wav;
    std::vector<uint8_t> wav_data = kcpp_base64_decode(b64data);

    if (drwav_init_memory(&wav, wav_data.data(), wav_data.size(), nullptr) == false) {
        printf("error: failed to open WAV file from stdin\n");
        return false;
    }

    if (wav.channels != 1 && wav.channels != 2) {
        printf("WAV file must be mono or stereo\n");
        drwav_uninit(&wav);
        return false;
    }

    if (wav.sampleRate != COMMON_SAMPLE_RATE) {
        printf("WAV file must be %i kHz\n", COMMON_SAMPLE_RATE/1000);
        drwav_uninit(&wav);
        return false;
    }

    if (wav.bitsPerSample != 16) {
        printf("WAV file must be 16-bit\n");
        drwav_uninit(&wav);
        return false;
    }

    const uint64_t n = wav_data.empty() ? wav.totalPCMFrameCount : wav_data.size()/(wav.channels*wav.bitsPerSample/8);

    std::vector<int16_t> pcm16;
    pcm16.resize(n*wav.channels);
    drwav_read_pcm_frames_s16(&wav, n, pcm16.data());
    drwav_uninit(&wav);

    // convert to mono, float
    pcmf32.resize(n);
    if (wav.channels == 1) {
        for (uint64_t i = 0; i < n; i++) {
            pcmf32[i] = float(pcm16[i])/32768.0f;
        }
    } else {
        for (uint64_t i = 0; i < n; i++) {
            pcmf32[i] = float(pcm16[2*i] + pcm16[2*i + 1])/65536.0f;
        }
    }

    return true;
}

static std::string output_txt(struct whisper_context * ctx, std::vector<std::vector<float>> pcmf32s) {

    std::string outtxt = "";
    const int n_segments = whisper_full_n_segments(ctx);
    for (int i = 0; i < n_segments; ++i) {
        const char * text = whisper_full_get_segment_text(ctx, i);
        outtxt += text;
    }
    return outtxt;
}

void cb_log_disable(enum ggml_log_level , const char * , void * ) { }

static std::string whisperplatformenv, whisperdeviceenv, whispervulkandeviceenv;
bool whispertype_load_model(const whisper_load_model_inputs inputs)
{
    //duplicated from expose.cpp
    int cl_parseinfo = inputs.clblast_info; //first digit is whether configured, second is platform, third is devices
    std::string usingclblast = "GGML_OPENCL_CONFIGURED="+std::to_string(cl_parseinfo>0?1:0);
    putenv((char*)usingclblast.c_str());
    cl_parseinfo = cl_parseinfo%100; //keep last 2 digits
    int platform = cl_parseinfo/10;
    int devices = cl_parseinfo%10;
    whisperplatformenv = "GGML_OPENCL_PLATFORM="+std::to_string(platform);
    whisperdeviceenv = "GGML_OPENCL_DEVICE="+std::to_string(devices);
    putenv((char*)whisperplatformenv.c_str());
    putenv((char*)whisperdeviceenv.c_str());
    std::string vulkan_info_raw = inputs.vulkan_info;
    std::string vulkan_info_str = "";
    for (size_t i = 0; i < vulkan_info_raw.length(); ++i) {
        vulkan_info_str += vulkan_info_raw[i];
        if (i < vulkan_info_raw.length() - 1) {
            vulkan_info_str += ",";
        }
    }
    if(vulkan_info_str=="")
    {
        vulkan_info_str = "0";
    }
    whispervulkandeviceenv = "GGML_VK_VISIBLE_DEVICES="+vulkan_info_str;
    putenv((char*)whispervulkandeviceenv.c_str());


    std::string modelfile = inputs.model_filename;
    printf("\nLoading Whisper Model: %s",modelfile.c_str());

    whisperdebugmode = inputs.debugmode;
    if (whisperdebugmode!=1) {
        whisper_log_set(cb_log_disable, NULL);
    }

    // whisper init
    struct whisper_context_params cparams = whisper_context_default_params();
    cparams.use_gpu = true;
    cparams.flash_attn = false;

    whisper_ctx = whisper_init_from_file_with_params(modelfile.c_str(), cparams);

    if (whisper_ctx == nullptr) {
        printf("\nWhisper Load Error: Failed to initialize whisper context!\n");
        return false;
    }

    printf("\nWhisper Load Complete.\n");

    return true;
}

whisper_generation_outputs whispertype_generate(const whisper_generation_inputs inputs)
{
    whisper_generation_outputs output;

    if(whisper_ctx==nullptr)
    {
        printf("\nWarning: KCPP whisper not initialized!\n");
        output.text = "";
        output.status = 0;
        return output;
    }

    if(!inputs.quiet)
    {
        printf("\nWhisper Transcribe Generating...");
    }

    const std::string b64data = std::string(inputs.audio_data);
    const std::string initprompt = std::string(inputs.prompt);

    std::vector<float> pcmf32;               // mono-channel F32 PCM
    std::vector<std::vector<float>> pcmf32s; // stereo-channel F32 PCM

    if (!::read_wav(b64data, pcmf32, pcmf32s, false)) {
        printf("\nWhisper: Failed to read input wav data!\n");
        output.text = "";
        output.status = 0;
        return output;
    }

    // run the inference
    whisper_full_params wparams = whisper_full_default_params(WHISPER_SAMPLING_GREEDY);
    wparams.strategy = WHISPER_SAMPLING_GREEDY;
    wparams.print_realtime = false;
    wparams.print_progress = false;
    wparams.print_timestamps = false;
    wparams.print_special = false;
    wparams.translate = false;
    wparams.language = "en";
    wparams.detect_language = false;
    wparams.n_threads = 4;
    wparams.n_max_text_ctx = wparams.n_max_text_ctx;
    wparams.offset_ms = 0;
    wparams.duration_ms = 0;
    wparams.token_timestamps = false;
    wparams.thold_pt = 0.01f;
    wparams.max_len = 100;
    wparams.split_on_word = false;
    wparams.audio_ctx = 0;
    wparams.speed_up = false;
    wparams.debug_mode = false;
    wparams.tdrz_enable = false;
    wparams.suppress_regex = nullptr;
    wparams.initial_prompt = initprompt.c_str();
    wparams.greedy.best_of = -1;
    wparams.beam_search.beam_size = -1;
    wparams.temperature_inc = 0.2f;
    wparams.temperature = 0.0f;
    wparams.entropy_thold = 2.40f;
    wparams.logprob_thold = -1.00f;
    wparams.no_timestamps = true;

    if (whisper_full_parallel(whisper_ctx, wparams, pcmf32.data(), pcmf32.size(), 1) != 0) {
        printf("\nWhisper: Failed to process audio!\n");
        output.text = "";
        output.status = 0;
        return output;
    }

    if (!inputs.quiet && whisperdebugmode==1) {
        whisper_print_timings(whisper_ctx);
    }

    // output text transcription
    whisper_output_text = output_txt(whisper_ctx, pcmf32s);
    if(!inputs.quiet)
    {
        printf("\nWhisper Transcribe Output: %s",whisper_output_text.c_str());
    }
    output.text = whisper_output_text.c_str();
    output.status = 1;
    return output;
}