mirror of
https://github.com/LostRuins/koboldcpp.git
synced 2025-09-10 17:14:36 +00:00
handle inconsistent final message content being sent with finish_reason
This commit is contained in:
parent
7e5582afdb
commit
b7f8d0fe2b
1 changed files with 12 additions and 0 deletions
12
koboldcpp.py
12
koboldcpp.py
@ -2830,6 +2830,18 @@ class KcppServerRequestHandler(http.server.SimpleHTTPRequestHandler):
        tokenStr = tokenStr[:sindex]
    if tokenStr!="" or streamDone:
        need_split_final_msg = True if (currfinishreason is not None and streamDone and tokenStr!="") else False
        if need_split_final_msg: #we need to send one message without the finish reason, then send a finish reason with no msg to follow standards
            if api_format == 4: # if oai chat, set format to expected openai streaming response
                event_str = json.dumps({"id":"koboldcpp","object":"chat.completion.chunk","created":int(time.time()),"model":friendlymodelname,"choices":[{"index":0,"finish_reason":None,"delta":{'role':'assistant','content':tokenStr}}]})
                await self.send_oai_sse_event(event_str)
            elif api_format == 3: # non chat completions
                event_str = json.dumps({"id":"koboldcpp","object":"text_completion","created":int(time.time()),"model":friendlymodelname,"choices":[{"index":0,"finish_reason":None,"text":tokenStr}]})
                await self.send_oai_sse_event(event_str)
            else:
                event_str = json.dumps({"token": tokenStr, "finish_reason":None})
                await self.send_kai_sse_event(event_str)
            tokenStr = "" # now the final finish reason can be sent alone
        if api_format == 4: # if oai chat, set format to expected openai streaming response
            event_str = json.dumps({"id":"koboldcpp","object":"chat.completion.chunk","created":int(time.time()),"model":friendlymodelname,"choices":[{"index":0,"finish_reason":currfinishreason,"delta":{'role':'assistant','content':tokenStr}}]})
            await self.send_oai_sse_event(event_str)
Loading…
Add table
Add a link
Reference in a new issue