mirror of
https://github.com/LostRuins/koboldcpp.git
synced 2025-09-11 01:24:36 +00:00
convert : fix broken sentencepiece vocab (#14416)
This commit is contained in:
parent
8846aace49
commit
f667f1e624
1 changed file with 5 additions and 5 deletions
|
@ -936,7 +936,11 @@ class TextModel(ModelBase):
|
||||||
scores: list[float] = [-10000.0] * vocab_size
|
scores: list[float] = [-10000.0] * vocab_size
|
||||||
toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size
|
toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size
|
||||||
|
|
||||||
for token_id in range(vocab_size):
|
for token_id in range(tokenizer.vocab_size()):
|
||||||
|
if token_id >= vocab_size:
|
||||||
|
logger.warning(f'ignore tokens from {token_id}: id is out of range, max={vocab_size - 1}')
|
||||||
|
break
|
||||||
|
|
||||||
piece = tokenizer.IdToPiece(token_id)
|
piece = tokenizer.IdToPiece(token_id)
|
||||||
text = piece.encode("utf-8")
|
text = piece.encode("utf-8")
|
||||||
score = tokenizer.GetScore(token_id)
|
score = tokenizer.GetScore(token_id)
|
||||||
|
@ -951,10 +955,6 @@ class TextModel(ModelBase):
|
||||||
elif tokenizer.IsByte(token_id):
|
elif tokenizer.IsByte(token_id):
|
||||||
toktype = SentencePieceTokenTypes.BYTE
|
toktype = SentencePieceTokenTypes.BYTE
|
||||||
|
|
||||||
if token_id >= vocab_size:
|
|
||||||
logger.warning(f'ignore tokens from {token_id}: id is out of range, max={vocab_size - 1}')
|
|
||||||
break
|
|
||||||
|
|
||||||
tokens[token_id] = text
|
tokens[token_id] = text
|
||||||
scores[token_id] = score
|
scores[token_id] = score
|
||||||
toktypes[token_id] = toktype
|
toktypes[token_id] = toktype
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue