Mirror of https://github.com/kvcache-ai/ktransformers.git (synced 2025-09-09 22:05:30 +00:00)
⚡ update force_think config
This commit is contained in:
parent e536e1420d
commit 6f3a39be08
2 changed files with 3 additions and 3 deletions
@@ -324,7 +324,7 @@ class TransformersInterface(BackendInterfaceBase):
             #input_ids = torch.tensor([[6366]], device=input_ids.device)
         else:
             raise ValueError("local_messages should be List or str")
-        if Config().force_think:
+        if Config().user_force_think:
             token_thinks = torch.tensor([self.tokenizer.encode("<think>\\n",add_special_tokens=False)])
             input_ids = torch.cat(
                 [input_ids, token_thinks], dim=1
@@ -333,7 +333,7 @@ class TransformersInterface(BackendInterfaceBase):
         self.profiler.pause_timer("tokenize")
 
         self.profiler.create_and_start_timer("prefill")
-        if Config().force_think:
+        if Config().user_force_think:
             print("<think>\n")
             yield "<think>\n"
         for t in self.prefill(input_ids, self.check_is_new(thread_id)):
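
For context, the renamed user_force_think flag gates two things visible in the hunks above: appending the token ids of "<think>\n" to the prompt before prefill, and echoing/yielding the same marker back to the client so the forced thinking block shows up in the stream. Below is a minimal, self-contained sketch of that behaviour under assumptions: it uses a Hugging Face tokenizer, and the helper names apply_force_think and stream_with_forced_think are illustrative, not from the repository.

    # Sketch of the behaviour gated by user_force_think, based on the diff above.
    # Config and the surrounding generator are simplified away; only the token
    # injection and the client-facing "<think>\n" echo are reproduced here.
    import torch

    def apply_force_think(tokenizer, input_ids: torch.Tensor) -> torch.Tensor:
        # Append the token ids of "<think>\n" to the prompt so a reasoning model
        # starts its response inside the thinking block.
        token_thinks = torch.tensor(
            [tokenizer.encode("<think>\n", add_special_tokens=False)],
            device=input_ids.device,
        )
        return torch.cat([input_ids, token_thinks], dim=1)

    def stream_with_forced_think(tokenizer, input_ids, prefill, user_force_think=True):
        # Mirrors the second hunk: when the flag is on, emit "<think>\n" to the
        # client first, then stream whatever the prefill/decode loop produces.
        if user_force_think:
            input_ids = apply_force_think(tokenizer, input_ids)
            yield "<think>\n"
        yield from prefill(input_ids)

In the real interface the flag is read from Config() at request time, so toggling it only changes whether the extra tokens are concatenated and the marker is yielded; the rest of the prefill path is unchanged.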