mirror of
https://github.com/onestardao/WFGY.git
synced 2026-04-28 03:29:51 +00:00
Update wfgy_core.py
This commit is contained in:
parent
2327e09b93
commit
b83e34316f
1 changed file with 2 additions and 2 deletions
|
|
@ -4,7 +4,7 @@ from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
|
|||
from huggingface_hub import InferenceClient
|
||||
|
||||
class WFGYRunner:
|
||||
def __init__(self, model_id="google/flan-t5-xxl", use_remote=False):
|
||||
def __init__(self, model_id="mistralai/Mistral-7B-Instruct-v0.1", use_remote=False):
|
||||
self.use_remote = use_remote
|
||||
self.device = "cuda" if torch.cuda.is_available() else "cpu"
|
||||
self.model_id = model_id
|
||||
|
|
@ -33,7 +33,7 @@ class WFGYRunner:
|
|||
print(prompt)
|
||||
|
||||
if self.use_remote:
|
||||
result = self.client.text2text_generation(
|
||||
result = self.client.text_generation(
|
||||
prompt=prompt,
|
||||
max_new_tokens=max_new_tokens,
|
||||
temperature=temperature
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue