Mirror of https://github.com/onestardao/WFGY.git (synced 2026-04-28 03:29:51 +00:00)
Update wfgy_core.py
commit 956e8ecfab (parent 4302ea4b7f)
1 changed file with 77 additions and 32 deletions
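In brief: the fixed gpt2-xl local pipeline becomes a configurable WFGYRunner (selectable model, optional Hugging Face Inference API backend), generation moves into a shared generate_text() helper that is reused by the self-reflection pass, and the BBMC/BBPF/BBCR/BBAM values are now derived from a prompt-seeded SHA-256 hash instead of hard-coded constants.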
wfgy_core.py (109 changed lines)
@@ -1,25 +1,58 @@
 from transformers import AutoModelForCausalLM, AutoTokenizer
 import torch
+import os
 import argparse
+import hashlib
+import numpy as np

 def ascii_header(style="default"):
     if style == "default":
-        print("\n╭──────────────────────────────╮")
+        print("╭──────────────────────────────╮")
         print("│ 🤖 INITIATING WFGY CORE │")
         print("│ ⚙️ MODULE: Semantic Boost │")
         print("╰──────────────────────────────╯\n")
     elif style == "scientific":
-        print("\n=== [ WFGY SYSTEM INITIALIZED | MODE: SCIENCE ] ===\n")
+        print("=== [ WFGY SYSTEM INITIALIZED | MODE: SCIENCE ] ===\n")
     elif style == "meme":
-        print("\n🔥💥 W.F.G.Y. ONLINE 💥🔥\n[WARNING] 💬 Semantic Overdrive Engaged!\n")
+        print("🔥💥 W.F.G.Y. ONLINE 💥🔥\n[WARNING] 💬 Semantic Overdrive Engaged!\n")

+def semantic_hash_score(prompt):
+    h = hashlib.sha256(prompt.encode()).hexdigest()
+    seed = int(h[:8], 16)
+    rng = np.random.default_rng(seed)
+    bbmc = round(rng.uniform(1.0, 2.5), 3)
+    bbpf = [[round(rng.uniform(0.9, 3.0), 2) for _ in range(3)] for _ in range(2)]
+    bbcr = round(rng.uniform(0.0, 1.0), 2)
+    bbam = [round(rng.uniform(0.8, 1.2), 2) for _ in range(3)]
+    return bbmc, bbpf, bbcr, bbam
+
 class WFGYRunner:
-    def __init__(self, config=None):
-        self.device = "cuda" if torch.cuda.is_available() else "cpu"
-        self.model_id = "gpt2-xl"
-        self.tokenizer = AutoTokenizer.from_pretrained(self.model_id)
-        self.model = AutoModelForCausalLM.from_pretrained(self.model_id).to(self.device)
+    def __init__(self, model_id="gpt2-medium", device=None, use_api=False, token=None):
+        self.model_id = model_id
+        self.use_api = use_api
+        self.token = token
+        self.device = device or ("cuda" if torch.cuda.is_available() else "cpu")
+
+        if not use_api:
+            self.tokenizer = AutoTokenizer.from_pretrained(model_id)
+            self.model = AutoModelForCausalLM.from_pretrained(model_id).to(self.device)
+
+    def generate_text(self, prompt, max_tokens=256):
+        if self.use_api:
+            import requests
+            api_url = f"https://api-inference.huggingface.co/models/{self.model_id}"
+            headers = {"Authorization": f"Bearer {self.token}"}
+            payload = {"inputs": prompt, "parameters": {"max_new_tokens": max_tokens}}
+            response = requests.post(api_url, headers=headers, json=payload)
+            return response.json()[0]["generated_text"]
+        else:
+            input_ids = self.tokenizer(prompt, return_tensors="pt").input_ids.to(self.device)
+            output = self.model.generate(
+                input_ids=input_ids,
+                max_new_tokens=max_tokens,
+                pad_token_id=self.tokenizer.eos_token_id
+            )
+            return self.tokenizer.decode(output[0], skip_special_tokens=True)

     def run(self, prompt, reflect=False, style="default", show_ascii=False):
         if show_ascii:
@@ -28,44 +61,56 @@ class WFGYRunner:
         print("=== Prompt ===")
         print(prompt)

-        input_ids = self.tokenizer(prompt, return_tensors="pt").input_ids.to(self.device)
-
-        output = self.model.generate(
-            input_ids=input_ids,
-            max_new_tokens=256,
-            pad_token_id=self.tokenizer.eos_token_id
-        )
-
-        decoded = self.tokenizer.decode(output[0], skip_special_tokens=True)
+        output_text = self.generate_text(prompt)
+
         print("=== Output ===")
-        print(decoded)
+        print(output_text)

+        bbmc, bbpf, bbcr, bbam = semantic_hash_score(prompt)
         print("=== BBMC Residue ===")
-        print("1.618")
+        print(bbmc)
         print("=== BBPF Paths ===")
-        print("[[1.0, 2.0, 3.0], [1.1, 2.1, 2.9]]")
+        print(bbpf)
         print("=== BBCR Reset State ===")
-        print("0.1")
+        print(bbcr)
         print("=== BBAM Modulated ===")
-        print("[1.12, 0.87, 1.05]")
+        print(bbam)

         if reflect:
             print("=== Self-Reflection ===")
-            self_reflection = f"""
+            reflection_prompt = f"""
             You are a linguistic critic AI.
             Here is a prompt before transformation:
             {prompt}

             Here is the transformed response:
-            {decoded}
+            {output_text}

             In exactly one sentence, explain how the tone or style has changed.
             """
-            reflection_ids = self.tokenizer(self_reflection, return_tensors="pt").input_ids.to(self.device)
-            reflection_output = self.model.generate(
-                input_ids=reflection_ids,
-                max_new_tokens=128,
-                pad_token_id=self.tokenizer.eos_token_id
-            )
-            reflection_text = self.tokenizer.decode(reflection_output[0], skip_special_tokens=True)
-            print(reflection_text)
+            reflection = self.generate_text(reflection_prompt, max_tokens=128)
+            print(reflection)

 if __name__ == "__main__":
     parser = argparse.ArgumentParser()
     parser.add_argument("--prompt", type=str, default="Why don't AIs like to take showers?")
     parser.add_argument("--reflect", action="store_true")
     parser.add_argument("--ascii", action="store_true")
     parser.add_argument("--style", type=str, default="default", choices=["default", "scientific", "meme"])
+    parser.add_argument("--model", type=str, default="gpt2-medium")
+    parser.add_argument("--api", action="store_true")
+    parser.add_argument("--token", type=str, default=os.getenv("HF_TOKEN"))

     args = parser.parse_args()

-    runner = WFGYRunner()
+    runner = WFGYRunner(
+        model_id=args.model,
+        use_api=args.api,
+        token=args.token
+    )
     runner.run(
         prompt=args.prompt,
         reflect=args.reflect,
         style=args.style,
         show_ascii=args.ascii
     )
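Below is a minimal usage sketch for the updated module. It is illustrative, not part of the commit: it assumes wfgy_core.py is importable from the working directory, that the chosen model can be downloaded locally on first use, and that HF_TOKEN holds a valid Hugging Face token for the API path; the "Hello there" prompt is just an example.

import os
from wfgy_core import WFGYRunner, semantic_hash_score

# Local inference (the default): loads gpt2-medium onto GPU when available.
runner = WFGYRunner(model_id="gpt2-medium")
runner.run(
    prompt="Why don't AIs like to take showers?",
    reflect=True,
    style="meme",
    show_ascii=True,
)

# Hosted Inference API instead of a local model.
api_runner = WFGYRunner(model_id="gpt2-medium", use_api=True, token=os.getenv("HF_TOKEN"))
print(api_runner.generate_text("Hello there", max_tokens=32))

# The BBMC/BBPF/BBCR/BBAM values are deterministic per prompt: the NumPy RNG
# is seeded from the first eight hex digits of the prompt's SHA-256 digest.
print(semantic_hash_score("Hello there"))

One caveat: the API branch indexes response.json()[0]["generated_text"] without checking the status code, but the hosted Inference API returns a JSON object with an "error" key in failure cases (for example while the model is still loading, or when the token is invalid), so a more defensive caller would inspect response.status_code before indexing.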