diff --git a/example_02_self_reflection.py b/example_02_self_reflection.py
index b3ff48ef..ef118933 100644
--- a/example_02_self_reflection.py
+++ b/example_02_self_reflection.py
@@ -1,37 +1,14 @@
-# example_02_self_reflection.py
-
+import argparse
 from wfgy_core import WFGYRunner
-from default_config import DEFAULT_CONFIG
-from transformers import pipeline
 
-prompt = "Why don't AIs like to take showers?"
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--reflect", action="store_true", help="Enable self-reflection")
+    parser.add_argument("--ascii", action="store_true", help="Show ASCII banner")
+    parser.add_argument("--style", type=str, default="default", help="Choose style: default/scientific/meme")
 
-runner = WFGYRunner(config=DEFAULT_CONFIG)
-results = runner.run(prompt)
+    args = parser.parse_args()
 
-reflection_input = (
-    f"You are a linguistic critic AI.\n"
-    f"Here is a prompt before transformation:\n{prompt}\n\n"
-    f"Here is the transformed response:\n{results['output']}\n\n"
-    "In exactly one sentence, explain how the tone or style has changed."
-)
-
-reflector = pipeline("text-generation", model="gpt2")
-generated = reflector(reflection_input, max_length=60, do_sample=True, temperature=0.8)[0]["generated_text"]
-reflection_output = generated.strip()
-
-print("=== Prompt ===")
-print(prompt)
-print("=== Output ===")
-print(results["output"])
-print("=== BBMC Residue ===")
-print(results["BBMC_residue"])
-print("=== BBPF Paths ===")
-print(results["BBPF_paths"])
-print("=== BBCR Reset State ===")
-print(results["BBCR_reset_state"])
-print("=== BBAM Modulated ===")
-print(results["BBAM_modulated"])
-print("=== Self-Reflection ===")
-print(reflection_input)
-print(reflection_output)
+    prompt = "Why don't AIs like to take showers?"
+    runner = WFGYRunner()
+    runner.run(prompt, reflect=args.reflect, style=args.style, show_ascii=args.ascii)