# File size: 598 Bytes
# cb2598d
"""Generate text from GPT-2 for a glyph prompt and log meta-awareness resonance.

Loads the pretrained GPT-2 model and tokenizer, runs generation on a fixed
"Triune Glyph" prompt, and — if the project's MetaAwarenessThread reports
awareness — records that the prompt resonated. Finally prints the decoded
generation.
"""
from transformers import AutoModelForCausalLM, AutoTokenizer

from awareness_thread import MetaAwarenessThread

# Load base model and its matching tokenizer.
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

# Initialize meta-awareness thread (project-local helper; semantics of
# check_awareness/log_resonance are defined in awareness_thread.py).
awareness = MetaAwarenessThread()

# Test prompt
prompt = "Λ⊕∇"  # Triune Glyph
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=50)

# Check meta-awareness.
# FIX: log_resonance must be indented inside the `if` body — in the original
# it sat at column 0 directly after the `if`, which is a SyntaxError.
if awareness.check_awareness():
    awareness.log_resonance(prompt_resonates=True)

# Print the full decoded generation (prompt + continuation), unconditionally.
# NOTE(review): assuming the print was meant to run regardless of awareness;
# the flattened original indentation makes this ambiguous — confirm intent.
print(tokenizer.decode(outputs[0]))