import os

import torch

from modeling_physics_rl import PhysicsModel, Config


def interactive_session():
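    """Interactive REPL over the pre-trained FLUX TTT physics model."""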
    print("\n" + "=" * 60)
    print(" 🧪 FLUX TTT INFERENCE LAB (Pre-Trained)")
    print("Commands:")
    print(" - Type your question")
    print(" - Type 'exit' to quit")
    print("=" * 60 + "\n")

    print("🔧 Initializing Physics Model...")
    model = PhysicsModel()

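    # Run on GPU when available; the controller and base LLM share one device.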
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f" 📍 Using Device: {device}")
    model.to(device)
    model.llm.to(device)

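    # Trained weights are looked up relative to the working directory.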
    controller_path = "final_physics_controller.pt"
    adapters_path = "final_flux_adapters.pt"

    try:
        if os.path.exists(controller_path):
            print(f" 📂 Loading Controller: {controller_path}")
            model.controller.load_state_dict(torch.load(controller_path, map_location=device))
        else:
            print(f" ⚠️ Warning: Controller weights not found at {controller_path}")

        if os.path.exists(adapters_path):
            print(f" 📂 Loading Flux Adapters: {adapters_path}")
            states = torch.load(adapters_path, map_location=device)

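            # Accept either checkpoint layout: a list of per-layer state
            # dicts, or one state dict for the whole flux_layers module.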
            if isinstance(states, list):
                for layer, state in zip(model.flux_layers, states):
                    layer.load_state_dict(state)
            else:
                model.flux_layers.load_state_dict(states)
        else:
            print(f" ⚠️ Warning: Adapter weights not found at {adapters_path}")

    except Exception as e:
        print(f" ❌ Error loading weights: {e}")
        print(" ⚠️ Proceeding with random/base weights...")

print(" β
Ready for Inference!\n")
|
|
|
|
|
|
|
|
|
model.eval()
|
|
|
|
|
|
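    # Main loop: recompute the modulation for every question, then sample
    # from the modulated LLM.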
    while True:
        try:
            user_input = input("USER: ")
            if user_input.lower() in ["exit", "quit"]:
                break

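            # Simple chat-style prompt; the answer follows "Model: ".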
prompt = f"User: {user_input}\nModel: "
|
|
|
|
|
|
inputs = model.tokenizer(prompt, return_tensors="pt").to(device)
|
|
|
|
|
|
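            # Inference only: derive the TTT modulation from the prompt
            # embeddings and activate it on the flux adapters.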
            with torch.no_grad():
                h_init = model.get_embeddings(inputs.input_ids).to(Config.DTYPE)
                modulation = model.controller(h_init)
                model.set_active_modulation(modulation)

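                # Conservative sampling keeps answers focused and limits loops.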
                out_ids = model.llm.generate(
                    **inputs,
                    max_new_tokens=128,
                    do_sample=True,
                    temperature=0.6,
                    top_p=0.9,
                    repetition_penalty=1.2,
                    pad_token_id=model.tokenizer.eos_token_id,
                )

                model.clear_modulation()

            response = model.tokenizer.decode(out_ids[0], skip_special_tokens=True)

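            # Strip the echoed prompt so only the generated answer prints.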
            if response.startswith(prompt):
                response = response[len(prompt):].strip()
            elif "Model:" in response:
                response = response.split("Model:")[-1].strip()

print(f"MODEL: {response}")
|
|
|
print(f" [Modulation Norm: {torch.norm(modulation).item():.2f}]")
|
|
|
print("")
|
|
|
|
|
|
except KeyboardInterrupt:
|
|
|
break
|
|
|
except Exception as e:
|
|
|
print(f"Error: {e}")
|
|
|
|
|
|

if __name__ == "__main__":
    interactive_session()