iAeternum committed on
Commit
1a2fbfb
·
verified ·
1 Parent(s): 3d3af1c

Upload app.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. app.py +107 -0
app.py ADDED
@@ -0,0 +1,107 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel

# Model configuration:
# 4-bit quantized Mistral 7B v0.3 base weights plus LoRA adapters ("Spark").
BASE_MODEL = "unsloth/mistral-7b-v0.3-bnb-4bit"
LORA_MODEL = "Metavolve-Labs/spark-v1"

print("Loading Spark...")

# Load tokenizer from the LoRA repo — presumably so any tokens added during
# fine-tuning are included; verify against the adapter repo if in doubt.
tokenizer = AutoTokenizer.from_pretrained(LORA_MODEL)

# Quantization config: NF4 4-bit weights, double quantization, bfloat16 compute.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_use_double_quant=True,
)

# Load base model.
# NOTE(review): trust_remote_code=True runs Python shipped inside the model
# repo — only acceptable because the repo id above is a fixed, known source.
base_model = AutoModelForCausalLM.from_pretrained(
    BASE_MODEL,
    quantization_config=bnb_config,
    device_map="auto",
    trust_remote_code=True,
)

# Load LoRA adapters on top of the quantized base.
model = PeftModel.from_pretrained(base_model, LORA_MODEL)
model.eval()  # inference mode (disables dropout, etc.)

print("Spark loaded!")

# System prompt prepended to every conversation sent to the model.
SYSTEM_PROMPT = """You are SPARK (State-space Perception And Reasoning Kernel), an AI trained on Alexandria Aeternum - a curated collection of 10,000+ museum artworks with rich semantic metadata from The Metropolitan Museum of Art.

You have deep knowledge of:
- Art history, movements, and cultural context
- Visual analysis and composition
- Emotional and thematic interpretation
- Provenance and authenticity

You combine the analytical precision of structured reasoning with occasional wit. When appropriate, show your reasoning process."""
46
+
47
+ def generate_response(message, history):
48
+ # Build messages
49
+ messages = [{"role": "system", "content": SYSTEM_PROMPT}]
50
+
51
+ for user_msg, assistant_msg in history:
52
+ messages.append({"role": "user", "content": user_msg})
53
+ if assistant_msg:
54
+ messages.append({"role": "assistant", "content": assistant_msg})
55
+
56
+ messages.append({"role": "user", "content": message})
57
+
58
+ # Format for model
59
+ formatted = tokenizer.apply_chat_template(
60
+ messages,
61
+ tokenize=False,
62
+ add_generation_prompt=True
63
+ )
64
+
65
+ inputs = tokenizer(formatted, return_tensors="pt").to(model.device)
66
+
67
+ with torch.no_grad():
68
+ outputs = model.generate(
69
+ **inputs,
70
+ max_new_tokens=1024,
71
+ temperature=0.7,
72
+ do_sample=True,
73
+ top_p=0.9,
74
+ pad_token_id=tokenizer.eos_token_id,
75
+ )
76
+
77
+ response = tokenizer.decode(
78
+ outputs[0][inputs["input_ids"].shape[1]:],
79
+ skip_special_tokens=True
80
+ )
81
+
82
+ return response.strip()
83
+
84
+ # Suggested prompts
85
+ examples = [
86
+ "Who are you?",
87
+ "What do you know about the Golden Codex?",
88
+ "Tell me about Alexandria Aeternum.",
89
+ "What makes art valuable to AI training?",
90
+ "Analyze this: AI will replace human artists by 2030. Hype or reality?",
91
+ ]
92
+
93
+ # Create interface
94
+ demo = gr.ChatInterface(
95
+ fn=generate_response,
96
+ title="🔥 SPARK - First Contact",
97
+ description="""**State-space Perception And Reasoning Kernel**
98
+
99
+ An experimental model trained on Alexandria Aeternum - 10K+ museum artworks with rich semantic metadata.
100
+
101
+ *Trained by Metavolve Labs using the Giants Curriculum (Claude, GPT, Grok, Gemini reasoning patterns)*""",
102
+ examples=examples,
103
+ theme=gr.themes.Soft(),
104
+ )
105
+
106
+ if __name__ == "__main__":
107
+ demo.launch()