Remostart committed on
Commit
d4be4e6
·
verified ·
1 Parent(s): 1212dae

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +88 -0
app.py ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hugging Face Hub model id for the fine-tuned Plutus tutor model.
MODEL_NAME = "ubiodee/Plutus_Tutor_new"

# Initialize tokenizer and model once at import time (the Space process is
# kept warm, so this startup cost is paid only once).
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)

# Fix: the original unconditionally called model.to("cuda"), which raises on
# CPU-only hosts (or when ZeroGPU has not attached a device). Pick the best
# available device instead.
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)
model.eval()
15
+
16
# Response generation function
def generate_response(prompt, personality, level, topic):
    """Generate a tailored tutoring answer for the given prompt.

    Parameters
    ----------
    prompt : str
        The user's question about Plutus / Cardano.
    personality : str
        Learning personality chosen in the UI (e.g. "Expressive").
    level : str
        Skill level ("Beginner" / "Intermediate" / "Advanced").
    topic : str
        Topic the answer should focus on.

    Returns
    -------
    str
        The model's answer with the instruction prompt stripped off.
    """
    # Construct a structured prompt incorporating user selections.
    full_prompt = (
        f"You are a Plutus AI Assistant tailored for a {personality} learner "
        f"at {level} level, focusing on {topic}. Provide a clear, concise, "
        f"and tailored explanation or answer to the following: {prompt}"
    )

    # Fix: follow the model's actual device instead of hard-coding "cuda",
    # so this also works when the model was placed on CPU.
    inputs = tokenizer(full_prompt, return_tensors="pt").to(model.device)

    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=250,
            temperature=0.1,
            top_p=0.1,
            do_sample=True,
            eos_token_id=tokenizer.eos_token_id,
            # Fix: many causal-LM tokenizers define no pad token; passing
            # pad_token_id=None makes generate() warn or fail, so fall back
            # to the EOS token in that case.
            pad_token_id=(
                tokenizer.pad_token_id
                if tokenizer.pad_token_id is not None
                else tokenizer.eos_token_id
            ),
        )

    # Fix: stripping the prompt via str.startswith is fragile because
    # detokenization need not reproduce the prompt byte-for-byte. Decode
    # only the newly generated tokens instead.
    prompt_len = inputs["input_ids"].shape[-1]
    response = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)

    return response.strip()
45
# ---- Gradio UI -------------------------------------------------------------
with gr.Blocks(theme="default") as iface:
    gr.Markdown(
        """
        # Cardano Plutus AI Assistant
        Ask questions about Plutus smart contracts or the Cardano blockchain.
        Select your learning personality, level, and topic to get a tailored response.
        """
    )

    # Learner-profile selectors, laid out side by side.
    with gr.Row():
        personality_dd = gr.Dropdown(
            label="Select Your Learning Personality",
            choices=["Dyslexic", "Autistic", "Expressive"],
            value="Expressive",
        )
        level_dd = gr.Dropdown(
            label="Select Your Skill Level",
            choices=["Beginner", "Intermediate", "Advanced"],
            value="Beginner",
        )
        topic_dd = gr.Dropdown(
            label="Select Topic",
            choices=["Plutus Basics", "Smart Contracts", "Cardano Blockchain"],
            value="Plutus Basics",
        )

    # Free-form question input plus submit / answer widgets.
    question_box = gr.Textbox(
        label="Enter Your Prompt",
        lines=4,
        placeholder="Ask about Plutus smart contracts or Cardano blockchain...",
    )
    ask_button = gr.Button("Submit")
    answer_box = gr.Textbox(label="Model Response")

    # Wire the button to the generation function.
    ask_button.click(
        fn=generate_response,
        inputs=[question_box, personality_dd, level_dd, topic_dd],
        outputs=answer_box,
    )

# Launch the app (Hugging Face Spaces handles this automatically).
if __name__ == "__main__":
    iface.launch(server_name="0.0.0.0", server_port=7860)