Remostart committed on
Commit
8d2e4a7
·
verified ·
1 Parent(s): 77fe1f0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -12
app.py CHANGED
@@ -36,14 +36,14 @@ TOPICS = [
36
  "Smart Contracts",
37
  "Versioning in Plutus",
38
  "Monad",
39
- "Other"
40
  ]
41
 
42
- # Improved prompt template for autism-friendly, focused response
43
  def create_prompt(personality, level, topic):
44
- return f"Explain {topic} in Plutus for a {level} programmer with {personality} traits. Use only basic words and clear examples, like comparing validation to a lock. Avoid jargon like 'blockchain,' 'ledger,' 'Haskell,' 'decentralized,' 'formal verification,' or 'immutability.' Use short sentences (8 words or less). Use exactly 3 bullet points for key ideas. Each point must be under 15 words. Include one simple analogy. Structure the response: 2-sentence introduction, 3 bullet points, 1-sentence conclusion. For Autistic traits, use literal language, avoid abstract terms, and ensure a predictable format. Do not repeat the topic or prompt. Do not simulate a conversation, ask questions, or list unrelated terms. Use a direct, instructional tone without 'I,' 'we,' or conversational phrases. End with a summary sentence on the topic’s importance. Add extra line breaks between sections for readability."
45
 
46
- # Response function with improved parameters
47
  def generate_response(personality, level, topic):
48
  try:
49
  logger.info("Processing selections...")
@@ -56,16 +56,15 @@ def generate_response(personality, level, topic):
56
  generation_kwargs = {
57
  **inputs,
58
  "streamer": streamer,
59
- "max_new_tokens": 400, # Increased to avoid cut-offs
60
  "do_sample": True,
61
- "temperature": 0.2, # Lowered for more focused output
62
- "top_p": 0.2, # Lowered for more focused output
63
- "repetition_penalty": 1.5, # Increased to prevent repetition
64
  "eos_token_id": tokenizer.eos_token_id,
65
  "pad_token_id": tokenizer.pad_token_id
66
  }
67
 
68
- # Run generation in a separate thread
69
  thread = Thread(target=model.generate, kwargs=generation_kwargs)
70
  thread.start()
71
 
@@ -87,17 +86,17 @@ with gr.Blocks(title="Cardano Plutus AI Assistant") as demo:
87
  personality = gr.Dropdown(
88
  choices=PERSONALITY_TYPES,
89
  label="Personality Type",
90
- value="Autistic"
91
  )
92
  level = gr.Dropdown(
93
  choices=PROGRAMMING_LEVELS,
94
  label="Programming Level",
95
- value="Beginner"
96
  )
97
  topic = gr.Dropdown(
98
  choices=TOPICS,
99
  label="Topic",
100
- value="Introduction to Validation"
101
  )
102
 
103
  generate_btn = gr.Button("Generate")
 
36
  "Smart Contracts",
37
  "Versioning in Plutus",
38
  "Monad",
39
+ "Other" # Add more as needed
40
  ]
41
 
42
+ # Prompt template to guide the model
43
  def create_prompt(personality, level, topic):
44
+ return f"User: Teach me about {topic} in Plutus. I am a {level} programmer with {personality} traits. Make the explanation tailored to my needs, easy to understand, and engaging.\nAssistant:"
45
 
46
+ # Response function with proper streaming
47
  def generate_response(personality, level, topic):
48
  try:
49
  logger.info("Processing selections...")
 
56
  generation_kwargs = {
57
  **inputs,
58
  "streamer": streamer,
59
+ "max_new_tokens": 500,
60
  "do_sample": True,
61
+ "temperature": 0.4,
62
+ "top_p": 0.5,
 
63
  "eos_token_id": tokenizer.eos_token_id,
64
  "pad_token_id": tokenizer.pad_token_id
65
  }
66
 
67
+ # Run generation in a separate thread to avoid blocking
68
  thread = Thread(target=model.generate, kwargs=generation_kwargs)
69
  thread.start()
70
 
 
86
  personality = gr.Dropdown(
87
  choices=PERSONALITY_TYPES,
88
  label="Personality Type",
89
+ value="Dyslexic" # Default
90
  )
91
  level = gr.Dropdown(
92
  choices=PROGRAMMING_LEVELS,
93
  label="Programming Level",
94
+ value="Beginner" # Default
95
  )
96
  topic = gr.Dropdown(
97
  choices=TOPICS,
98
  label="Topic",
99
+ value="What is Plutus" # Default
100
  )
101
 
102
  generate_btn = gr.Button("Generate")