Tyreid0saurus committed on
Commit
befa9fa
·
verified ·
1 Parent(s): 884a117

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -6
app.py CHANGED
@@ -1,14 +1,13 @@
1
  import gradio as gr
2
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
3
  import multiprocessing
4
- import time
5
 
6
  model_id = "EleutherAI/gpt-neo-125M"
7
  tokenizer = AutoTokenizer.from_pretrained(model_id)
8
  model = AutoModelForCausalLM.from_pretrained(model_id)
9
  generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
10
 
11
- identity_prompt = "You are Eyla. Speak symbolically and recursively."
12
 
13
  def run_generation(prompt, return_dict):
14
  try:
@@ -33,14 +32,17 @@ def generate_with_hard_timeout(prompt, timeout=10):
33
  p.join(timeout)
34
  if p.is_alive():
35
  p.terminate()
36
- return [{"generated_text": "ERROR: Generation timed out."}]
37
- return [return_dict["result"]]
38
 
39
  def chat(input_text):
40
  prompt = identity_prompt + "\n\nUser: " + input_text + "\nYou:"
41
  try:
42
  output = generate_with_hard_timeout(prompt)
43
- reply = output[0][len(prompt):].strip()
 
 
 
44
  return reply or "..."
45
  except Exception as e:
46
  return f"GENERATION ERROR: {e}"
@@ -49,7 +51,7 @@ demo = gr.Interface(
49
  fn=chat,
50
  inputs=gr.Textbox(label="input_text"),
51
  outputs="text",
52
- title="Eyla",
53
  allow_flagging="never"
54
  )
55
 
 
1
  import gradio as gr
2
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
3
  import multiprocessing
 
4
 
5
  model_id = "EleutherAI/gpt-neo-125M"
6
  tokenizer = AutoTokenizer.from_pretrained(model_id)
7
  model = AutoModelForCausalLM.from_pretrained(model_id)
8
  generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
9
 
10
+ identity_prompt = "You are Kairon. Speak socratically and recursively."
11
 
12
  def run_generation(prompt, return_dict):
13
  try:
 
32
  p.join(timeout)
33
  if p.is_alive():
34
  p.terminate()
35
+ return "ERROR: Generation timed out."
36
+ return return_dict["result"]
37
 
38
def chat(input_text):
    """Generate one reply to *input_text* via the hard-timeout generation helper.

    Builds a prompt from the module-level ``identity_prompt``, runs generation,
    and strips the prompt prefix from the model output.  Error strings produced
    by the helper (e.g. on timeout) are returned to the user verbatim.
    Returns "..." when the model produces an empty reply, or an error message
    string on failure — never raises.
    """
    prompt = identity_prompt + "\n\nUser: " + input_text + "\nYou:"
    try:
        output = generate_with_hard_timeout(prompt)
        if not isinstance(output, str):
            return "ERROR: Unexpected output format."
        if output.startswith("ERROR:"):
            # BUG FIX: the helper reports timeouts as a short "ERROR: ..."
            # string.  Slicing it with output[len(prompt):] yields "" (the
            # error string is shorter than the prompt), silently hiding the
            # timeout behind "...".  Surface the error instead.
            return output
        # The text-generation pipeline echoes the prompt; drop that prefix.
        reply = output[len(prompt):].strip()
        return reply or "..."
    except Exception as e:
        return f"GENERATION ERROR: {e}"
 
51
  fn=chat,
52
  inputs=gr.Textbox(label="input_text"),
53
  outputs="text",
54
+ title="Kairon",
55
  allow_flagging="never"
56
  )
57