Too8kio committed on
Commit
2c850f3
·
verified ·
1 Parent(s): f19595d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -6
app.py CHANGED
@@ -1,23 +1,23 @@
1
  import gradio as gr
2
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
3
 
4
# Instruction-tuned Mistral 7B checkpoint used for every chat completion.
model_name = "mistralai/Mistral-7B-Instruct-v0.1"

# Load the tokenizer and weights once at startup; device_map="auto" lets
# accelerate place the model on whatever hardware is available.
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto")

# A single shared text-generation pipeline serves all requests.
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
10
 
11
def chat(user_input, history=None):
    """Answer *user_input* with the shared pipeline and record the turn.

    Parameters:
        user_input: the user's message; wrapped in Mistral's [INST] tags.
        history: list of (user, assistant) tuples. Defaults to a fresh
            list per call rather than a shared mutable default.

    Returns:
        (history, history): the updated history twice, preserving the
        original two-output contract.
    """
    # Fix: `history=[]` as a default is one list shared across ALL calls,
    # silently leaking one conversation into the next. Use the None sentinel.
    if history is None:
        history = []
    prompt = f"[INST] {user_input} [/INST]"
    result = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7)
    generated = result[0]["generated_text"]
    # Strip only the leading echoed prompt; the original
    # `.replace(prompt, "")` would also delete any later occurrence of the
    # prompt text inside the model's reply.
    if generated.startswith(prompt):
        response = generated[len(prompt):].strip()
    else:
        response = generated.replace(prompt, "").strip()
    history.append((user_input, response))
    return history, history
17
 
18
# Build the chat UI and start the web server.
# NOTE(review): gr.ChatInterface normally expects fn(message, history) -> str,
# while chat returns (history, history) — verify the rendering is as intended.
demo = gr.ChatInterface(
    fn=chat,
    title="🤖 Mobe 2.0 - Personal AI by Ronobir",
    description="Powered by Mistral 7B | 100% Free",
    theme="soft",
)
demo.launch()
 
1
  import gradio as gr
2
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
3
 
4
# Lightweight open checkpoint (Falcon-RW 1B): no gated access needed and
# small enough to load without device mapping.
model_name = "tiiuae/falcon-rw-1b"

# Load the tokenizer and weights once at startup, then wrap them in a
# text-generation pipeline shared by every chat request.
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
10
 
11
def chat(user_input, history=None):
    """Answer *user_input* with the shared pipeline and record the turn.

    Parameters:
        user_input: the user's message, passed verbatim as the prompt.
        history: list of (user, assistant) tuples. Defaults to a fresh
            list per call rather than a shared mutable default.

    Returns:
        (history, history): the updated history twice, preserving the
        original two-output contract.
    """
    # Fix: `history=[]` as a default is one list shared across ALL calls,
    # silently leaking one conversation into the next. Use the None sentinel.
    if history is None:
        history = []
    # `f"{user_input}"` was a redundant f-string; str() keeps the same
    # coercion without the wrapper.
    prompt = str(user_input)
    result = pipe(prompt, max_new_tokens=200, do_sample=True, temperature=0.8)
    generated = result[0]["generated_text"]
    # Strip only the leading echoed prompt; the original
    # `.replace(prompt, "")` would also delete any later occurrence of the
    # prompt text inside the model's reply.
    if generated.startswith(prompt):
        response = generated[len(prompt):].strip()
    else:
        response = generated.replace(prompt, "").strip()
    history.append((user_input, response))
    return history, history
17
 
18
# Build the chat UI and start the web server.
# NOTE(review): gr.ChatInterface normally expects fn(message, history) -> str,
# while chat returns (history, history) — verify the rendering is as intended.
demo = gr.ChatInterface(
    fn=chat,
    title="🤖 Mobe 2.0 Public AI (No login needed)",
    description="Now using Falcon 1B - 100% free",
    theme="soft",
)
demo.launch()