sdgzero2ai commited on
Commit
4773d6b
·
verified ·
1 Parent(s): 72fc364

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +45 -0
app.py ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Hugging Face Hub id of the model this Space serves.
MODEL_NAME = "tiiuae/falcon-7b-instruct"

# Load the tokenizer and the model once, at import time, so every chat
# request reuses the same in-memory weights.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, torch_dtype="auto")

# Falcon ships without a dedicated pad token; fall back to the EOS token so
# that padding/truncation inside the pipeline does not raise.
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token
if model.config.pad_token_id is None:
    model.config.pad_token_id = tokenizer.eos_token_id

# Shared text-generation pipeline used by the chat handler below.
text_gen = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_length=512,    # total token budget: prompt + completion
    truncation=True,   # clip over-long prompts instead of erroring
    do_sample=True,
    temperature=0.7,
)
def chat(user_input):
    """Generate a model reply for ``user_input`` and return only the new text.

    Fixes two defects in the original version:
      * it returned ``outputs[0]["generated_text"]`` verbatim, which includes
        the user's own prompt at the front, so the chat box echoed the
        question before the answer — ``return_full_text=False`` tells the
        text-generation pipeline to return just the completion;
      * it had no guard for empty input.

    Args:
        user_input: Raw prompt string from the Gradio textbox.

    Returns:
        The generated completion as a stripped string ("" for blank input).
    """
    # Guard: a blank prompt yields no useful completion, and zero-length
    # input can trip up tokenization.
    if not user_input or not user_input.strip():
        return ""
    outputs = text_gen(
        user_input,
        max_length=512,          # same budget as the pipeline was built with
        truncation=True,         # clip over-long prompts
        return_full_text=False,  # fix: drop the echoed prompt from the reply
    )
    return outputs[0]["generated_text"].strip()
# Minimal Gradio UI: a single textbox in, a single textbox out, wired to
# the chat() handler defined above.
demo = gr.Interface(
    fn=chat,
    inputs="text",
    outputs="text",
    title="Falcon-7B-Instruct Chat (Example)",
    description="A chat interface for Falcon-7B-Instruct.",
)

# Only start the web server when this file is executed directly,
# not when it is imported as a module.
if __name__ == "__main__":
    demo.launch()