Bofandra committed on
Commit
22fdbb4
·
verified ·
1 Parent(s): ba01cff

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +60 -0
app.py ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import gradio as gr
3
+ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
4
+
5
+ torch.random.manual_seed(0)
6
+ model = AutoModelForCausalLM.from_pretrained(
7
+ "microsoft/Phi-3-mini-4k-instruct",
8
+ device_map="cuda",
9
+ torch_dtype="auto",
10
+ trust_remote_code=True,
11
+ )
12
+
13
+ tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-4k-instruct")
14
+
15
def generate_letter(date, time, purpose, place, sender, receiver):
    """Generate a formal letter from the given details with the Phi-3 model.

    Args:
        date: Date string to include in the letter.
        time: Time string to include in the letter.
        purpose: Purpose of the letter.
        place: Place/venue to mention.
        sender: Name of the sender.
        receiver: Name of the receiver.

    Returns:
        The generated letter text (str), without the prompt echoed back.
    """
    prompt = (f"Write a formal letter with the following details:\n"
              f"Date: {date}\nTime: {time}\nPurpose: {purpose}\nPlace: {place}\n"
              f"Sender: {sender}\nReceiver: {receiver}\n\nLetter:")

    messages = [
        {"role": "system", "content": "You are a helpful AI assistant."},
        {"role": "user", "content": prompt},
    ]

    # Fix: the original rebuilt the text-generation pipeline on EVERY call,
    # which is expensive. Build it once and cache it on the function object.
    if not hasattr(generate_letter, "_pipe"):
        generate_letter._pipe = pipeline(
            "text-generation",
            model=model,
            tokenizer=tokenizer,
        )

    generation_args = {
        "max_new_tokens": 500,
        "return_full_text": False,   # return only the completion, not the prompt
        "temperature": 0.0,          # ignored when do_sample=False; kept for parity
        "do_sample": False,          # greedy decoding → deterministic letters
    }

    output = generate_letter._pipe(messages, **generation_args)
    return output[0]['generated_text']
40
+
41
# Build the Gradio UI: six free-text fields in, one generated letter out.
_field_labels = ["Date", "Time", "Purpose", "Place", "Sender", "Receiver"]

iface = gr.Interface(
    fn=generate_letter,
    inputs=[gr.Textbox(label=label) for label in _field_labels],
    outputs=gr.Textbox(label="Generated Letter"),
    title="Letter Generator",
    description="Enter the details and generate a formal letter automatically."
)

# Start the web server only when run as a script (not when imported).
if __name__ == "__main__":
    iface.launch()