jeevana commited on
Commit
6ad9b43
·
verified ·
1 Parent(s): 6c58d5f

Rename app1.py to app.py

Browse files
Files changed (2) hide show
  1. app.py +43 -0
  2. app1.py +0 -14
app.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
import torch
from transformers import GPT2Tokenizer, GPT2LMHeadModel

# Base GPT-2 tokenizer (kept for parity with the original script; inference
# below uses the fine-tuned tokenizer, not this one).
checkpoint = "gpt2"
tokenizer = GPT2Tokenizer.from_pretrained(checkpoint)

# Fine-tuned model and matching tokenizer for email subject-line generation.
my_model = GPT2LMHeadModel.from_pretrained("jeevana/EmailSubjectLineGeneration")
my_tokenizer = GPT2Tokenizer.from_pretrained("jeevana/EmailSubjectLineGeneration")
12
+
13
def generate_response(model, tokenizer, prompt):
    """Generate a short continuation of *prompt* with *model*.

    Args:
        model: A causal LM exposing ``generate`` (e.g. ``GPT2LMHeadModel``).
        tokenizer: Tokenizer exposing ``encode``/``decode`` and an
            ``eos_token_id`` attribute.
        prompt: Input text; silently truncated to 1000 tokens.

    Returns:
        Decoded text of the single generated sequence (prompt included,
        special tokens stripped).
    """
    encoded = tokenizer.encode(
        prompt, return_tensors="pt", truncation=True, max_length=1000
    )
    # No padding is present, so every position is attended to; EOS doubles
    # as the pad token, the usual convention for GPT-2 models.
    mask = torch.ones_like(encoded)
    generated = model.generate(
        encoded,
        max_new_tokens=15,
        min_new_tokens=1,
        num_return_sequences=1,
        attention_mask=mask,
        pad_token_id=tokenizer.eos_token_id,
    )
    return tokenizer.decode(generated[0], skip_special_tokens=True)
28
+
29
def predict(input):
    """Gradio callback: produce a subject line for the given email body.

    Args:
        input: Email text entered in the UI textbox. (Name kept for
            interface stability even though it shadows the builtin.)

    Returns:
        The generated text (prompt plus up to 15 new tokens).
    """
    # Bug fix: the original printed an undefined name ``pipeline`` here,
    # which raised NameError on every invocation.
    prediction = generate_response(my_model, my_tokenizer, input)
    # Debug trace; the original label said "type of response" but printed
    # the input's type — label corrected to match the value.
    print("type of input:", type(input))
    return prediction
34
+
35
+
36
# Wire the predictor into a minimal Gradio UI: a large textbox for the
# email body in, a small textbox for the generated subject line out.
email_box = gr.Textbox(label="Email", lines=12)
subject_box = gr.Textbox(label="Subject", lines=3)
app = gr.Interface(
    fn=predict,
    inputs=[email_box],
    outputs=[subject_box],
    title="EmailSubjectLineGeneration",
    description="EmailSubjectLineGeneration",
)
app.launch(share=True, debug=True)
42
+
43
+
app1.py DELETED
@@ -1,14 +0,0 @@
1
- import gradio as gr
2
- import torch
3
- from transformers import GPT2Tokenizer, GPT2LMHeadModel
4
-
5
- checkpoint = "gpt2"
6
- tokenizer = GPT2Tokenizer.from_pretrained(checkpoint)
7
-
8
-
9
- # Load the fine-tuned model and tokenizer
10
- my_model = GPT2LMHeadModel.from_pretrained("jeevana/GenerativeQnASystem")
11
- my_tokenizer = GPT2Tokenizer.from_pretrained("jeevana/GenerativeQnASystem")
12
-
13
-
14
-