RoAr777 committed on
Commit
ab493a1
·
1 Parent(s): 0564636

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +17 -26
app.py CHANGED
@@ -1,27 +1,18 @@
1
- from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
2
- import time
3
  import gradio as gr
4
-
5
-
6
-
7
-
8
-
9
-
10
-
11
- prompt=''' Review the Following text as a human, who is tasked to Extract `Code Snippets` ,if any. If there are no Code Snippets in the below Text then return No CODE HIDDEN:
12
-
13
- "{}"
14
- '''
15
- def reply(message, history):
16
- model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-ul2")
17
- tokenizer = AutoTokenizer.from_pretrained("google/flan-ul2")
18
- m=message.replace('\n','')
19
- m=m.replace('\t','')
20
- inputs = tokenizer(prompt.format(m), return_tensors="pt")
21
- outputs = model.generate(**inputs)
22
- code=tokenizer.batch_decode(outputs, skip_special_tokens=True)
23
- for i in range(len(code)):
24
- time.sleep(0.3)
25
- yield code[: i+1]
26
-
27
- gr.ChatInterface(reply).queue().launch()
 
 
 
1
  import gradio as gr
2
+ from transformers import AutoModelForCausalLM, AutoTokenizer
3
+ # Load pretrained model and tokenizer
4
+ tokenizer = AutoTokenizer.from_pretrained("flan-t5-large")
5
+ model = AutoModelForCausalLM.from_pretrained("flan-t5-large")
6
+
7
def chatbot_model(prompt):
    """Generate a single chat response for *prompt* with the module-level model.

    Args:
        prompt: User input text from the Gradio textbox.

    Returns:
        The decoded model response as a plain string.
    """
    # T5-style tokenizers already append EOS during encoding; manually
    # concatenating tokenizer.eos_token (a DialoGPT-era idiom) would
    # duplicate it, so the prompt is encoded as-is.
    input_ids = tokenizer.encode(prompt, return_tensors='pt')
    output = model.generate(
        input_ids,
        max_length=1000,
        pad_token_id=tokenizer.eos_token_id,
    )

    # Decoder-only models echo the prompt at the start of `output`;
    # encoder-decoder models (like flan-t5) emit only the response.
    # Strip the prompt only when it is actually present instead of
    # slicing by prompt length unconditionally, which would truncate
    # a seq2seq model's answer.
    generated = output[0].tolist()
    prompt_ids = input_ids[0].tolist()
    if generated[: len(prompt_ids)] == prompt_ids:
        generated = generated[len(prompt_ids):]

    return tokenizer.decode(generated, skip_special_tokens=True)
16
+
17
# Expose the chatbot as a minimal text-in / text-out web UI and start serving.
iface = gr.Interface(
    fn=chatbot_model,
    inputs="text",
    outputs="text",
)
iface.launch()