Atulit23 commited on
Commit
5d1128f
·
verified ·
1 Parent(s): 4be87b6

Upload folder using huggingface_hub

Browse files
Files changed (1) hide show
  1. app.py +6 -5
app.py CHANGED
@@ -5,13 +5,14 @@ model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-base")
5
  tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-base")
6
 
7
  def index(prompt):
8
- inputs = tokenizer(prompt, return_tensors="pt")
 
9
  outputs = model.generate(**inputs)
10
 
11
- print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
12
-
13
- return tokenizer.batch_decode(outputs, skip_special_tokens=True)
14
 
 
15
 
16
  inputs_image_url = [
17
  gr.Textbox(type="text", label="Topic Name"),
@@ -32,4 +33,4 @@ interface_image_url = gr.Interface(
32
  gr.TabbedInterface(
33
  [interface_image_url],
34
  tab_names=['Some inference']
35
- ).queue().launch()
 
5
  tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-base")
6
 
7
def index(prompt):
    """Run FLAN-T5 text generation on *prompt* and return the decoded outputs.

    The prompt is tokenized with truncation at 512 tokens (the model's
    maximum sequence length), generated from, decoded with special tokens
    stripped, printed for debugging, and returned as a list of strings.
    """
    # Truncate the input so it fits within the model's 512-token limit.
    encoded = tokenizer(
        prompt,
        return_tensors="pt",
        max_length=512,
        truncation=True,
    )
    generated = model.generate(**encoded)
    # Decode once, log, and hand the same list back to the caller.
    texts = tokenizer.batch_decode(generated, skip_special_tokens=True)
    print(texts)
    return texts
16
 
17
  inputs_image_url = [
18
  gr.Textbox(type="text", label="Topic Name"),
 
33
  gr.TabbedInterface(
34
  [interface_image_url],
35
  tab_names=['Some inference']
36
+ ).launch()