lruizap committed on
Commit
3a1fad5
·
1 Parent(s): 114444e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -7
app.py CHANGED
@@ -4,18 +4,15 @@ import torch
4
  import gradio as gr
5
  from huggingface_hub import InferenceClient
6
 
7
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
8
-
9
  # chatgpt-gpt4-prompts-bart-large-cnn-samsum
10
- tokenizer = AutoTokenizer.from_pretrained(
11
- "Kaludi/chatgpt-gpt4-prompts-bart-large-cnn-samsum")
12
- model = AutoModelForSeq2SeqLM.from_pretrained(
13
- "Kaludi/chatgpt-gpt4-prompts-bart-large-cnn-samsum", from_tf=True)
14
 
15
  # zephyr
 
16
  # pipe = pipeline("text-generation", model="HuggingFaceH4/zephyr-7b-alpha",torch_dtype=torch.bfloat16, device_map="auto")
17
 
18
- def generateZep(inputuno, inputdos):
19
  prompt = inputuno
20
  promptdos = inputdos
21
 
 
4
  import gradio as gr
5
  from huggingface_hub import InferenceClient
6
 
 
 
7
  # chatgpt-gpt4-prompts-bart-large-cnn-samsum
8
+ tokenizer = AutoTokenizer.from_pretrained("Kaludi/chatgpt-gpt4-prompts-bart-large-cnn-samsum")
9
+ model = AutoModelForSeq2SeqLM.from_pretrained("Kaludi/chatgpt-gpt4-prompts-bart-large-cnn-samsum", from_tf=True)
 
 
10
 
11
  # zephyr
12
+ client = InferenceClient("HuggingFaceH4/zephyr-7b-alpha")
13
  # pipe = pipeline("text-generation", model="HuggingFaceH4/zephyr-7b-alpha",torch_dtype=torch.bfloat16, device_map="auto")
14
 
15
+ def generateZep(inputuno):
16
  prompt = inputuno
17
  promptdos = inputdos
18