data-ject committed on
Commit
98264f3
·
verified ·
1 Parent(s): 18c0f32

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -5
app.py CHANGED
@@ -1,17 +1,22 @@
1
import gradio as gr
from transformers import pipeline

import os

# Set up the Hugging Face API token.
# NOTE(review): os.getenv returns None when HF_TOKEN is unset — the pipeline
# call below would then run unauthenticated; confirm the Space always sets it.
hf_token = os.getenv("HF_TOKEN")

# Initialize the text generation pipeline.
# Loads facebook/chameleon-7b eagerly at module import time (this is the
# pre-change code that the commit replaces with explicit model/tokenizer loading).
pipe = pipeline("text-generation", model="facebook/chameleon-7b", token=hf_token)
 
 
10
 
11
# Function to generate response (pre-change version, removed by this commit).
def generate_response(input_text):
    """Generate a completion for *input_text* via the module-level `pipe`.

    Returns the `generated_text` field of the first (and only, since
    num_return_sequences=1) sequence; per pipeline semantics this string
    includes the original prompt.
    """
    # max_length=500 counts prompt tokens plus generated tokens.
    result = pipe(input_text, max_length=500, num_return_sequences=1)
    response = result[0]["generated_text"]
    return response
16
 
17
  # Custom CSS
 
1
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import os

# Set up the Hugging Face API token.
# NOTE(review): returns None when HF_TOKEN is unset — from_pretrained below
# would then run unauthenticated; verify the deployment sets this variable.
hf_token = os.getenv("HF_TOKEN")

# Load the model and tokenizer with the token.
# Both are loaded eagerly at import time; the 7B model is large, so startup
# is slow and memory-heavy. No device/dtype is specified, so this loads on
# CPU in the default dtype — TODO confirm that is intended for this Space.
model_name = "facebook/chameleon-7b"
tokenizer = AutoTokenizer.from_pretrained(model_name, token=hf_token)
model = AutoModelForCausalLM.from_pretrained(model_name, token=hf_token)
13
 
14
# Function to generate response
def generate_response(input_text):
    """Generate a text completion for ``input_text``.

    Tokenizes the prompt, runs generation on the module-level ``model``,
    and returns the decoded text. Because ``outputs[0]`` contains the
    prompt tokens as well, the returned string includes the original
    prompt (matching the behavior of the previous pipeline-based version).

    Parameters
    ----------
    input_text : str
        The prompt to complete.

    Returns
    -------
    str
        Decoded generation (prompt included, special tokens stripped).
    """
    inputs = tokenizer(input_text, return_tensors="pt")
    # Inference only — skip autograd bookkeeping.
    with torch.no_grad():
        # Fix: pass the full encoding (input_ids AND attention_mask).
        # The previous code forwarded only inputs.input_ids, which makes
        # generate() infer the mask — incorrect whenever padding is present
        # and the source of a runtime warning on every call.
        outputs = model.generate(
            **inputs,
            max_length=500,  # counts prompt tokens plus generated tokens
            num_return_sequences=1,
        )
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response
21
 
22
  # Custom CSS