loocorez committed on
Commit
5983cf2
·
verified ·
1 Parent(s): 5f21ab8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -14
app.py CHANGED
@@ -1,25 +1,26 @@
1
  import gradio as gr
2
- from transformers import AutoTokenizer, AutoModelForCausalLM
3
 
4
# Load your model
# model_name is the Hugging Face Hub repo id; from_pretrained downloads
# (or reuses the local cache of) the tokenizer files and causal-LM weights
# at module import time, so startup cost is paid once.
model_name = "loocorez/reverse-text-warmup"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
8
 
9
def reverse_text(input_text):
    """Run the local causal LM on *input_text* and return the decoded text.

    The prompt is tokenized, up to 100 total tokens (prompt included) are
    generated greedily via ``model.generate``, and the first sequence is
    decoded back to a plain string with special tokens stripped.
    """
    encoded = tokenizer(input_text, return_tensors="pt")
    generated = model.generate(**encoded, max_length=100)
    # generate() returns a batch; decode the single sequence we asked for.
    return tokenizer.decode(generated[0], skip_special_tokens=True)
 
 
 
 
 
15
 
16
# Create Gradio interface
# Collect the UI configuration in one place, then build and serve it.
_ui_config = dict(
    fn=reverse_text,
    inputs=gr.Textbox(label="Input Text"),
    outputs=gr.Textbox(label="Reversed Text"),
    title="Reverse Text Model Demo",
    description="Test the reverse-text-warmup model",
)
demo = gr.Interface(**_ui_config)

demo.launch()
 
1
  import gradio as gr
2
+ from huggingface_hub import InferenceClient
3
 
4
# Use Inference API instead of loading model locally
# (generation runs on Hugging Face's hosted infrastructure, so the Space
# needs no local weights download and no GPU).
client = InferenceClient("loocorez/reverse-text-warmup")
 
 
6
 
7
def reverse_text(input_text):
    """Generate text for *input_text* through the hosted Inference API.

    Returns the model's generation on success; on any failure (network,
    rate limiting, cold model, ...) returns an ``"Error: ..."`` string so
    the Gradio UI degrades gracefully instead of crashing.
    """
    try:
        # Use the inference API
        generated = client.text_generation(
            input_text,
            max_new_tokens=100,
            temperature=0.7,
        )
    except Exception as e:  # broad by design: surface the error in the UI
        return f"Error: {str(e)}"
    return generated
18
 
 
19
# Name the UI components first, then assemble the interface from them.
_input_box = gr.Textbox(label="Input Text")
_output_box = gr.Textbox(label="Output")

demo = gr.Interface(
    fn=reverse_text,
    inputs=_input_box,
    outputs=_output_box,
    title="Reverse Text Model Demo",
)

demo.launch()