loocorez committed on
Commit
02bcabe
·
verified ·
1 Parent(s): 01de717

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -16
app.py CHANGED
@@ -1,27 +1,25 @@
1
  import gradio as gr
2
from huggingface_hub import InferenceClient
import os

# The access token is supplied via the HF_TOKEN environment variable
# (typically configured as a Space secret); it may be None for public use.
token = os.getenv("HF_TOKEN")

# Remote inference client bound to the fine-tuned checkpoint.
client = InferenceClient("loocorez/reverse-text-warmup", token=token)
 
 
7
 
8
def reverse_text(input_text):
    """Send *input_text* to the hosted model and return its completion.

    Any failure (network, auth, model error) is reported back to the UI
    as an "Error: ..." string instead of being raised, so the interface
    never crashes on a bad request.
    """
    try:
        # Use the inference API
        generated = client.text_generation(
            input_text,
            max_new_tokens=100,
            temperature=0.7,
        )
    except Exception as e:
        return f"Error: {str(e)}"
    return generated
19
 
 
20
  demo = gr.Interface(
21
  fn=reverse_text,
22
  inputs=gr.Textbox(label="Input Text"),
23
- outputs=gr.Textbox(label="Output"),
24
- title="Reverse Text Model Demo"
 
25
  )
26
 
27
  demo.launch()
 
1
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the fine-tuned checkpoint and its tokenizer once at startup so
# every request reuses the same in-memory model instead of reloading.
model_name = "loocorez/reverse-text-warmup"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
 
9
def reverse_text(input_text):
    """Run the local model on *input_text* and return the decoded generation.

    The prompt is tokenized, fed to the causal LM, and only the newly
    generated tokens are decoded back into a string.
    """
    inputs = tokenizer(input_text, return_tensors="pt")
    # Bug fix: the original used max_length=100, which counts the prompt
    # tokens too — inputs near/over 100 tokens produced truncated or empty
    # generations. max_new_tokens bounds only the generated continuation.
    outputs = model.generate(**inputs, max_new_tokens=100)
    # Decode only the generated suffix so the prompt is not echoed back
    # in the demo's output box (generate returns prompt + continuation).
    prompt_len = inputs["input_ids"].shape[1]
    result = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)
    return result
 
 
 
 
 
15
 
16
# Wire the inference function into a single-textbox Gradio UI.
demo = gr.Interface(
    fn=reverse_text,
    inputs=gr.Textbox(label="Input Text"),
    outputs=gr.Textbox(label="Reversed Text"),
    title="Reverse Text Model Demo",
    description="Test the reverse-text-warmup model",
)

# Start the web server (blocks until the app is stopped).
demo.launch()