willco-afk committed on
Commit
f94547c
·
1 Parent(s): 7c69e90

Updated app.py to use Hugging Face inference

Browse files
Files changed (1) hide show
  1. app.py +10 -16
app.py CHANGED
@@ -1,23 +1,17 @@
1
  import gradio as gr
2
- from tensorflow.keras.models import load_model
3
- import json
4
 
5
- # Load model and tokenizer
6
- model = load_model('my_model.h5')
7
 
8
- with open('tokenizer.json', 'r') as f:
9
- tokenizer = json.load(f)
 
 
 
10
 
11
- # Define prediction function
12
- def predict(text):
13
- # Implement your tokenization and model prediction logic here
14
- # Example:
15
- tokens = tokenizer.texts_to_sequences([text])
16
- prediction = model.predict(tokens)
17
- return prediction
18
-
19
- # Set up Gradio interface
20
  iface = gr.Interface(fn=predict, inputs="text", outputs="text")
21
 
22
- # Launch the interface
23
  iface.launch()
 
1
  import gradio as gr
2
+ from huggingface_hub import InferenceApi
 
3
 
4
# Load the model through the Hugging Face Inference API
# (swap "willco-afk/languages" for your own repo id).
# NOTE(review): InferenceApi is deprecated in recent huggingface_hub
# releases in favor of InferenceClient — confirm the installed version.
inference = InferenceApi(repo_id="willco-afk/languages")
6
 
7
# Prediction function wired into the Gradio interface
def predict(input_text):
    """Forward *input_text* to the hosted model and return its raw response.

    The response shape is whatever the Inference API emits for this repo —
    presumably JSON-decoded; verify against the model's pipeline type.
    """
    # Delegate the actual inference to the hosted endpoint
    return inference(input_text)
12
 
13
# Wire the prediction function into a simple text-in / text-out UI
iface = gr.Interface(fn=predict, inputs="text", outputs="text")

# Start serving the interface (optional when hosted on Spaces,
# which launches the app automatically)
iface.launch()