PD03 committed on
Commit
e866dc6
·
verified ·
1 Parent(s): a3f5ecf

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -5
app.py CHANGED
@@ -1,11 +1,17 @@
 
1
  import requests
2
- import os
3
 
4
- HF_API_TOKEN = os.getenv("HF_API_TOKEN") # or paste explicitly for quick test
 
5
  API_URL = "https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-8B-Instruct"
 
6
  headers = {"Authorization": f"Bearer {HF_API_TOKEN}"}
7
 
8
- response = requests.post(API_URL, headers=headers, json={"inputs": "Test LLaMA-3 endpoint."})
 
 
 
 
 
9
 
10
- print("Status code:", response.status_code)
11
- print("Response:", response.text)
 
1
import os

import gradio as gr
import requests

# Prefer the environment variable so a real token never has to live in the
# source; the placeholder fallback keeps the original quick-test behavior.
# NOTE(review): never commit a real token — set HF_API_TOKEN on the host.
HF_API_TOKEN = os.environ.get("HF_API_TOKEN", "your_token_here")

# Hugging Face Inference API endpoint for the LLaMA-3 8B instruct model.
API_URL = "https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-8B-Instruct"

headers = {"Authorization": f"Bearer {HF_API_TOKEN}"}
9
 
10
def call_llama3(prompt):
    """Send *prompt* to the hosted LLaMA-3 model and return its completion.

    Returns the generated text on success, or a human-readable
    "Error: ..." string on any HTTP or payload problem — the Gradio text
    output displays either one, so the handler must never raise.
    """
    try:
        # A timeout keeps the UI from hanging forever if the API stalls.
        response = requests.post(
            API_URL,
            headers=headers,
            json={"inputs": prompt},
            timeout=60,
        )
    except requests.RequestException as exc:
        return f"Error: request failed - {exc}"
    if response.status_code != 200:
        return f"Error: {response.status_code} - {response.text}"
    try:
        # The Inference API returns a list of {"generated_text": ...} dicts;
        # guard the indexing so a malformed payload yields an error string
        # instead of an unhandled exception.
        return response.json()[0]['generated_text']
    except (ValueError, KeyError, IndexError, TypeError):
        return f"Error: unexpected response payload - {response.text}"
16
# Wire the model call into a minimal Gradio UI: one text box in, one text
# box out, then start the server.
demo = gr.Interface(fn=call_llama3, inputs="text", outputs="text")
demo.launch()