anshu-man853 committed on
Commit
47102a4
·
verified ·
1 Parent(s): 5953938

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -30
app.py CHANGED
@@ -1,13 +1,10 @@
1
  import streamlit as st
2
- import requests
3
- import json
4
- import os
5
 
6
- # Hugging Face API details
7
- API_URL = "https://api-inference.huggingface.co/models/meta-llama/Llama-Guard-3-1B"
8
- API_KEY = os.getenv("HF_API_KEY") # Fetch API key from environment variable
9
-
10
- HEADERS = {"Authorization": f"Bearer {API_KEY}"}
11
 
12
  # Streamlit UI
13
  st.title("AI Safe Content Checker Tool")
@@ -16,30 +13,15 @@ st.write("Enter text below, and the model will check if it's safe.")
16
  # User input
17
  user_input = st.text_area("Enter your text here:")
18
 
19
- def query(payload):
20
- try:
21
- response = requests.post(API_URL, headers=HEADERS, json=payload)
22
-
23
- # Check response status
24
- if response.status_code != 200:
25
- st.error(f"API Error {response.status_code}: {response.text}")
26
- return None
27
-
28
- return response.json() # Properly parse JSON
29
-
30
- except requests.exceptions.RequestException as e:
31
- st.error(f"Request failed: {e}")
32
- return None
33
 
34
  if st.button("Check Content"):
35
  if user_input:
36
- payload = {"inputs": user_input} # Llama Guard expects this format
37
- response = query(payload)
38
-
39
- if response:
40
- st.subheader("Moderation Result:")
41
- st.json(response) # Display result as formatted JSON
42
- else:
43
- st.error("No valid response from the API.")
44
  else:
45
  st.warning("Please enter some text.")
 
1
  import streamlit as st
2
+ from transformers import AutoModelForCausalLM, AutoTokenizer
 
 
3
 
4
+ # Load model and tokenizer
5
+ MODEL_NAME = "meta-llama/Llama-Guard-3-1B"
6
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
7
+ model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
 
8
 
9
  # Streamlit UI
10
  st.title("AI Safe Content Checker Tool")
 
13
  # User input
14
  user_input = st.text_area("Enter your text here:")
15
 
16
+ def check_content(text):
17
+ inputs = tokenizer(text, return_tensors="pt")
18
+ outputs = model.generate(**inputs, max_new_tokens=50)
19
+ return tokenizer.decode(outputs[0], skip_special_tokens=True)
 
 
 
 
 
 
 
 
 
 
20
 
21
  if st.button("Check Content"):
22
  if user_input:
23
+ result = check_content(user_input)
24
+ st.subheader("Moderation Result:")
25
+ st.write(result)
 
 
 
 
 
26
  else:
27
  st.warning("Please enter some text.")