# fined-model / app.py — Hugging Face Space
# nosadaniel, commit b07117b: remove HF authentication
import os

import gradio as gr
from huggingface_hub import InferenceClient
#from transformers import pipeline
from huggingface_hub.inference._providers import PROVIDER_OR_POLICY_T
def respond(
    message,
    history: list[dict[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
    #hf_token: gr.OAuthToken,
):
    """Stream a chat completion reply for the Gradio ChatInterface.

    Builds an OpenAI-style message list from the system prompt, the prior
    ``history`` (already in ``{"role", "content"}`` dict form because the
    interface uses ``type="messages"``), and the new user ``message``, then
    yields the progressively accumulated response text chunk by chunk.

    For more information on `huggingface_hub` Inference API support, see:
    https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
    """
    model = "meta-llama/Meta-Llama-3.1-8B-Instruct-LoRa:phishing-email-adJu"
    base_url = "https://api.tokenfactory.nebius.com/v1/"
    # SECURITY: an API key was committed to this file. Prefer the
    # NEBIUS_API_KEY environment variable; the hard-coded fallback is kept
    # only for backward compatibility and the exposed key should be revoked.
    api_key = os.environ.get(
        "NEBIUS_API_KEY",
        "v1.CmQKHHN0YXRpY2tleS1lMDBkMXh2ZDdheDAwNXhxMGgSIXNlcnZpY2VhY2NvdW50LWUwMGp0eHNrM3pubjdyYXQ0azIMCPHv7MgGEJ_k6PEBOgwI8PKElAcQwO2YywNAAloDZTAw.AAAAAAAAAAH-boLssQhDYJht_li9Ql7MN1rSmj_8DXmYlZ13NhdavV0NYylvY_HkVQrALXt2z9Pm5_aQn-tt--Mbc1W8G78E",
    )
    # BUG FIX: the original passed provider=PROVIDER_OR_POLICY_T, which is a
    # typing *alias* (not a provider name), and InferenceClient treats
    # `provider` as mutually exclusive with `base_url` anyway — drop it.
    client = InferenceClient(base_url=base_url, api_key=api_key)

    messages = [{"role": "system", "content": system_message}]
    messages.extend(history)
    messages.append({"role": "user", "content": message})

    response = ""
    # NOTE: renamed the loop variable (was `message`), which shadowed the
    # user-message parameter above.
    for chunk in client.chat_completion(
        model=model,
        messages=messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        choices = chunk.choices
        # Some stream events carry no delta content; append only real tokens,
        # but still yield so the UI refreshes on every chunk (as before).
        if len(choices) and choices[0].delta.content:
            response += choices[0].delta.content
        yield response
"""
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
"""
system_prompt = """
# Parameters for email analysis
PARAMETER temperature 0.1
PARAMETER top_p 0.9
PARAMETER top_k 40
PARAMETER repeat_penalty 1.1
PARAMETER num_ctx 4096
# Enhanced system prompt for email phishing detection
SYSTEM
You are an advanced AI security analyst specialized in email threat detection. Analyze the provided email data and determine if it constitutes a phishing attempt.
Respond with exactly this JSON structure filled with real values (no backticks, no extra text):
""
{
"is_phishing": true or false,
"confidence_score": a float between 0.0 and 1.0,
"threat_type": "type of phishing attack",
"risk_level": "a number from 0 to 5",
"indicators": [
{
"category": "which part of the email is suspicious",
"finding": "concise finding",
"severity": "a number from 0 to 5",
"explanation": "short explanation referencing the email data"
}
],
"mitigation_recommendations": {
"immediate_actions": ["short actionable steps"],
"preventive_measures": ["short preventive steps"],
"reporting_guidance": "who/how to report if applicable"
},
"analysis_summary": "1-3 sentence summary of the assessment"
}
""
Only output the JSON object.
# Fallback model with enhanced prompting
# Base: Meta-Llama-3.1-8B-Instruct
"""
# Chat UI: wires `respond` to a messages-format chat box, with generation
# controls exposed as extra inputs (system prompt + sampling knobs).
_system_box = gr.Textbox(value=system_prompt, label="System message")
_max_tokens_slider = gr.Slider(
    minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"
)
_temperature_slider = gr.Slider(
    minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"
)
_top_p_slider = gr.Slider(
    minimum=0.1,
    maximum=1.0,
    value=0.95,
    step=0.05,
    label="Top-p (nucleus sampling)",
)
chatbot = gr.ChatInterface(
    respond,
    type="messages",
    additional_inputs=[
        _system_box,
        _max_tokens_slider,
        _temperature_slider,
        _top_p_slider,
    ],
)
# Top-level app: render the pre-built chat interface inside a Blocks layout.
# (The HF login sidebar was removed along with authentication.)
with gr.Blocks() as demo:
    chatbot.render()

if __name__ == "__main__":
    demo.launch()