| | import os |
| | import streamlit as st |
| | import requests |
| |
|
| | |
# Hugging Face API token, read from the environment. If the variable is
# unset this is None, which yields an invalid "Bearer None" header below —
# requests will then come back as authorization errors from the API.
API_TOKEN = os.environ.get("HUGGING_FACE_API_TOKEN")

# Hosted Inference API endpoint for the Meta-Llama-3-8B base model.
API_URL = "https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-8B"
# Authorization header attached to every inference request.
headers = {"Authorization": f"Bearer {API_TOKEN}"}
| |
|
| | |
def query(payload, timeout=60):
    """POST ``payload`` to the Hugging Face Inference API and return the decoded JSON.

    Parameters
    ----------
    payload : dict
        JSON-serializable request body, e.g. ``{"inputs": "<prompt>"}``.
    timeout : float, optional
        Seconds to wait for a server response before giving up (default 60).
        The original call had no timeout, so a stalled or model-loading
        endpoint would hang the Streamlit worker indefinitely.

    Returns
    -------
    The JSON-decoded response body. Note the API returns JSON even on
    failure (a dict with an ``"error"`` key), which callers must inspect.

    Raises
    ------
    requests.exceptions.RequestException
        On network failure or timeout.
    """
    response = requests.post(API_URL, headers=headers, json=payload, timeout=timeout)
    return response.json()
| |
|
| | |
def main():
    """Render the Streamlit UI: take a prompt, call the model, show the result."""
    st.title("SQL Query Generator")

    prompt = st.text_input("Enter your prompt:", "Please generate a SQL query to fetch data from the database.")

    if st.button("Generate SQL Query"):
        payload = {"inputs": prompt}

        with st.spinner('Generating SQL query...'):
            output = query(payload)

        # BUG FIX: the text-generation endpoint returns a LIST of dicts,
        # e.g. [{"generated_text": "..."}]. The original check
        # `"generated_text" in output` tested string membership in that
        # list, so every successful generation fell into the error branch.
        # Accept both the list shape and a plain dict shape.
        generated = None
        if isinstance(output, list) and output and isinstance(output[0], dict):
            generated = output[0].get("generated_text")
        elif isinstance(output, dict):
            generated = output.get("generated_text")

        if generated is not None:
            st.write("Generated SQL Query:")
            st.code(generated)
        else:
            # On failure the API returns {"error": "..."} — surface that
            # detail when available instead of only a generic message.
            detail = output.get("error") if isinstance(output, dict) else None
            if detail:
                st.error(f"Failed to generate SQL query: {detail}")
            else:
                st.error("Failed to generate SQL query. Please try again.")
| |
|
# Run the app only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()
| |
|