"""Qcode Agent — a Streamlit app that sends Qiskit code to an LLM backend
(OpenAI GPT-5 Mini or HuggingFace-hosted models) to find and fix bugs."""
import streamlit as st
import requests
import os
from openai import OpenAI
# --- Page setup -------------------------------------------------------------
st.set_page_config(page_title="Qcode Agent: Qiskit Bug Fixer", layout="centered")
st.title("🤖 Qcode Agent: LLM-Powered Quantum Code Repair")
st.markdown("Paste your Qiskit code below and let an LLM find and fix bugs.")

# Sidebar: API key entry. Keys live only in this session's widget state.
with st.sidebar:
    st.header("API Configuration")
    openai_api_key = st.text_input("OpenAI API Key", type="password", help="Required for GPT-5 Mini")
    hf_api_key = st.text_input("HuggingFace API Key", type="password", help="Required for HF models")

# Main inputs: the code to repair and the backend that will repair it.
code_input = st.text_area("Qiskit Code Input", height=300, placeholder="Paste your Qiskit code here...")

model_choice = st.selectbox("Choose LLM Backend", ["GPT-5 Mini (OpenAI)", "Granite-8B-Qiskit (HF)", "Code Llama (HF)"])
def query_gpt5_mini(code, api_key):
    """Send *code* to OpenAI's GPT-5 Mini and return its bug-fix analysis.

    Notes on the model (gpt-5-mini-2025-08-07):
      - 400K token context window.
      - Reasoning model: does NOT accept a 'temperature' parameter
        (it always runs at the default temperature of 1).
      - Token budget is set via 'max_completion_tokens', not 'max_tokens'.
      - Also supports: include_reasoning, max_output_tokens.

    Returns the model's text response, or an error string on any failure.
    """
    analysis_request = f"""You are an expert in quantum computing and Qiskit.
Analyze the following Qiskit code for bugs and provide a fixed version.
Explain what bugs you found and how you fixed them.
Code to analyze:
```python
{code}
```
Provide your response in this format:
BUGS FOUND:
[List the bugs]
FIXED CODE:
```python
[Your fixed code here]
```
EXPLANATION:
[Explain the fixes]
"""
    try:
        llm_client = OpenAI(api_key=api_key)
        completion = llm_client.chat.completions.create(
            model="gpt-5-mini-2025-08-07",
            messages=[
                {"role": "system", "content": "You are a Qiskit debugging expert."},
                {"role": "user", "content": analysis_request},
            ],
            max_completion_tokens=2000,
        )
        return completion.choices[0].message.content
    except Exception as e:
        return f"Error querying GPT-5 Mini: {str(e)}"
def query_huggingface(code, api_key, model_name):
    """Query a HuggingFace-hosted model and return its bug-fix analysis.

    Returns the generated text on success, or an error string describing
    the HTTP status or exception on failure.
    """
    try:
        # Translate the UI label into a concrete HF model repository ID.
        hf_models = {
            "Granite-8B-Qiskit (HF)": "ibm-granite/granite-8b-code-instruct",
            "Code Llama (HF)": "codellama/CodeLlama-13b-Instruct-hf",
        }
        repo_id = hf_models.get(model_name, "codellama/CodeLlama-7b-Instruct-hf")

        request_headers = {
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json",
        }
        # Llama-style [INST] instruction prompt.
        instruction = f"""<s>[INST] You are an expert in quantum computing and Qiskit.
Analyze this Qiskit code for bugs and provide a fixed version:
{code}
Identify bugs and provide corrected code. [/INST]"""
        body = {
            "inputs": instruction,
            "parameters": {
                "max_new_tokens": 1000,
                "temperature": 0.3,
                "top_p": 0.9,
                "return_full_text": False,
            },
        }

        endpoint = f"https://api-inference.huggingface.co/models/{repo_id}"
        response = requests.post(endpoint, headers=request_headers, json=body, timeout=60)
        if response.status_code != 200:
            return f"Error: {response.status_code} - {response.text}"

        result = response.json()
        # Successful generations come back as a one-element list of dicts.
        if isinstance(result, list) and len(result) > 0:
            return result[0].get('generated_text', 'No response generated')
        return str(result)
    except Exception as e:
        return f"Error querying HuggingFace: {str(e)}"
def query_llm(model, code, openai_key=None, hf_key=None):
    """Route a code-repair request to the backend selected in the UI.

    Args:
        model: UI label of the chosen backend (must match a selectbox option).
        code: Qiskit source code to analyze.
        openai_key: OpenAI API key; required for the GPT-5 Mini backend.
        hf_key: HuggingFace API key; required for the HF backends.

    Returns:
        The backend's response text, an error message when the matching API
        key is missing, or "No fix found." for an unknown model label.
    """
    if model == "GPT-5 Mini (OpenAI)":
        if not openai_key:
            # Fixed mojibake: the cross-mark emoji was garbled in the source.
            return "❌ Error: OpenAI API key required. Please enter it in the sidebar."
        return query_gpt5_mini(code, openai_key)
    if model in ("Granite-8B-Qiskit (HF)", "Code Llama (HF)"):
        if not hf_key:
            return "❌ Error: HuggingFace API key required. Please enter it in the sidebar."
        return query_huggingface(code, hf_key, model)
    return "No fix found."
# Trigger the repair: validate input, call the selected backend, show result.
if st.button("🔧 Fix My Code", type="primary"):
    if not code_input.strip():
        st.warning("⚠️ Please paste some Qiskit code to analyze.")
    else:
        with st.spinner(f"🤖 Querying {model_choice}..."):
            fixed_code = query_llm(
                model_choice,
                code_input,
                openai_api_key,
                hf_api_key,
            )
        # Fixed mojibake: the success message's check-mark emoji was garbled
        # and split the string literal across two lines in the source.
        st.success("✅ Analysis complete!")
        # Display results in an expandable section
        with st.expander("📋 LLM Response", expanded=True):
            st.markdown(fixed_code)
# Example section: a known-buggy circuit users can paste in to try the app.
with st.expander("💡 Try an Example"):
    example_code = """from qiskit import QuantumCircuit, QuantumRegister
from qiskit_aer import Aer
from qiskit.visualization import plot_histogram
# Create a quantum circuit with a bug
q = QuantumRegister(2, 'q')
qc = QuantumCircuit(q)
qc.h(q[0])
qc.cx(q[0], q[0]) # BUG: Control and target are the same qubit!
qc.measure_all()"""
    st.code(example_code, language="python")
    # NOTE(review): this button only reruns the script and does not populate
    # the input text_area (which has no session_state key), so "Load Example"
    # appears to be a no-op — confirm intended behavior. Making it work would
    # require giving the text_area a key and writing example_code into
    # st.session_state before rerunning.
    if st.button("Load Example"):
        st.rerun()
# Footer and sidebar usage instructions.
st.markdown("---")
st.caption("Built by your Quantum AI copilot 🧠⚛️")

# Instructions: how to use the app and where to obtain API keys.
with st.sidebar:
    st.markdown("---")
    st.markdown("""
### 📖 How to Use
1. Enter your API key(s) above
2. Paste Qiskit code in the main area
3. Select your preferred LLM
4. Click "Fix My Code"
### 🔑 Getting API Keys
- **OpenAI**: https://platform.openai.com/api-keys
- **HuggingFace**: https://huggingface.co/settings/tokens
""")