hchevva committed on
Commit
98ba82f
·
verified ·
1 Parent(s): 83ccca0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -26
app.py CHANGED
@@ -95,33 +95,23 @@ def measure_collapse(qc, shots):
95
 
96
 
97
  def explain_llm(qc, n_qubits, shots, hf_model_id):
98
- try:
99
- state_ket = qc.ket_notation(max_terms=6)
100
- probs_top = _probs_top(qc, int(n_qubits), k=6)
101
-
102
- cfg = ExplainConfig(
103
- model_id=hf_model_id.strip(),
104
- max_new_tokens=280,
105
- temperature=0.2,
106
- )
107
-
108
- out = explain_circuit_with_hf(
109
- n_qubits=int(n_qubits),
110
- history=qc.history,
111
- state_ket=state_ket,
112
- probs_top=probs_top,
113
- shots=int(shots),
114
- cfg=cfg,
115
- )
116
-
117
- # Ensure something is always returned to the UI
118
- if not out or not str(out).strip():
119
- return "LLM returned an empty response. Try another model id or check HF_TOKEN."
120
-
121
- return out
122
- except Exception as e:
123
- return f"Explain failed with error:\n\n`{repr(e)}`"
124
 
 
 
 
 
 
 
 
 
125
 
126
  def _refresh_choices(n):
127
  opts = list(range(int(n)))
 
95
 
96
 
97
def explain_llm(qc, n_qubits, shots, hf_model_id):
    """Generate a natural-language explanation of the circuit via a HF model.

    Args:
        qc: Circuit object exposing ``ket_notation(max_terms=...)`` and a
            ``history`` attribute of applied gates.
        n_qubits: Number of qubits (coerced to ``int``).
        shots: Number of measurement shots (coerced to ``int``).
        hf_model_id: Hugging Face model id; surrounding whitespace is stripped.

    Returns:
        str: The model's explanation text, or a human-readable error message.
        This function never raises — the Gradio callback always receives a
        string it can display.
    """
    try:
        # Summarize the current state compactly so the LLM prompt stays small.
        state_ket = qc.ket_notation(max_terms=6)
        probs_top = _probs_top(qc, int(n_qubits), k=6)

        cfg = ExplainConfig(
            model_id=hf_model_id.strip(),
            max_new_tokens=280,
            temperature=0.2,
        )

        out = explain_circuit_with_hf(
            n_qubits=int(n_qubits),
            history=qc.history,
            state_ket=state_ket,
            probs_top=probs_top,
            shots=int(shots),
            cfg=cfg,
        )

        # Guard against an empty completion so the UI never shows a blank panel.
        if not out or not str(out).strip():
            return "LLM returned an empty response. Try another model id or check HF_TOKEN."

        return out
    except Exception as e:
        # Surface the failure to the UI instead of crashing the callback;
        # Exception is intentionally broad here: this is a top-level UI boundary.
        return f"Explain failed with error:\n\n`{repr(e)}`"
115
 
116
  def _refresh_choices(n):
117
  opts = list(range(int(n)))