S-Dreamer committed on
Commit
1b74fc2
·
verified ·
1 Parent(s): df1232d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -10
app.py CHANGED
@@ -14,12 +14,14 @@ HF_TOKEN = os.getenv("HF_TOKEN")
14
  if HF_TOKEN:
15
  login(token=HF_TOKEN)
16
 
 
17
  st.set_page_config(
18
  page_title="Code Assistant",
19
  page_icon="🧠",
20
  layout="wide",
21
  )
22
 
 
23
  if "history" not in st.session_state:
24
  st.session_state.history = []
25
 
@@ -29,10 +31,6 @@ if "datasets" not in st.session_state:
29
 
30
  @st.cache_resource(show_spinner=False)
31
  def get_inference_client() -> InferenceClient:
32
- """
33
- Create one reusable inference client.
34
- If HF_TOKEN is set, it will be used automatically for authenticated requests.
35
- """
36
  return InferenceClient(
37
  provider="hf-inference",
38
  api_key=HF_TOKEN,
@@ -74,7 +72,7 @@ def generate_code(
74
  )
75
 
76
  completion = client.chat.completions.create(
77
- model=f"{model_id}:hf-inference",
78
  messages=messages,
79
  max_tokens=max_tokens,
80
  temperature=temperature,
@@ -85,17 +83,15 @@ def generate_code(
85
 
86
  with st.sidebar:
87
  st.header("⚙️ Control Plane")
88
-
89
  st.caption("Uses Hugging Face Inference API instead of local model loading.")
90
 
91
  model_id = st.selectbox(
92
  "Model",
93
  [
94
- "Qwen/Qwen2.5-Coder-3B-Instruct",
95
  "HuggingFaceTB/SmolLM2-1.7B-Instruct",
 
96
  "microsoft/Phi-3.5-mini-instruct",
97
  ],
98
- help="Choose a small instruction or code-capable model available through hf-inference.",
99
  )
100
 
101
  max_tokens = st.slider(
@@ -158,10 +154,10 @@ with st.sidebar:
158
  )
159
 
160
  if not HF_TOKEN:
161
- st.warning("HF_TOKEN is not set. Public-rate or auth failures may occur.")
162
 
163
 
164
- st.title("🧠 Code Assistant")
165
  st.caption("Streamlit + Datasets + Hugging Face Inference API")
166
 
167
  prompt = st.text_area(
 
14
  if HF_TOKEN:
15
  login(token=HF_TOKEN)
16
 
17
+
18
  st.set_page_config(
19
  page_title="Code Assistant",
20
  page_icon="🧠",
21
  layout="wide",
22
  )
23
 
24
+
25
  if "history" not in st.session_state:
26
  st.session_state.history = []
27
 
 
31
 
32
  @st.cache_resource(show_spinner=False)
33
  def get_inference_client() -> InferenceClient:
 
 
 
 
34
  return InferenceClient(
35
  provider="hf-inference",
36
  api_key=HF_TOKEN,
 
72
  )
73
 
74
  completion = client.chat.completions.create(
75
+ model=model_id,
76
  messages=messages,
77
  max_tokens=max_tokens,
78
  temperature=temperature,
 
83
 
84
  with st.sidebar:
85
  st.header("⚙️ Control Plane")
 
86
  st.caption("Uses Hugging Face Inference API instead of local model loading.")
87
 
88
  model_id = st.selectbox(
89
  "Model",
90
  [
 
91
  "HuggingFaceTB/SmolLM2-1.7B-Instruct",
92
+ "Qwen/Qwen2.5-Coder-3B-Instruct",
93
  "microsoft/Phi-3.5-mini-instruct",
94
  ],
 
95
  )
96
 
97
  max_tokens = st.slider(
 
154
  )
155
 
156
  if not HF_TOKEN:
157
+ st.warning("HF_TOKEN is not set. Auth or rate-limit issues may occur.")
158
 
159
 
160
+ st.title("🧠 Uncensored HackerCoder Assistant")
161
  st.caption("Streamlit + Datasets + Hugging Face Inference API")
162
 
163
  prompt = st.text_area(