WhatsMusic committed on
Commit
dcda433
·
verified ·
1 Parent(s): d9965e1

using Code Llama

Browse files
Files changed (1) hide show
  1. app.py +32 -32
app.py CHANGED
@@ -1,46 +1,46 @@
1
  import os
2
  import gradio as gr
3
- import requests
4
 
5
- DEEPSEEK_API_KEY = os.getenv("DEEPSEEK_API_KEY") # Get from environment variables
 
 
 
 
 
 
 
6
 
7
def generate_code(prompt):
    """Send *prompt* to the DeepSeek chat API and return the generated code.

    Returns the model's reply text on success, or a human-readable
    error string on any failure (missing key, HTTP error, exception).
    """
    try:
        # Bail out early when no API key has been configured.
        if not DEEPSEEK_API_KEY:
            return "Error: Missing API key. Please set DEEPSEEK_API_KEY environment variable."

        request_headers = {
            "Authorization": f"Bearer {DEEPSEEK_API_KEY}",
            "Content-Type": "application/json",
        }
        request_body = {
            "model": "deepseek-chat",
            "messages": [
                {
                    "role": "system",
                    "content": "You are an expert Next.js, TypeScript and TailwindCSS developer...",
                },
                {
                    "role": "user",
                    "content": prompt,
                },
            ],
            "temperature": 0.3,
            "max_tokens": 1500,
        }

        api_response = requests.post(
            "https://api.deepseek.com/v1/chat/completions",
            headers=request_headers,
            json=request_body,
        )

        # Guard clause: surface any non-OK HTTP status as an error string.
        if api_response.status_code != 200:
            return f"API Error: {api_response.status_code} - {api_response.text}"
        return api_response.json()["choices"][0]["message"]["content"]

    except Exception as e:
        return f"Error generating code: {str(e)}"
46
 
 
1
  import os
2
  import gradio as gr
3
+ from huggingface_hub import InferenceClient
4
 
5
def get_hf_token():
    """Return the Hugging Face API token, or ``None`` when not configured.

    Hugging Face Spaces injects repository secrets as environment
    variables, so a single ``os.getenv`` covers both hosted and local
    runs.  The previous ``gr.secrets["HF_TOKEN"]`` lookup is not a
    public Gradio API — it raised ``AttributeError`` and always fell
    through to this same environment lookup, so behavior is unchanged.
    """
    return os.getenv("HF_TOKEN")
13
 
14
def generate_code(prompt):
    """Generate Next.js/TypeScript/TailwindCSS code for *prompt*.

    Calls the Hugging Face Inference API (CodeLlama 34B Instruct) via
    ``InferenceClient.chat_completion``.  Returns the generated text,
    or a human-readable error string on any failure.
    """
    try:
        hf_token = get_hf_token()
        # Guard clause: no token means no API access.
        if not hf_token:
            return "Error: Missing Hugging Face token."

        client = InferenceClient(
            model="codellama/CodeLlama-34b-Instruct-hf",  # Smaller alternative
            token=hf_token,
        )

        system_prompt = """You are an expert Next.js, TypeScript and TailwindCSS developer.
Generate clean, efficient code following these rules:
1. Use Next.js App Router
2. Strict TypeScript typing
3. Modern TailwindCSS classes
4. Include proper error boundaries
5. Add JSDoc comments"""

        chat_messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": prompt},
        ]
        response = client.chat_completion(
            messages=chat_messages,
            max_tokens=1500,
            temperature=0.3,
        )

        return response.choices[0].message.content

    except Exception as e:
        return f"Error generating code: {str(e)}"
46