JanadaSroor committed on
Commit
8eb7638
·
verified ·
1 Parent(s): a387a6c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +56 -58
app.py CHANGED
@@ -1,76 +1,74 @@
1
  import gradio as gr
2
  import requests
3
  import os
 
 
 
 
 
 
4
 
5
def generate_flutter_code(prompt, api_token=None):
    """
    Generate Flutter/Dart code using the Hugging Face Inference API (Qwen3 0.6B).

    Args:
        prompt: Natural-language description of the desired Flutter code.
        api_token: Hugging Face API token; falls back to the HF_TOKEN
            environment variable when omitted.

    Returns:
        The generated Dart code as a string, or a human-readable
        "Error: ..." message describing what went wrong.
    """
    if not api_token:
        api_token = os.getenv("HF_TOKEN")

    if not api_token:
        return "Error: Hugging Face API token is required. Please provide it in the input field or set HF_TOKEN environment variable."

    # BUG FIX: "Qwen/Qwen-0.6B" is not a real repository id; the Qwen 3
    # 0.6B model is published as "Qwen/Qwen3-0.6B".
    model_id = "Qwen/Qwen3-0.6B"

    # Enhanced prompt for Flutter/Dart code generation
    enhanced_prompt = f"""You are an expert Flutter/Dart developer. Generate high-quality, production-ready Flutter/Dart code based on the following description. Include proper error handling, state management, and follow Flutter best practices.

Description: {prompt}

Please provide:
1. Complete Dart code with all necessary imports
2. Proper widget structure
3. State management if needed
4. Comments explaining complex parts
5. Error handling where appropriate

Generate the code:"""

    try:
        # BUG FIX: the serverless Inference API is served from
        # api-inference.huggingface.co; the previous
        # "https://router.huggingface.co/models/{...}" URL is not a valid
        # endpoint and always failed.
        api_url = f"https://api-inference.huggingface.co/models/{model_id}"

        headers = {
            "Authorization": f"Bearer {api_token}",
            "Content-Type": "application/json"
        }

        payload = {
            "inputs": enhanced_prompt,
            "parameters": {
                "max_new_tokens": 2048,
                "temperature": 0.1,
                "top_p": 0.95,
                "do_sample": True,
                "return_full_text": False
            }
        }

        response = requests.post(api_url, headers=headers, json=payload, timeout=30)

        if response.status_code == 200:
            result = response.json()
            # Guard the element type too: a non-dict first element would
            # otherwise raise AttributeError on .get().
            if isinstance(result, list) and len(result) > 0 and isinstance(result[0], dict):
                generated_text = result[0].get("generated_text", "")
                return generated_text.strip()
            else:
                return "Error: Unexpected response format from API"
        elif response.status_code == 503:
            return "Error: Model is currently loading. Please try again in a few moments."
        elif response.status_code == 429:
            return "Error: Rate limit exceeded. Please try again later."
        else:
            return f"Error: API request failed with status code {response.status_code}. Response: {response.text}"

    except requests.exceptions.Timeout:
        return "Error: Request timed out. The model might be busy."
    except requests.exceptions.RequestException as e:
        return f"Error: Network request failed: {str(e)}"
    except Exception as e:
        return f"Error: Unexpected error occurred: {str(e)}"
74
 
75
  # Gradio interface
76
  def create_interface():
 
1
  import gradio as gr
2
  import requests
3
  import os
4
# Candidate text-generation models, tried in the order listed until one
# responds successfully (see generate_flutter_code).
MODELS = [
    "Qwen/Qwen2.5-1.5B-Instruct",          # first choice
    "Qwen/Qwen2.5-0.5B-Instruct",          # lighter Qwen fallback
    "microsoft/phi-2",                     # non-Qwen fallback
    "TinyLlama/TinyLlama-1.1B-Chat-v1.0",  # last-resort tiny chat model
]
10
 
11
def generate_flutter_code(prompt, api_token=None):
    """
    Generate Flutter/Dart code via the Hugging Face Inference API.

    Each model in MODELS is tried in order until one returns a usable
    result; loading (503/504) and network errors fall through to the
    next model.

    Args:
        prompt: Natural-language description of the desired Flutter code.
        api_token: Hugging Face API token; falls back to the HF_TOKEN
            environment variable when omitted.

    Returns:
        The generated Dart code as a string, or a human-readable error
        message when no model produced a result.
    """
    if not api_token:
        api_token = os.getenv("HF_TOKEN")

    if not api_token:
        # BUG FIX: message previously started with a stray space and had no
        # "Error:" prefix, unlike every other error string in this module.
        return "Error: Hugging Face API token is required."

    enhanced_prompt = f"""
You are an expert Flutter/Dart developer.

Task:
Generate clean, production-ready Flutter/Dart code.

Requirements:
- Proper widget structure
- Clear state management
- Comments for complex logic
- Error handling
- Best practices

User request:
{prompt}

Return ONLY Dart code.
"""

    headers = {
        "Authorization": f"Bearer {api_token}",
        "Content-Type": "application/json"
    }

    payload = {
        "inputs": enhanced_prompt,
        "parameters": {
            "max_new_tokens": 1024,
            "temperature": 0.2,
            "top_p": 0.95,
            "do_sample": True
        }
    }

    for model_id in MODELS:
        try:
            api_url = f"https://api-inference.huggingface.co/models/{model_id}"
            response = requests.post(api_url, headers=headers, json=payload, timeout=40)

            if response.status_code == 200:
                # BUG FIX: on older requests versions .json() raises a plain
                # ValueError, which the RequestException handler below does
                # not catch; treat an unparseable body as "try next model".
                try:
                    data = response.json()
                except ValueError:
                    continue
                # BUG FIX: data[0] on an empty list raised IndexError, and
                # `"generated_text" in data[0]` on a non-dict element raised
                # TypeError -- both crashed instead of trying the next model.
                if (isinstance(data, list) and data
                        and isinstance(data[0], dict)
                        and "generated_text" in data[0]):
                    return data[0]["generated_text"].strip()
                continue  # 200 with an unexpected shape: try next model

            elif response.status_code in (503, 504):
                continue  # model loading try next

            elif response.status_code == 429:
                return "⚠️ Rate limit reached. Try again later."

            # Any other status code: fall through and try the next model.

        except requests.exceptions.RequestException:
            continue  # network error / timeout: try next model

    return "❌ All models failed. Try again later."
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
72
 
73
  # Gradio interface
74
  def create_interface():