Mohansai2004 commited on
Commit
53e6fb8
·
1 Parent(s): 36d5a1d

feat: switch to deepseek model for token-free operation

Browse files
Files changed (3) hide show
  1. README.md +23 -17
  2. app.py +182 -108
  3. requirements.txt +8 -2
README.md CHANGED
@@ -1,30 +1,36 @@
1
  ---
2
- title: AI Code & Analysis Assistant
3
- emoji: ๐Ÿฆ™
4
  colorFrom: blue
5
  colorTo: purple
6
  sdk: streamlit
7
  sdk_version: 1.41.1
8
  app_file: app.py
9
  pinned: false
10
- short_description: Advanced AI assistant using CodeLlama-7b
11
  ---
12
 
13
- # AI Code & Analysis Assistant
14
 
15
- Powered by CodeLlama-7b, offering:
16
- - Professional Code Generation
17
- - Technical Analysis
18
- - Detailed Explanations
 
 
 
19
 
20
  ## Features
21
- - State-of-the-art language model
22
- - Advanced code completion
23
- - Optimized for CPU
24
- - No token required
25
- - Memory efficient
 
 
26
 
27
- ## Best Practices
28
- - Use clear prompts
29
- - Specify programming language
30
- - Include context for better results
 
 
1
  ---
2
+ title: DeepSeek Code Assistant
3
+ emoji: 🚀
4
  colorFrom: blue
5
  colorTo: purple
6
  sdk: streamlit
7
  sdk_version: 1.41.1
8
  app_file: app.py
9
  pinned: false
10
+ short_description: Advanced Code Generation with Enhanced UI
11
  ---
12
 
13
+ # DeepSeek Code Assistant Pro
14
 
15
+ A powerful code generation tool with enhanced UI features:
16
+ - Modern, responsive interface
17
+ - Advanced code customization
18
+ - Multiple template options
19
+ - Syntax highlighting
20
+ - One-click code copying
21
+ - Code downloading
22
 
23
  ## Features
24
+ - Interactive code generation
25
+ - Real-time preview
26
+ - Multiple programming languages
27
+ - Custom templates
28
+ - Error handling options
29
+ - Automatic commenting
30
+ - Test generation
31
 
32
+ ## Pro Tips
33
+ - Use detailed descriptions
34
+ - Specify requirements clearly
35
+ - Select appropriate templates
36
+ - Enable useful options
app.py CHANGED
@@ -5,15 +5,59 @@ import gc
5
  from PIL import Image
6
  import io
7
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8
  @st.cache_resource
9
  def load_model():
10
  try:
11
- model_id = "CodeLlama-7b-Instruct-hf"
12
 
13
  tokenizer = AutoTokenizer.from_pretrained(
14
  model_id,
15
- use_fast=True,
16
- trust_remote_code=True
17
  )
18
  tokenizer.pad_token = tokenizer.eos_token
19
 
@@ -26,7 +70,7 @@ def load_model():
26
  )
27
 
28
  model.eval()
29
- torch.set_num_threads(8) # Increased threads for better performance
30
  gc.collect()
31
  return model, tokenizer
32
 
@@ -37,23 +81,27 @@ def load_model():
37
  def generate_response(prompt, image=None):
38
  model, tokenizer = load_model()
39
 
40
- system_prompt = """You are a helpful AI assistant skilled in coding, image analysis, and explanations.
41
- Provide clear, concise, and accurate responses."""
42
-
43
  try:
44
- # Format prompt based on type
45
- if image:
46
- formatted_prompt = f"<s>[INST] {system_prompt}\nAnalyze this image: {image}\n\n{prompt} [/INST]"
47
- else:
48
- formatted_prompt = f"<s>[INST] {system_prompt}\n{prompt} [/INST]"
49
-
50
- inputs = tokenizer(formatted_prompt, return_tensors="pt", padding=True)
 
 
 
 
 
 
 
51
 
52
  with torch.inference_mode():
53
  outputs = model.generate(
54
  inputs["input_ids"],
55
- max_length=2048,
56
- temperature=0.7,
57
  top_p=0.95,
58
  top_k=50,
59
  repetition_penalty=1.2,
@@ -63,108 +111,134 @@ def generate_response(prompt, image=None):
63
  )
64
 
65
  response = tokenizer.decode(outputs[0], skip_special_tokens=True)
66
- return response.split('[/INST]')[-1].strip()
 
 
67
 
68
  except Exception as e:
69
  return f"Error: {str(e)}"
70
 
71
- st.title("๐Ÿค– Multi-Purpose AI Assistant")
72
- st.write("Generate code, analyze images, or get detailed explanations")
73
-
74
- # Sidebar for task selection
75
- task = st.sidebar.selectbox(
76
- "Choose Task",
77
- ["Generate Code", "Analyze Image", "Explain Concept"]
78
- )
79
-
80
- # Add language categories and options
81
- PROGRAMMING_LANGUAGES = {
82
- "Web Development": ["HTML", "CSS", "JavaScript", "TypeScript", "PHP"],
83
- "Backend": ["Python", "Java", "C#", "Ruby", "Go", "Node.js"],
84
- "Data & ML": ["Python", "R", "SQL", "Julia"],
85
- "Mobile": ["Swift", "Kotlin", "Java", "React Native"],
86
- "System": ["C", "C++", "Rust", "Shell"]
87
- }
 
 
 
 
 
 
 
 
88
 
89
- if task == "Generate Code":
90
- # Enhanced language selection
91
- category = st.selectbox(
92
- "Select Category",
93
- list(PROGRAMMING_LANGUAGES.keys())
94
- )
95
 
96
- language = st.selectbox(
97
- "Programming Language",
98
- PROGRAMMING_LANGUAGES[category],
99
- help="Choose the programming language for your code"
100
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
101
 
102
- template = st.selectbox(
103
- "Code Template",
104
- ["Basic Script", "Function", "Class", "Full Program"],
105
- help="Select the type of code structure you want"
106
- )
107
 
108
- code_prompt = st.text_area(
109
- "Describe what you want to create:",
110
- placeholder=f"Example: Create a {language} {template.lower()} that..."
 
111
  )
112
 
113
- if st.button("Generate Code"):
114
- if code_prompt:
115
- with st.spinner(f"Generating {language} code..."):
116
- prompt = f"""Write {language} code for: {code_prompt}
117
- Type: {template}
118
- Requirements:
119
- - Clean and efficient code
120
- - Follow best practices
121
- - Include necessary imports
122
- - Provide only code without explanation
123
- """
124
- response = generate_response(prompt)
125
- st.code(response, language=language.lower())
 
 
 
126
 
127
- # Add copy button
128
- st.button(
129
- "๐Ÿ“‹ Copy Code",
130
- help="Copy code to clipboard",
131
- on_click=lambda: st.write(response)
132
- )
133
-
134
- elif task == "Analyze Image":
135
- uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])
136
- if uploaded_file:
137
- image = Image.open(uploaded_file)
138
- st.image(image, caption="Uploaded Image")
139
-
140
- analysis_type = st.selectbox(
141
- "What would you like to know?",
142
- ["Describe Image", "Technical Analysis", "Extract Text"]
143
- )
144
-
145
- if st.button("Analyze"):
146
- with st.spinner("Analyzing image..."):
147
- prompt = f"Analyze this image for {analysis_type}:"
148
- response = generate_response(prompt, image)
149
- st.write(response)
150
 
151
- else: # Explain Concept
152
- concept = st.text_input("Enter the concept you want to understand:")
153
- if st.button("Explain"):
154
- if concept:
155
- with st.spinner("Generating explanation..."):
156
- prompt = f"Explain in detail: {concept}"
157
- response = generate_response(prompt)
158
- st.markdown(response)
159
-
160
- # Clear cache button
161
- if st.sidebar.button("Clear Cache"):
162
- st.cache_resource.clear()
163
- st.success("Cache cleared!")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
164
 
165
- st.sidebar.markdown("""
166
- ### Tips:
167
- - Be specific in your prompts
168
- - For code, mention language and functionality
169
- - For images, upload clear pictures
170
- """)
 
5
  from PIL import Image
6
  import io
7
 
8
# Set page configuration.
# NOTE(review): st.set_page_config must be the first Streamlit command the
# script executes — confirm nothing earlier in the full file calls Streamlit.
st.set_page_config(
    page_title="DeepSeek Coding Assistant",
    page_icon="🚀",
    layout="wide",
    initial_sidebar_state="expanded"
)

# Add custom CSS: full-width green buttons, rounded text inputs and select
# boxes, and styled containers for output and status messages. This is raw
# HTML/CSS injection, which is why unsafe_allow_html=True is required below.
st.markdown("""
<style>
    .main {
        padding: 2rem;
    }
    .stButton button {
        width: 100%;
        border-radius: 5px;
        height: 3em;
        background-color: #4CAF50;
        color: white;
    }
    .stTextInput > div > div > input {
        border-radius: 5px;
    }
    .stSelectbox > div > div > select {
        border-radius: 5px;
    }
    .output-container {
        background-color: #f0f2f6;
        padding: 20px;
        border-radius: 10px;
        margin: 10px 0;
    }
    .success-message {
        color: #4CAF50;
        font-weight: bold;
    }
    .error-message {
        color: #ff4444;
        font-weight: bold;
    }
</style>
""", unsafe_allow_html=True)
51
+
52
  @st.cache_resource
53
  def load_model():
54
  try:
55
+ model_id = "deepseek-ai/deepseek-coder-1.3b-base"
56
 
57
  tokenizer = AutoTokenizer.from_pretrained(
58
  model_id,
59
+ trust_remote_code=True,
60
+ padding_side='left'
61
  )
62
  tokenizer.pad_token = tokenizer.eos_token
63
 
 
70
  )
71
 
72
  model.eval()
73
+ torch.set_num_threads(8)
74
  gc.collect()
75
  return model, tokenizer
76
 
 
81
  def generate_response(prompt, image=None):
82
  model, tokenizer = load_model()
83
 
 
 
 
84
  try:
85
+ code_prompt = f"""Write professional code based on the given requirements.
86
+ Language: {prompt.split('code for:')[0] if 'code for:' in prompt else 'any'}
87
+ Requirements: {prompt}
88
+
89
+ Here's the implementation:
90
+ ```"""
91
+
92
+ inputs = tokenizer(
93
+ code_prompt,
94
+ return_tensors="pt",
95
+ padding=True,
96
+ max_length=1024,
97
+ truncation=True
98
+ )
99
 
100
  with torch.inference_mode():
101
  outputs = model.generate(
102
  inputs["input_ids"],
103
+ max_length=2048, # Increased for longer code
104
+ temperature=0.5, # More focused
105
  top_p=0.95,
106
  top_k=50,
107
  repetition_penalty=1.2,
 
111
  )
112
 
113
  response = tokenizer.decode(outputs[0], skip_special_tokens=True)
114
+ # Clean up response
115
+ code = response.split("```")[1] if "```" in response else response
116
+ return code.strip()
117
 
118
  except Exception as e:
119
  return f"Error: {str(e)}"
120
 
121
def strip_task_emoji(raw_choice):
    """Return a sidebar task label without its leading emoji.

    Bug fix: the previous ``raw_choice.split()[1]`` split on EVERY space, so
    a label like "💻 Code Generation" yielded only "Code". ``main()`` compares
    the return value against full labels ("Code Generation", "Image
    Analysis"), so no branch ever matched and every task fell through to the
    concept-explanation UI. Splitting at most once keeps the full label.

    Args:
        raw_choice: selectbox label such as "💻 Code Generation".

    Returns:
        The label with the first whitespace-delimited token (the emoji)
        removed, or the input unchanged if it contains no space.
    """
    parts = raw_choice.split(maxsplit=1)
    return parts[1] if len(parts) > 1 else raw_choice


def create_sidebar():
    """Render the settings sidebar and return the chosen task label.

    Returns:
        The selected task name without its emoji prefix, e.g.
        "Code Generation", suitable for the plain-string comparisons
        in ``main()``.
    """
    with st.sidebar:
        # NOTE(review): hotlinked image from the Streamlit repo — verify the
        # URL is still reachable, otherwise this renders a broken image.
        st.image(
            "https://raw.githubusercontent.com/streamlit/streamlit/develop/examples/streamlit_app_example.png",
            width=100,
        )
        st.title("🛠️ Settings")

        task = st.selectbox(
            "Select Task",
            ["💻 Code Generation", "🖼️ Image Analysis", "📚 Concept Explanation"]
        )

        st.markdown("---")

        # Dropping the cached model forces a fresh load_model() on next run.
        if st.button("♻️ Clear Cache", use_container_width=True):
            st.cache_resource.clear()
            st.success("Cache cleared successfully!")

        st.markdown("""
        ### 🌟 Pro Tips
        - Use detailed descriptions
        - Specify edge cases
        - Include example inputs/outputs
        """)

    return strip_task_emoji(task)  # label without emoji, e.g. "Code Generation"
146
 
147
def code_generation_ui():
    """Render the code-generation form and display the generated result.

    Left column collects domain/language/template, right column collects
    boolean generation options; the generated code is shown with syntax
    highlighting and offered as a download.

    NOTE(review): this function references ``PROGRAMMING_LANGUAGES`` and
    ``generate_enhanced_response``, neither of which is defined in this
    commit (the old PROGRAMMING_LANGUAGES dict was removed). Confirm they
    exist elsewhere in the module — otherwise this raises NameError at
    render / generate time.
    """
    requirements_col, options_col = st.columns([2, 1])

    with requirements_col:
        st.markdown("### 📝 Code Requirements")
        category = st.selectbox(
            "Domain",
            list(PROGRAMMING_LANGUAGES.keys()),
            help="Select the type of application"
        )

        language = st.selectbox(
            "Language",
            PROGRAMMING_LANGUAGES[category],
            help="Choose programming language"
        )

        template = st.selectbox(
            "Template",
            ["Basic Script", "Function", "Class", "Full Program", "API", "Database"],
            help="Select code structure"
        )

    with options_col:
        st.markdown("### ⚙️ Options")
        add_comments = st.checkbox("Add Comments", value=True)
        include_tests = st.checkbox("Include Tests")
        error_handling = st.checkbox("Error Handling")

    prompt = st.text_area(
        "Describe Your Code Requirements",
        placeholder="Example: Create a function that takes a list of numbers and returns the sum of even numbers...",
        height=150
    )

    # Center the generate button in the middle of three equal columns.
    col1, col2, col3 = st.columns([1, 1, 1])
    with col2:
        generate = st.button("🚀 Generate Code", use_container_width=True)

    if generate and prompt:
        with st.spinner("🔮 Generating your code..."):
            options = {
                "comments": add_comments,
                "tests": include_tests,
                "error_handling": error_handling
            }
            code = generate_enhanced_response(prompt, language, template, options)

        st.markdown("### 📋 Generated Code")
        with st.expander("Show Code", expanded=True):
            st.code(code, language=language.lower())

        col1, col2 = st.columns([1, 1])
        with col1:
            # Bug fix: f"generated_code.{language.lower()}" produced bogus
            # extensions such as "generated_code.python". Map language names
            # to conventional file extensions, falling back to the lowercased
            # name for anything unlisted.
            extensions = {
                "python": "py", "javascript": "js", "typescript": "ts",
                "c#": "cs", "c++": "cpp", "ruby": "rb", "rust": "rs",
                "kotlin": "kt", "shell": "sh", "node.js": "js",
                "react native": "jsx",
            }
            ext = extensions.get(language.lower(), language.lower())
            st.download_button(
                "💾 Download Code",
                code,
                file_name=f"generated_code.{ext}",
                mime="text/plain"
            )
        with col2:
            # NOTE(review): this button has no callback — it does not
            # actually copy anything to the clipboard.
            st.button("📋 Copy to Clipboard")
 
 
 
 
 
 
 
 
 
 
 
 
 
209
 
210
def main():
    """Application entry point: render the header and dispatch the task
    selected in the sidebar to the matching UI section."""
    task = create_sidebar()

    st.markdown("# 🚀 DeepSeek Coding Assistant")
    st.markdown("---")

    if task == "Code Generation":
        code_generation_ui()
    elif task == "Image Analysis":
        # Image-analysis flow: upload, preview, pick an analysis, run it.
        uploaded = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])
        if uploaded:
            picture = Image.open(uploaded)
            st.image(picture, caption="Uploaded Image")

            analysis_type = st.selectbox(
                "What would you like to know?",
                ["Describe Image", "Technical Analysis", "Extract Text"]
            )

            if st.button("Analyze"):
                with st.spinner("Analyzing image..."):
                    result = generate_response(
                        f"Analyze this image for {analysis_type}:", picture
                    )
                    st.write(result)
    else:
        # Concept-explanation flow (default branch).
        concept = st.text_input("Enter the concept you want to understand:")
        if st.button("Explain") and concept:
            with st.spinner("Generating explanation..."):
                explanation = generate_response(f"Explain in detail: {concept}")
                st.markdown(explanation)

if __name__ == "__main__":
    main()
 
 
 
 
requirements.txt CHANGED
@@ -1,9 +1,15 @@
1
- # Add any additional dependencies here
2
- # streamlit is already pre-installed
3
  streamlit>=1.41.1
4
  torch>=2.0.0
5
  transformers>=4.33.0
6
  accelerate>=0.21.0
 
 
 
 
 
 
 
7
  sentencepiece>=0.1.99
8
  Pillow>=9.0.0
9
  einops>=0.6.1
 
1
+ # Core dependencies
 
2
  streamlit>=1.41.1
3
  torch>=2.0.0
4
  transformers>=4.33.0
5
  accelerate>=0.21.0
6
+
7
+ # UI enhancements
8
+ streamlit-ace>=0.1.1
9
+ streamlit-extras>=0.3.0
10
+ streamlit-code-editor>=0.1.6
11
+
12
+ # Model dependencies
13
  sentencepiece>=0.1.99
14
  Pillow>=9.0.0
15
  einops>=0.6.1