Mohansai2004 commited on
Commit
6dd794e
·
1 Parent(s): 56432ce

Initial commit: DeepSeek LLM chat application

Browse files
Files changed (3) hide show
  1. README.md +13 -6
  2. app.py +50 -2
  3. requirements.txt +8 -0
README.md CHANGED
@@ -1,13 +1,20 @@
1
  ---
2
- title: Test
3
- emoji: 📈
4
- colorFrom: purple
5
- colorTo: purple
6
  sdk: streamlit
7
  sdk_version: 1.41.1
8
  app_file: app.py
9
  pinned: false
10
- short_description: this for testing
11
  ---
12
 
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
1
  ---
2
+ title: DeepSeek Coding Assistant
3
+ emoji: 🤖
4
+ colorFrom: blue
5
+ colorTo: green
6
  sdk: streamlit
7
  sdk_version: 1.41.1
8
  app_file: app.py
9
  pinned: false
10
+ short_description: Code-focused chat interface using DeepSeek 6.7B
11
  ---
12
 
13
+ # DeepSeek Coding Assistant
14
+ An AI coding assistant powered by the DeepSeek Coder 6.7B instruct model.
15
+
16
+ ## Features
17
+ - Specialized in coding and technical discussions
18
+ - Based on DeepSeek 6.7B instruct model
19
+ - Real-time response generation
20
+ - Context-aware conversations
app.py CHANGED
@@ -1,4 +1,52 @@
1
  import streamlit as st
 
 
2
 
3
- x = st.slider('Select a value')
4
- st.write(x, 'squared is', x * x)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import streamlit as st
2
+ from transformers import AutoModelForCausalLM, AutoTokenizer
3
+ import torch
4
 
5
@st.cache_resource
def load_model():
    """Download (or reuse) the DeepSeek Coder 6.7B instruct model and tokenizer.

    Decorated with ``st.cache_resource`` so the large model is materialized
    once per Streamlit server process and shared across reruns/sessions.

    Returns:
        tuple: ``(model, tokenizer)`` ready for text generation.
    """
    checkpoint = "deepseek-ai/deepseek-coder-6.7b-instruct"
    tok = AutoTokenizer.from_pretrained(checkpoint, trust_remote_code=True)
    lm = AutoModelForCausalLM.from_pretrained(
        checkpoint,
        # Half precision to fit the 6.7B model in memory; assumes an
        # fp16-capable device — TODO confirm this Space has a GPU.
        torch_dtype=torch.float16,
        trust_remote_code=True,
        # Let accelerate decide layer placement across available devices.
        device_map="auto",
    )
    return lm, tok
16
+
17
def generate_response(prompt, model, tokenizer):
    """Generate one assistant reply for a single user prompt.

    Args:
        prompt: The user's message (str).
        model: Causal LM returned by ``load_model()``.
        tokenizer: The matching tokenizer.

    Returns:
        str: The model's reply with the "Human:/Assistant:" scaffolding
        stripped off.

    NOTE(review): only the latest prompt is sent to the model — prior chat
    turns are not included, so replies are not actually context-aware.
    """
    formatted_prompt = f"Human: {prompt}\n\nAssistant:"
    inputs = tokenizer(formatted_prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(
        **inputs,
        # Fix: max_length=2048 bounded prompt+completion *combined*, so a
        # long prompt left little or no room for the reply (or errored).
        # max_new_tokens bounds only the generated continuation.
        max_new_tokens=1024,
        # Fix: temperature is ignored (with a warning) under the default
        # greedy decoding; sampling must be enabled for it to take effect.
        do_sample=True,
        temperature=0.7,
        num_return_sequences=1,
        pad_token_id=tokenizer.eos_token_id,
    )
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # Keep only the text after the final "Assistant:" marker.
    return response.split("Assistant:")[-1].strip()
29
+
30
# --- Top-level Streamlit page: chat UI wiring ---
st.title("🤖 DeepSeek Chat")
st.write("Using DeepSeek 6.7B - a powerful open source coding LLM")

# Persist the transcript across Streamlit script reruns.
if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay the stored transcript so the full history renders on every rerun.
for entry in st.session_state.messages:
    with st.chat_message(entry["role"]):
        st.markdown(entry["content"])

# Fetch the (cached) model/tokenizer pair.
model, tokenizer = load_model()

prompt = st.chat_input("Ask me anything about coding!")
if prompt:
    # Record and echo the user's turn.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # Generate and render the assistant's turn.
    with st.chat_message("assistant"):
        with st.spinner("Thinking..."):
            # NOTE(review): only the latest prompt is forwarded; earlier
            # turns are not part of the model context.
            response = generate_response(prompt, model, tokenizer)
            st.markdown(response)
    st.session_state.messages.append({"role": "assistant", "content": response})
requirements.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ # Add any additional dependencies here
2
+ # streamlit is already pre-installed
3
+ pandas
4
+ numpy
5
+ transformers>=4.34.0
6
+ torch>=2.0.0
7
+ accelerate>=0.20.0
8
+ einops