d221 committed on
Commit
2024002
·
verified ·
1 Parent(s): 0bd1aff

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +65 -121
app.py CHANGED
@@ -11,142 +11,86 @@ from io import BytesIO
11
  import docx
12
  import ntpath
13
 
14
- ###############################################################################
15
- # REPLACE WITH YOUR DEFAULT (OPEN-SOURCE) DEEPSEEK API KEY
16
- ###############################################################################
17
- OPEN_SOURCE_DEEPSEEK_KEY = "YOUR_DEFAULT_API_KEY_HERE"
18
 
19
- def chat_with_deepseek(
20
- user_message, # The user’s message to the chatbot
21
- history, # Chat history (handled by Gradio’s ChatInterface)
22
- system_message, # The system message from the left panel
23
- user_api_key, # The user-provided optional HF API key
24
- max_tokens, # The “Max new tokens” slider
25
- temperature, # The “Temperature” slider
26
- top_p # The “Top-p” slider
27
- ):
28
- """
29
- This function is called every time a user submits a new message.
30
- It uses either the user’s HF API key if provided, or a default
31
- open-source DeepSeek API key if empty.
32
- """
33
-
34
- # Decide which key to use
35
- final_api_key = user_api_key.strip() if user_api_key else OPEN_SOURCE_DEEPSEEK_KEY
36
 
37
- # Initialize InferenceClient with the chosen API key
38
- client = InferenceClient(token=final_api_key)
 
39
 
40
- # Build the prompt or system instruction
41
- # You can format your prompt however you like; here’s a simple example:
42
- prompt = (
43
- f"{system_message.strip()}\n\n" # System instructions at the top
44
- f"User: {user_message}\n"
45
- "Assistant:"
46
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
47
 
48
- # Configure generation parameters
49
- generation_params = dict(
50
- temperature=temperature,
51
- max_new_tokens=int(max_tokens),
52
- top_p=top_p,
53
- repetition_penalty=1.1, # If you want to add a penalty
54
  )
55
-
56
- # Stream the response
57
- partial_response = ""
58
  stream = client.text_generation(
59
- prompt,
60
  stream=True,
61
  details=True,
62
- **generation_params
63
  )
64
- for chunk in stream:
65
- if chunk.token.special:
 
 
66
  continue
67
- partial_response += chunk.token.text
68
- # Yield partial chunks so user sees a "streaming" effect
69
- yield partial_response
70
-
71
-
72
- ###############################################################################
73
- # BUILD THE GRADIO INTERFACE
74
- ###############################################################################
75
 
76
  with gr.Blocks(theme="soft") as demo:
77
- # Top Title or Branding
78
- gr.Markdown(
79
- """
80
- <h1 style="text-align:center; margin-bottom: 5px;">
81
- <b>balianone.com</b>
82
- </h1>
83
- <h3 style="text-align:center; margin-top: 0px;">
84
- Chat with DeepSeek-R1
85
- </h3>
86
- """,
87
- elem_id="title"
88
- )
89
 
90
  with gr.Row():
91
- # Left Column (System message, HF API key, sliders)
92
- with gr.Column(scale=1, min_width=250):
93
- system_message = gr.Textbox(
94
- label="System message",
95
- value="You are a friendly Chatbot created by Bali"
96
- )
97
- user_api_key = gr.Textbox(
98
- label="HF API Key (optional)",
99
- type="password",
100
- placeholder="Leave blank to use open-source DeepSeek key"
101
- )
102
-
103
- gr.Markdown(
104
- "If error occurs, you can set your own API key. You’ll get R1 premium access."
105
- )
106
-
107
- max_tokens = gr.Slider(
108
- minimum=1,
109
- maximum=4000,
110
- step=1,
111
- value=4000,
112
- label="Max new tokens"
113
- )
114
- temperature = gr.Slider(
115
- minimum=0.1,
116
- maximum=4.0,
117
- value=0.7,
118
- step=0.1,
119
- label="Temperature"
120
- )
121
- top_p = gr.Slider(
122
- minimum=0.1,
123
- maximum=1.0,
124
- value=0.95,
125
- step=0.01,
126
- label="Top-p (nucleus sampling)"
127
- )
128
-
129
- # Right Column (Chat interface)
130
- with gr.Column(scale=3):
131
- chatbot = gr.ChatInterface(
132
- fn=chat_with_deepseek,
133
- # Additional inputs that feed into your chat function:
134
- additional_inputs=[system_message, user_api_key, max_tokens, temperature, top_p],
135
-
136
- # Use the newer messages format to avoid warnings
137
- type="messages",
138
-
139
- # You can customize how the chat area looks here as well:
140
- chatbot_base_url=None, # Keep local
141
- height=550,
142
- title="DeepSeek-R1 Assistant"
143
- )
144
-
145
- # Launch the app
146
- demo.launch()
147
-
148
-
149
 
 
150
 
151
 
152
 
 
11
  import docx
12
  import ntpath
13
 
 
 
 
 
14
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
15
 
16
# Initialize clients.
# The Hugging Face token is read from the environment; it is None when
# HF_API_KEY is unset, in which case the client is created without an
# explicit token.
API_KEY = os.getenv("HF_API_KEY")
client = InferenceClient(token=API_KEY)
19
 
20
def process_file(filepath):
    """Extract text from an uploaded file (image, PDF, or .docx).

    Args:
        filepath: Path to the uploaded file, or a falsy value when no
            file was attached.

    Returns:
        A labelled text block ("IMAGE CONTENT:", "PDF CONTENT:", or
        "DOCUMENT CONTENT:"), "" when no file was given,
        "Unsupported file type" for unrecognised extensions, or
        "Error reading file" when extraction raises.
    """
    if not filepath:
        return ""

    ext = os.path.splitext(filepath)[1].lower()

    try:
        if ext in ['.png', '.jpg', '.jpeg']:
            # OCR the image via pytesseract.
            image = Image.open(filepath)
            text = pytesseract.image_to_string(image)
            return f"IMAGE CONTENT:\n{text}"
        elif ext == '.pdf':
            pdf_reader = PyPDF2.PdfReader(filepath)
            # BUGFIX: extract_text() may return None for pages with no
            # extractable text; substitute "" so join() cannot raise
            # TypeError and fail the whole document.
            text = "\n".join((page.extract_text() or "") for page in pdf_reader.pages)
            return f"PDF CONTENT:\n{text}"
        elif ext == '.docx':
            doc = docx.Document(filepath)
            text = "\n".join(para.text for para in doc.paragraphs)
            return f"DOCUMENT CONTENT:\n{text}"
        else:
            return "Unsupported file type"
    except Exception as e:
        # Best-effort: log and return a readable placeholder instead of
        # crashing the chat request.
        print(f"File processing error: {e}")
        return "Error reading file"
45
+
46
def chat(message, history, filepath):
    """Stream a model reply to *message*, optionally grounded in an
    uploaded file.

    Yields successively longer prefixes of the reply so Gradio renders
    a streaming effect.  *history* is supplied by gr.ChatInterface but
    is not folded into the prompt.
    """
    # Pull text out of the attachment, if one was provided.
    file_content = ""
    if filepath:
        file_content = process_file(filepath)

    full_prompt = f"""
    {file_content}

    User Message: {message}

    Please respond considering both the message and any attached documents:
    """

    # Fixed sampling parameters, applied to every request.
    gen_params = {
        "temperature": 0.7,
        "max_new_tokens": 2000,
        "top_p": 0.95,
        "repetition_penalty": 1.2,
    }

    stream = client.text_generation(
        full_prompt,
        stream=True,
        details=True,
        **gen_params
    )

    reply = ""
    for chunk in stream:
        # Skip tokens the backend marks as special; only surface text.
        if chunk.token.special:
            continue
        reply += chunk.token.text
        yield reply
 
 
 
 
 
 
78
 
79
with gr.Blocks(theme="soft") as demo:
    # Page header.
    gr.Markdown("# DeepSeek-R1 Assistant with File Support")
    gr.Markdown("Upload images, PDFs, or docs and chat about them!")

    # Attachment picker; its selected path is forwarded to the chat
    # function through additional_inputs.
    # NOTE(review): the scrape loses indentation, so whether the chat
    # widget sits inside this row is assumed — confirm against the app.
    with gr.Row():
        upload = gr.File(label="Upload File (PDF/Image/Doc)", type="filepath")

    chat_ui = gr.ChatInterface(
        fn=chat,
        type="messages",
        additional_inputs=[upload],
        examples=[]
    )

demo.launch()
94
 
95
 
96