baxin commited on
Commit
0d0a51f
Β·
0 Parent(s):

initial commit

Browse files
Files changed (4) hide show
  1. .github/workflows/deploy_space.yml +30 -0
  2. README.md +20 -0
  3. app.py +189 -0
  4. requirements.txt +4 -0
.github/workflows/deploy_space.yml ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Deploy to Hugging Face Spaces
2
+
3
+ on:
4
+ push:
5
+ branches:
6
+ - main # when main branch is pushed
7
+
8
+ jobs:
9
+ deploy:
10
+ runs-on: ubuntu-latest
11
+ env:
12
+ HF_SPACE_REPO: 'baxin/basic-chat-bot'
13
+ steps:
14
+ - name: Checkout code
15
+ uses: actions/checkout@v4
16
+ with:
17
+ fetch-depth: 0
18
+
19
+ - name: Set up Git
20
+ run: |
21
+ git config --global user.email "action@github.com"
22
+ git config --global user.name "GitHub Action"
23
+
24
+ - name: Push to Hugging Face Space
25
+ env:
26
+ HF_TOKEN: ${{ secrets.HF_TOKEN }} # use hf_token from GitHub secrets
27
+ run: |
28
+ # add huggingface space as remote
29
+ git remote add space https://baxin:${HF_TOKEN}@huggingface.co/spaces/${{ env.HF_SPACE_REPO }}
30
+ git push --force space main
README.md ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Basic Chat Bot
3
+ emoji: πŸš€
4
+ colorFrom: red
5
+ colorTo: red
6
+ sdk: docker
7
+ app_port: 8501
8
+ tags:
9
+ - streamlit
10
+ pinned: false
11
+ short_description: Basic chat bot powered by Cerebras models
12
+ license: mit
13
+ ---
14
+
15
+ # Welcome to Streamlit!
16
+
17
+ Edit `app.py` to customize this app to your heart's desire. :heart:
18
+
19
+ If you have any questions, check out our [documentation](https://docs.streamlit.io) and [community
20
+ forums](https://discuss.streamlit.io).
app.py ADDED
@@ -0,0 +1,189 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import time
3
+ import os
4
+
5
+ # Attempt to import Cerebras SDK and specific error classes
6
+ try:
7
+ from cerebras.cloud.sdk import Cerebras
8
+ from cerebras.cloud.sdk.errors import APIError, APIConnectionError, AuthenticationError
9
+ CEREBRAS_SDK_AVAILABLE = True
10
+ except ImportError:
11
+ CEREBRAS_SDK_AVAILABLE = False
12
+ # Define dummy classes if SDK is not available, so the rest of the code doesn't break
13
+ class Cerebras: pass
14
+ class APIError(Exception): pass
15
+ class APIConnectionError(APIError): pass
16
+ class AuthenticationError(APIError): pass
17
+
18
+ # --- Configuration ---
19
+ MODELS = {
20
+ "llama3.1-8b": {"name": "Llama3.1-8b", "tokens": 8192, "developer": "Meta"},
21
+ "llama-3.3-70b": {"name": "Llama-3.3-70b", "tokens": 8192, "developer": "Meta"},
22
+ "llama-4-scout-17b-16e-instruct": {"name": "Llama4 Scout", "tokens": 8192, "developer": "Meta"},
23
+ "qwen-3-32b":{"name": "Qwen 3 32B", "tokens": 8192, "developer": "Qwen"},
24
+ }
25
+
26
+ # --- Helper Functions (Actual API Interaction) ---
27
+ def get_cebras_response(api_key, model_id, current_prompt, chat_history_for_api):
28
+ """
29
+ Function to get a response from the Cerebras API.
30
+ """
31
+ if not CEREBRAS_SDK_AVAILABLE:
32
+ return "Error: Cerebras SDK is not installed. Please run `pip install cerebras-cloud-sdk`."
33
+
34
+ if not api_key:
35
+ return "Error: Cerebras API Key not provided. Please enter it in the sidebar."
36
+
37
+ model_details = MODELS.get(model_id)
38
+ if not model_details:
39
+ return f"Error: Model '{model_id}' not found in local configuration."
40
+
41
+ try:
42
+ client = Cerebras(api_key=api_key)
43
+
44
+ # Construct the messages payload for the API
45
+ # The API expects the full conversation history, including the latest prompt.
46
+ messages_payload = chat_history_for_api + [{"role": "user", "content": current_prompt}]
47
+
48
+ st.info(f"πŸš€ Sending request to Cerebras API with model: {model_id}...")
49
+ # For non-streaming:
50
+ # completion = client.chat.completions.create(
51
+ # model=model_id,
52
+ # messages=messages_payload,
53
+ # # You might want to add other parameters like temperature, max_tokens, etc.
54
+ # # max_tokens=model_details.get("tokens") # Example
55
+ # )
56
+ # return completion.choices[0].message.content
57
+
58
+ # For streaming:
59
+ full_response_content = ""
60
+ stream = client.chat.completions.create(
61
+ model=model_id,
62
+ messages=messages_payload,
63
+ stream=True,
64
+ # max_tokens=model_details.get("tokens") # Optional: manage max output tokens
65
+ )
66
+ for chunk in stream:
67
+ if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content:
68
+ content_part = chunk.choices[0].delta.content
69
+ yield content_part # Yield each part for streaming in Streamlit UI
70
+
71
+ except AuthenticationError:
72
+ return "Error: Authentication failed. Please check your Cerebras API Key."
73
+ except APIConnectionError as e:
74
+ return f"Error: Could not connect to Cerebras API. Details: {e}"
75
+ except APIError as e:
76
+ return f"Error: Cerebras API returned an error. Status: {e.status_code}, Message: {e.message}"
77
+ except Exception as e:
78
+ return f"An unexpected error occurred: {e}"
79
+
80
+ # --- Streamlit App ---
81
+ st.set_page_config(page_title="Cerebras Chatbot", page_icon="πŸ€–")
82
+
83
+ st.title("πŸ€– Cerebras Powered Chatbot")
84
+
85
+ if not CEREBRAS_SDK_AVAILABLE:
86
+ st.error(
87
+ "The Cerebras SDK is not installed. Please install it by running `pip install cerebras-cloud-sdk` in your terminal and restart the app.",
88
+ icon="🚨"
89
+ )
90
+ st.stop()
91
+
92
+ st.caption("A Streamlit application for interacting with Cerebras models via `cerebras.cloud.sdk`.")
93
+
94
+ # --- Sidebar for Configuration ---
95
+ with st.sidebar:
96
+ st.header("βš™οΈ Configuration")
97
+
98
+ # API Key Input
99
+ # You can also set this as an environment variable CEREBRAS_API_KEY
100
+ env_api_key = os.getenv("CEREBRAS_API_KEY")
101
+ cebras_api_key = st.text_input(
102
+ "πŸ”‘ Cerebras API Key",
103
+ type="password",
104
+ value=env_api_key if env_api_key else "",
105
+ help="Enter your Cerebras API key. You can also set the CEREBRAS_API_KEY environment variable."
106
+ )
107
+ if not cebras_api_key and not env_api_key:
108
+ st.warning("Please enter your Cerebras API Key to use the chatbot.")
109
+ elif not cebras_api_key and env_api_key:
110
+ cebras_api_key = env_api_key # Use env var if input is cleared but env var exists
111
+
112
+ # Model Selection
113
+ model_options = list(MODELS.keys())
114
+ selected_model_id = st.selectbox(
115
+ "🧠 Select Model",
116
+ options=model_options,
117
+ format_func=lambda model_id: MODELS[model_id]["name"],
118
+ help="Choose the Cerebras model you want to interact with."
119
+ )
120
+
121
+ st.markdown("---")
122
+ if selected_model_id:
123
+ model_info = MODELS[selected_model_id]
124
+ st.markdown(f"**Model Details:**")
125
+ st.markdown(f"- **Name:** {model_info['name']}")
126
+ st.markdown(f"- **Max Tokens (Context):** {model_info['tokens']}")
127
+ st.markdown(f"- **Developer:** {model_info['developer']}")
128
+ st.markdown("---")
129
+ st.markdown("ℹ️ This application uses the `cerebras.cloud.sdk`.")
130
+
131
+
132
+ # --- Chat Interface ---
133
+
134
+ # Initialize chat history in session state
135
+ if "messages" not in st.session_state:
136
+ st.session_state.messages = []
137
+
138
+ # Display previous messages
139
+ for message in st.session_state.messages:
140
+ with st.chat_message(message["role"]):
141
+ st.markdown(message["content"])
142
+
143
+ # Chat input
144
+ if prompt := st.chat_input("What would you like to ask?"):
145
+ if not cebras_api_key:
146
+ st.error("🚨 Please enter your Cerebras API Key in the sidebar before sending a message.")
147
+ elif not selected_model_id:
148
+ st.error("πŸ€” Please select a model from the sidebar.")
149
+ else:
150
+ # Add user message to chat history and display it
151
+ st.session_state.messages.append({"role": "user", "content": prompt})
152
+ with st.chat_message("user"):
153
+ st.markdown(prompt)
154
+
155
+ # Get assistant response using the SDK
156
+ with st.chat_message("assistant"):
157
+ message_placeholder = st.empty()
158
+ full_response_content = ""
159
+
160
+ # Prepare chat history for the API call (current session_state.messages is suitable)
161
+ # The API call itself will receive the prompt as part of the messages list
162
+ try:
163
+ response_stream = get_cebras_response(
164
+ cebras_api_key,
165
+ selected_model_id,
166
+ prompt, # Pass current prompt separately for clarity in function
167
+ st.session_state.messages[:-1] # Pass history *before* current prompt
168
+ )
169
+
170
+ if isinstance(response_stream, str): # Indicates an error string was returned
171
+ full_response_content = response_stream
172
+ message_placeholder.error(full_response_content)
173
+ else: # It's a generator for streaming
174
+ for chunk_content in response_stream:
175
+ full_response_content += chunk_content
176
+ message_placeholder.markdown(full_response_content + "β–Œ")
177
+ message_placeholder.markdown(full_response_content)
178
+
179
+ except Exception as e: # Catch any other unexpected errors from the generator
180
+ full_response_content = f"An unexpected error occurred during streaming: {str(e)}"
181
+ message_placeholder.error(full_response_content)
182
+
183
+ # Add assistant response (or error) to chat history
184
+ st.session_state.messages.append({"role": "assistant", "content": full_response_content})
185
+
186
+ # Add a button to clear chat history
187
+ if st.sidebar.button("Clear Chat History"):
188
+ st.session_state.messages = []
189
+ st.rerun()
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ streamlit
2
+ cerebras_cloud_sdk
3
+ openai
4
+ python-dotenv