Zai committed · Commit fd925d9 · 1 Parent(s): bf58585

feat: project init

.gitignore ADDED
@@ -0,0 +1,2 @@
+ .env
+ .env.local
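Both new rules keep local secrets out of version control; app.py below reads OPENAI_API_KEY through python-dotenv. A minimal sketch of such an untracked .env file (the value is a placeholder, not a real credential):

    # .env — ignored by the rule above, loaded by load_dotenv() in app.py
    OPENAI_API_KEY=sk-your-key-here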
.idea/.gitignore ADDED
@@ -0,0 +1,8 @@
+ # Default ignored files
+ /shelf/
+ /workspace.xml
+ # Editor-based HTTP Client requests
+ /httpRequests/
+ # Datasource local storage ignored files
+ /dataSources/
+ /dataSources.local.xml
.idea/cs-demo.iml ADDED
@@ -0,0 +1,10 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <module type="PYTHON_MODULE" version="4">
+   <component name="NewModuleRootManager">
+     <content url="file://$MODULE_DIR$">
+       <excludeFolder url="file://$MODULE_DIR$/venv" />
+     </content>
+     <orderEntry type="jdk" jdkName="/opt/anaconda3" jdkType="Python SDK" />
+     <orderEntry type="sourceFolder" forTests="false" />
+   </component>
+ </module>
.idea/inspectionProfiles/profiles_settings.xml ADDED
@@ -0,0 +1,6 @@
+ <component name="InspectionProjectProfileManager">
+   <settings>
+     <option name="USE_PROJECT_PROFILE" value="false" />
+     <version value="1.0" />
+   </settings>
+ </component>
.idea/misc.xml ADDED
@@ -0,0 +1,7 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <project version="4">
+   <component name="Black">
+     <option name="sdkName" value="/opt/anaconda3" />
+   </component>
+   <component name="ProjectRootManager" version="2" project-jdk-name="/opt/anaconda3" project-jdk-type="Python SDK" />
+ </project>
.idea/modules.xml ADDED
@@ -0,0 +1,8 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <project version="4">
+   <component name="ProjectModuleManager">
+     <modules>
+       <module fileurl="file://$PROJECT_DIR$/.idea/cs-demo.iml" filepath="$PROJECT_DIR$/.idea/cs-demo.iml" />
+     </modules>
+   </component>
+ </project>
.idea/vcs.xml ADDED
@@ -0,0 +1,6 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <project version="4">
+   <component name="VcsDirectoryMappings">
+     <mapping directory="" vcs="Git" />
+   </component>
+ </project>
app.py CHANGED
@@ -1,11 +1,14 @@
  import gradio as gr
- from huggingface_hub import InferenceClient
+ from openai import OpenAI
+ import os
+ from dotenv import load_dotenv
 
- """
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
- """
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
+ load_dotenv(verbose=True)
 
+ # Create OpenAI client (make sure OPENAI_API_KEY is set in your environment)
+ client = OpenAI(
+     api_key=os.getenv("OPENAI_API_KEY"),
+ )
 
  def respond(
      message,
@@ -17,32 +20,35 @@ def respond(
  ):
      messages = [{"role": "system", "content": system_message}]
 
-     for val in history:
-         if val[0]:
-             messages.append({"role": "user", "content": val[0]})
-         if val[1]:
-             messages.append({"role": "assistant", "content": val[1]})
+     # Convert Gradio's history into OpenAI's format
+     for user_msg, assistant_msg in history:
+         if user_msg:
+             messages.append({"role": "user", "content": user_msg})
+         if assistant_msg:
+             messages.append({"role": "assistant", "content": assistant_msg})
 
      messages.append({"role": "user", "content": message})
 
      response = ""
 
-     for message in client.chat_completion(
-         messages,
+     # Stream responses from OpenAI
+     stream = client.chat.completions.create(
+         model="gpt-4o-mini",  # You can change this to another available model
+         messages=messages,
          max_tokens=max_tokens,
-         stream=True,
          temperature=temperature,
          top_p=top_p,
-     ):
-         token = message.choices[0].delta.content
-
-         response += token
-         yield response
+         stream=True
+     )
+
+     for chunk in stream:
+         if chunk.choices[0].delta.content:
+             token = chunk.choices[0].delta.content
+             response += token
+             yield response
 
 
- """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- """
+ # Gradio Chat Interface
  demo = gr.ChatInterface(
      respond,
      additional_inputs=[
@@ -59,6 +65,5 @@ demo = gr.ChatInterface(
      ],
  )
 
-
  if __name__ == "__main__":
      demo.launch()
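With this change respond is a generator that yields the accumulated reply as each chunk arrives, so the streaming path can be smoke-tested without launching the UI. A minimal sketch, assuming app.py is importable, OPENAI_API_KEY is set, and the parameters elided between the hunks keep the keyword names used in the body (the history pair and sampling values are made-up examples):

    # smoke_test.py — illustrative only; importing app builds the interface
    # but does not launch it, since demo.launch() is guarded by __main__.
    from app import respond

    history = [("Hi", "Hello! How can I help?")]  # (user, assistant) pairs as Gradio passes them

    final = ""
    for partial in respond(
        message="Summarize our exchange in one sentence.",
        history=history,
        system_message="You are a friendly Chatbot.",
        max_tokens=64,
        temperature=0.7,
        top_p=0.95,
    ):
        final = partial  # each yield is the whole response so far, not just the new token

    print(final)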
requirements.txt CHANGED
@@ -1 +1,3 @@
- huggingface_hub==0.25.2
+ huggingface_hub
+ gradio
+ openai
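Dropping the exact pin (huggingface_hub==0.25.2) means fresh builds resolve whatever versions pip finds at install time. If reproducible builds matter, each line can be re-pinned; the versions below are illustrative placeholders, not ones verified against this Space:

    huggingface_hub==0.25.2
    gradio==4.44.0
    openai==1.40.0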