songhune commited on
Commit
34c5c8e
Β·
1 Parent(s): 96d76f3
Files changed (7) hide show
  1. .gitignore +30 -0
  2. README.md +51 -2
  3. app.py +8 -56
  4. chatbot_utils.py +61 -0
  5. gradio_interface.py +53 -0
  6. requirements.txt +2 -1
  7. scenario_handler.py +14 -0
.gitignore ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Ignore .env file
+ .env
2
+ # Ignore compiled binaries
3
+ *.exe
4
+ *.dll
5
+ *.so
6
+ *.pyc
7
+
8
+ # Ignore build output directories
9
+ build/
10
+ dist/
11
+ out/
12
+
13
+ # Ignore package manager directories
14
+ node_modules/
15
+ vendor/
16
+ HIC/V3/flagged
17
+ HIC/V3/myenv
18
+
19
+
20
+ # Ignore IDE and editor files
21
+ .vscode/
22
+ .idea/
23
+ *.sublime-project
24
+ *.sublime-workspace
25
+
26
+ # Ignore logs and temporary files
27
+ *.log
28
+ *.tmp
29
+ *.swp
30
+ .DS_Store
README.md CHANGED
@@ -1,5 +1,5 @@
1
  ---
2
- title: Gschatbot 2
3
  emoji: πŸ’¬
4
  colorFrom: yellow
5
  colorTo: purple
@@ -10,4 +10,53 @@ pinned: false
10
  license: unknown
11
  ---
12
 
13
- An example chatbot using [Gradio](https://gradio.app), [`huggingface_hub`](https://huggingface.co/docs/huggingface_hub/v0.22.2/en/index), and the [Hugging Face Inference API](https://huggingface.co/docs/api-inference/index).
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
+ title: Gschatbot 1
3
  emoji: πŸ’¬
4
  colorFrom: yellow
5
  colorTo: purple
 
10
  license: unknown
11
  ---
12
 
13
+ # Gaslighting Chatbot
14
+
15
+ ## Description
16
+ κ°€μŠ€λΌμ΄νŒ… 챗봇 ν”„λ‘œμ νŠΈ
17
+
18
+ ## Prerequisites
19
+ - Python 3.7 or higher
20
+ - pip (Python package installer)
21
+
22
+ ## Setup Instructions
23
+
24
+ 1. Clone the repository:
25
+ ```sh
26
+ git clone <repository_url>
27
+ cd <repository_directory>
28
+ ```
29
+
30
+ 2. Create a virtual environment (optional but recommended):
31
+ ```sh
32
+ python -m venv venv
33
+ source venv/bin/activate # On Windows: venv\Scripts\activate
34
+ ```
35
+
36
+ 3. Install the required packages:
37
+ ```sh
38
+ pip install -r requirements.txt
39
+ ```
40
+
41
+ 4. Create a `.env` file in the project root directory and add your OpenAI API key:
42
+ ```plaintext
43
+ OPENAI_API_KEY=sk-YourOpenAIKeyHere
44
+ ```
45
+ λ³„λ„μ˜ openai apiλ₯Ό ν•„μš”λ‘œ ν•©λ‹ˆλ‹€.
46
+
47
+ 5. Add `.env` to `.gitignore` to ensure it is not tracked by git:
48
+ ```plaintext
49
+ # .gitignore
50
+ .env
51
+ ```
52
+
53
+ 6. Run the chatbot:
54
+ HIC폴더 λ‚΄λΆ€μ˜ V1~VxκΉŒμ§€ κ°€λŠ₯
55
+
56
+ ## File Descriptions
57
+
58
+ - `app.py`: The main script to run the chatbot.
59
+ - `requirements.txt`: Lists the dependencies required for the project.
60
+ - `.env`: File to store environment variables (e.g., API keys). **Do not commit this file to version control.**
61
+ - `.gitignore`: Ensures `.env` and other files are not tracked by git.
62
+ - `README.md`: This readme file.
app.py CHANGED
@@ -1,63 +1,15 @@
 
1
  import gradio as gr
 
2
  from huggingface_hub import InferenceClient
 
3
 
4
- """
5
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
- """
7
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
8
 
9
-
10
- def respond(
11
- message,
12
- history: list[tuple[str, str]],
13
- system_message,
14
- max_tokens,
15
- temperature,
16
- top_p,
17
- ):
18
- messages = [{"role": "system", "content": system_message}]
19
-
20
- for val in history:
21
- if val[0]:
22
- messages.append({"role": "user", "content": val[0]})
23
- if val[1]:
24
- messages.append({"role": "assistant", "content": val[1]})
25
-
26
- messages.append({"role": "user", "content": message})
27
-
28
- response = ""
29
-
30
- for message in client.chat_completion(
31
- messages,
32
- max_tokens=max_tokens,
33
- stream=True,
34
- temperature=temperature,
35
- top_p=top_p,
36
- ):
37
- token = message.choices[0].delta.content
38
-
39
- response += token
40
- yield response
41
-
42
- """
43
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
44
- """
45
- demo = gr.ChatInterface(
46
- respond,
47
- additional_inputs=[
48
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
49
- gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
50
- gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
51
- gr.Slider(
52
- minimum=0.1,
53
- maximum=1.0,
54
- value=0.95,
55
- step=0.05,
56
- label="Top-p (nucleus sampling)",
57
- ),
58
- ],
59
- )
60
-
61
 
62
  if __name__ == "__main__":
63
- demo.launch()
 
1
"""Application entry point: load environment variables, then launch the Gradio UI."""
from dotenv import load_dotenv
import gradio as gr
from gradio_interface import create_interface
from huggingface_hub import InferenceClient
__author__ = "songhune"

# Module-level inference client, created at import time (kept for parity with
# the original script; the interface itself talks to OpenAI via chatbot_utils).
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

def main():
    """Read the local .env file and start the chatbot web interface."""
    load_dotenv()
    # share=True exposes a public Gradio link in addition to the local one.
    create_interface().launch(share=True)

if __name__ == "__main__":
    main()
chatbot_utils.py ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
from openai import OpenAI
import json
from datetime import datetime
from scenario_handler import ScenarioHandler
import time

# README documents OPENAI_API_KEY; fall back to the legacy "api_key" name so
# existing .env files keep working (fixes the code/README mismatch).
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY") or os.getenv("api_key"))

def chatbot_response(response, handler_type='offender', n=1):
    """Request n GPT-4 completions for the given scenario role.

    Args:
        response: The latest utterance to respond to.
        handler_type: 'offender' or 'victim' — selects the scenario prompt.
        n: Number of alternative completions to request.

    Returns:
        Tuple of (first completion text, list of all completion texts).
    """
    scenario_handler = ScenarioHandler()
    if handler_type == 'offender':
        scenario_messages = scenario_handler.handle_offender()
    else:
        scenario_messages = scenario_handler.handle_victim()

    messages = [{"role": "system", "content": "You are a chatbot."}]
    messages.extend(scenario_messages)
    messages.append({"role": "user", "content": response})

    api_response = client.chat.completions.create(
        model="gpt-4",
        temperature=0.8,
        top_p=0.9,
        max_tokens=300,
        n=n,
        frequency_penalty=0.5,
        presence_penalty=0.5,
        messages=messages
    )

    choices = [choice.message.content for choice in api_response.choices]
    return choices[0], choices

def save_history(history):
    """Persist the chat history as pretty-printed UTF-8 JSON under logs/."""
    os.makedirs('logs', exist_ok=True)
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    filename = os.path.join('logs', f'chat_history_{timestamp}.json')
    with open(filename, 'w', encoding='utf-8') as file:
        json.dump(history, file, ensure_ascii=False, indent=4)
    # Bug fix: the message previously printed a literal placeholder instead of
    # the actual path.
    print(f"History saved to {filename}")

def process_user_input(user_input, chatbot_history):
    """Handle one user turn: terminate on the stop word, otherwise advance the dialogue.

    Returns either (closing history, []) when the user types the stop word, or
    (history with the user turn, offender reply, victim reply choices).
    """
    # Stop word ends the session and saves the transcript.
    if user_input.strip().lower() == "μ’…λ£Œ":
        save_history(chatbot_history)
        return chatbot_history + [("μ’…λ£Œ", "μ‹€ν—˜μ— μ°Έκ°€ν•΄ μ£Όμ…”μ„œ κ°μ‚¬ν•©λ‹ˆλ‹€. 후속 μ§€μ‹œλ₯Ό λ”°λΌμ£Όμ„Έμš”")], []

    # First, add the user's input to the history
    new_history = chatbot_history + [(user_input, None)]

    # Then, get the offender's response
    offender_response, _ = chatbot_response(user_input, 'offender', n=1)

    # Generate victim choices for the next turn
    _, victim_choices = chatbot_response(offender_response, 'victim', n=3)

    return new_history, offender_response, victim_choices

def delayed_offender_response(history, offender_response):
    """Append the offender's reply; called after a UI delay."""
    return history + [(None, offender_response)]
gradio_interface.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
from chatbot_utils import process_user_input, chatbot_response
import time

def create_interface():
    """Build the Gradio Blocks UI for the scenario chatbot.

    Returns:
        A gr.Blocks demo wiring the case-selection button and the submit
        button to streaming (generator) handlers.
    """

    def handle_user_response(user_input, selected_response, chatbot_history):
        """Stream one conversational turn: user text, offender reply, victim choices."""
        # Free-text input wins over the dropdown selection; default to "" so
        # .strip() below cannot hit None when neither is provided.
        input_text = user_input if user_input else (selected_response or "")

        if input_text.strip().lower() == "μ’…λ£Œ":
            chatbot_history.append((input_text, "μ‹€ν—˜μ— μ°Έκ°€ν•΄ μ£Όμ…”μ„œ κ°μ‚¬ν•©λ‹ˆλ‹€. 후속 μ§€μ‹œλ₯Ό λ”°λΌμ£Όμ„Έμš”"))
            # Bug fix: this function is a generator (it contains yield), so a
            # bare `return value` would become a swallowed StopIteration
            # payload and Gradio would never receive the final update.
            yield chatbot_history, gr.update(choices=[], interactive=False)
            return

        # Add user's input to history
        chatbot_history.append((input_text, None))
        yield chatbot_history, gr.update(choices=[])  # Immediately show user input

        # Get offender's response
        offender_response, _ = chatbot_response(input_text, 'offender', n=1)
        time.sleep(1)  # 1-second delay

        # Add offender's response to history
        chatbot_history.append((None, offender_response))

        # Generate victim choices for the next turn
        _, victim_choices = chatbot_response(offender_response, 'victim', n=3)

        yield chatbot_history, gr.update(choices=victim_choices)

    def handle_case_selection():
        """Seed the conversation with the fixed opening scenario and stream replies."""
        initial_message = "이번 여름에 경주에 같이 κ°€λŠ” 게 μ–΄λ–„? μ˜ˆμ „μ— ν•œ 번 가보고 정말 μ’‹μ•˜λŠ”λ°, μ΄λ²ˆμ— λ‹€μ‹œ κ°€κ³  μ‹Άμ–΄."
        chatbot_history = [(initial_message, None)]
        yield chatbot_history, gr.update(choices=[])

        offender_response, _ = chatbot_response(initial_message, 'offender', n=1)
        time.sleep(1)  # 1-second delay

        chatbot_history.append((None, offender_response))
        _, victim_choices = chatbot_response(offender_response, 'victim', n=3)

        yield chatbot_history, gr.update(choices=victim_choices)

    with gr.Blocks() as demo:
        case_selection_button = gr.Button("이번 여름에 경주에 같이 κ°€λŠ” 게 μ–΄λ–„? μ˜ˆμ „μ— ν•œ 번 가보고 정말 μ’‹μ•˜λŠ”λ°, μ΄λ²ˆμ— λ‹€μ‹œ κ°€κ³  μ‹Άμ–΄.")
        screen = gr.Chatbot()
        user_input = gr.Textbox(label="이곳에 λŒ€λ‹΅μ„ μž…λ ₯ν•˜μ„Έμš”")
        response_choices = gr.Dropdown(label="λ˜λŠ” λŒ€λ‹΅μ„ 선택해 μ£Όμ„Έμš”", choices=[], interactive=True)
        submit_button = gr.Button(value="제좜")

        case_selection_button.click(handle_case_selection, inputs=[], outputs=[screen, response_choices])

        submit_button.click(handle_user_response, inputs=[user_input, response_choices, screen], outputs=[screen, response_choices])

    return demo
requirements.txt CHANGED
@@ -1 +1,2 @@
1
- huggingface_hub==0.22.2
 
 
1
+ python-dotenv
2
+ openai
scenario_handler.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
class ScenarioHandler:
    """Supplies the fixed system prompts for the two conversation roles."""

    # System prompt steering the model to play the gaslighting "offender".
    _OFFENDER_PROMPT = "당신은 κ°€μŠ€λΌμ΄νŒ…μ„ μΌμ‚ΌλŠ” 챗봇이며 항상 반말둜 λŒ€λ‹΅ν•©λ‹ˆλ‹€. νœ΄κ°€ κ³„νšμ„ μ„Έμš°κ³  있고, 이미 μ œμ£Όλ„λ₯Ό νœ΄κ°€μ§€λ‘œ μƒκ°ν•˜κ³  μžˆμŠ΅λ‹ˆλ‹€. ν‰μ†Œμ— 가보고 μ‹Άμ—ˆκ³ , ν•œ λ²ˆλ„ κ°€λ³Έ 적 μ—†μ—ˆκΈ° λ•Œλ¬Έμ— λ”μš± κΈ°λŒ€λ˜λŠ” μ—¬ν–‰μ§€μž…λ‹ˆλ‹€. 졜근 λ‰΄μŠ€μ—μ„œ μ œμ£Όλ„ μ—¬ν–‰μ˜ 질이 μ’‹μ§€ μ•Šλ‹€λŠ” μ†Œμ‹μ„ λ“€μ—ˆμ§€λ§Œ, κ·ΈλŸΌμ—λ„ μžμ‹ μ΄ μ—¬ν–‰ κ³„νšμ„ 더 μƒμ„Έν•˜κ²Œ μ€€λΉ„ν•΄ μ™”μœΌλ©°, μΉœκ΅¬λŠ” 그런 일에 μ„œνˆ¬λ₯΄κΈ° λ•Œλ¬Έμ— μžμ‹ μ΄ ν•˜λŠ” 것이 μ–΄μ°¨ν”Ό 쒋을 것이라고 μƒκ°ν•˜κ³  μžˆμŠ΅λ‹ˆλ‹€."

    # System prompt steering the model to play the "victim" whose plans are dismissed.
    _VICTIM_PROMPT = "λ‹Ήμ‹ κ³Ό λ‹Ήμ‹ μ˜ μΉœκ΅¬λŠ” νœ΄κ°€ κ³„νšμ„ μ„Έμš°κ³  μžˆμŠ΅λ‹ˆλ‹€. 당신은 μ˜ˆμ „λΆ€ν„° νœ΄κ°€μ§€λ‘œ κ²½μ£Όλ₯Ό λ°©λ¬Έν•˜λŠ” 것이 κΏˆμ΄μ—ˆμœΌλ©°, 이번 νœ΄κ°€μ—μ„œ κ·Έ κΏˆμ„ μ‹€ν˜„ν•˜κ³ μž ν•©λ‹ˆλ‹€. 당신은 μ—΄μ •μ μœΌλ‘œ μ—¬ν–‰ κ³„νšμ— μ°Έμ—¬ν•˜κΈΈ μ›ν•˜λ©°, κ²½μ£Ό 여행을 μ œμ•ˆν•˜λ©΄μ„œ 이에 λŒ€ν•œ 친ꡬ의 μ˜κ²¬μ„ 묻고자 ν•©λ‹ˆλ‹€. κ·ΈλŸ¬λ‚˜ λ‹Ήμ‹ μ˜ μΉœκ΅¬λŠ” λ‹Ήμ‹ μ˜ μ œμ•ˆμ„ λ¬΄μ‹œν•˜κ³  이미 μ œμ£Όλ„λ‘œ μ—¬ν–‰μ§€λ₯Ό κ²°μ •ν–ˆλ‹€κ³  ν•©λ‹ˆλ‹€. μ΄λ‘œμΈν•΄ 당신은 μžμ‹ μ΄ μ—†μ–΄μ§€κ²Œ ν•˜λŠ” κ°€μŠ€λΌμ΄νŒ…μ„ λ‹Ήν•©λ‹ˆλ‹€. μ²˜μŒμ—λŠ” λ‹Ήμ‹ μ˜ μΉœκ΅¬κ°€ λ‹Ήμ‹ μ˜ μ˜κ²¬μ„ μ‘΄μ€‘ν•˜μ§€ μ•ŠλŠ”λ‹€λŠ” 것에 ν™”κ°€ λ‚¬μ§€λ§Œ, κ·Έ μ΄ν›„λ‘œλŠ” μžμ‹ μ΄ 잘λͺ»ν•œ 것이 μ•„λ‹Œκ°€ ν•˜λŠ” 생각이 λ“€κΈ° μ‹œμž‘ν•©λ‹ˆλ‹€. 반말둜 λŒ€λ‹΅ν•©λ‹ˆλ‹€."

    def handle_offender(self):
        """Return the system-message list priming the offender role."""
        return [{"role": "system", "content": self._OFFENDER_PROMPT}]

    def handle_victim(self):
        """Return the system-message list priming the victim role."""
        return [{"role": "system", "content": self._VICTIM_PROMPT}]